From 3b518a054ad311918dc18c1a3a3b65f26824516c Mon Sep 17 00:00:00 2001 From: n3il Date: Sun, 27 May 2018 16:11:00 -0400 Subject: [PATCH 01/27] Upgrade kappa to 0.7.0 --- requirements.txt | 2 +- zappa/utilities.py | 30 ++++++++++-------------------- 2 files changed, 11 insertions(+), 21 deletions(-) diff --git a/requirements.txt b/requirements.txt index 09ce4e787..3759a2ec0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ future==0.16.0 futures==3.2.0; python_version < '3' hjson==3.0.1 jmespath==0.9.3 -kappa==0.6.0 +kappa==0.7.0 lambda-packages==0.19.0 pip==9.0.1 python-dateutil>=2.6.1, <2.7.0 diff --git a/zappa/utilities.py b/zappa/utilities.py index ca3f58b4a..e10ad5d10 100644 --- a/zappa/utilities.py +++ b/zappa/utilities.py @@ -239,10 +239,17 @@ def add(self, function): if self.filters: self.add_filters(function) + class ExtendedS3EventSource(kappa.event_source.s3.S3EventSource): + # until https://github.com/garnaat/kappa/pull/120 + def _make_notification_id(self, function_name=None): + import uuid + uid = str(uuid.uuid4()).split('-')[0] + return 'Kappa-%s-notification' % uid + event_source_map = { 'dynamodb': kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource, 'kinesis': kappa.event_source.kinesis.KinesisEventSource, - 's3': kappa.event_source.s3.S3EventSource, + 's3': ExtendedS3EventSource, 'sns': ExtendedSnsEventSource, 'events': kappa.event_source.cloudwatch.CloudWatchEventSource } @@ -264,24 +271,7 @@ def autoreturn(self, function_name): funk = PseudoFunction() funk.name = lambda_arn - - # Kappa 0.6.0 requires this nasty hacking, - # hopefully we can remove at least some of this soon. - # Kappa 0.7.0 introduces a whole host over other changes we don't - # really want, so we're stuck here for a little while. 
- - # Related: https://github.com/Miserlou/Zappa/issues/684 - # https://github.com/Miserlou/Zappa/issues/688 - # https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313 - if svc == 's3': - split_arn = lambda_arn.split(':') - arn_front = ':'.join(split_arn[:-1]) - arn_back = split_arn[-1] - ctx.environment = arn_back - funk.arn = arn_front - funk.name = ':'.join([arn_back, target_function]) - else: - funk.arn = lambda_arn + funk.arn = lambda_arn funk._context = ctx @@ -420,4 +410,4 @@ def titlecase_keys(d): """ Takes a dict with keys of type str and returns a new dict with all keys titlecased. """ - return {k.title(): v for k, v in d.items()} \ No newline at end of file + return {k.title(): v for k, v in d.items()} From 8dbe6d81ee16662b9340d07d3d380c1072183b71 Mon Sep 17 00:00:00 2001 From: n3il Date: Tue, 29 May 2018 20:38:28 -0400 Subject: [PATCH 02/27] Attempt at adding to test coverage --- ...3.GetBucketNotificationConfiguration_1.json | 18 ++++++++++++++++++ tests/tests_placebo.py | 4 ++-- 2 files changed, 20 insertions(+), 2 deletions(-) create mode 100644 tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json diff --git a/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json new file mode 100644 index 000000000..8f61a5923 --- /dev/null +++ b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json @@ -0,0 +1,18 @@ +{ + "status_code": 200, + "data": { + "items": [ + { + "description": "Api Key for rdrde6pecd", + "enabled": true, + "stageKeys": [ + "rdrde6pecd/devor" + ], + "lastUpdatedDate": 1470384739, + "createdDate": 1470384739, + "id": "W9QTptojpE3bsg816Rrjh4was83amtaAx5s6NXaAl", + "name": "devor_rdrde6pecd" + } + ] + } +} diff --git a/tests/tests_placebo.py b/tests/tests_placebo.py index 1bea096e7..9a2ce456f 100644 --- 
a/tests/tests_placebo.py +++ b/tests/tests_placebo.py @@ -9,7 +9,7 @@ from zappa.cli import ZappaCLI from zappa.handler import LambdaHandler -from zappa.utilities import (add_event_source, remove_event_source) +from zappa.utilities import (add_event_source, get_event_source_status, remove_event_source) from zappa.core import Zappa @@ -462,8 +462,8 @@ def test_add_event_source(self, session): "s3:ObjectCreated:*" ]} add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) + get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) - # get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) @placebo_session def test_cognito_trigger(self, session): From d476affd5b4e1973146149d0ac4f7b20a58c8e80 Mon Sep 17 00:00:00 2001 From: n3il Date: Wed, 30 May 2018 08:49:57 -0400 Subject: [PATCH 03/27] newline --- zappa/utilities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zappa/utilities.py b/zappa/utilities.py index e10ad5d10..1425d423c 100644 --- a/zappa/utilities.py +++ b/zappa/utilities.py @@ -410,4 +410,4 @@ def titlecase_keys(d): """ Takes a dict with keys of type str and returns a new dict with all keys titlecased. """ - return {k.title(): v for k, v in d.items()} + return {k.title(): v for k, v in d.items()} \ No newline at end of file From df3e791c081c297a31033e829d728fdd94af8f24 Mon Sep 17 00:00:00 2001 From: Matt Land Date: Fri, 24 Aug 2018 11:37:17 -0500 Subject: [PATCH 04/27] . 
--- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 3759a2ec0..23462920a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -19,6 +19,6 @@ six>=1.11.0 toml>=0.9.4 tqdm==4.19.1 troposphere>=1.9.0 -Werkzeug==0.14 +Werkzeug~=0.14 wheel>=0.30.0 wsgi-request-logger==0.4.6 From f4d424dd2bd168b0c2faab7914bd5a695a3c5356 Mon Sep 17 00:00:00 2001 From: Neil Date: Mon, 27 Aug 2018 11:55:14 -0400 Subject: [PATCH 05/27] Upgrade kappa to 0.7.0 --- requirements.txt | 4 +-- ....GetBucketNotificationConfiguration_1.json | 18 ++++++++++++ tests/tests_placebo.py | 4 +-- zappa/utilities.py | 28 ++++++------------- 4 files changed, 31 insertions(+), 23 deletions(-) create mode 100644 tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json diff --git a/requirements.txt b/requirements.txt index 811575d72..6eac9dfee 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,8 +8,8 @@ future==0.16.0 futures==3.2.0; python_version < '3' hjson==3.0.1 jmespath==0.9.3 -kappa==0.6.0 -lambda-packages==0.20.0 +kappa==0.7.0 +lambda-packages==0.19.0 pip>=9.0.1, <=10.1.0 python-dateutil>=2.6.1, <2.7.0 python-slugify==1.2.4 diff --git a/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json new file mode 100644 index 000000000..8f61a5923 --- /dev/null +++ b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json @@ -0,0 +1,18 @@ +{ + "status_code": 200, + "data": { + "items": [ + { + "description": "Api Key for rdrde6pecd", + "enabled": true, + "stageKeys": [ + "rdrde6pecd/devor" + ], + "lastUpdatedDate": 1470384739, + "createdDate": 1470384739, + "id": "W9QTptojpE3bsg816Rrjh4was83amtaAx5s6NXaAl", + "name": "devor_rdrde6pecd" + } + ] + } +} diff --git a/tests/tests_placebo.py b/tests/tests_placebo.py index 
1bea096e7..9a2ce456f 100644 --- a/tests/tests_placebo.py +++ b/tests/tests_placebo.py @@ -9,7 +9,7 @@ from zappa.cli import ZappaCLI from zappa.handler import LambdaHandler -from zappa.utilities import (add_event_source, remove_event_source) +from zappa.utilities import (add_event_source, get_event_source_status, remove_event_source) from zappa.core import Zappa @@ -462,8 +462,8 @@ def test_add_event_source(self, session): "s3:ObjectCreated:*" ]} add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) + get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) - # get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) @placebo_session def test_cognito_trigger(self, session): diff --git a/zappa/utilities.py b/zappa/utilities.py index e13188c42..b30383bb7 100644 --- a/zappa/utilities.py +++ b/zappa/utilities.py @@ -240,10 +240,17 @@ def add(self, function): if self.filters: self.add_filters(function) + class ExtendedS3EventSource(kappa.event_source.s3.S3EventSource): + # until https://github.com/garnaat/kappa/pull/120 + def _make_notification_id(self, function_name=None): + import uuid + uid = str(uuid.uuid4()).split('-')[0] + return 'Kappa-%s-notification' % uid + event_source_map = { 'dynamodb': kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource, 'kinesis': kappa.event_source.kinesis.KinesisEventSource, - 's3': kappa.event_source.s3.S3EventSource, + 's3': ExtendedS3EventSource, 'sns': ExtendedSnsEventSource, 'events': kappa.event_source.cloudwatch.CloudWatchEventSource } @@ -265,24 +272,7 @@ def autoreturn(self, function_name): funk = PseudoFunction() funk.name = lambda_arn - - # Kappa 0.6.0 requires this nasty hacking, - # hopefully we can remove at least some of this soon. 
- # Kappa 0.7.0 introduces a whole host over other changes we don't - # really want, so we're stuck here for a little while. - - # Related: https://github.com/Miserlou/Zappa/issues/684 - # https://github.com/Miserlou/Zappa/issues/688 - # https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313 - if svc == 's3': - split_arn = lambda_arn.split(':') - arn_front = ':'.join(split_arn[:-1]) - arn_back = split_arn[-1] - ctx.environment = arn_back - funk.arn = arn_front - funk.name = ':'.join([arn_back, target_function]) - else: - funk.arn = lambda_arn + funk.arn = lambda_arn funk._context = ctx From f33decf6780036b285285ce565c50040e1cda89e Mon Sep 17 00:00:00 2001 From: Matt Land Date: Mon, 27 Aug 2018 17:01:40 -0500 Subject: [PATCH 06/27] use fork with fix --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ebb612a36..96f477b63 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ future==0.16.0 futures==3.2.0; python_version < '3' hjson==3.0.1 jmespath==0.9.3 -kappa==0.7.0 +-e git+https://github.com/SpareFoot/kappa.git@master#egg=zappa lambda-packages==0.20.0 pip>=9.0.1, <=10.1.0 python-dateutil>=2.6.1, <2.7.0 From 5755b8909f84d0e07f924b1704c522029556a7fe Mon Sep 17 00:00:00 2001 From: Matt Land Date: Mon, 27 Aug 2018 17:17:43 -0500 Subject: [PATCH 07/27] fix setup --- setup.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 720838a08..59fc3dc45 100755 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ README = convert('README.md', 'rst') except ImportError: README = open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r', encoding="utf-8").read() - +dependency_links = [] with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f: if sys.version_info[0] == 2: required = f.read().splitlines() @@ -22,7 +22,11 @@ required = [] for package in f.read().splitlines(): if 'futures' not in 
package: - required.append(package) + if '://' in package: + dependency_links.append(package.replace('-e ', '') + '-0') + required.append(package.split('egg=')[1]) + else: + required.append(package) with open(os.path.join(os.path.dirname(__file__), 'test_requirements.txt')) as f: test_required = f.read().splitlines() @@ -31,6 +35,7 @@ name='zappa', version=__version__, packages=['zappa'], + dependency_links=dependency_links, install_requires=required, tests_require=test_required, test_suite='nose.collector', From bd224e0e774b4190bcfcefd3264a89a5a4e58d0a Mon Sep 17 00:00:00 2001 From: Matt Land Date: Tue, 28 Aug 2018 09:23:13 -0500 Subject: [PATCH 08/27] fix --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 96f477b63..cf921a55b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ future==0.16.0 futures==3.2.0; python_version < '3' hjson==3.0.1 jmespath==0.9.3 --e git+https://github.com/SpareFoot/kappa.git@master#egg=zappa +-e git+https://github.com/SpareFoot/kappa.git@master#egg=kappa lambda-packages==0.20.0 pip>=9.0.1, <=10.1.0 python-dateutil>=2.6.1, <2.7.0 From d4904f7d08d749929a1ae72c88af0c2a024fd0e9 Mon Sep 17 00:00:00 2001 From: Matt Land Date: Tue, 28 Aug 2018 09:56:43 -0500 Subject: [PATCH 09/27] drop 3.2 support dateutil --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index cf921a55b..dd460b447 100644 --- a/requirements.txt +++ b/requirements.txt @@ -11,7 +11,7 @@ jmespath==0.9.3 -e git+https://github.com/SpareFoot/kappa.git@master#egg=kappa lambda-packages==0.20.0 pip>=9.0.1, <=10.1.0 -python-dateutil>=2.6.1, <2.7.0 +python-dateutil>=2.6.1, <2.8.0 python-slugify==1.2.4 PyYAML==3.12 requests>=2.10.0 From c4e617a5ef306c245ff8fbeaf802432f6716f1f4 Mon Sep 17 00:00:00 2001 From: Matt Land Date: Tue, 28 Aug 2018 15:11:40 -0500 Subject: [PATCH 10/27] Add debug --- zappa/handler.py | 1 + 1 file 
changed, 1 insertion(+) diff --git a/zappa/handler.py b/zappa/handler.py index bcf6a588a..b1fd22d45 100644 --- a/zappa/handler.py +++ b/zappa/handler.py @@ -227,6 +227,7 @@ def import_module_and_get_function(whole_function): Given a modular path to a function, import that module and return the function. """ + logging.debug('whole fcn is {}'.format(whole_function)) module, function = whole_function.rsplit('.', 1) app_module = importlib.import_module(module) app_function = getattr(app_module, function) From 58503624aa3eca8d94deafac15a98602cf4dfaf6 Mon Sep 17 00:00:00 2001 From: Matt Land Date: Tue, 28 Aug 2018 15:30:09 -0500 Subject: [PATCH 11/27] need some deug to fix --- zappa/handler.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/zappa/handler.py b/zappa/handler.py index b1fd22d45..84cbc9e8d 100644 --- a/zappa/handler.py +++ b/zappa/handler.py @@ -228,6 +228,7 @@ def import_module_and_get_function(whole_function): and return the function. """ logging.debug('whole fcn is {}'.format(whole_function)) + module, function = whole_function.rsplit('.', 1) app_module = importlib.import_module(module) app_function = getattr(app_module, function) @@ -343,7 +344,8 @@ def handler(self, event, context): # If in DEBUG mode, log all raw incoming events. 
if settings.DEBUG: logger.debug('Zappa Event: {}'.format(event)) - + logger.debug('settings {}'.format(self.settings)) + logger.debug('setting_name {}'.format(self.settings_name)) # Set any API Gateway defined Stage Variables # as env vars if event.get('stageVariables'): From d1711ce9bd5b1a08f644e9fef4eed351cf3894f9 Mon Sep 17 00:00:00 2001 From: Matt Land Date: Tue, 28 Aug 2018 15:49:41 -0500 Subject: [PATCH 12/27] maybe break --- zappa/handler.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/zappa/handler.py b/zappa/handler.py index 84cbc9e8d..3dd1c4dc8 100644 --- a/zappa/handler.py +++ b/zappa/handler.py @@ -344,7 +344,7 @@ def handler(self, event, context): # If in DEBUG mode, log all raw incoming events. if settings.DEBUG: logger.debug('Zappa Event: {}'.format(event)) - logger.debug('settings {}'.format(self.settings)) + logger.debug('settings {}'.format(dir(self.settings))) logger.debug('setting_name {}'.format(self.settings_name)) # Set any API Gateway defined Stage Variables # as env vars @@ -411,7 +411,8 @@ def handler(self, event, context): records = event.get('Records') result = None - whole_function = self.get_function_for_aws_event(records[0]) + #whole_function = self.get_function_for_aws_event(records[0]) + whole_function = '{}.{}'.format(self.settings.APP_MODULE, self.settings.APP_FUNCTION) if whole_function: app_function = self.import_module_and_get_function(whole_function) result = self.run_function(app_function, event, context) From 6bfe869cbf5436d132b07581b00e693f8a5437c4 Mon Sep 17 00:00:00 2001 From: Matt Land Date: Tue, 28 Aug 2018 15:57:52 -0500 Subject: [PATCH 13/27] no more debugs needed --- zappa/handler.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/zappa/handler.py b/zappa/handler.py index 3dd1c4dc8..3635f9454 100644 --- a/zappa/handler.py +++ b/zappa/handler.py @@ -227,8 +227,6 @@ def import_module_and_get_function(whole_function): Given a modular path to a function, import that module and return the 
function. """ - logging.debug('whole fcn is {}'.format(whole_function)) - module, function = whole_function.rsplit('.', 1) app_module = importlib.import_module(module) app_function = getattr(app_module, function) @@ -344,8 +342,6 @@ def handler(self, event, context): # If in DEBUG mode, log all raw incoming events. if settings.DEBUG: logger.debug('Zappa Event: {}'.format(event)) - logger.debug('settings {}'.format(dir(self.settings))) - logger.debug('setting_name {}'.format(self.settings_name)) # Set any API Gateway defined Stage Variables # as env vars if event.get('stageVariables'): From e89de5ede66ea8caec8646066a26134959f3f2b9 Mon Sep 17 00:00:00 2001 From: Matt Land Date: Tue, 28 Aug 2018 15:59:22 -0500 Subject: [PATCH 14/27] add comment --- zappa/handler.py | 1 + 1 file changed, 1 insertion(+) diff --git a/zappa/handler.py b/zappa/handler.py index 3635f9454..d23f7481e 100644 --- a/zappa/handler.py +++ b/zappa/handler.py @@ -408,6 +408,7 @@ def handler(self, event, context): records = event.get('Records') result = None #whole_function = self.get_function_for_aws_event(records[0]) + # that didn't work. 
Lets just get it from the the Zappa settings instead whole_function = '{}.{}'.format(self.settings.APP_MODULE, self.settings.APP_FUNCTION) if whole_function: app_function = self.import_module_and_get_function(whole_function) From 4e56ce1723fd07ac3a9708a7ede84edddeb67d8d Mon Sep 17 00:00:00 2001 From: Matt Land Date: Thu, 7 Feb 2019 15:51:51 -0600 Subject: [PATCH 15/27] fix for deploy issue, stop trying to make this other role when manage_roles is false --- zappa/cli.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/zappa/cli.py b/zappa/cli.py index 78a45e713..426b23b52 100755 --- a/zappa/cli.py +++ b/zappa/cli.py @@ -2053,6 +2053,8 @@ def load_settings(self, settings_file=None, session=None): self.tags = self.stage_config.get('tags', {}) desired_role_name = self.lambda_name + "-ZappaLambdaExecutionRole" + if self.desired_role_arn: + desired_role_name = self.desired_role_arn.split('/')[-1] self.zappa = Zappa( boto_session=session, profile_name=self.profile_name, aws_region=self.aws_region, From e1fa1344f9c7775bbce546872a2848a8b73581d2 Mon Sep 17 00:00:00 2001 From: Matt Land Date: Thu, 7 Feb 2019 15:55:52 -0600 Subject: [PATCH 16/27] dep --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index dd460b447..78e40ef01 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,8 +13,8 @@ lambda-packages==0.20.0 pip>=9.0.1, <=10.1.0 python-dateutil>=2.6.1, <2.8.0 python-slugify==1.2.4 -PyYAML==3.12 -requests>=2.10.0 +PyYAML==3.13 +requests>=2.20.0 six>=1.11.0 toml>=0.9.4 tqdm==4.19.1 From 6b64e788f38b2f8eda5f8105807bf36a8cf36bd8 Mon Sep 17 00:00:00 2001 From: nnguyen Date: Sat, 17 Aug 2019 23:30:16 -0500 Subject: [PATCH 17/27] Match PyYaml to Zappa --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 78e40ef01..186cd8244 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,7 +13,7 @@ lambda-packages==0.20.0 
pip>=9.0.1, <=10.1.0 python-dateutil>=2.6.1, <2.8.0 python-slugify==1.2.4 -PyYAML==3.13 +PyYAML>=3.13 requests>=2.20.0 six>=1.11.0 toml>=0.9.4 From 315957a60e16874b692dac54e3bc26befe99585a Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Mon, 15 Jun 2020 17:22:11 -0500 Subject: [PATCH 18/27] merging --- setup.py | 27 - ....GetBucketNotificationConfiguration_1.json | 18 - tests/tests_placebo.py | 517 --- zappa/cli.py | 2801 ----------------- zappa/handler.py | 616 ---- zappa/utilities.py | 573 ---- 6 files changed, 4552 deletions(-) delete mode 100644 tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json delete mode 100644 tests/tests_placebo.py delete mode 100755 zappa/cli.py delete mode 100644 zappa/handler.py delete mode 100644 zappa/utilities.py diff --git a/setup.py b/setup.py index b49abd9ff..b885c80c4 100755 --- a/setup.py +++ b/setup.py @@ -4,37 +4,11 @@ from io import open from zappa import __version__ -<<<<<<< HEAD -# Set external files -try: - from pypandoc import convert - README = convert('README.md', 'rst') -except ImportError: - README = open(os.path.join(os.path.dirname(__file__), 'README.md'), 'r', encoding="utf-8").read() -dependency_links = [] -with open(os.path.join(os.path.dirname(__file__), 'requirements.txt')) as f: - if sys.version_info[0] == 2: - required = f.read().splitlines() - else: - # This logic is intended to prevent the futures package from being installed in python 3 environments - # as it can cause unexpected syntax errors in other packages. Futures is in the standard library in python 3 - # and is should never be installed in these environments. 
- # Related: https://github.com/Miserlou/Zappa/issues/1179 - required = [] - for package in f.read().splitlines(): - if 'futures' not in package: - if '://' in package: - dependency_links.append(package.replace('-e ', '') + '-0') - required.append(package.split('egg=')[1]) - else: - required.append(package) -======= with open('README.md') as readme_file: long_description = readme_file.read() with open(os.path.join(os.path.dirname(__file__), 'requirements.in')) as f: required = f.read().splitlines() ->>>>>>> upstream/master with open(os.path.join(os.path.dirname(__file__), 'test_requirements.in')) as f: test_required = f.read().splitlines() @@ -43,7 +17,6 @@ name='zappa', version=__version__, packages=['zappa'], - dependency_links=dependency_links, install_requires=required, tests_require=test_required, test_suite='nose.collector', diff --git a/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json deleted file mode 100644 index 8f61a5923..000000000 --- a/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "status_code": 200, - "data": { - "items": [ - { - "description": "Api Key for rdrde6pecd", - "enabled": true, - "stageKeys": [ - "rdrde6pecd/devor" - ], - "lastUpdatedDate": 1470384739, - "createdDate": 1470384739, - "id": "W9QTptojpE3bsg816Rrjh4was83amtaAx5s6NXaAl", - "name": "devor_rdrde6pecd" - } - ] - } -} diff --git a/tests/tests_placebo.py b/tests/tests_placebo.py deleted file mode 100644 index 01a77fe12..000000000 --- a/tests/tests_placebo.py +++ /dev/null @@ -1,517 +0,0 @@ -# -*- coding: utf8 -*- -import mock -import os -import random -import string -import unittest - -from .utils import placebo_session - -from zappa.cli import ZappaCLI -from zappa.handler import LambdaHandler -from zappa.utilities import (add_event_source, get_event_source_status, 
remove_event_source) -from zappa.core import Zappa - - -def random_string(length): - return ''.join(random.choice(string.printable) for _ in range(length)) - - -class TestZappa(unittest.TestCase): - def setUp(self): - self.sleep_patch = mock.patch('time.sleep', return_value=None) - # Tests expect us-east-1. - # If the user has set a different region in env variables, we set it aside for now and use us-east-1 - self.users_current_region_name = os.environ.get('AWS_DEFAULT_REGION', None) - os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' - if not os.environ.get('PLACEBO_MODE') == 'record': - self.sleep_patch.start() - - def tearDown(self): - if not os.environ.get('PLACEBO_MODE') == 'record': - self.sleep_patch.stop() - del os.environ['AWS_DEFAULT_REGION'] - if self.users_current_region_name is not None: - # Give the user their AWS region back, we're done testing with us-east-1. - os.environ['AWS_DEFAULT_REGION'] = self.users_current_region_name - - @placebo_session - def test_upload_remove_s3(self, session): - bucket_name = 'test_zappa_upload_s3' - z = Zappa(session) - zip_path = z.create_lambda_zip(minify=False) - res = z.upload_to_s3(zip_path, bucket_name) - self.assertTrue(res) - s3 = session.resource('s3') - - # will throw ClientError with 404 if bucket doesn't exist - s3.meta.client.head_bucket(Bucket=bucket_name) - - # will throw ClientError with 404 if object doesn't exist - s3.meta.client.head_object( - Bucket=bucket_name, - Key=zip_path, - ) - res = z.remove_from_s3(zip_path, bucket_name) - self.assertTrue(res) - - fail = z.upload_to_s3('/tmp/this_isnt_real', bucket_name) - self.assertFalse(fail) - - #Will graciouly handle quirky S3 behavior on 'us-east-1' region name' - z.aws_region = 'us-east-1' - res = z.upload_to_s3(zip_path, bucket_name) - os.remove(zip_path) - self.assertTrue(res) - - @placebo_session - def test_copy_on_s3(self, session): - bucket_name = 'test_zappa_upload_s3' - z = Zappa(session) - zip_path = z.create_lambda_zip(minify=False) - res = 
z.upload_to_s3(zip_path, bucket_name) - self.assertTrue(res) - s3 = session.resource('s3') - - # will throw ClientError with 404 if bucket doesn't exist - s3.meta.client.head_bucket(Bucket=bucket_name) - - # will throw ClientError with 404 if object doesn't exist - s3.meta.client.head_object( - Bucket=bucket_name, - Key=zip_path, - ) - zp = 'copy_' + zip_path - res = z.copy_on_s3(zip_path, zp, bucket_name) - os.remove(zip_path) - self.assertTrue(res) - - @placebo_session - def test_create_lambda_function_s3(self, session): - bucket_name = 'lmbda' - zip_path = 'Spheres-dev-1454694878.zip' - - z = Zappa(session) - z.aws_region = 'us-east-1' - z.load_credentials(session) - z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' - - arn = z.create_lambda_function( - bucket=bucket_name, - s3_key=zip_path, - function_name='test_lmbda_function55', - handler='runme.lambda_handler' - ) - - arn = z.update_lambda_function( - bucket=bucket_name, - s3_key=zip_path, - function_name='test_lmbda_function55', - ) - - @placebo_session - def test_create_lambda_function_local(self, session): - bucket_name = 'lmbda' - local_file = 'Spheres-dev-1454694878.zip' - - z = Zappa(session) - z.aws_region = 'us-east-1' - z.load_credentials(session) - z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' - - arn = z.create_lambda_function( - bucket=bucket_name, - local_zip=local_file, - function_name='test_lmbda_function55', - handler='runme.lambda_handler' - ) - - arn = z.update_lambda_function( - bucket=bucket_name, - local_zip=local_file, - function_name='test_lmbda_function55', - ) - - @placebo_session - def test_rollback_lambda_function_version(self, session): - z = Zappa(session) - z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution' - - function_name = 'django-helloworld-unicode' - too_many_versions = z.rollback_lambda_function_version(function_name, 99999) - self.assertFalse(too_many_versions) - - function_arn = 
z.rollback_lambda_function_version(function_name, 1) - - @placebo_session - def test_invoke_lambda_function(self, session): - z = Zappa(session) - z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution' - - function_name = 'django-helloworld-unicode' - payload = '{"event": "hello"}' - response = z.invoke_lambda_function(function_name, payload) - - @placebo_session - def test_create_iam_roles(self, session): - z = Zappa(session) - arn, updated = z.create_iam_roles() - self.assertEqual(arn, "arn:aws:iam::123:role/{}".format(z.role_name)) - - @placebo_session - def test_get_api_url(self, session): - z = Zappa(session) - z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution' - url = z.get_api_url('Spheres-demonstration', 'demonstration') - - @placebo_session - def test_fetch_logs(self, session): - z = Zappa(session) - z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' - events = z.fetch_logs('Spheres-demonstration') - self.assertTrue(events is not None) - - ## - # Handler - ## - - @placebo_session - def test_handler(self, session): - # Init will test load_remote_settings - lh = LambdaHandler('test_settings', session=session) - - # Annoyingly, this will fail during record, but - # the result will actually be okay to use in playback. 
- # See: https://github.com/garnaat/placebo/issues/48 - self.assertEqual(os.environ['hello'], 'world') - - event = { - "body": {}, - "headers": {}, - "params": { - "parameter_1": "asdf1", - "parameter_2": "asdf2", - }, - "method": "GET", - "query": {} - } - lh.handler(event, None) - - # Test scheduled event - event = { - 'account': '72333333333', - 'region': 'us-east-1', - 'detail': {}, - 'detail-type': 'Scheduled Event', - 'source': 'aws.events', - 'version': '0', - 'time': '2016-05-10T21:05:39Z', - 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', - 'resources': ['arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me'] - } - lh.handler(event, None) - - # Test command event - event = { - 'account': '72333333333', - 'region': 'us-east-1', - 'detail': {}, - 'command': 'test_settings.command', - 'source': 'aws.events', - 'version': '0', - 'time': '2016-05-10T21:05:39Z', - 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', - 'resources': ['arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me'] - } - lh.handler(event, None) - - # Test command for async event - event = { - 'account': '72333333333', - 'region': 'us-east-1', - 'detail': {}, - 'command': 'zappa.asynchronous.route_lambda_task', - 'task_path': 'tests.test_app.async_me', - 'args': ['xxx'], - 'kwargs': {}, - 'source': 'aws.events', - 'version': '0', - 'time': '2016-05-10T21:05:39Z', - 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', - } - self.assertEqual('run async when on lambda xxx', lh.handler(event, None)) - event['kwargs'] = {'foo': 'bar'} - self.assertEqual('run async when on lambda xxxbar', lh.handler(event, None)) - - # Test raw_command event - event = { - 'account': '72333333333', - 'region': 'us-east-1', - 'detail': {}, - 'raw_command': 'print("check one two")', - 'source': 'aws.events', - 'version': '0', - 'time': '2016-05-10T21:05:39Z', - 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', - 'resources': ['arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me'] - } - 
lh.handler(event, None) - - # Test AWS S3 event - event = { - 'account': '72333333333', - 'region': 'us-east-1', - 'detail': {}, - 'Records': [{'s3': {'configurationId': 'test_project:test_settings.aws_s3_event'}}], - 'source': 'aws.events', - 'version': '0', - 'time': '2016-05-10T21:05:39Z', - 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', - 'resources': ['arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me'] - } - self.assertEqual("AWS S3 EVENT", lh.handler(event, None)) - - # Test AWS SNS event - event = { - 'account': '72333333333', - 'region': 'us-east-1', - 'detail': {}, - 'Records': [ - { - 'EventVersion': '1.0', - 'EventSource': 'aws:sns', - 'EventSubscriptionArn': 'arn:aws:sns:EXAMPLE', - 'Sns': { - 'SignatureVersion': '1', - 'Timestamp': '1970-01-01T00:00:00.000Z', - 'Signature': 'EXAMPLE', - 'SigningCertUrl': 'EXAMPLE', - 'MessageId': '95df01b4-ee98-5cb9-9903-4c221d41eb5e', - 'Message': 'Hello from SNS!', - 'Subject': 'TestInvoke', - 'Type': 'Notification', - 'UnsubscribeUrl': 'EXAMPLE', - 'TopicArn': 'arn:aws:sns:1', - 'MessageAttributes': { - 'Test': {'Type': 'String', 'Value': 'TestString'}, - 'TestBinary': {'Type': 'Binary', 'Value': 'TestBinary'} - } - } - } - ] - } - self.assertEqual("AWS SNS EVENT", lh.handler(event, None)) - - # Test AWS SNS event - event = { - 'account': '72333333333', - 'region': 'us-east-1', - 'detail': {}, - 'Records': [ - { - 'EventVersion': '1.0', - 'EventSource': 'aws:sns', - 'EventSubscriptionArn': 'arn:aws:sns:EXAMPLE', - 'Sns': { - 'SignatureVersion': '1', - 'Timestamp': '1970-01-01T00:00:00.000Z', - 'Signature': 'EXAMPLE', - 'SigningCertUrl': 'EXAMPLE', - 'MessageId': '95df01b4-ee98-5cb9-9903-4c221d41eb5e', - 'Message': '{"args": ["arg1", "arg2"], "command": "zappa.asynchronous.route_sns_task", ' - '"task_path": "test_settings.aws_async_sns_event", "kwargs": {"arg3": "varg3"}}', - 'Subject': 'TestInvoke', - 'Type': 'Notification', - 'UnsubscribeUrl': 'EXAMPLE', - 'MessageAttributes': { - 'Test': 
{'Type': 'String', 'Value': 'TestString'}, - 'TestBinary': {'Type': 'Binary', 'Value': 'TestBinary'} - } - } - } - ] - } - self.assertEqual("AWS ASYNC SNS EVENT", lh.handler(event, None)) - - # Test AWS DynamoDB event - event = { - 'Records': [ - { - 'eventID': '1', - 'eventVersion': '1.0', - 'dynamodb': { - 'Keys': {'Id': {'N': '101'}}, - 'NewImage': {'Message': {'S': 'New item!'}, 'Id': {'N': '101'}}, - 'StreamViewType': 'NEW_AND_OLD_IMAGES', - 'SequenceNumber': '111', 'SizeBytes': 26 - }, - 'awsRegion': 'us-west-2', - 'eventName': 'INSERT', - 'eventSourceARN': 'arn:aws:dynamodb:1', - 'eventSource': 'aws:dynamodb' - } - ] - } - self.assertEqual("AWS DYNAMODB EVENT", lh.handler(event, None)) - - # Test AWS kinesis event - event = { - 'Records': [ - { - 'eventID': 'shardId-000000000000:49545115243490985018280067714973144582180062593244200961', - 'eventVersion': '1.0', - 'kinesis': { - 'partitionKey': 'partitionKey-3', - 'data': 'SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=', - 'kinesisSchemaVersion': '1.0', - 'sequenceNumber': '49545115243490985018280067714973144582180062593244200961' - }, - 'invokeIdentityArn': 'arn:aws:iam::EXAMPLE', - 'eventName': 'aws:kinesis:record', - 'eventSourceARN': 'arn:aws:kinesis:1', - 'eventSource': 'aws:kinesis', - 'awsRegion': 'us-east-1' - } - ] - } - self.assertEqual("AWS KINESIS EVENT", lh.handler(event, None)) - - # Test AWS SQS event - event = { - "Records": [ - { - "messageId": "c80e8021-a70a-42c7-a470-796e1186f753", - "receiptHandle": "AQEBJQ+/u6NsnT5t8Q/VbVxgdUl4TMKZ5FqhksRdIQvLBhwNvADoBxYSOVeCBXdnS9P+erlTtwEALHsnBXynkfPLH3BOUqmgzP25U8kl8eHzq6RAlzrSOfTO8ox9dcp6GLmW33YjO3zkq5VRYyQlJgLCiAZUpY2D4UQcE5D1Vm8RoKfbE+xtVaOctYeINjaQJ1u3mWx9T7tork3uAlOe1uyFjCWU5aPX/1OHhWCGi2EPPZj6vchNqDOJC/Y2k1gkivqCjz1CZl6FlZ7UVPOx3AMoszPuOYZ+Nuqpx2uCE2MHTtMHD8PVjlsWirt56oUr6JPp9aRGo6bitPIOmi4dX0FmuMKD6u/JnuZCp+AXtJVTmSHS8IXt/twsKU7A+fiMK01NtD5msNgVPoe9JbFtlGwvTQ==", - "body": "{\"foo\":\"bar\"}", - "attributes": { - "ApproximateReceiveCount": "3", - 
"SentTimestamp": "1529104986221", - "SenderId": "594035263019", - "ApproximateFirstReceiveTimestamp": "1529104986230" - }, - "messageAttributes": {}, - "md5OfBody": "9bb58f26192e4ba00f01e2e7b136bbd8", - "eventSource": "aws:sqs", - "eventSourceARN": "arn:aws:sqs:1", - "awsRegion": "us-east-1" - } - ] - } - self.assertEqual("AWS SQS EVENT", lh.handler(event, None)) - - # Test Authorizer event - event = {'authorizationToken': 'hubtoken1', 'methodArn': 'arn:aws:execute-api:us-west-2:1234:xxxxx/dev/GET/v1/endpoint/param', 'type': 'TOKEN'} - self.assertEqual("AUTHORIZER_EVENT", lh.handler(event, None)) - - # Ensure Zappa does return 401 if no function was defined. - lh.settings.AUTHORIZER_FUNCTION = None - with self.assertRaisesRegexp(Exception, 'Unauthorized'): - lh.handler(event, None) - - # Unhandled event - event = { - 'Records': [ - { - 'eventID': 'shardId-000000000000:49545115243490985018280067714973144582180062593244200961', - 'eventVersion': '1.0', - 'kinesis': { - 'partitionKey': 'partitionKey-3', - 'data': 'SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=', - 'kinesisSchemaVersion': '1.0', - 'sequenceNumber': '49545115243490985018280067714973144582180062593244200961' - }, - 'eventSourceARN': 'bad:arn:1', - } - ] - } - self.assertIsNone(lh.handler(event, None)) - - ## - # CLI - ## - - @placebo_session - def test_cli_aws(self, session): - zappa_cli = ZappaCLI() - zappa_cli.api_stage = 'ttt888' - zappa_cli.api_key_required = True - zappa_cli.authorization_type = 'NONE' - zappa_cli.load_settings('test_settings.json', session) - zappa_cli.zappa.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' - zappa_cli.deploy() - zappa_cli.update() - zappa_cli.rollback(1) - zappa_cli.tail(since=0, filter_pattern='', keep_open=False) - zappa_cli.schedule() - zappa_cli.unschedule() - zappa_cli.undeploy(no_confirm=True, remove_logs=True) - - @placebo_session - def test_cli_aws_status(self, session): - zappa_cli = ZappaCLI() - zappa_cli.api_stage = 'ttt888' - 
zappa_cli.load_settings('test_settings.json', session) - zappa_cli.api_stage = 'devor' - zappa_cli.lambda_name = 'baby-flask-devor' - zappa_cli.zappa.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' - resp = zappa_cli.status() - - ## - # Let's Encrypt / ACME - ## - - ## - # Django - ## - - ## - # Util / Misc - ## - - @placebo_session - def test_add_event_source(self, session): - - event_source = {'arn': 'blah:blah:blah:blah', 'events': [ - "s3:ObjectCreated:*" - ]} - # Sanity. This should fail. - try: - es = add_event_source(event_source, 'blah:blah:blah:blah', 'test_settings.callback', session) - self.fail("Success should have failed.") - except ValueError: - pass - - event_source = {'arn': 's3:s3:s3:s3', 'events': [ - "s3:ObjectCreated:*" - ]} - add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) - get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) - remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) - - @placebo_session - def test_cognito_trigger(self, session): - z = Zappa(session) - z.update_cognito('Zappa-Trigger-Test', 'us-east-1_9jUv74DH8', {'PreSignUp': 'test.tasks.pre_signup'}, 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test') - - @placebo_session - def test_cognito_trigger_existing(self, session): - z = Zappa(session) - z.update_cognito('Zappa-Trigger-Test', 'us-east-1_9jUv74DH8', {'PreSignUp': 'test.tasks.pre_signup'}, 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test') - - @placebo_session - def test_cli_cognito_triggers(self, session): - zappa_cli = ZappaCLI() - zappa_cli.api_stage = 'ttt888' - zappa_cli.api_key_required = True - zappa_cli.load_settings('test_settings.json', session) - zappa_cli.lambda_arn = 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test' - zappa_cli.update_cognito_triggers() - - @placebo_session - def 
test_cognito_trigger_existing_UnusedAccountValidityDays(self, session): - z = Zappa(session) - z.update_cognito('Zappa-Trigger-Test', 'us-east-1_9jUv74DH8', {'PreSignUp': 'test.tasks.pre_signup'}, 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test') - - -if __name__ == '__main__': - unittest.main() diff --git a/zappa/cli.py b/zappa/cli.py deleted file mode 100755 index 526c6e0b4..000000000 --- a/zappa/cli.py +++ /dev/null @@ -1,2801 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -Zappa CLI - -Deploy arbitrary Python programs as serverless Zappa applications. - -""" -from past.builtins import basestring -from builtins import input, bytes - -import argcomplete -import argparse -import base64 -import pkgutil -import botocore -import click -import collections -import hjson as json -import inspect -import importlib -import logging -import os -import pkg_resources -import random -import re -import requests -import slugify -import string -import sys -import tempfile -import time -import toml -import yaml -import zipfile - -from click import Context, BaseCommand -from click.exceptions import ClickException -from click.globals import push_context -from dateutil import parser -from datetime import datetime, timedelta - -from .core import Zappa, logger, API_GATEWAY_REGIONS -from .utilities import (check_new_version_available, detect_django_settings, - detect_flask_apps, parse_s3_url, human_size, - validate_name, InvalidAwsLambdaName, get_venv_from_python_version, - get_runtime_from_python_version, string_to_timestamp, is_valid_bucket_name) - - -CUSTOM_SETTINGS = [ - 'apigateway_policy', - 'assume_policy', - 'attach_policy', - 'aws_region', - 'delete_local_zip', - 'delete_s3_zip', - 'exclude', - 'exclude_glob', - 'extra_permissions', - 'include', - 'role_name', - 'touch', -] - -BOTO3_CONFIG_DOCS_URL = 'https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration' - -## -# Main Input Processing -## - -class ZappaCLI: - """ - ZappaCLI 
object is responsible for loading the settings, - handling the input arguments and executing the calls to the core library. - - """ - - # CLI - vargs = None - command = None - stage_env = None - - # Zappa settings - zappa = None - zappa_settings = None - load_credentials = True - disable_progress = False - - # Specific settings - api_stage = None - app_function = None - aws_region = None - debug = None - prebuild_script = None - project_name = None - profile_name = None - lambda_arn = None - lambda_name = None - lambda_description = None - lambda_concurrency = None - s3_bucket_name = None - settings_file = None - zip_path = None - handler_path = None - vpc_config = None - memory_size = None - use_apigateway = None - lambda_handler = None - django_settings = None - manage_roles = True - exception_handler = None - environment_variables = None - authorizer = None - xray_tracing = False - aws_kms_key_arn = '' - context_header_mappings = None - tags = [] - layers = None - - stage_name_env_pattern = re.compile('^[a-zA-Z0-9_]+$') - - def __init__(self): - self._stage_config_overrides = {} # change using self.override_stage_config_setting(key, val) - - @property - def stage_config(self): - """ - A shortcut property for settings of a stage. - """ - - def get_stage_setting(stage, extended_stages=None): - if extended_stages is None: - extended_stages = [] - - if stage in extended_stages: - raise RuntimeError(stage + " has already been extended to these settings. 
" - "There is a circular extends within the settings file.") - extended_stages.append(stage) - - try: - stage_settings = dict(self.zappa_settings[stage].copy()) - except KeyError: - raise ClickException("Cannot extend settings for undefined stage '" + stage + "'.") - - extends_stage = self.zappa_settings[stage].get('extends', None) - if not extends_stage: - return stage_settings - extended_settings = get_stage_setting(stage=extends_stage, extended_stages=extended_stages) - extended_settings.update(stage_settings) - return extended_settings - - settings = get_stage_setting(stage=self.api_stage) - - # Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip - if 'delete_zip' in settings: - settings['delete_local_zip'] = settings.get('delete_zip') - - settings.update(self.stage_config_overrides) - - return settings - - @property - def stage_config_overrides(self): - """ - Returns zappa_settings we forcefully override for the current stage - set by `self.override_stage_config_setting(key, value)` - """ - return getattr(self, '_stage_config_overrides', {}).get(self.api_stage, {}) - - def override_stage_config_setting(self, key, val): - """ - Forcefully override a setting set by zappa_settings (for the current stage only) - :param key: settings key - :param val: value - """ - self._stage_config_overrides = getattr(self, '_stage_config_overrides', {}) - self._stage_config_overrides.setdefault(self.api_stage, {})[key] = val - - def handle(self, argv=None): - """ - Main function. - - Parses command, load settings and dispatches accordingly. 
- - """ - - desc = ('Zappa - Deploy Python applications to AWS Lambda' - ' and API Gateway.\n') - parser = argparse.ArgumentParser(description=desc) - parser.add_argument( - '-v', '--version', action='version', - version=pkg_resources.get_distribution("zappa").version, - help='Print the zappa version' - ) - parser.add_argument( - '--color', default='auto', choices=['auto','never','always'] - ) - - env_parser = argparse.ArgumentParser(add_help=False) - me_group = env_parser.add_mutually_exclusive_group() - all_help = ('Execute this command for all of our defined ' - 'Zappa stages.') - me_group.add_argument('--all', action='store_true', help=all_help) - me_group.add_argument('stage_env', nargs='?') - - group = env_parser.add_argument_group() - group.add_argument( - '-a', '--app_function', help='The WSGI application function.' - ) - group.add_argument( - '-s', '--settings_file', help='The path to a Zappa settings file.' - ) - group.add_argument( - '-q', '--quiet', action='store_true', help='Silence all output.' - ) - # https://github.com/Miserlou/Zappa/issues/407 - # Moved when 'template' command added. - # Fuck Terraform. - group.add_argument( - '-j', '--json', action='store_true', help='Make the output of this command be machine readable.' - ) - # https://github.com/Miserlou/Zappa/issues/891 - group.add_argument( - '--disable_progress', action='store_true', help='Disable progress bars.' - ) - group.add_argument( - "--no_venv", action="store_true", help="Skip venv check." - ) - - ## - # Certify - ## - subparsers = parser.add_subparsers(title='subcommands', dest='command') - cert_parser = subparsers.add_parser( - 'certify', parents=[env_parser], - help='Create and install SSL certificate' - ) - cert_parser.add_argument( - '--manual', action='store_true', - help=("Gets new Let's Encrypt certificates, but prints them to console." - "Does not update API Gateway domains.") - ) - cert_parser.add_argument( - '-y', '--yes', action='store_true', help='Auto confirm yes.' 
- ) - - ## - # Deploy - ## - deploy_parser = subparsers.add_parser( - 'deploy', parents=[env_parser], help='Deploy application.' - ) - deploy_parser.add_argument( - '-z', '--zip', help='Deploy Lambda with specific local or S3 hosted zip package' - ) - - ## - # Init - ## - init_parser = subparsers.add_parser('init', help='Initialize Zappa app.') - - ## - # Package - ## - package_parser = subparsers.add_parser( - 'package', parents=[env_parser], help='Build the application zip package locally.' - ) - package_parser.add_argument( - '-o', '--output', help='Name of file to output the package to.' - ) - - ## - # Template - ## - template_parser = subparsers.add_parser( - 'template', parents=[env_parser], help='Create a CloudFormation template for this API Gateway.' - ) - template_parser.add_argument( - '-l', '--lambda-arn', required=True, help='ARN of the Lambda function to template to.' - ) - template_parser.add_argument( - '-r', '--role-arn', required=True, help='ARN of the Role to template with.' - ) - template_parser.add_argument( - '-o', '--output', help='Name of file to output the template to.' - ) - - ## - # Invocation - ## - invoke_parser = subparsers.add_parser( - 'invoke', parents=[env_parser], - help='Invoke remote function.' - ) - invoke_parser.add_argument( - '--raw', action='store_true', - help=('When invoking remotely, invoke this python as a string,' - ' not as a modular path.') - ) - invoke_parser.add_argument( - '--no-color', action='store_true', - help=("Don't color the output") - ) - invoke_parser.add_argument('command_rest') - - ## - # Manage - ## - manage_parser = subparsers.add_parser( - 'manage', - help='Invoke remote Django manage.py commands.' - ) - rest_help = ("Command in the form of . 
is not " - "required if --all is specified") - manage_parser.add_argument('--all', action='store_true', help=all_help) - manage_parser.add_argument('command_rest', nargs='+', help=rest_help) - manage_parser.add_argument( - '--no-color', action='store_true', - help=("Don't color the output") - ) - # This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser - # https://github.com/Miserlou/Zappa/issues/1002 - manage_parser.add_argument( - '-s', '--settings_file', help='The path to a Zappa settings file.' - ) - - ## - # Rollback - ## - def positive_int(s): - """ Ensure an arg is positive """ - i = int(s) - if i < 0: - msg = "This argument must be positive (got {})".format(s) - raise argparse.ArgumentTypeError(msg) - return i - - rollback_parser = subparsers.add_parser( - 'rollback', parents=[env_parser], - help='Rollback deployed code to a previous version.' - ) - rollback_parser.add_argument( - '-n', '--num-rollback', type=positive_int, default=1, - help='The number of versions to rollback.' - ) - - ## - # Scheduling - ## - subparsers.add_parser( - 'schedule', parents=[env_parser], - help='Schedule functions to occur at regular intervals.' - ) - - ## - # Status - ## - subparsers.add_parser( - 'status', parents=[env_parser], - help='Show deployment status and event schedules.' - ) - - ## - # Log Tailing - ## - tail_parser = subparsers.add_parser( - 'tail', parents=[env_parser], help='Tail deployment logs.' - ) - tail_parser.add_argument( - '--no-color', action='store_true', - help="Don't color log tail output." - ) - tail_parser.add_argument( - '--http', action='store_true', - help='Only show HTTP requests in tail output.' - ) - tail_parser.add_argument( - '--non-http', action='store_true', - help='Only show non-HTTP requests in tail output.' - ) - tail_parser.add_argument( - '--since', type=str, default="100000s", - help="Only show lines since a certain timeframe." 
- ) - tail_parser.add_argument( - '--filter', type=str, default="", - help="Apply a filter pattern to the logs." - ) - tail_parser.add_argument( - '--force-color', action='store_true', - help='Force coloring log tail output even if coloring support is not auto-detected. (example: piping)' - ) - tail_parser.add_argument( - '--disable-keep-open', action='store_true', - help="Exit after printing the last available log, rather than keeping the log open." - ) - - ## - # Undeploy - ## - undeploy_parser = subparsers.add_parser( - 'undeploy', parents=[env_parser], help='Undeploy application.' - ) - undeploy_parser.add_argument( - '--remove-logs', action='store_true', - help=('Removes log groups of api gateway and lambda task' - ' during the undeployment.'), - ) - undeploy_parser.add_argument( - '-y', '--yes', action='store_true', help='Auto confirm yes.' - ) - - ## - # Unschedule - ## - subparsers.add_parser('unschedule', parents=[env_parser], - help='Unschedule functions.') - - ## - # Updating - ## - update_parser = subparsers.add_parser( - 'update', parents=[env_parser], help='Update deployed application.' - ) - update_parser.add_argument( - '-z', '--zip', help='Update Lambda with specific local or S3 hosted zip package' - ) - update_parser.add_argument( - '-n', '--no-upload', help="Update configuration where appropriate, but don't upload new code" - ) - - ## - # Debug - ## - subparsers.add_parser( - 'shell', parents=[env_parser], help='A debug shell with a loaded Zappa object.' - ) - - argcomplete.autocomplete(parser) - args = parser.parse_args(argv) - self.vargs = vars(args) - - if args.color == 'never': - disable_click_colors() - elif args.color == 'always': - #TODO: Support aggressive coloring like "--force-color" on all commands - pass - elif args.color == 'auto': - pass - - # Parse the input - # NOTE(rmoe): Special case for manage command - # The manage command can't have both stage_env and command_rest - # arguments. 
Since they are both positional arguments argparse can't - # differentiate the two. This causes problems when used with --all. - # (e.g. "manage --all showmigrations admin" argparse thinks --all has - # been specified AND that stage_env='showmigrations') - # By having command_rest collect everything but --all we can split it - # apart here instead of relying on argparse. - if not args.command: - parser.print_help() - return - - if args.command == 'manage' and not self.vargs.get('all'): - self.stage_env = self.vargs['command_rest'].pop(0) - else: - self.stage_env = self.vargs.get('stage_env') - - if args.command == 'package': - self.load_credentials = False - - self.command = args.command - - self.disable_progress = self.vargs.get('disable_progress') - if self.vargs.get('quiet'): - self.silence() - - # We don't have any settings yet, so make those first! - # (Settings-based interactions will fail - # before a project has been initialized.) - if self.command == 'init': - self.init() - return - - # Make sure there isn't a new version available - if not self.vargs.get('json'): - self.check_for_update() - - # Load and Validate Settings File - self.load_settings_file(self.vargs.get('settings_file')) - - # Should we execute this for all stages, or just one? - all_stages = self.vargs.get('all') - stages = [] - - if all_stages: # All stages! - stages = self.zappa_settings.keys() - else: # Just one env. - if not self.stage_env: - # If there's only one stage defined in the settings, - # use that as the default. 
- if len(self.zappa_settings.keys()) == 1: - stages.append(list(self.zappa_settings.keys())[0]) - else: - parser.error("Please supply a stage to interact with.") - else: - stages.append(self.stage_env) - - for stage in stages: - try: - self.dispatch_command(self.command, stage) - except ClickException as e: - # Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407 - e.show() - sys.exit(e.exit_code) - - def dispatch_command(self, command, stage): - """ - Given a command to execute and stage, - execute that command. - """ - - self.api_stage = stage - - if command not in ['status', 'manage']: - if not self.vargs.get('json', None): - click.echo("Calling " + click.style(command, fg="green", bold=True) + " for stage " + - click.style(self.api_stage, bold=True) + ".." ) - - # Explicitly define the app function. - # Related: https://github.com/Miserlou/Zappa/issues/832 - if self.vargs.get('app_function', None): - self.app_function = self.vargs['app_function'] - - # Load our settings, based on api_stage. 
- try: - self.load_settings(self.vargs.get('settings_file')) - except ValueError as e: - if hasattr(e, 'message'): - print("Error: {}".format(e.message)) - else: - print(str(e)) - sys.exit(-1) - self.callback('settings') - - # Hand it off - if command == 'deploy': # pragma: no cover - self.deploy(self.vargs['zip']) - if command == 'package': # pragma: no cover - self.package(self.vargs['output']) - if command == 'template': # pragma: no cover - self.template( self.vargs['lambda_arn'], - self.vargs['role_arn'], - output=self.vargs['output'], - json=self.vargs['json'] - ) - elif command == 'update': # pragma: no cover - self.update(self.vargs['zip'], self.vargs['no_upload']) - elif command == 'rollback': # pragma: no cover - self.rollback(self.vargs['num_rollback']) - elif command == 'invoke': # pragma: no cover - - if not self.vargs.get('command_rest'): - print("Please enter the function to invoke.") - return - - self.invoke( - self.vargs['command_rest'], - raw_python=self.vargs['raw'], - no_color=self.vargs['no_color'], - ) - elif command == 'manage': # pragma: no cover - - if not self.vargs.get('command_rest'): - print("Please enter the management command to invoke.") - return - - if not self.django_settings: - print("This command is for Django projects only!") - print("If this is a Django project, please define django_settings in your zappa_settings.") - return - - command_tail = self.vargs.get('command_rest') - if len(command_tail) > 1: - command = " ".join(command_tail) # ex: zappa manage dev "shell --version" - else: - command = command_tail[0] # ex: zappa manage dev showmigrations admin - - self.invoke( - command, - command="manage", - no_color=self.vargs['no_color'], - ) - - elif command == 'tail': # pragma: no cover - self.tail( - colorize=(not self.vargs['no_color']), - http=self.vargs['http'], - non_http=self.vargs['non_http'], - since=self.vargs['since'], - filter_pattern=self.vargs['filter'], - force_colorize=self.vargs['force_color'] or None, - 
keep_open=not self.vargs['disable_keep_open'] - ) - elif command == 'undeploy': # pragma: no cover - self.undeploy( - no_confirm=self.vargs['yes'], - remove_logs=self.vargs['remove_logs'] - ) - elif command == 'schedule': # pragma: no cover - self.schedule() - elif command == 'unschedule': # pragma: no cover - self.unschedule() - elif command == 'status': # pragma: no cover - self.status(return_json=self.vargs['json']) - elif command == 'certify': # pragma: no cover - self.certify( - no_confirm=self.vargs['yes'], - manual=self.vargs['manual'] - ) - elif command == 'shell': # pragma: no cover - self.shell() - - ## - # The Commands - ## - - def package(self, output=None): - """ - Only build the package - """ - # Make sure we're in a venv. - self.check_venv() - - # force not to delete the local zip - self.override_stage_config_setting('delete_local_zip', False) - # Execute the prebuild script - if self.prebuild_script: - self.execute_prebuild_script() - # Create the Lambda Zip - self.create_package(output) - self.callback('zip') - size = human_size(os.path.getsize(self.zip_path)) - click.echo(click.style("Package created", fg="green", bold=True) + ": " + click.style(self.zip_path, bold=True) + " (" + size + ")") - - def template(self, lambda_arn, role_arn, output=None, json=False): - """ - Only build the template file. - """ - - if not lambda_arn: - raise ClickException("Lambda ARN is required to template.") - - if not role_arn: - raise ClickException("Role ARN is required to template.") - - self.zappa.credentials_arn = role_arn - - # Create the template! 
- template = self.zappa.create_stack_template( - lambda_arn=lambda_arn, - lambda_name=self.lambda_name, - api_key_required=self.api_key_required, - iam_authorization=self.iam_authorization, - authorizer=self.authorizer, - cors_options=self.cors, - description=self.apigateway_description, - endpoint_configuration=self.endpoint_configuration - ) - - if not output: - template_file = self.lambda_name + '-template-' + str(int(time.time())) + '.json' - else: - template_file = output - with open(template_file, 'wb') as out: - out.write(bytes(template.to_json(indent=None, separators=(',',':')), "utf-8")) - - if not json: - click.echo(click.style("Template created", fg="green", bold=True) + ": " + click.style(template_file, bold=True)) - else: - with open(template_file, 'r') as out: - print(out.read()) - - def deploy(self, source_zip=None): - """ - Package your project, upload it to S3, register the Lambda function - and create the API Gateway routes. - - """ - - if not source_zip: - # Make sure we're in a venv. - self.check_venv() - - # Execute the prebuild script - if self.prebuild_script: - self.execute_prebuild_script() - - # Make sure this isn't already deployed. 
- deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name) - if len(deployed_versions) > 0: - raise ClickException("This application is " + click.style("already deployed", fg="red") + - " - did you mean to call " + click.style("update", bold=True) + "?") - - # Make sure the necessary IAM execution roles are available - if self.manage_roles: - try: - self.zappa.create_iam_roles() - except botocore.client.ClientError as ce: - raise ClickException( - click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!\n" + - "You may " + click.style("lack the necessary AWS permissions", bold=True) + - " to automatically manage a Zappa execution role.\n" + - click.style("Exception reported by AWS:", bold=True) + format(ce) + '\n' + - "To fix this, see here: " + - click.style( - "https://github.com/Miserlou/Zappa#custom-aws-iam-roles-and-policies-for-deployment", - bold=True) - + '\n') - - # Create the Lambda Zip - self.create_package() - self.callback('zip') - - # Upload it to S3 - success = self.zappa.upload_to_s3( - self.zip_path, self.s3_bucket_name, disable_progress=self.disable_progress) - if not success: # pragma: no cover - raise ClickException("Unable to upload to S3. Quitting.") - - # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip - if self.stage_config.get('slim_handler', False): - # https://github.com/Miserlou/Zappa/issues/510 - success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name, disable_progress=self.disable_progress) - if not success: # pragma: no cover - raise ClickException("Unable to upload handler to S3. 
Quitting.") - - # Copy the project zip to the current project zip - current_project_name = '{0!s}_{1!s}_current_project.tar.gz'.format(self.api_stage, self.project_name) - success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name, - bucket_name=self.s3_bucket_name) - if not success: # pragma: no cover - raise ClickException("Unable to copy the zip to be the current project. Quitting.") - - handler_file = self.handler_path - else: - handler_file = self.zip_path - - # Fixes https://github.com/Miserlou/Zappa/issues/613 - try: - self.lambda_arn = self.zappa.get_lambda_function( - function_name=self.lambda_name) - except botocore.client.ClientError: - # Register the Lambda function with that zip as the source - # You'll also need to define the path to your lambda_handler code. - kwargs = dict( - handler=self.lambda_handler, - description=self.lambda_description, - vpc_config=self.vpc_config, - dead_letter_config=self.dead_letter_config, - timeout=self.timeout_seconds, - memory_size=self.memory_size, - runtime=self.runtime, - aws_environment_variables=self.aws_environment_variables, - aws_kms_key_arn=self.aws_kms_key_arn, - use_alb=self.use_alb, - layers=self.layers, - concurrency=self.lambda_concurrency, - ) - if source_zip and source_zip.startswith('s3://'): - bucket, key_name = parse_s3_url(source_zip) - kwargs['function_name'] = self.lambda_name - kwargs['bucket'] = bucket - kwargs['s3_key'] = key_name - elif source_zip and not source_zip.startswith('s3://'): - with open(source_zip, mode='rb') as fh: - byte_stream = fh.read() - kwargs['function_name'] = self.lambda_name - kwargs['local_zip'] = byte_stream - else: - kwargs['function_name'] = self.lambda_name - kwargs['bucket'] = self.s3_bucket_name - kwargs['s3_key'] = handler_file - - self.lambda_arn = self.zappa.create_lambda_function(**kwargs) - - # Schedule events for this deployment - self.schedule() - - endpoint_url = '' - deployment_string = click.style("Deployment 
complete", fg="green", bold=True) + "!" - - if self.use_alb: - kwargs = dict( - lambda_arn=self.lambda_arn, - lambda_name=self.lambda_name, - alb_vpc_config=self.alb_vpc_config, - timeout=self.timeout_seconds - ) - self.zappa.deploy_lambda_alb(**kwargs) - - if self.use_apigateway: - - # Create and configure the API Gateway - template = self.zappa.create_stack_template( - lambda_arn=self.lambda_arn, - lambda_name=self.lambda_name, - api_key_required=self.api_key_required, - iam_authorization=self.iam_authorization, - authorizer=self.authorizer, - cors_options=self.cors, - description=self.apigateway_description, - endpoint_configuration=self.endpoint_configuration - ) - - self.zappa.update_stack( - self.lambda_name, - self.s3_bucket_name, - wait=True, - disable_progress=self.disable_progress - ) - - api_id = self.zappa.get_api_id(self.lambda_name) - - # Add binary support - if self.binary_support: - self.zappa.add_binary_support(api_id=api_id, cors=self.cors) - - # Add payload compression - if self.stage_config.get('payload_compression', True): - self.zappa.add_api_compression( - api_id=api_id, - min_compression_size=self.stage_config.get('payload_minimum_compression_size', 0)) - - # Deploy the API! - endpoint_url = self.deploy_api_gateway(api_id) - deployment_string = deployment_string + ": {}".format(endpoint_url) - - # Create/link API key - if self.api_key_required: - if self.api_key is None: - self.zappa.create_api_key(api_id=api_id, stage_name=self.api_stage) - else: - self.zappa.add_api_stage_to_api_key(api_key=self.api_key, api_id=api_id, stage_name=self.api_stage) - - if self.stage_config.get('touch', True): - self.touch_endpoint(endpoint_url) - - # Finally, delete the local copy our zip package - if not source_zip: - if self.stage_config.get('delete_local_zip', True): - self.remove_local_zip() - - # Remove the project zip from S3. 
- if not source_zip: - self.remove_uploaded_zip() - - self.callback('post') - - click.echo(deployment_string) - - def update(self, source_zip=None, no_upload=False): - """ - Repackage and update the function code. - """ - - if not source_zip: - # Make sure we're in a venv. - self.check_venv() - - # Execute the prebuild script - if self.prebuild_script: - self.execute_prebuild_script() - - # Temporary version check - try: - updated_time = 1472581018 - function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name) - conf = function_response['Configuration'] - last_updated = parser.parse(conf['LastModified']) - last_updated_unix = time.mktime(last_updated.timetuple()) - except botocore.exceptions.BotoCoreError as e: - click.echo(click.style(type(e).__name__, fg="red") + ": " + e.args[0]) - sys.exit(-1) - except Exception as e: - click.echo(click.style("Warning!", fg="red") + " Couldn't get function " + self.lambda_name + - " in " + self.zappa.aws_region + " - have you deployed yet?") - sys.exit(-1) - - if last_updated_unix <= updated_time: - click.echo(click.style("Warning!", fg="red") + - " You may have upgraded Zappa since deploying this application. 
You will need to " + - click.style("redeploy", bold=True) + " for this deployment to work properly!") - - # Make sure the necessary IAM execution roles are available - if self.manage_roles: - try: - self.zappa.create_iam_roles() - except botocore.client.ClientError: - click.echo(click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!") - click.echo("You may " + click.style("lack the necessary AWS permissions", bold=True) + - " to automatically manage a Zappa execution role.") - click.echo("To fix this, see here: " + - click.style("https://github.com/Miserlou/Zappa#custom-aws-iam-roles-and-policies-for-deployment", - bold=True)) - sys.exit(-1) - - # Create the Lambda Zip, - if not no_upload: - self.create_package() - self.callback('zip') - - # Upload it to S3 - if not no_upload: - success = self.zappa.upload_to_s3(self.zip_path, self.s3_bucket_name, disable_progress=self.disable_progress) - if not success: # pragma: no cover - raise ClickException("Unable to upload project to S3. Quitting.") - - # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip - if self.stage_config.get('slim_handler', False): - # https://github.com/Miserlou/Zappa/issues/510 - success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name, disable_progress=self.disable_progress) - if not success: # pragma: no cover - raise ClickException("Unable to upload handler to S3. Quitting.") - - # Copy the project zip to the current project zip - current_project_name = '{0!s}_{1!s}_current_project.tar.gz'.format(self.api_stage, self.project_name) - success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name, - bucket_name=self.s3_bucket_name) - if not success: # pragma: no cover - raise ClickException("Unable to copy the zip to be the current project. 
Quitting.") - - handler_file = self.handler_path - else: - handler_file = self.zip_path - - # Register the Lambda function with that zip as the source - # You'll also need to define the path to your lambda_handler code. - kwargs = dict( - bucket=self.s3_bucket_name, - function_name=self.lambda_name, - num_revisions=self.num_retained_versions, - concurrency=self.lambda_concurrency, - ) - if source_zip and source_zip.startswith('s3://'): - bucket, key_name = parse_s3_url(source_zip) - kwargs.update(dict( - bucket=bucket, - s3_key=key_name - )) - self.lambda_arn = self.zappa.update_lambda_function(**kwargs) - elif source_zip and not source_zip.startswith('s3://'): - with open(source_zip, mode='rb') as fh: - byte_stream = fh.read() - kwargs['local_zip'] = byte_stream - self.lambda_arn = self.zappa.update_lambda_function(**kwargs) - else: - if not no_upload: - kwargs['s3_key'] = handler_file - self.lambda_arn = self.zappa.update_lambda_function(**kwargs) - - # Remove the uploaded zip from S3, because it is now registered.. - if not source_zip and not no_upload: - self.remove_uploaded_zip() - - # Update the configuration, in case there are changes. 
- self.lambda_arn = self.zappa.update_lambda_configuration( - lambda_arn=self.lambda_arn, - function_name=self.lambda_name, - handler=self.lambda_handler, - description=self.lambda_description, - vpc_config=self.vpc_config, - timeout=self.timeout_seconds, - memory_size=self.memory_size, - runtime=self.runtime, - aws_environment_variables=self.aws_environment_variables, - aws_kms_key_arn=self.aws_kms_key_arn, - layers=self.layers - ) - - # Finally, delete the local copy our zip package - if not source_zip and not no_upload: - if self.stage_config.get('delete_local_zip', True): - self.remove_local_zip() - - if self.use_apigateway: - - self.zappa.create_stack_template( - lambda_arn=self.lambda_arn, - lambda_name=self.lambda_name, - api_key_required=self.api_key_required, - iam_authorization=self.iam_authorization, - authorizer=self.authorizer, - cors_options=self.cors, - description=self.apigateway_description, - endpoint_configuration=self.endpoint_configuration - ) - self.zappa.update_stack( - self.lambda_name, - self.s3_bucket_name, - wait=True, - update_only=True, - disable_progress=self.disable_progress) - - api_id = self.zappa.get_api_id(self.lambda_name) - - # Update binary support - if self.binary_support: - self.zappa.add_binary_support(api_id=api_id, cors=self.cors) - else: - self.zappa.remove_binary_support(api_id=api_id, cors=self.cors) - - if self.stage_config.get('payload_compression', True): - self.zappa.add_api_compression( - api_id=api_id, - min_compression_size=self.stage_config.get('payload_minimum_compression_size', 0)) - else: - self.zappa.remove_api_compression(api_id=api_id) - - # It looks a bit like we might actually be using this just to get the URL, - # but we're also updating a few of the APIGW settings. 
- endpoint_url = self.deploy_api_gateway(api_id) - - if self.stage_config.get('domain', None): - endpoint_url = self.stage_config.get('domain') - - else: - endpoint_url = None - - self.schedule() - - # Update any cognito pool with the lambda arn - # do this after schedule as schedule clears the lambda policy and we need to add one - self.update_cognito_triggers() - - self.callback('post') - - if endpoint_url and 'https://' not in endpoint_url: - endpoint_url = 'https://' + endpoint_url - - if self.base_path: - endpoint_url += '/' + self.base_path - - deployed_string = "Your updated Zappa deployment is " + click.style("live", fg='green', bold=True) + "!" - if self.use_apigateway: - deployed_string = deployed_string + ": " + click.style("{}".format(endpoint_url), bold=True) - - api_url = None - if endpoint_url and 'amazonaws.com' not in endpoint_url: - api_url = self.zappa.get_api_url( - self.lambda_name, - self.api_stage) - - if endpoint_url != api_url: - deployed_string = deployed_string + " (" + api_url + ")" - - if self.stage_config.get('touch', True): - if api_url: - self.touch_endpoint(api_url) - elif endpoint_url: - self.touch_endpoint(endpoint_url) - - click.echo(deployed_string) - - def rollback(self, revision): - """ - Rollsback the currently deploy lambda code to a previous revision. - """ - - print("Rolling back..") - - self.zappa.rollback_lambda_function_version( - self.lambda_name, versions_back=revision) - print("Done!") - - def tail(self, since, filter_pattern, limit=10000, keep_open=True, colorize=True, http=False, non_http=False, force_colorize=False): - """ - Tail this function's logs. 
- - if keep_open, do so repeatedly, printing any new logs - """ - - try: - since_stamp = string_to_timestamp(since) - - last_since = since_stamp - while True: - new_logs = self.zappa.fetch_logs( - self.lambda_name, - start_time=since_stamp, - limit=limit, - filter_pattern=filter_pattern, - ) - - new_logs = [ e for e in new_logs if e['timestamp'] > last_since ] - self.print_logs(new_logs, colorize, http, non_http, force_colorize) - - if not keep_open: - break - if new_logs: - last_since = new_logs[-1]['timestamp'] - time.sleep(1) - except KeyboardInterrupt: # pragma: no cover - # Die gracefully - try: - sys.exit(0) - except SystemExit: - os._exit(130) - - def undeploy(self, no_confirm=False, remove_logs=False): - """ - Tear down an existing deployment. - """ - - if not no_confirm: # pragma: no cover - confirm = input("Are you sure you want to undeploy? [y/n] ") - if confirm != 'y': - return - - if self.use_alb: - self.zappa.undeploy_lambda_alb(self.lambda_name) - - if self.use_apigateway: - if remove_logs: - self.zappa.remove_api_gateway_logs(self.lambda_name) - - domain_name = self.stage_config.get('domain', None) - base_path = self.stage_config.get('base_path', None) - - # Only remove the api key when not specified - if self.api_key_required and self.api_key is None: - api_id = self.zappa.get_api_id(self.lambda_name) - self.zappa.remove_api_key(api_id, self.api_stage) - - gateway_id = self.zappa.undeploy_api_gateway( - self.lambda_name, - domain_name=domain_name, - base_path=base_path - ) - - self.unschedule() # removes event triggers, including warm up event. 
- - self.zappa.delete_lambda_function(self.lambda_name) - if remove_logs: - self.zappa.remove_lambda_function_logs(self.lambda_name) - - click.echo(click.style("Done", fg="green", bold=True) + "!") - - def update_cognito_triggers(self): - """ - Update any cognito triggers - """ - if self.cognito: - user_pool = self.cognito.get('user_pool') - triggers = self.cognito.get('triggers', []) - lambda_configs = set() - for trigger in triggers: - lambda_configs.add(trigger['source'].split('_')[0]) - self.zappa.update_cognito(self.lambda_name, user_pool, lambda_configs, self.lambda_arn) - - def schedule(self): - """ - Given a a list of functions and a schedule to execute them, - setup up regular execution. - - """ - events = self.stage_config.get('events', []) - - if events: - if not isinstance(events, list): # pragma: no cover - print("Events must be supplied as a list.") - return - - for event in events: - self.collision_warning(event.get('function')) - - if self.stage_config.get('keep_warm', True): - if not events: - events = [] - - keep_warm_rate = self.stage_config.get('keep_warm_expression', "rate(4 minutes)") - events.append({'name': 'zappa-keep-warm', - 'function': 'handler.keep_warm_callback', - 'expression': keep_warm_rate, - 'description': 'Zappa Keep Warm - {}'.format(self.lambda_name)}) - - if events: - try: - function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name) - except botocore.exceptions.ClientError as e: # pragma: no cover - click.echo(click.style("Function does not exist", fg="yellow") + ", please " + - click.style("deploy", bold=True) + "first. 
Ex:" + - click.style("zappa deploy {}.".format(self.api_stage), bold=True)) - sys.exit(-1) - - print("Scheduling..") - self.zappa.schedule_events( - lambda_arn=function_response['Configuration']['FunctionArn'], - lambda_name=self.lambda_name, - events=events - ) - - # Add async tasks SNS - if self.stage_config.get('async_source', None) == 'sns' \ - and self.stage_config.get('async_resources', True): - self.lambda_arn = self.zappa.get_lambda_function( - function_name=self.lambda_name) - topic_arn = self.zappa.create_async_sns_topic( - lambda_name=self.lambda_name, - lambda_arn=self.lambda_arn - ) - click.echo('SNS Topic created: %s' % topic_arn) - - # Add async tasks DynamoDB - table_name = self.stage_config.get('async_response_table', False) - read_capacity = self.stage_config.get('async_response_table_read_capacity', 1) - write_capacity = self.stage_config.get('async_response_table_write_capacity', 1) - if table_name and self.stage_config.get('async_resources', True): - created, response_table = self.zappa.create_async_dynamodb_table( - table_name, read_capacity, write_capacity) - if created: - click.echo('DynamoDB table created: %s' % table_name) - else: - click.echo('DynamoDB table exists: %s' % table_name) - provisioned_throughput = response_table['Table']['ProvisionedThroughput'] - if provisioned_throughput['ReadCapacityUnits'] != read_capacity or \ - provisioned_throughput['WriteCapacityUnits'] != write_capacity: - click.echo(click.style( - "\nWarning! Existing DynamoDB table ({}) does not match configured capacity.\n".format(table_name), - fg='red' - )) - - def unschedule(self): - """ - Given a a list of scheduled functions, - tear down their regular execution. - - """ - - # Run even if events are not defined to remove previously existing ones (thus default to []). 
- events = self.stage_config.get('events', []) - - if not isinstance(events, list): # pragma: no cover - print("Events must be supplied as a list.") - return - - function_arn = None - try: - function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name) - function_arn = function_response['Configuration']['FunctionArn'] - except botocore.exceptions.ClientError as e: # pragma: no cover - raise ClickException("Function does not exist, you should deploy first. Ex: zappa deploy {}. " - "Proceeding to unschedule CloudWatch based events.".format(self.api_stage)) - - print("Unscheduling..") - self.zappa.unschedule_events( - lambda_name=self.lambda_name, - lambda_arn=function_arn, - events=events, - ) - - # Remove async task SNS - if self.stage_config.get('async_source', None) == 'sns' \ - and self.stage_config.get('async_resources', True): - removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name) - click.echo('SNS Topic removed: %s' % ', '.join(removed_arns)) - - def invoke(self, function_name, raw_python=False, command=None, no_color=False): - """ - Invoke a remote function. 
- """ - - # There are three likely scenarios for 'command' here: - # command, which is a modular function path - # raw_command, which is a string of python to execute directly - # manage, which is a Django-specific management command invocation - key = command if command is not None else 'command' - if raw_python: - command = {'raw_command': function_name} - else: - command = {key: function_name} - - # Can't use hjson - import json as json - - response = self.zappa.invoke_lambda_function( - self.lambda_name, - json.dumps(command), - invocation_type='RequestResponse', - ) - - if 'LogResult' in response: - if no_color: - print(base64.b64decode(response['LogResult'])) - else: - decoded = base64.b64decode(response['LogResult']).decode() - formatted = self.format_invoke_command(decoded) - colorized = self.colorize_invoke_command(formatted) - print(colorized) - else: - print(response) - - # For a successful request FunctionError is not in response. - # https://github.com/Miserlou/Zappa/pull/1254/ - if 'FunctionError' in response: - raise ClickException( - "{} error occurred while invoking command.".format(response['FunctionError']) -) - - def format_invoke_command(self, string): - """ - Formats correctly the string output from the invoke() method, - replacing line breaks and tabs when necessary. - """ - - string = string.replace('\\n', '\n') - - formated_response = '' - for line in string.splitlines(): - if line.startswith('REPORT'): - line = line.replace('\t', '\n') - if line.startswith('[DEBUG]'): - line = line.replace('\t', ' ') - formated_response += line + '\n' - formated_response = formated_response.replace('\n\n', '\n') - - return formated_response - - def colorize_invoke_command(self, string): - """ - Apply various heuristics to return a colorized version the invoke - command string. If these fail, simply return the string in plaintext. - - Inspired by colorize_log_entry(). 
- """ - - final_string = string - - try: - - # Line headers - try: - for token in ['START', 'END', 'REPORT', '[DEBUG]']: - if token in final_string: - format_string = '[{}]' - # match whole words only - pattern = r'\b{}\b' - if token == '[DEBUG]': - format_string = '{}' - pattern = re.escape(token) - repl = click.style( - format_string.format(token), - bold=True, - fg='cyan' - ) - final_string = re.sub( - pattern.format(token), repl, final_string - ) - except Exception: # pragma: no cover - pass - - # Green bold Tokens - try: - for token in [ - 'Zappa Event:', - 'RequestId:', - 'Version:', - 'Duration:', - 'Billed', - 'Memory Size:', - 'Max Memory Used:' - ]: - if token in final_string: - final_string = final_string.replace(token, click.style( - token, - bold=True, - fg='green' - )) - except Exception: # pragma: no cover - pass - - # UUIDs - for token in final_string.replace('\t', ' ').split(' '): - try: - if token.count('-') == 4 and token.replace('-', '').isalnum(): - final_string = final_string.replace( - token, - click.style(token, fg='magenta') - ) - except Exception: # pragma: no cover - pass - - return final_string - except Exception: - return string - - def status(self, return_json=False): - """ - Describe the status of the current deployment. - """ - - def tabular_print(title, value): - """ - Convenience function for priting formatted table items. - """ - click.echo('%-*s%s' % (32, click.style("\t" + title, fg='green') + ':', str(value))) - return - - # Lambda Env Details - lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name) - - if not lambda_versions: - raise ClickException(click.style("No Lambda %s detected in %s - have you deployed yet?" 
% - (self.lambda_name, self.zappa.aws_region), fg='red')) - - status_dict = collections.OrderedDict() - status_dict["Lambda Versions"] = len(lambda_versions) - function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name) - conf = function_response['Configuration'] - self.lambda_arn = conf['FunctionArn'] - status_dict["Lambda Name"] = self.lambda_name - status_dict["Lambda ARN"] = self.lambda_arn - status_dict["Lambda Role ARN"] = conf['Role'] - status_dict["Lambda Handler"] = conf['Handler'] - status_dict["Lambda Code Size"] = conf['CodeSize'] - status_dict["Lambda Version"] = conf['Version'] - status_dict["Lambda Last Modified"] = conf['LastModified'] - status_dict["Lambda Memory Size"] = conf['MemorySize'] - status_dict["Lambda Timeout"] = conf['Timeout'] - status_dict["Lambda Runtime"] = conf['Runtime'] - if 'VpcConfig' in conf.keys(): - status_dict["Lambda VPC ID"] = conf.get('VpcConfig', {}).get('VpcId', 'Not assigned') - else: - status_dict["Lambda VPC ID"] = None - - # Calculated statistics - try: - function_invocations = self.zappa.cloudwatch.get_metric_statistics( - Namespace='AWS/Lambda', - MetricName='Invocations', - StartTime=datetime.utcnow()-timedelta(days=1), - EndTime=datetime.utcnow(), - Period=1440, - Statistics=['Sum'], - Dimensions=[{'Name': 'FunctionName', - 'Value': '{}'.format(self.lambda_name)}] - )['Datapoints'][0]['Sum'] - except Exception as e: - function_invocations = 0 - try: - function_errors = self.zappa.cloudwatch.get_metric_statistics( - Namespace='AWS/Lambda', - MetricName='Errors', - StartTime=datetime.utcnow()-timedelta(days=1), - EndTime=datetime.utcnow(), - Period=1440, - Statistics=['Sum'], - Dimensions=[{'Name': 'FunctionName', - 'Value': '{}'.format(self.lambda_name)}] - )['Datapoints'][0]['Sum'] - except Exception as e: - function_errors = 0 - - try: - error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100) - except: - error_rate = "Error calculating" - 
status_dict["Invocations (24h)"] = int(function_invocations) - status_dict["Errors (24h)"] = int(function_errors) - status_dict["Error Rate (24h)"] = error_rate - - # URLs - if self.use_apigateway: - api_url = self.zappa.get_api_url( - self.lambda_name, - self.api_stage) - - status_dict["API Gateway URL"] = api_url - - # Api Keys - api_id = self.zappa.get_api_id(self.lambda_name) - for api_key in self.zappa.get_api_keys(api_id, self.api_stage): - status_dict["API Gateway x-api-key"] = api_key - - # There literally isn't a better way to do this. - # AWS provides no way to tie a APIGW domain name to its Lambda function. - domain_url = self.stage_config.get('domain', None) - base_path = self.stage_config.get('base_path', None) - if domain_url: - status_dict["Domain URL"] = 'https://' + domain_url - if base_path: - status_dict["Domain URL"] += '/' + base_path - else: - status_dict["Domain URL"] = "None Supplied" - - # Scheduled Events - event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn) - status_dict["Num. Event Rules"] = len(event_rules) - if len(event_rules) > 0: - status_dict['Events'] = [] - for rule in event_rules: - event_dict = {} - rule_name = rule['Name'] - event_dict["Event Rule Name"] = rule_name - event_dict["Event Rule Schedule"] = rule.get('ScheduleExpression', None) - event_dict["Event Rule State"] = rule.get('State', None).title() - event_dict["Event Rule ARN"] = rule.get('Arn', None) - status_dict['Events'].append(event_dict) - - if return_json: - # Putting the status in machine readable format - # https://github.com/Miserlou/Zappa/issues/407 - print(json.dumpsJSON(status_dict)) - else: - click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ") - for k, v in status_dict.items(): - if k == 'Events': - # Events are a list of dicts - for event in v: - for item_k, item_v in event.items(): - tabular_print(item_k, item_v) - else: - tabular_print(k, v) - - # TODO: S3/SQS/etc. type events? 
- - return True - - def check_stage_name(self, stage_name): - """ - Make sure the stage name matches the AWS-allowed pattern - - (calls to apigateway_client.create_deployment, will fail with error - message "ClientError: An error occurred (BadRequestException) when - calling the CreateDeployment operation: Stage name only allows - a-zA-Z0-9_" if the pattern does not match) - """ - if self.stage_name_env_pattern.match(stage_name): - return True - raise ValueError("AWS requires stage name to match a-zA-Z0-9_") - - def check_environment(self, environment): - """ - Make sure the environment contains only strings - - (since putenv needs a string) - """ - - non_strings = [] - for (k,v) in environment.items(): - if not isinstance(v, basestring): - non_strings.append(k) - if non_strings: - raise ValueError("The following environment variables are not strings: {}".format(", ".join(non_strings))) - else: - return True - - def init(self, settings_file="zappa_settings.json"): - """ - Initialize a new Zappa project by creating a new zappa_settings.json in a guided process. - - This should probably be broken up into few separate componants once it's stable. - Testing these inputs requires monkeypatching with mock, which isn't pretty. - - """ - - # Make sure we're in a venv. - self.check_venv() - - # Ensure that we don't already have a zappa_settings file. - if os.path.isfile(settings_file): - raise ClickException("This project already has a " + click.style("{0!s} file".format(settings_file), fg="red", bold=True) + "!") - - # Explain system. 
- click.echo(click.style("""\n███████╗ █████╗ ██████╗ ██████╗ █████╗ -╚══███╔╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗ - ███╔╝ ███████║██████╔╝██████╔╝███████║ - ███╔╝ ██╔══██║██╔═══╝ ██╔═══╝ ██╔══██║ -███████╗██║ ██║██║ ██║ ██║ ██║ -╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝\n""", fg='green', bold=True)) - - click.echo(click.style("Welcome to ", bold=True) + click.style("Zappa", fg='green', bold=True) + click.style("!\n", bold=True)) - click.echo(click.style("Zappa", bold=True) + " is a system for running server-less Python web applications" - " on AWS Lambda and AWS API Gateway.") - click.echo("This `init` command will help you create and configure your new Zappa deployment.") - click.echo("Let's get started!\n") - - # Create Env - while True: - click.echo("Your Zappa configuration can support multiple production stages, like '" + - click.style("dev", bold=True) + "', '" + click.style("staging", bold=True) + "', and '" + - click.style("production", bold=True) + "'.") - env = input("What do you want to call this environment (default 'dev'): ") or "dev" - try: - self.check_stage_name(env) - break - except ValueError: - click.echo(click.style("Stage names must match a-zA-Z0-9_", fg="red")) - - # Detect AWS profiles and regions - # If anyone knows a more straightforward way to easily detect and parse AWS profiles I'm happy to change this, feels like a hack - session = botocore.session.Session() - config = session.full_config - profiles = config.get("profiles", {}) - profile_names = list(profiles.keys()) - - click.echo("\nAWS Lambda and API Gateway are only available in certain regions. "\ - "Let's check to make sure you have a profile set up in one that will work.") - - if not profile_names: - profile_name, profile = None, None - click.echo("We couldn't find an AWS profile to use. Before using Zappa, you'll need to set one up. 
See here for more info: {}" - .format(click.style(BOTO3_CONFIG_DOCS_URL, fg="blue", underline=True))) - elif len(profile_names) == 1: - profile_name = profile_names[0] - profile = profiles[profile_name] - click.echo("Okay, using profile {}!".format(click.style(profile_name, bold=True))) - else: - if "default" in profile_names: - default_profile = [p for p in profile_names if p == "default"][0] - else: - default_profile = profile_names[0] - - while True: - profile_name = input("We found the following profiles: {}, and {}. "\ - "Which would you like us to use? (default '{}'): " - .format( - ', '.join(profile_names[:-1]), - profile_names[-1], - default_profile - )) or default_profile - if profile_name in profiles: - profile = profiles[profile_name] - break - else: - click.echo("Please enter a valid name for your AWS profile.") - - profile_region = profile.get("region") if profile else None - - # Create Bucket - click.echo("\nYour Zappa deployments will need to be uploaded to a " + click.style("private S3 bucket", bold=True) + ".") - click.echo("If you don't have a bucket yet, we'll create one for you too.") - default_bucket = "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9)) - while True: - bucket = input("What do you want to call your bucket? (default '%s'): " % default_bucket) or default_bucket - - if is_valid_bucket_name(bucket): - break - - click.echo(click.style("Invalid bucket name!", bold=True)) - click.echo("S3 buckets must be named according to the following rules:") - click.echo("""* Bucket names must be unique across all existing bucket names in Amazon S3. -* Bucket names must comply with DNS naming conventions. -* Bucket names must be at least 3 and no more than 63 characters long. -* Bucket names must not contain uppercase characters or underscores. -* Bucket names must start with a lowercase letter or number. -* Bucket names must be a series of one or more labels. 
Adjacent labels are separated - by a single period (.). Bucket names can contain lowercase letters, numbers, and - hyphens. Each label must start and end with a lowercase letter or a number. -* Bucket names must not be formatted as an IP address (for example, 192.168.5.4). -* When you use virtual hosted–style buckets with Secure Sockets Layer (SSL), the SSL - wildcard certificate only matches buckets that don't contain periods. To work around - this, use HTTP or write your own certificate verification logic. We recommend that - you do not use periods (".") in bucket names when using virtual hosted–style buckets. -""") - - - # Detect Django/Flask - try: # pragma: no cover - import django - has_django = True - except ImportError as e: - has_django = False - - try: # pragma: no cover - import flask - has_flask = True - except ImportError as e: - has_flask = False - - print('') - # App-specific - if has_django: # pragma: no cover - click.echo("It looks like this is a " + click.style("Django", bold=True) + " application!") - click.echo("What is the " + click.style("module path", bold=True) + " to your projects's Django settings?") - django_settings = None - - matches = detect_django_settings() - while django_settings in [None, '']: - if matches: - click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True)) - django_settings = input("Where are your project's settings? 
(default '%s'): " % matches[0]) or matches[0] - else: - click.echo("(This will likely be something like 'your_project.settings')") - django_settings = input("Where are your project's settings?: ") - django_settings = django_settings.replace("'", "") - django_settings = django_settings.replace('"', "") - else: - matches = None - if has_flask: - click.echo("It looks like this is a " + click.style("Flask", bold=True) + " application.") - matches = detect_flask_apps() - click.echo("What's the " + click.style("modular path", bold=True) + " to your app's function?") - click.echo("This will likely be something like 'your_module.app'.") - app_function = None - while app_function in [None, '']: - if matches: - click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True)) - app_function = input("Where is your app's function? (default '%s'): " % matches[0]) or matches[0] - else: - app_function = input("Where is your app's function?: ") - app_function = app_function.replace("'", "") - app_function = app_function.replace('"', "") - - # TODO: Create VPC? - # Memory size? Time limit? - # Domain? LE keys? Region? - # 'Advanced Settings' mode? - - # Globalize - click.echo("\nYou can optionally deploy to " + click.style("all available regions", bold=True) + " in order to provide fast global service.") - click.echo("If you are using Zappa for the first time, you probably don't want to do this!") - global_deployment = False - while True: - global_type = input("Would you like to deploy this application " + click.style("globally", bold=True) + "? 
(default 'n') [y/n/(p)rimary]: ") - if not global_type: - break - if global_type.lower() in ["y", "yes", "p", "primary"]: - global_deployment = True - break - if global_type.lower() in ["n", "no"]: - global_deployment = False - break - - # The given environment name - zappa_settings = { - env: { - 'profile_name': profile_name, - 's3_bucket': bucket, - 'runtime': get_venv_from_python_version(), - 'project_name': self.get_project_name() - } - } - - if profile_region: - zappa_settings[env]['aws_region'] = profile_region - - if has_django: - zappa_settings[env]['django_settings'] = django_settings - else: - zappa_settings[env]['app_function'] = app_function - - # Global Region Deployment - if global_deployment: - additional_regions = [r for r in API_GATEWAY_REGIONS if r != profile_region] - # Create additional stages - if global_type.lower() in ["p", "primary"]: - additional_regions = [r for r in additional_regions if '-1' in r] - - for region in additional_regions: - env_name = env + '_' + region.replace('-', '_') - g_env = { - env_name: { - 'extends': env, - 'aws_region': region - } - } - zappa_settings.update(g_env) - - import json as json # hjson is fine for loading, not fine for writing. - zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4) - - click.echo("\nOkay, here's your " + click.style("zappa_settings.json", bold=True) + ":\n") - click.echo(click.style(zappa_settings_json, fg="yellow", bold=False)) - - confirm = input("\nDoes this look " + click.style("okay", bold=True, fg="green") + "? (default 'y') [y/n]: ") or 'yes' - if confirm[0] not in ['y', 'Y', 'yes', 'YES']: - click.echo("" + click.style("Sorry", bold=True, fg='red') + " to hear that! Please init again.") - return - - # Write - with open("zappa_settings.json", "w") as zappa_settings_file: - zappa_settings_file.write(zappa_settings_json) - - if global_deployment: - click.echo("\n" + click.style("Done", bold=True) + "! 
You can also " + click.style("deploy all", bold=True) + " by executing:\n") - click.echo(click.style("\t$ zappa deploy --all", bold=True)) - - click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n") - click.echo(click.style("\t$ zappa update --all", bold=True)) - else: - click.echo("\n" + click.style("Done", bold=True) + "! Now you can " + click.style("deploy", bold=True) + " your Zappa application by executing:\n") - click.echo(click.style("\t$ zappa deploy %s" % env, bold=True)) - - click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n") - click.echo(click.style("\t$ zappa update %s" % env, bold=True)) - - click.echo("\nTo learn more, check out our project page on " + click.style("GitHub", bold=True) + - " here: " + click.style("https://github.com/Miserlou/Zappa", fg="cyan", bold=True)) - click.echo("and stop by our " + click.style("Slack", bold=True) + " channel here: " + - click.style("https://zappateam.slack.com", fg="cyan", bold=True)) - click.echo("\nEnjoy!,") - click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!") - - return - - def certify(self, no_confirm=True, manual=False): - """ - Register or update a domain certificate for this env. - """ - - if not self.domain: - raise ClickException("Can't certify a domain without " + click.style("domain", fg="red", bold=True) + " configured!") - - if not no_confirm: # pragma: no cover - confirm = input("Are you sure you want to certify? [y/n] ") - if confirm != 'y': - return - - # Make sure this isn't already deployed. 
- deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name) - if len(deployed_versions) == 0: - raise ClickException("This application " + click.style("isn't deployed yet", fg="red") + - " - did you mean to call " + click.style("deploy", bold=True) + "?") - - - account_key_location = self.stage_config.get('lets_encrypt_key', None) - cert_location = self.stage_config.get('certificate', None) - cert_key_location = self.stage_config.get('certificate_key', None) - cert_chain_location = self.stage_config.get('certificate_chain', None) - cert_arn = self.stage_config.get('certificate_arn', None) - base_path = self.stage_config.get('base_path', None) - - # These are sensitive - certificate_body = None - certificate_private_key = None - certificate_chain = None - - # Prepare for custom Let's Encrypt - if not cert_location and not cert_arn: - if not account_key_location: - raise ClickException("Can't certify a domain without " + click.style("lets_encrypt_key", fg="red", bold=True) + - " or " + click.style("certificate", fg="red", bold=True)+ - " or " + click.style("certificate_arn", fg="red", bold=True) + " configured!") - - # Get install account_key to /tmp/account_key.pem - from .letsencrypt import gettempdir - if account_key_location.startswith('s3://'): - bucket, key_name = parse_s3_url(account_key_location) - self.zappa.s3_client.download_file(bucket, key_name, os.path.join(gettempdir(), 'account.key')) - else: - from shutil import copyfile - copyfile(account_key_location, os.path.join(gettempdir(), 'account.key')) - - # Prepare for Custom SSL - elif not account_key_location and not cert_arn: - if not cert_location or not cert_key_location or not cert_chain_location: - raise ClickException("Can't certify a domain without " + - click.style("certificate, certificate_key and certificate_chain", fg="red", bold=True) + " configured!") - - # Read the supplied certificates. 
- with open(cert_location) as f: - certificate_body = f.read() - - with open(cert_key_location) as f: - certificate_private_key = f.read() - - with open(cert_chain_location) as f: - certificate_chain = f.read() - - - click.echo("Certifying domain " + click.style(self.domain, fg="green", bold=True) + "..") - - # Get cert and update domain. - - # Let's Encrypt - if not cert_location and not cert_arn: - from .letsencrypt import get_cert_and_update_domain - cert_success = get_cert_and_update_domain( - self.zappa, - self.lambda_name, - self.api_stage, - self.domain, - manual - ) - - # Custom SSL / ACM - else: - route53 = self.stage_config.get('route53_enabled', True) - if not self.zappa.get_domain_name(self.domain, route53=route53): - dns_name = self.zappa.create_domain_name( - domain_name=self.domain, - certificate_name=self.domain + "-Zappa-Cert", - certificate_body=certificate_body, - certificate_private_key=certificate_private_key, - certificate_chain=certificate_chain, - certificate_arn=cert_arn, - lambda_name=self.lambda_name, - stage=self.api_stage, - base_path=base_path - ) - if route53: - self.zappa.update_route53_records(self.domain, dns_name) - print("Created a new domain name with supplied certificate. Please note that it can take up to 40 minutes for this domain to be " - "created and propagated through AWS, but it requires no further work on your part.") - else: - self.zappa.update_domain_name( - domain_name=self.domain, - certificate_name=self.domain + "-Zappa-Cert", - certificate_body=certificate_body, - certificate_private_key=certificate_private_key, - certificate_chain=certificate_chain, - certificate_arn=cert_arn, - lambda_name=self.lambda_name, - stage=self.api_stage, - route53=route53, - base_path=base_path - ) - - cert_success = True - - if cert_success: - click.echo("Certificate " + click.style("updated", fg="green", bold=True) + "!") - else: - click.echo(click.style("Failed", fg="red", bold=True) + " to generate or install certificate! 
:(") - click.echo("\n==============\n") - shamelessly_promote() - - ## - # Shell - ## - def shell(self): - """ - Spawn a debug shell. - """ - click.echo(click.style("NOTICE!", fg="yellow", bold=True) + " This is a " + click.style("local", fg="green", bold=True) + " shell, inside a " + click.style("Zappa", bold=True) + " object!") - self.zappa.shell() - return - - ## - # Utility - ## - - def callback(self, position): - """ - Allows the execution of custom code between creation of the zip file and deployment to AWS. - - :return: None - """ - - callbacks = self.stage_config.get('callbacks', {}) - callback = callbacks.get(position) - - if callback: - (mod_path, cb_func_name) = callback.rsplit('.', 1) - - try: # Prefer callback in working directory - if mod_path.count('.') >= 1: # Callback function is nested in a folder - (mod_folder_path, mod_name) = mod_path.rsplit('.', 1) - mod_folder_path_fragments = mod_folder_path.split('.') - working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments) - else: - mod_name = mod_path - working_dir = os.getcwd() - - working_dir_importer = pkgutil.get_importer(working_dir) - module_ = working_dir_importer.find_module(mod_name).load_module(mod_name) - - except (ImportError, AttributeError): - - try: # Callback func might be in virtualenv - module_ = importlib.import_module(mod_path) - except ImportError: # pragma: no cover - raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( - "import {position} callback ".format(position=position), - bold=True) + 'module: "{mod_path}"'.format(mod_path=click.style(mod_path, bold=True))) - - if not hasattr(module_, cb_func_name): # pragma: no cover - raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( - "find {position} callback ".format(position=position), bold=True) + 'function: "{cb_func_name}" '.format( - cb_func_name=click.style(cb_func_name, bold=True)) + 'in module "{mod_path}"'.format(mod_path=mod_path)) - - - cb_func = getattr(module_, 
cb_func_name) - cb_func(self) # Call the function passing self - - def check_for_update(self): - """ - Print a warning if there's a new Zappa version available. - """ - try: - version = pkg_resources.require("zappa")[0].version - updateable = check_new_version_available(version) - if updateable: - click.echo(click.style("Important!", fg="yellow", bold=True) + - " A new version of " + click.style("Zappa", bold=True) + " is available!") - click.echo("Upgrade with: " + click.style("pip install zappa --upgrade", bold=True)) - click.echo("Visit the project page on GitHub to see the latest changes: " + - click.style("https://github.com/Miserlou/Zappa", bold=True)) - except Exception as e: # pragma: no cover - print(e) - return - - def load_settings(self, settings_file=None, session=None): - """ - Load the local zappa_settings file. - - An existing boto session can be supplied, though this is likely for testing purposes. - - Returns the loaded Zappa object. - """ - - # Ensure we're passed a valid settings file. - if not settings_file: - settings_file = self.get_json_or_yaml_settings() - if not os.path.isfile(settings_file): - raise ClickException("Please configure your zappa_settings file.") - - # Load up file - self.load_settings_file(settings_file) - - # Make sure that the stages are valid names: - for stage_name in self.zappa_settings.keys(): - try: - self.check_stage_name(stage_name) - except ValueError: - raise ValueError("API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(stage_name)) - - # Make sure that this stage is our settings - if self.api_stage not in self.zappa_settings.keys(): - raise ClickException("Please define stage '{0!s}' in your Zappa settings.".format(self.api_stage)) - - # We need a working title for this project. Use one if supplied, else cwd dirname. 
- if 'project_name' in self.stage_config: # pragma: no cover - # If the name is invalid, this will throw an exception with message up stack - self.project_name = validate_name(self.stage_config['project_name']) - else: - self.project_name = self.get_project_name() - - # The name of the actual AWS Lambda function, ex, 'helloworld-dev' - # Assume that we already have have validated the name beforehand. - # Related: https://github.com/Miserlou/Zappa/pull/664 - # https://github.com/Miserlou/Zappa/issues/678 - # And various others from Slack. - self.lambda_name = slugify.slugify(self.project_name + '-' + self.api_stage) - - # Load stage-specific settings - self.s3_bucket_name = self.stage_config.get('s3_bucket', "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9))) - self.vpc_config = self.stage_config.get('vpc_config', {}) - self.memory_size = self.stage_config.get('memory_size', 512) - self.app_function = self.stage_config.get('app_function', None) - self.exception_handler = self.stage_config.get('exception_handler', None) - self.aws_region = self.stage_config.get('aws_region', None) - self.debug = self.stage_config.get('debug', True) - self.prebuild_script = self.stage_config.get('prebuild_script', None) - self.profile_name = self.stage_config.get('profile_name', None) - self.log_level = self.stage_config.get('log_level', "DEBUG") - self.domain = self.stage_config.get('domain', None) - self.base_path = self.stage_config.get('base_path', None) - self.timeout_seconds = self.stage_config.get('timeout_seconds', 30) - dead_letter_arn = self.stage_config.get('dead_letter_arn', '') - self.dead_letter_config = {'TargetArn': dead_letter_arn} if dead_letter_arn else {} - self.cognito = self.stage_config.get('cognito', None) - self.num_retained_versions = self.stage_config.get('num_retained_versions',None) - - # Check for valid values of num_retained_versions - if self.num_retained_versions is not None and type(self.num_retained_versions) 
is not int: - raise ClickException("Please supply either an integer or null for num_retained_versions in the zappa_settings.json. Found %s" % type(self.num_retained_versions)) - elif type(self.num_retained_versions) is int and self.num_retained_versions<1: - raise ClickException("The value for num_retained_versions in the zappa_settings.json should be greater than 0.") - - # Provide legacy support for `use_apigateway`, now `apigateway_enabled`. - # https://github.com/Miserlou/Zappa/issues/490 - # https://github.com/Miserlou/Zappa/issues/493 - self.use_apigateway = self.stage_config.get('use_apigateway', True) - if self.use_apigateway: - self.use_apigateway = self.stage_config.get('apigateway_enabled', True) - self.apigateway_description = self.stage_config.get('apigateway_description', None) - - self.lambda_handler = self.stage_config.get('lambda_handler', 'handler.lambda_handler') - # DEPRECATED. https://github.com/Miserlou/Zappa/issues/456 - self.remote_env_bucket = self.stage_config.get('remote_env_bucket', None) - self.remote_env_file = self.stage_config.get('remote_env_file', None) - self.remote_env = self.stage_config.get('remote_env', None) - self.settings_file = self.stage_config.get('settings_file', None) - self.django_settings = self.stage_config.get('django_settings', None) - self.manage_roles = self.stage_config.get('manage_roles', True) - self.binary_support = self.stage_config.get('binary_support', True) - self.api_key_required = self.stage_config.get('api_key_required', False) - self.api_key = self.stage_config.get('api_key') - self.endpoint_configuration = self.stage_config.get('endpoint_configuration', None) - self.iam_authorization = self.stage_config.get('iam_authorization', False) - self.cors = self.stage_config.get("cors", False) - self.lambda_description = self.stage_config.get('lambda_description', "Zappa Deployment") - self.lambda_concurrency = self.stage_config.get('lambda_concurrency', None) - self.environment_variables = 
self.stage_config.get('environment_variables', {}) - self.aws_environment_variables = self.stage_config.get('aws_environment_variables', {}) - self.check_environment(self.environment_variables) - self.authorizer = self.stage_config.get('authorizer', {}) - self.runtime = self.stage_config.get('runtime', get_runtime_from_python_version()) - self.aws_kms_key_arn = self.stage_config.get('aws_kms_key_arn', '') - self.context_header_mappings = self.stage_config.get('context_header_mappings', {}) - self.xray_tracing = self.stage_config.get('xray_tracing', False) - self.desired_role_arn = self.stage_config.get('role_arn') - self.layers = self.stage_config.get('layers', None) - - # Load ALB-related settings - self.use_alb = self.stage_config.get('alb_enabled', False) - self.alb_vpc_config = self.stage_config.get('alb_vpc_config', {}) - - # Additional tags - self.tags = self.stage_config.get('tags', {}) - - desired_role_name = self.lambda_name + "-ZappaLambdaExecutionRole" - if self.desired_role_arn: - desired_role_name = self.desired_role_arn.split('/')[-1] - self.zappa = Zappa( boto_session=session, - profile_name=self.profile_name, - aws_region=self.aws_region, - load_credentials=self.load_credentials, - desired_role_name=desired_role_name, - desired_role_arn=self.desired_role_arn, - runtime=self.runtime, - tags=self.tags, - endpoint_urls=self.stage_config.get('aws_endpoint_urls',{}), - xray_tracing=self.xray_tracing - ) - - for setting in CUSTOM_SETTINGS: - if setting in self.stage_config: - setting_val = self.stage_config[setting] - # Read the policy file contents. - if setting.endswith('policy'): - with open(setting_val, 'r') as f: - setting_val = f.read() - setattr(self.zappa, setting, setting_val) - - if self.app_function: - self.collision_warning(self.app_function) - if self.app_function[-3:] == '.py': - click.echo(click.style("Warning!", fg="red", bold=True) + - " Your app_function is pointing to a " + click.style("file and not a function", bold=True) + - "! 
It should probably be something like 'my_file.app', not 'my_file.py'!") - - return self.zappa - - def get_json_or_yaml_settings(self, settings_name="zappa_settings"): - """ - Return zappa_settings path as JSON or YAML (or TOML), as appropriate. - """ - zs_json = settings_name + ".json" - zs_yml = settings_name + ".yml" - zs_yaml = settings_name + ".yaml" - zs_toml = settings_name + ".toml" - - # Must have at least one - if not os.path.isfile(zs_json) \ - and not os.path.isfile(zs_yml) \ - and not os.path.isfile(zs_yaml) \ - and not os.path.isfile(zs_toml): - raise ClickException("Please configure a zappa_settings file or call `zappa init`.") - - # Prefer JSON - if os.path.isfile(zs_json): - settings_file = zs_json - elif os.path.isfile(zs_toml): - settings_file = zs_toml - elif os.path.isfile(zs_yml): - settings_file = zs_yml - else: - settings_file = zs_yaml - - return settings_file - - def load_settings_file(self, settings_file=None): - """ - Load our settings file. - """ - - if not settings_file: - settings_file = self.get_json_or_yaml_settings() - if not os.path.isfile(settings_file): - raise ClickException("Please configure your zappa_settings file or call `zappa init`.") - - path, ext = os.path.splitext(settings_file) - if ext == '.yml' or ext == '.yaml': - with open(settings_file) as yaml_file: - try: - self.zappa_settings = yaml.safe_load(yaml_file) - except ValueError: # pragma: no cover - raise ValueError("Unable to load the Zappa settings YAML. It may be malformed.") - elif ext == '.toml': - with open(settings_file) as toml_file: - try: - self.zappa_settings = toml.load(toml_file) - except ValueError: # pragma: no cover - raise ValueError("Unable to load the Zappa settings TOML. It may be malformed.") - else: - with open(settings_file) as json_file: - try: - self.zappa_settings = json.load(json_file) - except ValueError: # pragma: no cover - raise ValueError("Unable to load the Zappa settings JSON. 
It may be malformed.") - - def create_package(self, output=None): - """ - Ensure that the package can be properly configured, - and then create it. - - """ - - # Create the Lambda zip package (includes project and virtualenvironment) - # Also define the path the handler file so it can be copied to the zip - # root for Lambda. - current_file = os.path.dirname(os.path.abspath( - inspect.getfile(inspect.currentframe()))) - handler_file = os.sep.join(current_file.split(os.sep)[0:]) + os.sep + 'handler.py' - - # Create the zip file(s) - if self.stage_config.get('slim_handler', False): - # Create two zips. One with the application and the other with just the handler. - # https://github.com/Miserlou/Zappa/issues/510 - self.zip_path = self.zappa.create_lambda_zip( - prefix=self.lambda_name, - use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True), - exclude=self.stage_config.get('exclude', []), - exclude_glob=self.stage_config.get('exclude_glob', []), - disable_progress=self.disable_progress, - archive_format='tarball' - ) - - # Make sure the normal venv is not included in the handler's zip - exclude = self.stage_config.get('exclude', []) - cur_venv = self.zappa.get_current_venv() - exclude.append(cur_venv.split('/')[-1]) - self.handler_path = self.zappa.create_lambda_zip( - prefix='handler_{0!s}'.format(self.lambda_name), - venv=self.zappa.create_handler_venv(), - handler_file=handler_file, - slim_handler=True, - exclude=exclude, - exclude_glob=self.stage_config.get('exclude_glob', []), - output=output, - disable_progress=self.disable_progress - ) - else: - # This could be python3.6 optimized. 
- exclude = self.stage_config.get( - 'exclude', [ - "boto3", - "dateutil", - "botocore", - "s3transfer", - "concurrent" - ]) - - # Create a single zip that has the handler and application - self.zip_path = self.zappa.create_lambda_zip( - prefix=self.lambda_name, - handler_file=handler_file, - use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True), - exclude=exclude, - exclude_glob=self.stage_config.get('exclude_glob', []), - output=output, - disable_progress=self.disable_progress - ) - - # Warn if this is too large for Lambda. - file_stats = os.stat(self.zip_path) - if file_stats.st_size > 52428800: # pragma: no cover - print('\n\nWarning: Application zip package is likely to be too large for AWS Lambda. ' - 'Try setting "slim_handler" to true in your Zappa settings file.\n\n') - - # Throw custom settings into the zip that handles requests - if self.stage_config.get('slim_handler', False): - handler_zip = self.handler_path - else: - handler_zip = self.zip_path - - with zipfile.ZipFile(handler_zip, 'a') as lambda_zip: - - settings_s = "# Generated by Zappa\n" - - if self.app_function: - if '.' not in self.app_function: # pragma: no cover - raise ClickException("Your " + click.style("app_function", fg='red', bold=True) + " value is not a modular path." 
+ - " It needs to be in the format `" + click.style("your_module.your_app_object", bold=True) + "`.") - app_module, app_function = self.app_function.rsplit('.', 1) - settings_s = settings_s + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(app_module, app_function) - - if self.exception_handler: - settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler) - else: - settings_s += "EXCEPTION_HANDLER=None\n" - - if self.debug: - settings_s = settings_s + "DEBUG=True\n" - else: - settings_s = settings_s + "DEBUG=False\n" - - settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level)) - - if self.binary_support: - settings_s = settings_s + "BINARY_SUPPORT=True\n" - else: - settings_s = settings_s + "BINARY_SUPPORT=False\n" - - head_map_dict = {} - head_map_dict.update(dict(self.context_header_mappings)) - settings_s = settings_s + "CONTEXT_HEADER_MAPPINGS={0}\n".format( - head_map_dict - ) - - # If we're on a domain, we don't need to define the /<> in - # the WSGI PATH - if self.domain: - settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain)) - else: - settings_s = settings_s + "DOMAIN=None\n" - - if self.base_path: - settings_s = settings_s + "BASE_PATH='{0!s}'\n".format((self.base_path)) - else: - settings_s = settings_s + "BASE_PATH=None\n" - - # Pass through remote config bucket and path - if self.remote_env: - settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format( - self.remote_env - ) - # DEPRECATED. 
use remove_env instead - elif self.remote_env_bucket and self.remote_env_file: - settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format( - self.remote_env_bucket, self.remote_env_file - ) - - # Local envs - env_dict = {} - if self.aws_region: - env_dict['AWS_REGION'] = self.aws_region - env_dict.update(dict(self.environment_variables)) - - # Environment variable keys must be ascii - # https://github.com/Miserlou/Zappa/issues/604 - # https://github.com/Miserlou/Zappa/issues/998 - try: - env_dict = dict((k.encode('ascii').decode('ascii'), v) for (k, v) in env_dict.items()) - except Exception: - raise ValueError("Environment variable keys must be ascii.") - - settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format( - env_dict - ) - - # We can be environment-aware - settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage)) - settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name)) - - if self.settings_file: - settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format((self.settings_file)) - else: - settings_s = settings_s + "SETTINGS_FILE=None\n" - - if self.django_settings: - settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format((self.django_settings)) - else: - settings_s = settings_s + "DJANGO_SETTINGS=None\n" - - # If slim handler, path to project zip - if self.stage_config.get('slim_handler', False): - settings_s += "ARCHIVE_PATH='s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz'\n".format( - self.s3_bucket_name, self.api_stage, self.project_name) - - # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file - # and tell the handler we are the slim_handler - # https://github.com/Miserlou/Zappa/issues/776 - settings_s += "SLIM_HANDLER=True\n" - - include = self.stage_config.get('include', []) - if len(include) >= 1: - settings_s += "INCLUDE=" + str(include) + '\n' - - # AWS Events function mapping - event_mapping = {} - events = 
self.stage_config.get('events', []) - for event in events: - arn = event.get('event_source', {}).get('arn') - function = event.get('function') - if arn and function: - event_mapping[arn] = function - settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping) - - # Map Lext bot events - bot_events = self.stage_config.get('bot_events', []) - bot_events_mapping = {} - for bot_event in bot_events: - event_source = bot_event.get('event_source', {}) - intent = event_source.get('intent') - invocation_source = event_source.get('invocation_source') - function = bot_event.get('function') - if intent and invocation_source and function: - bot_events_mapping[str(intent) + ':' + str(invocation_source)] = function - - settings_s = settings_s + "AWS_BOT_EVENT_MAPPING={0!s}\n".format(bot_events_mapping) - - # Map cognito triggers - cognito_trigger_mapping = {} - cognito_config = self.stage_config.get('cognito', {}) - triggers = cognito_config.get('triggers', []) - for trigger in triggers: - source = trigger.get('source') - function = trigger.get('function') - if source and function: - cognito_trigger_mapping[source] = function - settings_s = settings_s + "COGNITO_TRIGGER_MAPPING={0!s}\n".format(cognito_trigger_mapping) - - # Authorizer config - authorizer_function = self.authorizer.get('function', None) - if authorizer_function: - settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function) - - # Copy our Django app into root of our package. - # It doesn't work otherwise. 
- if self.django_settings: - base = __file__.rsplit(os.sep, 1)[0] - django_py = ''.join(os.path.join(base, 'ext', 'django_zappa.py')) - lambda_zip.write(django_py, 'django_zappa_app.py') - - # async response - async_response_table = self.stage_config.get('async_response_table', '') - settings_s += "ASYNC_RESPONSE_TABLE='{0!s}'\n".format(async_response_table) - - # Lambda requires a specific chmod - temp_settings = tempfile.NamedTemporaryFile(delete=False) - os.chmod(temp_settings.name, 0o644) - temp_settings.write(bytes(settings_s, "utf-8")) - temp_settings.close() - lambda_zip.write(temp_settings.name, 'zappa_settings.py') - os.unlink(temp_settings.name) - - def remove_local_zip(self): - """ - Remove our local zip file. - """ - - if self.stage_config.get('delete_local_zip', True): - try: - if os.path.isfile(self.zip_path): - os.remove(self.zip_path) - if self.handler_path and os.path.isfile(self.handler_path): - os.remove(self.handler_path) - except Exception as e: # pragma: no cover - sys.exit(-1) - - def remove_uploaded_zip(self): - """ - Remove the local and S3 zip file after uploading and updating. - """ - - # Remove the uploaded zip from S3, because it is now registered.. - if self.stage_config.get('delete_s3_zip', True): - self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name) - if self.stage_config.get('slim_handler', False): - # Need to keep the project zip as the slim handler uses it. - self.zappa.remove_from_s3(self.handler_path, self.s3_bucket_name) - - def on_exit(self): - """ - Cleanup after the command finishes. - Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs. - """ - if self.zip_path: - # Only try to remove uploaded zip if we're running a command that has loaded credentials - if self.load_credentials: - self.remove_uploaded_zip() - - self.remove_local_zip() - - def print_logs(self, logs, colorize=True, http=False, non_http=False, force_colorize=None): - """ - Parse, filter and print logs to the console. 
- - """ - - for log in logs: - timestamp = log['timestamp'] - message = log['message'] - if "START RequestId" in message: - continue - if "REPORT RequestId" in message: - continue - if "END RequestId" in message: - continue - - if not colorize and not force_colorize: - if http: - if self.is_http_log_entry(message.strip()): - print("[" + str(timestamp) + "] " + message.strip()) - elif non_http: - if not self.is_http_log_entry(message.strip()): - print("[" + str(timestamp) + "] " + message.strip()) - else: - print("[" + str(timestamp) + "] " + message.strip()) - else: - if http: - if self.is_http_log_entry(message.strip()): - click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()), color=force_colorize) - elif non_http: - if not self.is_http_log_entry(message.strip()): - click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()), color=force_colorize) - else: - click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()), color=force_colorize) - - def is_http_log_entry(self, string): - """ - Determines if a log entry is an HTTP-formatted log string or not. - """ - # Debug event filter - if 'Zappa Event' in string: - return False - - # IP address filter - for token in string.replace('\t', ' ').split(' '): - try: - if (token.count('.') == 3 and token.replace('.', '').isnumeric()): - return True - except Exception: # pragma: no cover - pass - - return False - - def get_project_name(self): - return slugify.slugify(os.getcwd().split(os.sep)[-1])[:15] - - def colorize_log_entry(self, string): - """ - Apply various heuristics to return a colorized version of a string. - If these fail, simply return the string in plaintext. 
- """ - - final_string = string - try: - - # First, do stuff in square brackets - inside_squares = re.findall(r'\[([^]]*)\]', string) - for token in inside_squares: - if token in ['CRITICAL', 'ERROR', 'WARNING', 'DEBUG', 'INFO', 'NOTSET']: - final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, fg='cyan', bold=True) + click.style("]", fg='cyan')) - else: - final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, bold=True) + click.style("]", fg='cyan')) - - # Then do quoted strings - quotes = re.findall(r'"[^"]*"', string) - for token in quotes: - final_string = final_string.replace(token, click.style(token, fg="yellow")) - - # And UUIDs - for token in final_string.replace('\t', ' ').split(' '): - try: - if token.count('-') == 4 and token.replace('-', '').isalnum(): - final_string = final_string.replace(token, click.style(token, fg="magenta")) - except Exception: # pragma: no cover - pass - - # And IP addresses - try: - if token.count('.') == 3 and token.replace('.', '').isnumeric(): - final_string = final_string.replace(token, click.style(token, fg="red")) - except Exception: # pragma: no cover - pass - - # And status codes - try: - if token in ['200']: - final_string = final_string.replace(token, click.style(token, fg="green")) - if token in ['400', '401', '403', '404', '405', '500']: - final_string = final_string.replace(token, click.style(token, fg="red")) - except Exception: # pragma: no cover - pass - - # And Zappa Events - try: - if "Zappa Event:" in final_string: - final_string = final_string.replace("Zappa Event:", click.style("Zappa Event:", bold=True, fg="green")) - except Exception: # pragma: no cover - pass - - # And dates - for token in final_string.split('\t'): - try: - is_date = parser.parse(token) - final_string = final_string.replace(token, click.style(token, fg="green")) - except Exception: # pragma: no cover - pass - - final_string = 
final_string.replace('\t', ' ').replace(' ', ' ') - if final_string[0] != ' ': - final_string = ' ' + final_string - return final_string - except Exception as e: # pragma: no cover - return string - - def execute_prebuild_script(self): - """ - Parse and execute the prebuild_script from the zappa_settings. - - """ - - (pb_mod_path, pb_func) = self.prebuild_script.rsplit('.', 1) - - try: # Prefer prebuild script in working directory - if pb_mod_path.count('.') >= 1: # Prebuild script func is nested in a folder - (mod_folder_path, mod_name) = pb_mod_path.rsplit('.', 1) - mod_folder_path_fragments = mod_folder_path.split('.') - working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments) - else: - mod_name = pb_mod_path - working_dir = os.getcwd() - - working_dir_importer = pkgutil.get_importer(working_dir) - module_ = working_dir_importer.find_module(mod_name).load_module(mod_name) - - except (ImportError, AttributeError): - - try: # Prebuild func might be in virtualenv - module_ = importlib.import_module(pb_mod_path) - except ImportError: # pragma: no cover - raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( - "import prebuild script ", bold=True) + 'module: "{pb_mod_path}"'.format( - pb_mod_path=click.style(pb_mod_path, bold=True))) - - if not hasattr(module_, pb_func): # pragma: no cover - raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( - "find prebuild script ", bold=True) + 'function: "{pb_func}" '.format( - pb_func=click.style(pb_func, bold=True)) + 'in module "{pb_mod_path}"'.format( - pb_mod_path=pb_mod_path)) - - prebuild_function = getattr(module_, pb_func) - prebuild_function() # Call the function - - def collision_warning(self, item): - """ - Given a string, print a warning if this could - collide with a Zappa core package module. - - Use for app functions and events. - """ - - namespace_collisions = [ - "zappa.", "wsgi.", "middleware.", "handler.", "util.", "letsencrypt.", "cli." 
- ] - for namespace_collision in namespace_collisions: - if item.startswith(namespace_collision): - click.echo(click.style("Warning!", fg="red", bold=True) + - " You may have a namespace collision between " + - click.style(item, bold=True) + - " and " + - click.style(namespace_collision, bold=True) + - "! You may want to rename that file.") - - def deploy_api_gateway(self, api_id): - cache_cluster_enabled = self.stage_config.get('cache_cluster_enabled', False) - cache_cluster_size = str(self.stage_config.get('cache_cluster_size', .5)) - endpoint_url = self.zappa.deploy_api_gateway( - api_id=api_id, - stage_name=self.api_stage, - cache_cluster_enabled=cache_cluster_enabled, - cache_cluster_size=cache_cluster_size, - cloudwatch_log_level=self.stage_config.get('cloudwatch_log_level', 'OFF'), - cloudwatch_data_trace=self.stage_config.get('cloudwatch_data_trace', False), - cloudwatch_metrics_enabled=self.stage_config.get('cloudwatch_metrics_enabled', False), - cache_cluster_ttl=self.stage_config.get('cache_cluster_ttl', 300), - cache_cluster_encrypted=self.stage_config.get('cache_cluster_encrypted', False) - ) - return endpoint_url - - def check_venv(self): - """ Ensure we're inside a virtualenv. """ - if self.vargs and self.vargs.get("no_venv"): - return - if self.zappa: - venv = self.zappa.get_current_venv() - else: - # Just for `init`, when we don't have settings yet. - venv = Zappa.get_current_venv() - if not venv: - raise ClickException( - click.style("Zappa", bold=True) + " requires an " + click.style("active virtual environment", bold=True, fg="red") + "!\n" + - "Learn more about virtual environments here: " + click.style("http://docs.python-guide.org/en/latest/dev/virtualenvs/", bold=False, fg="cyan")) - - def silence(self): - """ - Route all stdout to null. - """ - - sys.stdout = open(os.devnull, 'w') - sys.stderr = open(os.devnull, 'w') - - def touch_endpoint(self, endpoint_url): - """ - Test the deployed endpoint with a GET request. 
- """ - - # Private APIGW endpoints most likely can't be reached by a deployer - # unless they're connected to the VPC by VPN. Instead of trying - # connect to the service, print a warning and let the user know - # to check it manually. - # See: https://github.com/Miserlou/Zappa/pull/1719#issuecomment-471341565 - if 'PRIVATE' in self.stage_config.get('endpoint_configuration', []): - print( - click.style("Warning!", fg="yellow", bold=True) + - " Since you're deploying a private API Gateway endpoint," - " Zappa cannot determine if your function is returning " - " a correct status code. You should check your API's response" - " manually before considering this deployment complete." - ) - return - - touch_path = self.stage_config.get('touch_path', '/') - req = requests.get(endpoint_url + touch_path) - - # Sometimes on really large packages, it can take 60-90 secs to be - # ready and requests will return 504 status_code until ready. - # So, if we get a 504 status code, rerun the request up to 4 times or - # until we don't get a 504 error - if req.status_code == 504: - i = 0 - status_code = 504 - while status_code == 504 and i <= 4: - req = requests.get(endpoint_url + touch_path) - status_code = req.status_code - i += 1 - - if req.status_code >= 500: - raise ClickException(click.style("Warning!", fg="red", bold=True) + - " Status check on the deployed lambda failed." + - " A GET request to '" + touch_path + "' yielded a " + - click.style(str(req.status_code), fg="red", bold=True) + " response code.") - -#################################################################### -# Main -#################################################################### - -def shamelessly_promote(): - """ - Shamelessly promote our little community. - """ - - click.echo("Need " + click.style("help", fg='green', bold=True) + - "? Found a " + click.style("bug", fg='green', bold=True) + - "? Let us " + click.style("know", fg='green', bold=True) + "! 
:D") - click.echo("File bug reports on " + click.style("GitHub", bold=True) + " here: " - + click.style("https://github.com/Miserlou/Zappa", fg='cyan', bold=True)) - click.echo("And join our " + click.style("Slack", bold=True) + " channel here: " - + click.style("https://zappateam.slack.com", fg='cyan', bold=True)) - click.echo("Love!,") - click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!") - -def disable_click_colors(): - """ - Set a Click context where colors are disabled. Creates a throwaway BaseCommand - to play nicely with the Context constructor. - The intended side-effect here is that click.echo() checks this context and will - suppress colors. - https://github.com/pallets/click/blob/e1aa43a3/click/globals.py#L39 - """ - - ctx = Context(BaseCommand('AllYourBaseAreBelongToUs')) - ctx.color = False - push_context(ctx) - -def handle(): # pragma: no cover - """ - Main program execution handler. - """ - - try: - cli = ZappaCLI() - sys.exit(cli.handle()) - except SystemExit as e: # pragma: no cover - cli.on_exit() - sys.exit(e.code) - - except KeyboardInterrupt: # pragma: no cover - cli.on_exit() - sys.exit(130) - except Exception as e: - cli.on_exit() - - click.echo("Oh no! An " + click.style("error occurred", fg='red', bold=True) + "! :(") - click.echo("\n==============\n") - import traceback - traceback.print_exc() - click.echo("\n==============\n") - shamelessly_promote() - - sys.exit(-1) - -if __name__ == '__main__': # pragma: no cover - handle() diff --git a/zappa/handler.py b/zappa/handler.py deleted file mode 100644 index cd1899309..000000000 --- a/zappa/handler.py +++ /dev/null @@ -1,616 +0,0 @@ -import base64 -import boto3 -import collections -import datetime -import importlib -import inspect -import json -import logging -import os -import sys -import traceback -import tarfile - -from builtins import str -from werkzeug.wrappers import Response - -# This file may be copied into a project's root, -# so handle both scenarios. 
-try: - from zappa.middleware import ZappaWSGIMiddleware - from zappa.wsgi import create_wsgi_request, common_log - from zappa.utilities import merge_headers, parse_s3_url -except ImportError as e: # pragma: no cover - from .middleware import ZappaWSGIMiddleware - from .wsgi import create_wsgi_request, common_log - from .utilities import merge_headers, parse_s3_url - - -# Set up logging -logging.basicConfig() -logger = logging.getLogger() -logger.setLevel(logging.INFO) - - -class LambdaHandler: - """ - Singleton for avoiding duplicate setup. - - Pattern provided by @benbangert. - """ - - __instance = None - settings = None - settings_name = None - session = None - - # Application - app_module = None - wsgi_app = None - trailing_slash = False - - def __new__(cls, settings_name="zappa_settings", session=None): - """Singleton instance to avoid repeat setup""" - if LambdaHandler.__instance is None: - print("Instancing..") - LambdaHandler.__instance = object.__new__(cls) - return LambdaHandler.__instance - - def __init__(self, settings_name="zappa_settings", session=None): - - # We haven't cached our settings yet, load the settings and app. 
- if not self.settings: - # Loading settings from a python module - self.settings = importlib.import_module(settings_name) - self.settings_name = settings_name - self.session = session - - # Custom log level - if self.settings.LOG_LEVEL: - level = logging.getLevelName(self.settings.LOG_LEVEL) - logger.setLevel(level) - - remote_env = getattr(self.settings, 'REMOTE_ENV', None) - remote_bucket, remote_file = parse_s3_url(remote_env) - - if remote_bucket and remote_file: - self.load_remote_settings(remote_bucket, remote_file) - - # Let the system know that this will be a Lambda/Zappa/Stack - os.environ["SERVERTYPE"] = "AWS Lambda" - os.environ["FRAMEWORK"] = "Zappa" - try: - os.environ["PROJECT"] = self.settings.PROJECT_NAME - os.environ["STAGE"] = self.settings.API_STAGE - except Exception: # pragma: no cover - pass - - # Set any locally defined env vars - # Environment variable keys can't be Unicode - # https://github.com/Miserlou/Zappa/issues/604 - for key in self.settings.ENVIRONMENT_VARIABLES.keys(): - os.environ[str(key)] = self.settings.ENVIRONMENT_VARIABLES[key] - - # Pulling from S3 if given a zip path - project_archive_path = getattr(self.settings, 'ARCHIVE_PATH', None) - if project_archive_path: - self.load_remote_project_archive(project_archive_path) - - - # Load compiled library to the PythonPath - # checks if we are the slim_handler since this is not needed otherwise - # https://github.com/Miserlou/Zappa/issues/776 - is_slim_handler = getattr(self.settings, 'SLIM_HANDLER', False) - if is_slim_handler: - included_libraries = getattr(self.settings, 'INCLUDE', ['libmysqlclient.so.18']) - try: - from ctypes import cdll, util - for library in included_libraries: - try: - cdll.LoadLibrary(os.path.join(os.getcwd(), library)) - except OSError: - print("Failed to find library: {}...right filename?".format(library)) - except ImportError: - print ("Failed to import cytpes library") - - # This is a non-WSGI application - # https://github.com/Miserlou/Zappa/pull/748 
- if not hasattr(self.settings, 'APP_MODULE') and not self.settings.DJANGO_SETTINGS: - self.app_module = None - wsgi_app_function = None - # This is probably a normal WSGI app (Or django with overloaded wsgi application) - # https://github.com/Miserlou/Zappa/issues/1164 - elif hasattr(self.settings, 'APP_MODULE'): - if self.settings.DJANGO_SETTINGS: - sys.path.append('/var/task') - from django.conf import ENVIRONMENT_VARIABLE as SETTINGS_ENVIRONMENT_VARIABLE - # add the Lambda root path into the sys.path - self.trailing_slash = True - os.environ[SETTINGS_ENVIRONMENT_VARIABLE] = self.settings.DJANGO_SETTINGS - else: - self.trailing_slash = False - - # The app module - self.app_module = importlib.import_module(self.settings.APP_MODULE) - - # The application - wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION) - # Django gets special treatment. - else: - try: # Support both for tests - from zappa.ext.django_zappa import get_django_wsgi - except ImportError: # pragma: no cover - from django_zappa_app import get_django_wsgi - - # Get the Django WSGI app from our extension - wsgi_app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS) - self.trailing_slash = True - - self.wsgi_app = ZappaWSGIMiddleware(wsgi_app_function) - - def load_remote_project_archive(self, project_zip_path): - """ - Puts the project files from S3 in /tmp and adds to path - """ - project_folder = '/tmp/{0!s}'.format(self.settings.PROJECT_NAME) - if not os.path.isdir(project_folder): - # The project folder doesn't exist in this cold lambda, get it from S3 - if not self.session: - boto_session = boto3.Session() - else: - boto_session = self.session - - # Download zip file from S3 - remote_bucket, remote_file = parse_s3_url(project_zip_path) - s3 = boto_session.resource('s3') - archive_on_s3 = s3.Object(remote_bucket, remote_file).get() - - with tarfile.open(fileobj=archive_on_s3['Body'], mode="r|gz") as t: - t.extractall(project_folder) - - # Add to project path - 
sys.path.insert(0, project_folder) - - # Change working directory to project folder - # Related: https://github.com/Miserlou/Zappa/issues/702 - os.chdir(project_folder) - return True - - def load_remote_settings(self, remote_bucket, remote_file): - """ - Attempt to read a file from s3 containing a flat json object. Adds each - key->value pair as environment variables. Helpful for keeping - sensitiZve or stage-specific configuration variables in s3 instead of - version control. - """ - if not self.session: - boto_session = boto3.Session() - else: - boto_session = self.session - - s3 = boto_session.resource('s3') - try: - remote_env_object = s3.Object(remote_bucket, remote_file).get() - except Exception as e: # pragma: no cover - # catch everything aws might decide to raise - print('Could not load remote settings file.', e) - return - - try: - content = remote_env_object['Body'].read() - except Exception as e: # pragma: no cover - # catch everything aws might decide to raise - print('Exception while reading remote settings file.', e) - return - - try: - settings_dict = json.loads(content) - except (ValueError, TypeError): # pragma: no cover - print('Failed to parse remote settings!') - return - - # add each key-value to environment - overwrites existing keys! - for key, value in settings_dict.items(): - if self.settings.LOG_LEVEL == "DEBUG": - print('Adding {} -> {} to environment'.format( - key, - value - )) - # Environment variable keys can't be Unicode - # https://github.com/Miserlou/Zappa/issues/604 - try: - os.environ[str(key)] = value - except Exception: - if self.settings.LOG_LEVEL == "DEBUG": - print("Environment variable keys must be non-unicode!") - - @staticmethod - def import_module_and_get_function(whole_function): - """ - Given a modular path to a function, import that module - and return the function. 
- """ - module, function = whole_function.rsplit('.', 1) - app_module = importlib.import_module(module) - app_function = getattr(app_module, function) - return app_function - - @classmethod - def lambda_handler(cls, event, context): # pragma: no cover - handler = cls() - exception_handler = handler.settings.EXCEPTION_HANDLER - try: - return handler.handler(event, context) - except Exception as ex: - exception_processed = cls._process_exception(exception_handler=exception_handler, - event=event, context=context, exception=ex) - if not exception_processed: - # Only re-raise exception if handler directed so. Allows handler to control if lambda has to retry - # an event execution in case of failure. - raise - - @classmethod - def _process_exception(cls, exception_handler, event, context, exception): - exception_processed = False - if exception_handler: - try: - handler_function = cls.import_module_and_get_function(exception_handler) - exception_processed = handler_function(exception, event, context) - except Exception as cex: - logger.error(msg='Failed to process exception via custom handler.') - print(cex) - return exception_processed - - @staticmethod - def run_function(app_function, event, context): - """ - Given a function and event context, - detect signature and execute, returning any result. 
- """ - # getargspec does not support python 3 method with type hints - # Related issue: https://github.com/Miserlou/Zappa/issues/1452 - if hasattr(inspect, "getfullargspec"): # Python 3 - args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(app_function) - else: # Python 2 - args, varargs, keywords, defaults = inspect.getargspec(app_function) - num_args = len(args) - if num_args == 0: - result = app_function(event, context) if varargs else app_function() - elif num_args == 1: - result = app_function(event, context) if varargs else app_function(event) - elif num_args == 2: - result = app_function(event, context) - else: - raise RuntimeError("Function signature is invalid. Expected a function that accepts at most " - "2 arguments or varargs.") - return result - - def get_function_for_aws_event(self, record): - """ - Get the associated function to execute for a triggered AWS event - - Support S3, SNS, DynamoDB, kinesis and SQS events - """ - if 's3' in record: - if ':' in record['s3']['configurationId']: - return record['s3']['configurationId'].split(':')[-1] - - arn = None - if 'Sns' in record: - try: - message = json.loads(record['Sns']['Message']) - if message.get('command'): - return message['command'] - except ValueError: - pass - arn = record['Sns'].get('TopicArn') - elif 'dynamodb' in record or 'kinesis' in record: - arn = record.get('eventSourceARN') - elif 'eventSource' in record and record.get('eventSource') == 'aws:sqs': - arn = record.get('eventSourceARN') - elif 's3' in record: - arn = record['s3']['bucket']['arn'] - - if arn: - return self.settings.AWS_EVENT_MAPPING.get(arn) - - return None - - def get_function_from_bot_intent_trigger(self, event): - """ - For the given event build ARN and return the configured function - """ - intent = event.get('currentIntent') - if intent: - intent = intent.get('name') - if intent: - return self.settings.AWS_BOT_EVENT_MAPPING.get( - "{}:{}".format(intent, event.get('invocationSource')) - ) - - def 
get_function_for_cognito_trigger(self, trigger): - """ - Get the associated function to execute for a cognito trigger - """ - print("get_function_for_cognito_trigger", self.settings.COGNITO_TRIGGER_MAPPING, trigger, self.settings.COGNITO_TRIGGER_MAPPING.get(trigger)) - return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger) - - def handler(self, event, context): - """ - An AWS Lambda function which parses specific API Gateway input into a - WSGI request, feeds it to our WSGI app, processes the response, and returns - that back to the API Gateway. - - """ - settings = self.settings - - # If in DEBUG mode, log all raw incoming events. - if settings.DEBUG: - logger.debug('Zappa Event: {}'.format(event)) - # Set any API Gateway defined Stage Variables - # as env vars - if event.get('stageVariables'): - for key in event['stageVariables'].keys(): - os.environ[str(key)] = event['stageVariables'][key] - - # This is the result of a keep alive, recertify - # or scheduled event. - if event.get('detail-type') == 'Scheduled Event': - - whole_function = event['resources'][0].split('/')[-1].split('-')[-1] - - # This is a scheduled function. - if '.' in whole_function: - app_function = self.import_module_and_get_function(whole_function) - - # Execute the function! - return self.run_function(app_function, event, context) - - # Else, let this execute as it were. - - # This is a direct command invocation. - elif event.get('command', None): - - whole_function = event['command'] - app_function = self.import_module_and_get_function(whole_function) - result = self.run_function(app_function, event, context) - print("Result of %s:" % whole_function) - print(result) - return result - - # This is a direct, raw python invocation. - # It's _extremely_ important we don't allow this event source - # to be overridden by unsanitized, non-admin user input. 
- elif event.get('raw_command', None): - - raw_command = event['raw_command'] - exec(raw_command) - return - - # This is a Django management command invocation. - elif event.get('manage', None): - - from django.core import management - - try: # Support both for tests - from zappa.ext.django_zappa import get_django_wsgi - except ImportError as e: # pragma: no cover - from django_zappa_app import get_django_wsgi - - # Get the Django WSGI app from our extension - # We don't actually need the function, - # but we do need to do all of the required setup for it. - app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS) - - # Couldn't figure out how to get the value into stdout with StringIO.. - # Read the log for now. :[] - management.call_command(*event['manage'].split(' ')) - return {} - - # This is an AWS-event triggered invocation. - elif event.get('Records', None): - - records = event.get('Records') - result = None - #whole_function = self.get_function_for_aws_event(records[0]) - # that didn't work. 
Lets just get it from the the Zappa settings instead - whole_function = '{}.{}'.format(self.settings.APP_MODULE, self.settings.APP_FUNCTION) - if whole_function: - app_function = self.import_module_and_get_function(whole_function) - result = self.run_function(app_function, event, context) - logger.debug(result) - else: - logger.error("Cannot find a function to process the triggered event.") - return result - - # this is an AWS-event triggered from Lex bot's intent - elif event.get('bot'): - result = None - whole_function = self.get_function_from_bot_intent_trigger(event) - if whole_function: - app_function = self.import_module_and_get_function(whole_function) - result = self.run_function(app_function, event, context) - logger.debug(result) - else: - logger.error("Cannot find a function to process the triggered event.") - return result - - # This is an API Gateway authorizer event - elif event.get('type') == 'TOKEN': - whole_function = self.settings.AUTHORIZER_FUNCTION - if whole_function: - app_function = self.import_module_and_get_function(whole_function) - policy = self.run_function(app_function, event, context) - return policy - else: - logger.error("Cannot find a function to process the authorization request.") - raise Exception('Unauthorized') - - # This is an AWS Cognito Trigger Event - elif event.get('triggerSource', None): - triggerSource = event.get('triggerSource') - whole_function = self.get_function_for_cognito_trigger(triggerSource) - result = event - if whole_function: - app_function = self.import_module_and_get_function(whole_function) - result = self.run_function(app_function, event, context) - logger.debug(result) - else: - logger.error("Cannot find a function to handle cognito trigger {}".format(triggerSource)) - return result - - # This is a CloudWatch event - # Related: https://github.com/Miserlou/Zappa/issues/1924 - elif event.get('awslogs', None): - result = None - whole_function = '{}.{}'.format(settings.APP_MODULE, settings.APP_FUNCTION) - 
app_function = self.import_module_and_get_function(whole_function) - if app_function: - result = self.run_function(app_function, event, context) - logger.debug("Result of %s:" % whole_function) - logger.debug(result) - else: - logger.error("Cannot find a function to process the triggered event.") - return result - - # Normal web app flow - try: - # Timing - time_start = datetime.datetime.now() - - # This is a normal HTTP request - if event.get('httpMethod', None): - script_name = '' - is_elb_context = False - headers = merge_headers(event) - if event.get('requestContext', None) and event['requestContext'].get('elb', None): - # Related: https://github.com/Miserlou/Zappa/issues/1715 - # inputs/outputs for lambda loadbalancer - # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html - is_elb_context = True - # host is lower-case when forwarded from ELB - host = headers.get('host') - # TODO: pathParameters is a first-class citizen in apigateway but not available without - # some parsing work for ELB (is this parameter used for anything?) - event['pathParameters'] = '' - else: - if headers: - host = headers.get('Host') - else: - host = None - logger.debug('host found: [{}]'.format(host)) - - if host: - if 'amazonaws.com' in host: - logger.debug('amazonaws found in host') - # The path provided in th event doesn't include the - # stage, so we must tell Flask to include the API - # stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014 - script_name = '/' + settings.API_STAGE - else: - # This is a test request sent from the AWS console - if settings.DOMAIN: - # Assume the requests received will be on the specified - # domain. 
No special handling is required - pass - else: - # Assume the requests received will be to the - # amazonaws.com endpoint, so tell Flask to include the - # API stage - script_name = '/' + settings.API_STAGE - - base_path = getattr(settings, 'BASE_PATH', None) - - # Create the environment for WSGI and handle the request - environ = create_wsgi_request( - event, - script_name=script_name, - base_path=base_path, - trailing_slash=self.trailing_slash, - binary_support=settings.BINARY_SUPPORT, - context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS - ) - - # We are always on https on Lambda, so tell our wsgi app that. - environ['HTTPS'] = 'on' - environ['wsgi.url_scheme'] = 'https' - environ['lambda.context'] = context - environ['lambda.event'] = event - - # Execute the application - with Response.from_app(self.wsgi_app, environ) as response: - # This is the object we're going to return. - # Pack the WSGI response into our special dictionary. - zappa_returndict = dict() - - # Issue #1715: ALB support. 
ALB responses must always include - # base64 encoding and status description - if is_elb_context: - zappa_returndict.setdefault('isBase64Encoded', False) - zappa_returndict.setdefault('statusDescription', response.status) - - if response.data: - if settings.BINARY_SUPPORT and \ - not response.mimetype.startswith("text/") \ - and response.mimetype != "application/json": - zappa_returndict['body'] = base64.b64encode(response.data).decode('utf-8') - zappa_returndict["isBase64Encoded"] = True - else: - zappa_returndict['body'] = response.get_data(as_text=True) - - zappa_returndict['statusCode'] = response.status_code - if 'headers' in event: - zappa_returndict['headers'] = {} - for key, value in response.headers: - zappa_returndict['headers'][key] = value - if 'multiValueHeaders' in event: - zappa_returndict['multiValueHeaders'] = {} - for key, value in response.headers: - zappa_returndict['multiValueHeaders'][key] = response.headers.getlist(key) - - # Calculate the total response time, - # and log it in the Common Log format. - time_end = datetime.datetime.now() - delta = time_end - time_start - response_time_ms = delta.total_seconds() * 1000 - response.content = response.data - common_log(environ, response, response_time=response_time_ms) - - return zappa_returndict - except Exception as e: # pragma: no cover - # Print statements are visible in the logs either way - print(e) - exc_info = sys.exc_info() - message = ('An uncaught exception happened while servicing this request. ' - 'You can investigate this with the `zappa tail` command.') - - # If we didn't even build an app_module, just raise. 
- if not settings.DJANGO_SETTINGS: - try: - self.app_module - except NameError as ne: - message = 'Failed to import module: {}'.format(ne.message) - - # Call exception handler for unhandled exceptions - exception_handler = self.settings.EXCEPTION_HANDLER - self._process_exception(exception_handler=exception_handler, - event=event, context=context, exception=e) - - # Return this unspecified exception as a 500, using template that API Gateway expects. - content = collections.OrderedDict() - content['statusCode'] = 500 - body = {'message': message} - if settings.DEBUG: # only include traceback if debug is on. - body['traceback'] = traceback.format_exception(*exc_info) # traceback as a list for readability. - content['body'] = json.dumps(str(body), sort_keys=True, indent=4) - return content - - -def lambda_handler(event, context): # pragma: no cover - return LambdaHandler.lambda_handler(event, context) - - -def keep_warm_callback(event, context): - """Method is triggered by the CloudWatch event scheduled when keep_warm setting is set to true.""" - lambda_handler(event={}, context=context) # overriding event with an empty one so that web app initialization will - # be triggered. diff --git a/zappa/utilities.py b/zappa/utilities.py deleted file mode 100644 index 9b8818d98..000000000 --- a/zappa/utilities.py +++ /dev/null @@ -1,573 +0,0 @@ -import botocore -import calendar -import datetime -import durationpy -import fnmatch -import io -import json -import logging -import os -import re -import shutil -import stat -import sys - -from past.builtins import basestring - -from urllib.parse import urlparse - -LOG = logging.getLogger(__name__) - -## -# Settings / Packaging -## - -def copytree(src, dst, metadata=True, symlinks=False, ignore=None): - """ - This is a contributed re-implementation of 'copytree' that - should work with the exact same behavior on multiple platforms. - - When `metadata` is False, file metadata such as permissions and modification - times are not copied. 
- """ - - def copy_file(src, dst, item): - s = os.path.join(src, item) - d = os.path.join(dst, item) - - if symlinks and os.path.islink(s): # pragma: no cover - if os.path.lexists(d): - os.remove(d) - os.symlink(os.readlink(s), d) - if metadata: - try: - st = os.lstat(s) - mode = stat.S_IMODE(st.st_mode) - os.lchmod(d, mode) - except: - pass # lchmod not available - elif os.path.isdir(s): - copytree(s, d, metadata, symlinks, ignore) - else: - shutil.copy2(s, d) if metadata else shutil.copy(s, d) - try: - lst = os.listdir(src) - if not os.path.exists(dst): - os.makedirs(dst) - if metadata: - shutil.copystat(src, dst) - except NotADirectoryError: # egg-link files - copy_file(os.path.dirname(src), os.path.dirname(dst), os.path.basename(src)) - return - - if ignore: - excl = ignore(src, lst) - lst = [x for x in lst if x not in excl] - - for item in lst: - copy_file(src, dst, item) - - -def parse_s3_url(url): - """ - Parses S3 URL. - - Returns bucket (domain) and file (full path). - """ - bucket = '' - path = '' - if url: - result = urlparse(url) - bucket = result.netloc - path = result.path.strip('/') - return bucket, path - -def human_size(num, suffix='B'): - """ - Convert bytes length to a human-readable version - """ - for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'): - if abs(num) < 1024.0: - return "{0:3.1f}{1!s}{2!s}".format(num, unit, suffix) - num /= 1024.0 - return "{0:.1f}{1!s}{2!s}".format(num, 'Yi', suffix) - -def string_to_timestamp(timestring): - """ - Accepts a str, returns an int timestamp. - """ - - ts = None - - # Uses an extended version of Go's duration string. 
- try: - delta = durationpy.from_str(timestring); - past = datetime.datetime.utcnow() - delta - ts = calendar.timegm(past.timetuple()) - return ts - except Exception as e: - pass - - if ts: - return ts - # else: - # print("Unable to parse timestring.") - return 0 - -## -# `init` related -## - -def detect_django_settings(): - """ - Automatically try to discover Django settings files, - return them as relative module paths. - """ - - matches = [] - for root, dirnames, filenames in os.walk(os.getcwd()): - for filename in fnmatch.filter(filenames, '*settings.py'): - full = os.path.join(root, filename) - if 'site-packages' in full: - continue - full = os.path.join(root, filename) - package_path = full.replace(os.getcwd(), '') - package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '') - - matches.append(package_module) - return matches - -def detect_flask_apps(): - """ - Automatically try to discover Flask apps files, - return them as relative module paths. - """ - - matches = [] - for root, dirnames, filenames in os.walk(os.getcwd()): - for filename in fnmatch.filter(filenames, '*.py'): - full = os.path.join(root, filename) - if 'site-packages' in full: - continue - - full = os.path.join(root, filename) - - with io.open(full, 'r', encoding='utf-8') as f: - lines = f.readlines() - for line in lines: - app = None - - # Kind of janky.. - if '= Flask(' in line: - app = line.split('= Flask(')[0].strip() - if '=Flask(' in line: - app = line.split('=Flask(')[0].strip() - - if not app: - continue - - package_path = full.replace(os.getcwd(), '') - package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '') - app_module = package_module + '.' 
+ app - - matches.append(app_module) - - return matches - -def get_venv_from_python_version(): - return 'python{}.{}'.format(*sys.version_info) - -def get_runtime_from_python_version(): - """ - """ - if sys.version_info[0] < 3: - raise ValueError("Python 2.x is no longer supported.") - else: - if sys.version_info[1] <= 6: - return 'python3.6' - elif sys.version_info[1] <= 7: - return 'python3.7' - else: - return 'python3.8' - -## -# Async Tasks -## - -def get_topic_name(lambda_name): - """ Topic name generation """ - return '%s-zappa-async' % lambda_name - -## -# Event sources / Kappa -## - -def get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False): - """ - - Given an event_source dictionary item, a session and a lambda_arn, - hack into Kappa's Gibson, create out an object we can call - to schedule this event, and return the event source. - - """ - import kappa.function - import kappa.restapi - import kappa.event_source.base - import kappa.event_source.dynamodb_stream - import kappa.event_source.kinesis - import kappa.event_source.s3 - import kappa.event_source.sns - import kappa.event_source.cloudwatch - import kappa.policy - import kappa.role - import kappa.awsclient - - class PseudoContext: - def __init__(self): - return - - class PseudoFunction: - def __init__(self): - return - - # Mostly adapted from kappa - will probably be replaced by kappa support - class SqsEventSource(kappa.event_source.base.EventSource): - - def __init__(self, context, config): - super().__init__(context, config) - self._lambda = kappa.awsclient.create_client( - 'lambda', context.session) - - def _get_uuid(self, function): - uuid = None - response = self._lambda.call( - 'list_event_source_mappings', - FunctionName=function.name, - EventSourceArn=self.arn) - LOG.debug(response) - if len(response['EventSourceMappings']) > 0: - uuid = response['EventSourceMappings'][0]['UUID'] - return uuid - - def add(self, function): - try: - response = self._lambda.call( - 
'create_event_source_mapping', - FunctionName=function.name, - EventSourceArn=self.arn, - BatchSize=self.batch_size, - Enabled=self.enabled - ) - LOG.debug(response) - except Exception: - LOG.exception('Unable to add event source') - - def enable(self, function): - self._config['enabled'] = True - try: - response = self._lambda.call( - 'update_event_source_mapping', - UUID=self._get_uuid(function), - Enabled=self.enabled - ) - LOG.debug(response) - except Exception: - LOG.exception('Unable to enable event source') - - def disable(self, function): - self._config['enabled'] = False - try: - response = self._lambda.call( - 'update_event_source_mapping', - FunctionName=function.name, - Enabled=self.enabled - ) - LOG.debug(response) - except Exception: - LOG.exception('Unable to disable event source') - - def update(self, function): - response = None - uuid = self._get_uuid(function) - if uuid: - try: - response = self._lambda.call( - 'update_event_source_mapping', - BatchSize=self.batch_size, - Enabled=self.enabled, - FunctionName=function.arn) - LOG.debug(response) - except Exception: - LOG.exception('Unable to update event source') - - def remove(self, function): - response = None - uuid = self._get_uuid(function) - if uuid: - response = self._lambda.call( - 'delete_event_source_mapping', - UUID=uuid) - LOG.debug(response) - return response - - def status(self, function): - response = None - LOG.debug('getting status for event source %s', self.arn) - uuid = self._get_uuid(function) - if uuid: - try: - response = self._lambda.call( - 'get_event_source_mapping', - UUID=self._get_uuid(function)) - LOG.debug(response) - except botocore.exceptions.ClientError: - LOG.debug('event source %s does not exist', self.arn) - response = None - else: - LOG.debug('No UUID for event source %s', self.arn) - return response - - class ExtendedSnsEventSource(kappa.event_source.sns.SNSEventSource): - @property - def filters(self): - return self._config.get('filters') - - def 
add_filters(self, function): - try: - subscription = self.exists(function) - if subscription: - response = self._sns.call( - 'set_subscription_attributes', - SubscriptionArn=subscription['SubscriptionArn'], - AttributeName='FilterPolicy', - AttributeValue=json.dumps(self.filters) - ) - kappa.event_source.sns.LOG.debug(response) - except Exception: - kappa.event_source.sns.LOG.exception('Unable to add filters for SNS topic %s', self.arn) - - def add(self, function): - super().add(function) - if self.filters: - self.add_filters(function) - - class ExtendedS3EventSource(kappa.event_source.s3.S3EventSource): - # until https://github.com/garnaat/kappa/pull/120 - def _make_notification_id(self, function_name=None): - import uuid - uid = str(uuid.uuid4()).split('-')[0] - return 'Kappa-%s-notification' % uid - - event_source_map = { - 'dynamodb': kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource, - 'kinesis': kappa.event_source.kinesis.KinesisEventSource, - 's3': ExtendedS3EventSource, - 'sns': ExtendedSnsEventSource, - 'sqs': SqsEventSource, - 'events': kappa.event_source.cloudwatch.CloudWatchEventSource - } - - arn = event_source['arn'] - _, _, svc, _ = arn.split(':', 3) - - event_source_func = event_source_map.get(svc, None) - if not event_source_func: - raise ValueError('Unknown event source: {0}'.format(arn)) - - def autoreturn(self, function_name): - return function_name - - event_source_func._make_notification_id = autoreturn - - ctx = PseudoContext() - ctx.session = boto_session - - funk = PseudoFunction() - funk.name = lambda_arn - funk.arn = lambda_arn - - funk._context = ctx - - event_source_obj = event_source_func(ctx, event_source) - - return event_source_obj, ctx, funk - -def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False): - """ - Given an event_source dictionary, create the object and add the event source. 
- """ - - event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) - # TODO: Detect changes in config and refine exists algorithm - if not dry: - if not event_source_obj.status(funk): - event_source_obj.add(funk) - return 'successful' if event_source_obj.status(funk) else 'failed' - else: - return 'exists' - - return 'dryrun' - -def remove_event_source(event_source, lambda_arn, target_function, boto_session, dry=False): - """ - Given an event_source dictionary, create the object and remove the event source. - """ - - event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) - - # This is slightly dirty, but necessary for using Kappa this way. - funk.arn = lambda_arn - if not dry: - rule_response = event_source_obj.remove(funk) - return rule_response - else: - return event_source_obj - -def get_event_source_status(event_source, lambda_arn, target_function, boto_session, dry=False): - """ - Given an event_source dictionary, create the object and get the event source status. - """ - - event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) - return event_source_obj.status(funk) - -## -# Analytics / Surveillance / Nagging -## - -def check_new_version_available(this_version): - """ - Checks if a newer version of Zappa is available. - - Returns True is updateable, else False. - - """ - import requests - - pypi_url = 'https://pypi.org/pypi/Zappa/json' - resp = requests.get(pypi_url, timeout=1.5) - top_version = resp.json()['info']['version'] - - return this_version != top_version - - -class InvalidAwsLambdaName(Exception): - """Exception: proposed AWS Lambda name is invalid""" - pass - - -def validate_name(name, maxlen=80): - """Validate name for AWS Lambda function. - name: actual name (without `arn:aws:lambda:...:` prefix and without - `:$LATEST`, alias or version suffix. 
- maxlen: max allowed length for name without prefix and suffix. - - The value 80 was calculated from prefix with longest known region name - and assuming that no alias or version would be longer than `$LATEST`. - - Based on AWS Lambda spec - http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html - - Return: the name - Raise: InvalidAwsLambdaName, if the name is invalid. - """ - if not isinstance(name, basestring): - msg = "Name must be of type string" - raise InvalidAwsLambdaName(msg) - if len(name) > maxlen: - msg = "Name is longer than {maxlen} characters." - raise InvalidAwsLambdaName(msg.format(maxlen=maxlen)) - if len(name) == 0: - msg = "Name must not be empty string." - raise InvalidAwsLambdaName(msg) - if not re.match("^[a-zA-Z0-9-_]+$", name): - msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -" - raise InvalidAwsLambdaName(msg) - return name - - -def contains_python_files_or_subdirs(folder): - """ - Checks (recursively) if the directory contains .py or .pyc files - """ - for root, dirs, files in os.walk(folder): - if [filename for filename in files if filename.endswith('.py') or filename.endswith('.pyc')]: - return True - - for d in dirs: - for _, subdirs, subfiles in os.walk(d): - if [filename for filename in subfiles if filename.endswith('.py') or filename.endswith('.pyc')]: - return True - - return False - - -def conflicts_with_a_neighbouring_module(directory_path): - """ - Checks if a directory lies in the same directory as a .py file with the same name. - """ - parent_dir_path, current_dir_name = os.path.split(os.path.normpath(directory_path)) - neighbours = os.listdir(parent_dir_path) - conflicting_neighbour_filename = current_dir_name+'.py' - return conflicting_neighbour_filename in neighbours - - -# https://github.com/Miserlou/Zappa/issues/1188 -def titlecase_keys(d): - """ - Takes a dict with keys of type str and returns a new dict with all keys titlecased. 
- """ - return {k.title(): v for k, v in d.items()} - - -# https://github.com/Miserlou/Zappa/issues/1688 -def is_valid_bucket_name(name): - """ - Checks if an S3 bucket name is valid according to https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules - """ - # Bucket names must be at least 3 and no more than 63 characters long. - if (len(name) < 3 or len(name) > 63): - return False - # Bucket names must not contain uppercase characters or underscores. - if (any(x.isupper() for x in name)): - return False - if "_" in name: - return False - # Bucket names must start with a lowercase letter or number. - if not (name[0].islower() or name[0].isdigit()): - return False - # Bucket names must be a series of one or more labels. Adjacent labels are separated by a single period (.). - for label in name.split("."): - # Each label must start and end with a lowercase letter or a number. - if len(label) < 1: - return False - if not (label[0].islower() or label[0].isdigit()): - return False - if not (label[-1].islower() or label[-1].isdigit()): - return False - # Bucket names must not be formatted as an IP address (for example, 192.168.5.4). - looks_like_IP = True - for label in name.split("."): - if not label.isdigit(): - looks_like_IP = False - break - if looks_like_IP: - return False - - return True - - -def merge_headers(event): - """ - Merge the values of headers and multiValueHeaders into a single dict. - Opens up support for multivalue headers via API Gateway and ALB. 
- See: https://github.com/Miserlou/Zappa/pull/1756 - """ - headers = event.get('headers') or {} - multi_headers = (event.get('multiValueHeaders') or {}).copy() - for h in set(headers.keys()): - if h not in multi_headers: - multi_headers[h] = [headers[h]] - for h in multi_headers.keys(): - multi_headers[h] = ', '.join(multi_headers[h]) - return multi_headers From 4f5279489a970c5ed9b905837501fff585cd2d90 Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Mon, 15 Jun 2020 17:23:53 -0500 Subject: [PATCH 19/27] merging --- tests/tests_placebo.py | 517 ++++++++ zappa/cli.py | 2799 ++++++++++++++++++++++++++++++++++++++++ zappa/handler.py | 615 +++++++++ zappa/utilities.py | 583 +++++++++ 4 files changed, 4514 insertions(+) create mode 100644 tests/tests_placebo.py create mode 100755 zappa/cli.py create mode 100644 zappa/handler.py create mode 100644 zappa/utilities.py diff --git a/tests/tests_placebo.py b/tests/tests_placebo.py new file mode 100644 index 000000000..47e477173 --- /dev/null +++ b/tests/tests_placebo.py @@ -0,0 +1,517 @@ +# -*- coding: utf8 -*- +import mock +import os +import random +import string +import unittest + +from .utils import placebo_session + +from zappa.cli import ZappaCLI +from zappa.handler import LambdaHandler +from zappa.utilities import (add_event_source, remove_event_source) +from zappa.core import Zappa + + +def random_string(length): + return ''.join(random.choice(string.printable) for _ in range(length)) + + +class TestZappa(unittest.TestCase): + def setUp(self): + self.sleep_patch = mock.patch('time.sleep', return_value=None) + # Tests expect us-east-1. 
+ # If the user has set a different region in env variables, we set it aside for now and use us-east-1 + self.users_current_region_name = os.environ.get('AWS_DEFAULT_REGION', None) + os.environ['AWS_DEFAULT_REGION'] = 'us-east-1' + if not os.environ.get('PLACEBO_MODE') == 'record': + self.sleep_patch.start() + + def tearDown(self): + if not os.environ.get('PLACEBO_MODE') == 'record': + self.sleep_patch.stop() + del os.environ['AWS_DEFAULT_REGION'] + if self.users_current_region_name is not None: + # Give the user their AWS region back, we're done testing with us-east-1. + os.environ['AWS_DEFAULT_REGION'] = self.users_current_region_name + + @placebo_session + def test_upload_remove_s3(self, session): + bucket_name = 'test_zappa_upload_s3' + z = Zappa(session) + zip_path = z.create_lambda_zip(minify=False) + res = z.upload_to_s3(zip_path, bucket_name) + self.assertTrue(res) + s3 = session.resource('s3') + + # will throw ClientError with 404 if bucket doesn't exist + s3.meta.client.head_bucket(Bucket=bucket_name) + + # will throw ClientError with 404 if object doesn't exist + s3.meta.client.head_object( + Bucket=bucket_name, + Key=zip_path, + ) + res = z.remove_from_s3(zip_path, bucket_name) + self.assertTrue(res) + + fail = z.upload_to_s3('/tmp/this_isnt_real', bucket_name) + self.assertFalse(fail) + + #Will graciouly handle quirky S3 behavior on 'us-east-1' region name' + z.aws_region = 'us-east-1' + res = z.upload_to_s3(zip_path, bucket_name) + os.remove(zip_path) + self.assertTrue(res) + + @placebo_session + def test_copy_on_s3(self, session): + bucket_name = 'test_zappa_upload_s3' + z = Zappa(session) + zip_path = z.create_lambda_zip(minify=False) + res = z.upload_to_s3(zip_path, bucket_name) + self.assertTrue(res) + s3 = session.resource('s3') + + # will throw ClientError with 404 if bucket doesn't exist + s3.meta.client.head_bucket(Bucket=bucket_name) + + # will throw ClientError with 404 if object doesn't exist + s3.meta.client.head_object( + 
Bucket=bucket_name, + Key=zip_path, + ) + zp = 'copy_' + zip_path + res = z.copy_on_s3(zip_path, zp, bucket_name) + os.remove(zip_path) + self.assertTrue(res) + + @placebo_session + def test_create_lambda_function_s3(self, session): + bucket_name = 'lmbda' + zip_path = 'Spheres-dev-1454694878.zip' + + z = Zappa(session) + z.aws_region = 'us-east-1' + z.load_credentials(session) + z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' + + arn = z.create_lambda_function( + bucket=bucket_name, + s3_key=zip_path, + function_name='test_lmbda_function55', + handler='runme.lambda_handler' + ) + + arn = z.update_lambda_function( + bucket=bucket_name, + s3_key=zip_path, + function_name='test_lmbda_function55', + ) + + @placebo_session + def test_create_lambda_function_local(self, session): + bucket_name = 'lmbda' + local_file = 'Spheres-dev-1454694878.zip' + + z = Zappa(session) + z.aws_region = 'us-east-1' + z.load_credentials(session) + z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' + + arn = z.create_lambda_function( + bucket=bucket_name, + local_zip=local_file, + function_name='test_lmbda_function55', + handler='runme.lambda_handler' + ) + + arn = z.update_lambda_function( + bucket=bucket_name, + local_zip=local_file, + function_name='test_lmbda_function55', + ) + + @placebo_session + def test_rollback_lambda_function_version(self, session): + z = Zappa(session) + z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution' + + function_name = 'django-helloworld-unicode' + too_many_versions = z.rollback_lambda_function_version(function_name, 99999) + self.assertFalse(too_many_versions) + + function_arn = z.rollback_lambda_function_version(function_name, 1) + + @placebo_session + def test_invoke_lambda_function(self, session): + z = Zappa(session) + z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution' + + function_name = 'django-helloworld-unicode' + payload = '{"event": "hello"}' + response = 
z.invoke_lambda_function(function_name, payload) + + @placebo_session + def test_create_iam_roles(self, session): + z = Zappa(session) + arn, updated = z.create_iam_roles() + self.assertEqual(arn, "arn:aws:iam::123:role/{}".format(z.role_name)) + + @placebo_session + def test_get_api_url(self, session): + z = Zappa(session) + z.credentials_arn = 'arn:aws:iam::724336686645:role/ZappaLambdaExecution' + url = z.get_api_url('Spheres-demonstration', 'demonstration') + + @placebo_session + def test_fetch_logs(self, session): + z = Zappa(session) + z.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' + events = z.fetch_logs('Spheres-demonstration') + self.assertTrue(events is not None) + + ## + # Handler + ## + + @placebo_session + def test_handler(self, session): + # Init will test load_remote_settings + lh = LambdaHandler('test_settings', session=session) + + # Annoyingly, this will fail during record, but + # the result will actually be okay to use in playback. + # See: https://github.com/garnaat/placebo/issues/48 + self.assertEqual(os.environ['hello'], 'world') + + event = { + "body": {}, + "headers": {}, + "params": { + "parameter_1": "asdf1", + "parameter_2": "asdf2", + }, + "method": "GET", + "query": {} + } + lh.handler(event, None) + + # Test scheduled event + event = { + 'account': '72333333333', + 'region': 'us-east-1', + 'detail': {}, + 'detail-type': 'Scheduled Event', + 'source': 'aws.events', + 'version': '0', + 'time': '2016-05-10T21:05:39Z', + 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', + 'resources': ['arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me'] + } + lh.handler(event, None) + + # Test command event + event = { + 'account': '72333333333', + 'region': 'us-east-1', + 'detail': {}, + 'command': 'test_settings.command', + 'source': 'aws.events', + 'version': '0', + 'time': '2016-05-10T21:05:39Z', + 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', + 'resources': 
['arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me'] + } + lh.handler(event, None) + + # Test command for async event + event = { + 'account': '72333333333', + 'region': 'us-east-1', + 'detail': {}, + 'command': 'zappa.asynchronous.route_lambda_task', + 'task_path': 'tests.test_app.async_me', + 'args': ['xxx'], + 'kwargs': {}, + 'source': 'aws.events', + 'version': '0', + 'time': '2016-05-10T21:05:39Z', + 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', + } + self.assertEqual('run async when on lambda xxx', lh.handler(event, None)) + event['kwargs'] = {'foo': 'bar'} + self.assertEqual('run async when on lambda xxxbar', lh.handler(event, None)) + + # Test raw_command event + event = { + 'account': '72333333333', + 'region': 'us-east-1', + 'detail': {}, + 'raw_command': 'print("check one two")', + 'source': 'aws.events', + 'version': '0', + 'time': '2016-05-10T21:05:39Z', + 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', + 'resources': ['arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me'] + } + lh.handler(event, None) + + # Test AWS S3 event + event = { + 'account': '72333333333', + 'region': 'us-east-1', + 'detail': {}, + 'Records': [{'s3': {'configurationId': 'test_project:test_settings.aws_s3_event'}}], + 'source': 'aws.events', + 'version': '0', + 'time': '2016-05-10T21:05:39Z', + 'id': '0d6a6db0-d5e7-4755-93a0-750a8bf49d55', + 'resources': ['arn:aws:events:us-east-1:72333333333:rule/tests.test_app.schedule_me'] + } + self.assertEqual("AWS S3 EVENT", lh.handler(event, None)) + + # Test AWS SNS event + event = { + 'account': '72333333333', + 'region': 'us-east-1', + 'detail': {}, + 'Records': [ + { + 'EventVersion': '1.0', + 'EventSource': 'aws:sns', + 'EventSubscriptionArn': 'arn:aws:sns:EXAMPLE', + 'Sns': { + 'SignatureVersion': '1', + 'Timestamp': '1970-01-01T00:00:00.000Z', + 'Signature': 'EXAMPLE', + 'SigningCertUrl': 'EXAMPLE', + 'MessageId': '95df01b4-ee98-5cb9-9903-4c221d41eb5e', + 'Message': 'Hello from SNS!', + 
'Subject': 'TestInvoke', + 'Type': 'Notification', + 'UnsubscribeUrl': 'EXAMPLE', + 'TopicArn': 'arn:aws:sns:1', + 'MessageAttributes': { + 'Test': {'Type': 'String', 'Value': 'TestString'}, + 'TestBinary': {'Type': 'Binary', 'Value': 'TestBinary'} + } + } + } + ] + } + self.assertEqual("AWS SNS EVENT", lh.handler(event, None)) + + # Test AWS SNS event + event = { + 'account': '72333333333', + 'region': 'us-east-1', + 'detail': {}, + 'Records': [ + { + 'EventVersion': '1.0', + 'EventSource': 'aws:sns', + 'EventSubscriptionArn': 'arn:aws:sns:EXAMPLE', + 'Sns': { + 'SignatureVersion': '1', + 'Timestamp': '1970-01-01T00:00:00.000Z', + 'Signature': 'EXAMPLE', + 'SigningCertUrl': 'EXAMPLE', + 'MessageId': '95df01b4-ee98-5cb9-9903-4c221d41eb5e', + 'Message': '{"args": ["arg1", "arg2"], "command": "zappa.asynchronous.route_sns_task", ' + '"task_path": "test_settings.aws_async_sns_event", "kwargs": {"arg3": "varg3"}}', + 'Subject': 'TestInvoke', + 'Type': 'Notification', + 'UnsubscribeUrl': 'EXAMPLE', + 'MessageAttributes': { + 'Test': {'Type': 'String', 'Value': 'TestString'}, + 'TestBinary': {'Type': 'Binary', 'Value': 'TestBinary'} + } + } + } + ] + } + self.assertEqual("AWS ASYNC SNS EVENT", lh.handler(event, None)) + + # Test AWS DynamoDB event + event = { + 'Records': [ + { + 'eventID': '1', + 'eventVersion': '1.0', + 'dynamodb': { + 'Keys': {'Id': {'N': '101'}}, + 'NewImage': {'Message': {'S': 'New item!'}, 'Id': {'N': '101'}}, + 'StreamViewType': 'NEW_AND_OLD_IMAGES', + 'SequenceNumber': '111', 'SizeBytes': 26 + }, + 'awsRegion': 'us-west-2', + 'eventName': 'INSERT', + 'eventSourceARN': 'arn:aws:dynamodb:1', + 'eventSource': 'aws:dynamodb' + } + ] + } + self.assertEqual("AWS DYNAMODB EVENT", lh.handler(event, None)) + + # Test AWS kinesis event + event = { + 'Records': [ + { + 'eventID': 'shardId-000000000000:49545115243490985018280067714973144582180062593244200961', + 'eventVersion': '1.0', + 'kinesis': { + 'partitionKey': 'partitionKey-3', + 'data': 
'SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=', + 'kinesisSchemaVersion': '1.0', + 'sequenceNumber': '49545115243490985018280067714973144582180062593244200961' + }, + 'invokeIdentityArn': 'arn:aws:iam::EXAMPLE', + 'eventName': 'aws:kinesis:record', + 'eventSourceARN': 'arn:aws:kinesis:1', + 'eventSource': 'aws:kinesis', + 'awsRegion': 'us-east-1' + } + ] + } + self.assertEqual("AWS KINESIS EVENT", lh.handler(event, None)) + + # Test AWS SQS event + event = { + "Records": [ + { + "messageId": "c80e8021-a70a-42c7-a470-796e1186f753", + "receiptHandle": "AQEBJQ+/u6NsnT5t8Q/VbVxgdUl4TMKZ5FqhksRdIQvLBhwNvADoBxYSOVeCBXdnS9P+erlTtwEALHsnBXynkfPLH3BOUqmgzP25U8kl8eHzq6RAlzrSOfTO8ox9dcp6GLmW33YjO3zkq5VRYyQlJgLCiAZUpY2D4UQcE5D1Vm8RoKfbE+xtVaOctYeINjaQJ1u3mWx9T7tork3uAlOe1uyFjCWU5aPX/1OHhWCGi2EPPZj6vchNqDOJC/Y2k1gkivqCjz1CZl6FlZ7UVPOx3AMoszPuOYZ+Nuqpx2uCE2MHTtMHD8PVjlsWirt56oUr6JPp9aRGo6bitPIOmi4dX0FmuMKD6u/JnuZCp+AXtJVTmSHS8IXt/twsKU7A+fiMK01NtD5msNgVPoe9JbFtlGwvTQ==", + "body": "{\"foo\":\"bar\"}", + "attributes": { + "ApproximateReceiveCount": "3", + "SentTimestamp": "1529104986221", + "SenderId": "594035263019", + "ApproximateFirstReceiveTimestamp": "1529104986230" + }, + "messageAttributes": {}, + "md5OfBody": "9bb58f26192e4ba00f01e2e7b136bbd8", + "eventSource": "aws:sqs", + "eventSourceARN": "arn:aws:sqs:1", + "awsRegion": "us-east-1" + } + ] + } + self.assertEqual("AWS SQS EVENT", lh.handler(event, None)) + + # Test Authorizer event + event = {'authorizationToken': 'hubtoken1', 'methodArn': 'arn:aws:execute-api:us-west-2:1234:xxxxx/dev/GET/v1/endpoint/param', 'type': 'TOKEN'} + self.assertEqual("AUTHORIZER_EVENT", lh.handler(event, None)) + + # Ensure Zappa does return 401 if no function was defined. 
+ lh.settings.AUTHORIZER_FUNCTION = None + with self.assertRaisesRegexp(Exception, 'Unauthorized'): + lh.handler(event, None) + + # Unhandled event + event = { + 'Records': [ + { + 'eventID': 'shardId-000000000000:49545115243490985018280067714973144582180062593244200961', + 'eventVersion': '1.0', + 'kinesis': { + 'partitionKey': 'partitionKey-3', + 'data': 'SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=', + 'kinesisSchemaVersion': '1.0', + 'sequenceNumber': '49545115243490985018280067714973144582180062593244200961' + }, + 'eventSourceARN': 'bad:arn:1', + } + ] + } + self.assertIsNone(lh.handler(event, None)) + + ## + # CLI + ## + + @placebo_session + def test_cli_aws(self, session): + zappa_cli = ZappaCLI() + zappa_cli.api_stage = 'ttt888' + zappa_cli.api_key_required = True + zappa_cli.authorization_type = 'NONE' + zappa_cli.load_settings('test_settings.json', session) + zappa_cli.zappa.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' + zappa_cli.deploy() + zappa_cli.update() + zappa_cli.rollback(1) + zappa_cli.tail(since=0, filter_pattern='', keep_open=False) + zappa_cli.schedule() + zappa_cli.unschedule() + zappa_cli.undeploy(no_confirm=True, remove_logs=True) + + @placebo_session + def test_cli_aws_status(self, session): + zappa_cli = ZappaCLI() + zappa_cli.api_stage = 'ttt888' + zappa_cli.load_settings('test_settings.json', session) + zappa_cli.api_stage = 'devor' + zappa_cli.lambda_name = 'baby-flask-devor' + zappa_cli.zappa.credentials_arn = 'arn:aws:iam::12345:role/ZappaLambdaExecution' + resp = zappa_cli.status() + + ## + # Let's Encrypt / ACME + ## + + ## + # Django + ## + + ## + # Util / Misc + ## + + @placebo_session + def test_add_event_source(self, session): + + event_source = {'arn': 'blah:blah:blah:blah', 'events': [ + "s3:ObjectCreated:*" + ]} + # Sanity. This should fail. 
+ try: + es = add_event_source(event_source, 'blah:blah:blah:blah', 'test_settings.callback', session) + self.fail("Success should have failed.") + except ValueError: + pass + + event_source = {'arn': 's3:s3:s3:s3', 'events': [ + "s3:ObjectCreated:*" + ]} + add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) + remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) + # get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) + + @placebo_session + def test_cognito_trigger(self, session): + z = Zappa(session) + z.update_cognito('Zappa-Trigger-Test', 'us-east-1_9jUv74DH8', {'PreSignUp': 'test.tasks.pre_signup'}, 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test') + + @placebo_session + def test_cognito_trigger_existing(self, session): + z = Zappa(session) + z.update_cognito('Zappa-Trigger-Test', 'us-east-1_9jUv74DH8', {'PreSignUp': 'test.tasks.pre_signup'}, 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test') + + @placebo_session + def test_cli_cognito_triggers(self, session): + zappa_cli = ZappaCLI() + zappa_cli.api_stage = 'ttt888' + zappa_cli.api_key_required = True + zappa_cli.load_settings('test_settings.json', session) + zappa_cli.lambda_arn = 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test' + zappa_cli.update_cognito_triggers() + + @placebo_session + def test_cognito_trigger_existing_UnusedAccountValidityDays(self, session): + z = Zappa(session) + z.update_cognito('Zappa-Trigger-Test', 'us-east-1_9jUv74DH8', {'PreSignUp': 'test.tasks.pre_signup'}, 'arn:aws:lambda:us-east-1:12345:function:Zappa-Trigger-Test') + + +if __name__ == '__main__': + unittest.main() diff --git a/zappa/cli.py b/zappa/cli.py new file mode 100755 index 000000000..2820eacba --- /dev/null +++ b/zappa/cli.py @@ -0,0 +1,2799 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Zappa CLI + 
+Deploy arbitrary Python programs as serverless Zappa applications. + +""" +from past.builtins import basestring +from builtins import input, bytes + +import argcomplete +import argparse +import base64 +import pkgutil +import botocore +import click +import collections +import hjson as json +import inspect +import importlib +import logging +import os +import pkg_resources +import random +import re +import requests +import slugify +import string +import sys +import tempfile +import time +import toml +import yaml +import zipfile + +from click import Context, BaseCommand +from click.exceptions import ClickException +from click.globals import push_context +from dateutil import parser +from datetime import datetime, timedelta + +from .core import Zappa, logger, API_GATEWAY_REGIONS +from .utilities import (check_new_version_available, detect_django_settings, + detect_flask_apps, parse_s3_url, human_size, + validate_name, InvalidAwsLambdaName, get_venv_from_python_version, + get_runtime_from_python_version, string_to_timestamp, is_valid_bucket_name) + + +CUSTOM_SETTINGS = [ + 'apigateway_policy', + 'assume_policy', + 'attach_policy', + 'aws_region', + 'delete_local_zip', + 'delete_s3_zip', + 'exclude', + 'exclude_glob', + 'extra_permissions', + 'include', + 'role_name', + 'touch', +] + +BOTO3_CONFIG_DOCS_URL = 'https://boto3.readthedocs.io/en/latest/guide/quickstart.html#configuration' + +## +# Main Input Processing +## + +class ZappaCLI: + """ + ZappaCLI object is responsible for loading the settings, + handling the input arguments and executing the calls to the core library. 
+ + """ + + # CLI + vargs = None + command = None + stage_env = None + + # Zappa settings + zappa = None + zappa_settings = None + load_credentials = True + disable_progress = False + + # Specific settings + api_stage = None + app_function = None + aws_region = None + debug = None + prebuild_script = None + project_name = None + profile_name = None + lambda_arn = None + lambda_name = None + lambda_description = None + lambda_concurrency = None + s3_bucket_name = None + settings_file = None + zip_path = None + handler_path = None + vpc_config = None + memory_size = None + use_apigateway = None + lambda_handler = None + django_settings = None + manage_roles = True + exception_handler = None + environment_variables = None + authorizer = None + xray_tracing = False + aws_kms_key_arn = '' + context_header_mappings = None + tags = [] + layers = None + + stage_name_env_pattern = re.compile('^[a-zA-Z0-9_]+$') + + def __init__(self): + self._stage_config_overrides = {} # change using self.override_stage_config_setting(key, val) + + @property + def stage_config(self): + """ + A shortcut property for settings of a stage. + """ + + def get_stage_setting(stage, extended_stages=None): + if extended_stages is None: + extended_stages = [] + + if stage in extended_stages: + raise RuntimeError(stage + " has already been extended to these settings. 
" + "There is a circular extends within the settings file.") + extended_stages.append(stage) + + try: + stage_settings = dict(self.zappa_settings[stage].copy()) + except KeyError: + raise ClickException("Cannot extend settings for undefined stage '" + stage + "'.") + + extends_stage = self.zappa_settings[stage].get('extends', None) + if not extends_stage: + return stage_settings + extended_settings = get_stage_setting(stage=extends_stage, extended_stages=extended_stages) + extended_settings.update(stage_settings) + return extended_settings + + settings = get_stage_setting(stage=self.api_stage) + + # Backwards compatible for delete_zip setting that was more explicitly named delete_local_zip + if 'delete_zip' in settings: + settings['delete_local_zip'] = settings.get('delete_zip') + + settings.update(self.stage_config_overrides) + + return settings + + @property + def stage_config_overrides(self): + """ + Returns zappa_settings we forcefully override for the current stage + set by `self.override_stage_config_setting(key, value)` + """ + return getattr(self, '_stage_config_overrides', {}).get(self.api_stage, {}) + + def override_stage_config_setting(self, key, val): + """ + Forcefully override a setting set by zappa_settings (for the current stage only) + :param key: settings key + :param val: value + """ + self._stage_config_overrides = getattr(self, '_stage_config_overrides', {}) + self._stage_config_overrides.setdefault(self.api_stage, {})[key] = val + + def handle(self, argv=None): + """ + Main function. + + Parses command, load settings and dispatches accordingly. 
+ + """ + + desc = ('Zappa - Deploy Python applications to AWS Lambda' + ' and API Gateway.\n') + parser = argparse.ArgumentParser(description=desc) + parser.add_argument( + '-v', '--version', action='version', + version=pkg_resources.get_distribution("zappa").version, + help='Print the zappa version' + ) + parser.add_argument( + '--color', default='auto', choices=['auto','never','always'] + ) + + env_parser = argparse.ArgumentParser(add_help=False) + me_group = env_parser.add_mutually_exclusive_group() + all_help = ('Execute this command for all of our defined ' + 'Zappa stages.') + me_group.add_argument('--all', action='store_true', help=all_help) + me_group.add_argument('stage_env', nargs='?') + + group = env_parser.add_argument_group() + group.add_argument( + '-a', '--app_function', help='The WSGI application function.' + ) + group.add_argument( + '-s', '--settings_file', help='The path to a Zappa settings file.' + ) + group.add_argument( + '-q', '--quiet', action='store_true', help='Silence all output.' + ) + # https://github.com/Miserlou/Zappa/issues/407 + # Moved when 'template' command added. + # Fuck Terraform. + group.add_argument( + '-j', '--json', action='store_true', help='Make the output of this command be machine readable.' + ) + # https://github.com/Miserlou/Zappa/issues/891 + group.add_argument( + '--disable_progress', action='store_true', help='Disable progress bars.' + ) + group.add_argument( + "--no_venv", action="store_true", help="Skip venv check." + ) + + ## + # Certify + ## + subparsers = parser.add_subparsers(title='subcommands', dest='command') + cert_parser = subparsers.add_parser( + 'certify', parents=[env_parser], + help='Create and install SSL certificate' + ) + cert_parser.add_argument( + '--manual', action='store_true', + help=("Gets new Let's Encrypt certificates, but prints them to console." + "Does not update API Gateway domains.") + ) + cert_parser.add_argument( + '-y', '--yes', action='store_true', help='Auto confirm yes.' 
+ ) + + ## + # Deploy + ## + deploy_parser = subparsers.add_parser( + 'deploy', parents=[env_parser], help='Deploy application.' + ) + deploy_parser.add_argument( + '-z', '--zip', help='Deploy Lambda with specific local or S3 hosted zip package' + ) + + ## + # Init + ## + init_parser = subparsers.add_parser('init', help='Initialize Zappa app.') + + ## + # Package + ## + package_parser = subparsers.add_parser( + 'package', parents=[env_parser], help='Build the application zip package locally.' + ) + package_parser.add_argument( + '-o', '--output', help='Name of file to output the package to.' + ) + + ## + # Template + ## + template_parser = subparsers.add_parser( + 'template', parents=[env_parser], help='Create a CloudFormation template for this API Gateway.' + ) + template_parser.add_argument( + '-l', '--lambda-arn', required=True, help='ARN of the Lambda function to template to.' + ) + template_parser.add_argument( + '-r', '--role-arn', required=True, help='ARN of the Role to template with.' + ) + template_parser.add_argument( + '-o', '--output', help='Name of file to output the template to.' + ) + + ## + # Invocation + ## + invoke_parser = subparsers.add_parser( + 'invoke', parents=[env_parser], + help='Invoke remote function.' + ) + invoke_parser.add_argument( + '--raw', action='store_true', + help=('When invoking remotely, invoke this python as a string,' + ' not as a modular path.') + ) + invoke_parser.add_argument( + '--no-color', action='store_true', + help=("Don't color the output") + ) + invoke_parser.add_argument('command_rest') + + ## + # Manage + ## + manage_parser = subparsers.add_parser( + 'manage', + help='Invoke remote Django manage.py commands.' + ) + rest_help = ("Command in the form of . 
is not " + "required if --all is specified") + manage_parser.add_argument('--all', action='store_true', help=all_help) + manage_parser.add_argument('command_rest', nargs='+', help=rest_help) + manage_parser.add_argument( + '--no-color', action='store_true', + help=("Don't color the output") + ) + # This is explicitly added here because this is the only subcommand that doesn't inherit from env_parser + # https://github.com/Miserlou/Zappa/issues/1002 + manage_parser.add_argument( + '-s', '--settings_file', help='The path to a Zappa settings file.' + ) + + ## + # Rollback + ## + def positive_int(s): + """ Ensure an arg is positive """ + i = int(s) + if i < 0: + msg = "This argument must be positive (got {})".format(s) + raise argparse.ArgumentTypeError(msg) + return i + + rollback_parser = subparsers.add_parser( + 'rollback', parents=[env_parser], + help='Rollback deployed code to a previous version.' + ) + rollback_parser.add_argument( + '-n', '--num-rollback', type=positive_int, default=1, + help='The number of versions to rollback.' + ) + + ## + # Scheduling + ## + subparsers.add_parser( + 'schedule', parents=[env_parser], + help='Schedule functions to occur at regular intervals.' + ) + + ## + # Status + ## + subparsers.add_parser( + 'status', parents=[env_parser], + help='Show deployment status and event schedules.' + ) + + ## + # Log Tailing + ## + tail_parser = subparsers.add_parser( + 'tail', parents=[env_parser], help='Tail deployment logs.' + ) + tail_parser.add_argument( + '--no-color', action='store_true', + help="Don't color log tail output." + ) + tail_parser.add_argument( + '--http', action='store_true', + help='Only show HTTP requests in tail output.' + ) + tail_parser.add_argument( + '--non-http', action='store_true', + help='Only show non-HTTP requests in tail output.' + ) + tail_parser.add_argument( + '--since', type=str, default="100000s", + help="Only show lines since a certain timeframe." 
+ ) + tail_parser.add_argument( + '--filter', type=str, default="", + help="Apply a filter pattern to the logs." + ) + tail_parser.add_argument( + '--force-color', action='store_true', + help='Force coloring log tail output even if coloring support is not auto-detected. (example: piping)' + ) + tail_parser.add_argument( + '--disable-keep-open', action='store_true', + help="Exit after printing the last available log, rather than keeping the log open." + ) + + ## + # Undeploy + ## + undeploy_parser = subparsers.add_parser( + 'undeploy', parents=[env_parser], help='Undeploy application.' + ) + undeploy_parser.add_argument( + '--remove-logs', action='store_true', + help=('Removes log groups of api gateway and lambda task' + ' during the undeployment.'), + ) + undeploy_parser.add_argument( + '-y', '--yes', action='store_true', help='Auto confirm yes.' + ) + + ## + # Unschedule + ## + subparsers.add_parser('unschedule', parents=[env_parser], + help='Unschedule functions.') + + ## + # Updating + ## + update_parser = subparsers.add_parser( + 'update', parents=[env_parser], help='Update deployed application.' + ) + update_parser.add_argument( + '-z', '--zip', help='Update Lambda with specific local or S3 hosted zip package' + ) + update_parser.add_argument( + '-n', '--no-upload', help="Update configuration where appropriate, but don't upload new code" + ) + + ## + # Debug + ## + subparsers.add_parser( + 'shell', parents=[env_parser], help='A debug shell with a loaded Zappa object.' + ) + + argcomplete.autocomplete(parser) + args = parser.parse_args(argv) + self.vargs = vars(args) + + if args.color == 'never': + disable_click_colors() + elif args.color == 'always': + #TODO: Support aggressive coloring like "--force-color" on all commands + pass + elif args.color == 'auto': + pass + + # Parse the input + # NOTE(rmoe): Special case for manage command + # The manage command can't have both stage_env and command_rest + # arguments. 
Since they are both positional arguments argparse can't + # differentiate the two. This causes problems when used with --all. + # (e.g. "manage --all showmigrations admin" argparse thinks --all has + # been specified AND that stage_env='showmigrations') + # By having command_rest collect everything but --all we can split it + # apart here instead of relying on argparse. + if not args.command: + parser.print_help() + return + + if args.command == 'manage' and not self.vargs.get('all'): + self.stage_env = self.vargs['command_rest'].pop(0) + else: + self.stage_env = self.vargs.get('stage_env') + + if args.command == 'package': + self.load_credentials = False + + self.command = args.command + + self.disable_progress = self.vargs.get('disable_progress') + if self.vargs.get('quiet'): + self.silence() + + # We don't have any settings yet, so make those first! + # (Settings-based interactions will fail + # before a project has been initialized.) + if self.command == 'init': + self.init() + return + + # Make sure there isn't a new version available + if not self.vargs.get('json'): + self.check_for_update() + + # Load and Validate Settings File + self.load_settings_file(self.vargs.get('settings_file')) + + # Should we execute this for all stages, or just one? + all_stages = self.vargs.get('all') + stages = [] + + if all_stages: # All stages! + stages = self.zappa_settings.keys() + else: # Just one env. + if not self.stage_env: + # If there's only one stage defined in the settings, + # use that as the default. 
+ if len(self.zappa_settings.keys()) == 1: + stages.append(list(self.zappa_settings.keys())[0]) + else: + parser.error("Please supply a stage to interact with.") + else: + stages.append(self.stage_env) + + for stage in stages: + try: + self.dispatch_command(self.command, stage) + except ClickException as e: + # Discussion on exit codes: https://github.com/Miserlou/Zappa/issues/407 + e.show() + sys.exit(e.exit_code) + + def dispatch_command(self, command, stage): + """ + Given a command to execute and stage, + execute that command. + """ + + self.api_stage = stage + + if command not in ['status', 'manage']: + if not self.vargs.get('json', None): + click.echo("Calling " + click.style(command, fg="green", bold=True) + " for stage " + + click.style(self.api_stage, bold=True) + ".." ) + + # Explicitly define the app function. + # Related: https://github.com/Miserlou/Zappa/issues/832 + if self.vargs.get('app_function', None): + self.app_function = self.vargs['app_function'] + + # Load our settings, based on api_stage. 
+ try: + self.load_settings(self.vargs.get('settings_file')) + except ValueError as e: + if hasattr(e, 'message'): + print("Error: {}".format(e.message)) + else: + print(str(e)) + sys.exit(-1) + self.callback('settings') + + # Hand it off + if command == 'deploy': # pragma: no cover + self.deploy(self.vargs['zip']) + if command == 'package': # pragma: no cover + self.package(self.vargs['output']) + if command == 'template': # pragma: no cover + self.template( self.vargs['lambda_arn'], + self.vargs['role_arn'], + output=self.vargs['output'], + json=self.vargs['json'] + ) + elif command == 'update': # pragma: no cover + self.update(self.vargs['zip'], self.vargs['no_upload']) + elif command == 'rollback': # pragma: no cover + self.rollback(self.vargs['num_rollback']) + elif command == 'invoke': # pragma: no cover + + if not self.vargs.get('command_rest'): + print("Please enter the function to invoke.") + return + + self.invoke( + self.vargs['command_rest'], + raw_python=self.vargs['raw'], + no_color=self.vargs['no_color'], + ) + elif command == 'manage': # pragma: no cover + + if not self.vargs.get('command_rest'): + print("Please enter the management command to invoke.") + return + + if not self.django_settings: + print("This command is for Django projects only!") + print("If this is a Django project, please define django_settings in your zappa_settings.") + return + + command_tail = self.vargs.get('command_rest') + if len(command_tail) > 1: + command = " ".join(command_tail) # ex: zappa manage dev "shell --version" + else: + command = command_tail[0] # ex: zappa manage dev showmigrations admin + + self.invoke( + command, + command="manage", + no_color=self.vargs['no_color'], + ) + + elif command == 'tail': # pragma: no cover + self.tail( + colorize=(not self.vargs['no_color']), + http=self.vargs['http'], + non_http=self.vargs['non_http'], + since=self.vargs['since'], + filter_pattern=self.vargs['filter'], + force_colorize=self.vargs['force_color'] or None, + 
keep_open=not self.vargs['disable_keep_open'] + ) + elif command == 'undeploy': # pragma: no cover + self.undeploy( + no_confirm=self.vargs['yes'], + remove_logs=self.vargs['remove_logs'] + ) + elif command == 'schedule': # pragma: no cover + self.schedule() + elif command == 'unschedule': # pragma: no cover + self.unschedule() + elif command == 'status': # pragma: no cover + self.status(return_json=self.vargs['json']) + elif command == 'certify': # pragma: no cover + self.certify( + no_confirm=self.vargs['yes'], + manual=self.vargs['manual'] + ) + elif command == 'shell': # pragma: no cover + self.shell() + + ## + # The Commands + ## + + def package(self, output=None): + """ + Only build the package + """ + # Make sure we're in a venv. + self.check_venv() + + # force not to delete the local zip + self.override_stage_config_setting('delete_local_zip', False) + # Execute the prebuild script + if self.prebuild_script: + self.execute_prebuild_script() + # Create the Lambda Zip + self.create_package(output) + self.callback('zip') + size = human_size(os.path.getsize(self.zip_path)) + click.echo(click.style("Package created", fg="green", bold=True) + ": " + click.style(self.zip_path, bold=True) + " (" + size + ")") + + def template(self, lambda_arn, role_arn, output=None, json=False): + """ + Only build the template file. + """ + + if not lambda_arn: + raise ClickException("Lambda ARN is required to template.") + + if not role_arn: + raise ClickException("Role ARN is required to template.") + + self.zappa.credentials_arn = role_arn + + # Create the template! 
+ template = self.zappa.create_stack_template( + lambda_arn=lambda_arn, + lambda_name=self.lambda_name, + api_key_required=self.api_key_required, + iam_authorization=self.iam_authorization, + authorizer=self.authorizer, + cors_options=self.cors, + description=self.apigateway_description, + endpoint_configuration=self.endpoint_configuration + ) + + if not output: + template_file = self.lambda_name + '-template-' + str(int(time.time())) + '.json' + else: + template_file = output + with open(template_file, 'wb') as out: + out.write(bytes(template.to_json(indent=None, separators=(',',':')), "utf-8")) + + if not json: + click.echo(click.style("Template created", fg="green", bold=True) + ": " + click.style(template_file, bold=True)) + else: + with open(template_file, 'r') as out: + print(out.read()) + + def deploy(self, source_zip=None): + """ + Package your project, upload it to S3, register the Lambda function + and create the API Gateway routes. + + """ + + if not source_zip: + # Make sure we're in a venv. + self.check_venv() + + # Execute the prebuild script + if self.prebuild_script: + self.execute_prebuild_script() + + # Make sure this isn't already deployed. 
+ deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name) + if len(deployed_versions) > 0: + raise ClickException("This application is " + click.style("already deployed", fg="red") + + " - did you mean to call " + click.style("update", bold=True) + "?") + + # Make sure the necessary IAM execution roles are available + if self.manage_roles: + try: + self.zappa.create_iam_roles() + except botocore.client.ClientError as ce: + raise ClickException( + click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!\n" + + "You may " + click.style("lack the necessary AWS permissions", bold=True) + + " to automatically manage a Zappa execution role.\n" + + click.style("Exception reported by AWS:", bold=True) + format(ce) + '\n' + + "To fix this, see here: " + + click.style( + "https://github.com/Miserlou/Zappa#custom-aws-iam-roles-and-policies-for-deployment", + bold=True) + + '\n') + + # Create the Lambda Zip + self.create_package() + self.callback('zip') + + # Upload it to S3 + success = self.zappa.upload_to_s3( + self.zip_path, self.s3_bucket_name, disable_progress=self.disable_progress) + if not success: # pragma: no cover + raise ClickException("Unable to upload to S3. Quitting.") + + # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip + if self.stage_config.get('slim_handler', False): + # https://github.com/Miserlou/Zappa/issues/510 + success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name, disable_progress=self.disable_progress) + if not success: # pragma: no cover + raise ClickException("Unable to upload handler to S3. 
Quitting.") + + # Copy the project zip to the current project zip + current_project_name = '{0!s}_{1!s}_current_project.tar.gz'.format(self.api_stage, self.project_name) + success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name, + bucket_name=self.s3_bucket_name) + if not success: # pragma: no cover + raise ClickException("Unable to copy the zip to be the current project. Quitting.") + + handler_file = self.handler_path + else: + handler_file = self.zip_path + + # Fixes https://github.com/Miserlou/Zappa/issues/613 + try: + self.lambda_arn = self.zappa.get_lambda_function( + function_name=self.lambda_name) + except botocore.client.ClientError: + # Register the Lambda function with that zip as the source + # You'll also need to define the path to your lambda_handler code. + kwargs = dict( + handler=self.lambda_handler, + description=self.lambda_description, + vpc_config=self.vpc_config, + dead_letter_config=self.dead_letter_config, + timeout=self.timeout_seconds, + memory_size=self.memory_size, + runtime=self.runtime, + aws_environment_variables=self.aws_environment_variables, + aws_kms_key_arn=self.aws_kms_key_arn, + use_alb=self.use_alb, + layers=self.layers, + concurrency=self.lambda_concurrency, + ) + if source_zip and source_zip.startswith('s3://'): + bucket, key_name = parse_s3_url(source_zip) + kwargs['function_name'] = self.lambda_name + kwargs['bucket'] = bucket + kwargs['s3_key'] = key_name + elif source_zip and not source_zip.startswith('s3://'): + with open(source_zip, mode='rb') as fh: + byte_stream = fh.read() + kwargs['function_name'] = self.lambda_name + kwargs['local_zip'] = byte_stream + else: + kwargs['function_name'] = self.lambda_name + kwargs['bucket'] = self.s3_bucket_name + kwargs['s3_key'] = handler_file + + self.lambda_arn = self.zappa.create_lambda_function(**kwargs) + + # Schedule events for this deployment + self.schedule() + + endpoint_url = '' + deployment_string = click.style("Deployment 
complete", fg="green", bold=True) + "!" + + if self.use_alb: + kwargs = dict( + lambda_arn=self.lambda_arn, + lambda_name=self.lambda_name, + alb_vpc_config=self.alb_vpc_config, + timeout=self.timeout_seconds + ) + self.zappa.deploy_lambda_alb(**kwargs) + + if self.use_apigateway: + + # Create and configure the API Gateway + template = self.zappa.create_stack_template( + lambda_arn=self.lambda_arn, + lambda_name=self.lambda_name, + api_key_required=self.api_key_required, + iam_authorization=self.iam_authorization, + authorizer=self.authorizer, + cors_options=self.cors, + description=self.apigateway_description, + endpoint_configuration=self.endpoint_configuration + ) + + self.zappa.update_stack( + self.lambda_name, + self.s3_bucket_name, + wait=True, + disable_progress=self.disable_progress + ) + + api_id = self.zappa.get_api_id(self.lambda_name) + + # Add binary support + if self.binary_support: + self.zappa.add_binary_support(api_id=api_id, cors=self.cors) + + # Add payload compression + if self.stage_config.get('payload_compression', True): + self.zappa.add_api_compression( + api_id=api_id, + min_compression_size=self.stage_config.get('payload_minimum_compression_size', 0)) + + # Deploy the API! + endpoint_url = self.deploy_api_gateway(api_id) + deployment_string = deployment_string + ": {}".format(endpoint_url) + + # Create/link API key + if self.api_key_required: + if self.api_key is None: + self.zappa.create_api_key(api_id=api_id, stage_name=self.api_stage) + else: + self.zappa.add_api_stage_to_api_key(api_key=self.api_key, api_id=api_id, stage_name=self.api_stage) + + if self.stage_config.get('touch', True): + self.touch_endpoint(endpoint_url) + + # Finally, delete the local copy our zip package + if not source_zip: + if self.stage_config.get('delete_local_zip', True): + self.remove_local_zip() + + # Remove the project zip from S3. 
+ if not source_zip: + self.remove_uploaded_zip() + + self.callback('post') + + click.echo(deployment_string) + + def update(self, source_zip=None, no_upload=False): + """ + Repackage and update the function code. + """ + + if not source_zip: + # Make sure we're in a venv. + self.check_venv() + + # Execute the prebuild script + if self.prebuild_script: + self.execute_prebuild_script() + + # Temporary version check + try: + updated_time = 1472581018 + function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name) + conf = function_response['Configuration'] + last_updated = parser.parse(conf['LastModified']) + last_updated_unix = time.mktime(last_updated.timetuple()) + except botocore.exceptions.BotoCoreError as e: + click.echo(click.style(type(e).__name__, fg="red") + ": " + e.args[0]) + sys.exit(-1) + except Exception as e: + click.echo(click.style("Warning!", fg="red") + " Couldn't get function " + self.lambda_name + + " in " + self.zappa.aws_region + " - have you deployed yet?") + sys.exit(-1) + + if last_updated_unix <= updated_time: + click.echo(click.style("Warning!", fg="red") + + " You may have upgraded Zappa since deploying this application. 
You will need to " + + click.style("redeploy", bold=True) + " for this deployment to work properly!") + + # Make sure the necessary IAM execution roles are available + if self.manage_roles: + try: + self.zappa.create_iam_roles() + except botocore.client.ClientError: + click.echo(click.style("Failed", fg="red") + " to " + click.style("manage IAM roles", bold=True) + "!") + click.echo("You may " + click.style("lack the necessary AWS permissions", bold=True) + + " to automatically manage a Zappa execution role.") + click.echo("To fix this, see here: " + + click.style("https://github.com/Miserlou/Zappa#custom-aws-iam-roles-and-policies-for-deployment", + bold=True)) + sys.exit(-1) + + # Create the Lambda Zip, + if not no_upload: + self.create_package() + self.callback('zip') + + # Upload it to S3 + if not no_upload: + success = self.zappa.upload_to_s3(self.zip_path, self.s3_bucket_name, disable_progress=self.disable_progress) + if not success: # pragma: no cover + raise ClickException("Unable to upload project to S3. Quitting.") + + # If using a slim handler, upload it to S3 and tell lambda to use this slim handler zip + if self.stage_config.get('slim_handler', False): + # https://github.com/Miserlou/Zappa/issues/510 + success = self.zappa.upload_to_s3(self.handler_path, self.s3_bucket_name, disable_progress=self.disable_progress) + if not success: # pragma: no cover + raise ClickException("Unable to upload handler to S3. Quitting.") + + # Copy the project zip to the current project zip + current_project_name = '{0!s}_{1!s}_current_project.tar.gz'.format(self.api_stage, self.project_name) + success = self.zappa.copy_on_s3(src_file_name=self.zip_path, dst_file_name=current_project_name, + bucket_name=self.s3_bucket_name) + if not success: # pragma: no cover + raise ClickException("Unable to copy the zip to be the current project. 
Quitting.") + + handler_file = self.handler_path + else: + handler_file = self.zip_path + + # Register the Lambda function with that zip as the source + # You'll also need to define the path to your lambda_handler code. + kwargs = dict( + bucket=self.s3_bucket_name, + function_name=self.lambda_name, + num_revisions=self.num_retained_versions, + concurrency=self.lambda_concurrency, + ) + if source_zip and source_zip.startswith('s3://'): + bucket, key_name = parse_s3_url(source_zip) + kwargs.update(dict( + bucket=bucket, + s3_key=key_name + )) + self.lambda_arn = self.zappa.update_lambda_function(**kwargs) + elif source_zip and not source_zip.startswith('s3://'): + with open(source_zip, mode='rb') as fh: + byte_stream = fh.read() + kwargs['local_zip'] = byte_stream + self.lambda_arn = self.zappa.update_lambda_function(**kwargs) + else: + if not no_upload: + kwargs['s3_key'] = handler_file + self.lambda_arn = self.zappa.update_lambda_function(**kwargs) + + # Remove the uploaded zip from S3, because it is now registered.. + if not source_zip and not no_upload: + self.remove_uploaded_zip() + + # Update the configuration, in case there are changes. 
+ self.lambda_arn = self.zappa.update_lambda_configuration( + lambda_arn=self.lambda_arn, + function_name=self.lambda_name, + handler=self.lambda_handler, + description=self.lambda_description, + vpc_config=self.vpc_config, + timeout=self.timeout_seconds, + memory_size=self.memory_size, + runtime=self.runtime, + aws_environment_variables=self.aws_environment_variables, + aws_kms_key_arn=self.aws_kms_key_arn, + layers=self.layers + ) + + # Finally, delete the local copy our zip package + if not source_zip and not no_upload: + if self.stage_config.get('delete_local_zip', True): + self.remove_local_zip() + + if self.use_apigateway: + + self.zappa.create_stack_template( + lambda_arn=self.lambda_arn, + lambda_name=self.lambda_name, + api_key_required=self.api_key_required, + iam_authorization=self.iam_authorization, + authorizer=self.authorizer, + cors_options=self.cors, + description=self.apigateway_description, + endpoint_configuration=self.endpoint_configuration + ) + self.zappa.update_stack( + self.lambda_name, + self.s3_bucket_name, + wait=True, + update_only=True, + disable_progress=self.disable_progress) + + api_id = self.zappa.get_api_id(self.lambda_name) + + # Update binary support + if self.binary_support: + self.zappa.add_binary_support(api_id=api_id, cors=self.cors) + else: + self.zappa.remove_binary_support(api_id=api_id, cors=self.cors) + + if self.stage_config.get('payload_compression', True): + self.zappa.add_api_compression( + api_id=api_id, + min_compression_size=self.stage_config.get('payload_minimum_compression_size', 0)) + else: + self.zappa.remove_api_compression(api_id=api_id) + + # It looks a bit like we might actually be using this just to get the URL, + # but we're also updating a few of the APIGW settings. 
+ endpoint_url = self.deploy_api_gateway(api_id) + + if self.stage_config.get('domain', None): + endpoint_url = self.stage_config.get('domain') + + else: + endpoint_url = None + + self.schedule() + + # Update any cognito pool with the lambda arn + # do this after schedule as schedule clears the lambda policy and we need to add one + self.update_cognito_triggers() + + self.callback('post') + + if endpoint_url and 'https://' not in endpoint_url: + endpoint_url = 'https://' + endpoint_url + + if self.base_path: + endpoint_url += '/' + self.base_path + + deployed_string = "Your updated Zappa deployment is " + click.style("live", fg='green', bold=True) + "!" + if self.use_apigateway: + deployed_string = deployed_string + ": " + click.style("{}".format(endpoint_url), bold=True) + + api_url = None + if endpoint_url and 'amazonaws.com' not in endpoint_url: + api_url = self.zappa.get_api_url( + self.lambda_name, + self.api_stage) + + if endpoint_url != api_url: + deployed_string = deployed_string + " (" + api_url + ")" + + if self.stage_config.get('touch', True): + if api_url: + self.touch_endpoint(api_url) + elif endpoint_url: + self.touch_endpoint(endpoint_url) + + click.echo(deployed_string) + + def rollback(self, revision): + """ + Rollsback the currently deploy lambda code to a previous revision. + """ + + print("Rolling back..") + + self.zappa.rollback_lambda_function_version( + self.lambda_name, versions_back=revision) + print("Done!") + + def tail(self, since, filter_pattern, limit=10000, keep_open=True, colorize=True, http=False, non_http=False, force_colorize=False): + """ + Tail this function's logs. 
        if keep_open, do so repeatedly, printing any new logs
        """

        try:
            since_stamp = string_to_timestamp(since)

            last_since = since_stamp
            while True:
                new_logs = self.zappa.fetch_logs(
                    self.lambda_name,
                    start_time=since_stamp,
                    limit=limit,
                    filter_pattern=filter_pattern,
                )

                # Only print entries newer than the last batch we showed.
                new_logs = [ e for e in new_logs if e['timestamp'] > last_since ]
                self.print_logs(new_logs, colorize, http, non_http, force_colorize)

                if not keep_open:
                    break
                if new_logs:
                    last_since = new_logs[-1]['timestamp']
                time.sleep(1)
        except KeyboardInterrupt: # pragma: no cover
            # Die gracefully
            try:
                sys.exit(0)
            except SystemExit:
                # sys.exit inside the KeyboardInterrupt handler can itself be
                # interrupted; 130 is the conventional SIGINT exit status.
                os._exit(130)

    def undeploy(self, no_confirm=False, remove_logs=False):
        """
        Tear down an existing deployment.

        no_confirm: skip the interactive "are you sure" prompt.
        remove_logs: also delete the API Gateway / Lambda CloudWatch log groups.
        """

        if not no_confirm: # pragma: no cover
            confirm = input("Are you sure you want to undeploy? [y/n] ")
            if confirm != 'y':
                return

        if self.use_alb:
            self.zappa.undeploy_lambda_alb(self.lambda_name)

        if self.use_apigateway:
            if remove_logs:
                self.zappa.remove_api_gateway_logs(self.lambda_name)

            domain_name = self.stage_config.get('domain', None)
            base_path = self.stage_config.get('base_path', None)

            # Only remove the api key when not specified
            if self.api_key_required and self.api_key is None:
                api_id = self.zappa.get_api_id(self.lambda_name)
                self.zappa.remove_api_key(api_id, self.api_stage)

            # NOTE(review): return value is currently unused.
            gateway_id = self.zappa.undeploy_api_gateway(
                self.lambda_name,
                domain_name=domain_name,
                base_path=base_path
            )

        self.unschedule() # removes event triggers, including warm up event.

        self.zappa.delete_lambda_function(self.lambda_name)
        if remove_logs:
            self.zappa.remove_lambda_function_logs(self.lambda_name)

        click.echo(click.style("Done", fg="green", bold=True) + "!")

    def update_cognito_triggers(self):
        """
        Update any Cognito user-pool triggers to point at this Lambda.
        """
        if self.cognito:
            user_pool = self.cognito.get('user_pool')
            triggers = self.cognito.get('triggers', [])
            lambda_configs = set()
            for trigger in triggers:
                # Trigger sources look like "PreSignUp_SignUp"; only the
                # prefix before '_' names the Lambda config slot.
                lambda_configs.add(trigger['source'].split('_')[0])
            self.zappa.update_cognito(self.lambda_name, user_pool, lambda_configs, self.lambda_arn)

    def schedule(self):
        """
        Given a list of functions and a schedule to execute them,
        set up regular execution.
        """
        events = self.stage_config.get('events', [])

        if events:
            if not isinstance(events, list): # pragma: no cover
                print("Events must be supplied as a list.")
                return

            for event in events:
                self.collision_warning(event.get('function'))

        if self.stage_config.get('keep_warm', True):
            if not events:
                events = []

            # Default keep-warm ping every 4 minutes to avoid cold starts.
            keep_warm_rate = self.stage_config.get('keep_warm_expression', "rate(4 minutes)")
            events.append({'name': 'zappa-keep-warm',
                           'function': 'handler.keep_warm_callback',
                           'expression': keep_warm_rate,
                           'description': 'Zappa Keep Warm - {}'.format(self.lambda_name)})

        if events:
            try:
                function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
            except botocore.exceptions.ClientError as e: # pragma: no cover
                click.echo(click.style("Function does not exist", fg="yellow") + ", please " +
                           click.style("deploy", bold=True) + "first. Ex:" +
                           click.style("zappa deploy {}.".format(self.api_stage), bold=True))
                sys.exit(-1)

            print("Scheduling..")
            self.zappa.schedule_events(
                lambda_arn=function_response['Configuration']['FunctionArn'],
                lambda_name=self.lambda_name,
                events=events
            )

        # Add async tasks SNS
        if self.stage_config.get('async_source', None) == 'sns' \
            and self.stage_config.get('async_resources', True):
            self.lambda_arn = self.zappa.get_lambda_function(
                function_name=self.lambda_name)
            topic_arn = self.zappa.create_async_sns_topic(
                lambda_name=self.lambda_name,
                lambda_arn=self.lambda_arn
            )
            click.echo('SNS Topic created: %s' % topic_arn)

        # Add async tasks DynamoDB
        table_name = self.stage_config.get('async_response_table', False)
        read_capacity = self.stage_config.get('async_response_table_read_capacity', 1)
        write_capacity = self.stage_config.get('async_response_table_write_capacity', 1)
        if table_name and self.stage_config.get('async_resources', True):
            created, response_table = self.zappa.create_async_dynamodb_table(
                table_name, read_capacity, write_capacity)
            if created:
                click.echo('DynamoDB table created: %s' % table_name)
            else:
                click.echo('DynamoDB table exists: %s' % table_name)
                # Warn (but don't modify) if the existing table's capacity
                # differs from the configured values.
                provisioned_throughput = response_table['Table']['ProvisionedThroughput']
                if provisioned_throughput['ReadCapacityUnits'] != read_capacity or \
                    provisioned_throughput['WriteCapacityUnits'] != write_capacity:
                    click.echo(click.style(
                        "\nWarning! Existing DynamoDB table ({}) does not match configured capacity.\n".format(table_name),
                        fg='red'
                    ))

    def unschedule(self):
        """
        Given a list of scheduled functions,
        tear down their regular execution.
        """

        # Run even if events are not defined to remove previously existing ones (thus default to []).
        events = self.stage_config.get('events', [])

        if not isinstance(events, list): # pragma: no cover
            print("Events must be supplied as a list.")
            return

        function_arn = None
        try:
            function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name)
            function_arn = function_response['Configuration']['FunctionArn']
        except botocore.exceptions.ClientError as e: # pragma: no cover
            raise ClickException("Function does not exist, you should deploy first. Ex: zappa deploy {}. "
                                 "Proceeding to unschedule CloudWatch based events.".format(self.api_stage))

        print("Unscheduling..")
        self.zappa.unschedule_events(
            lambda_name=self.lambda_name,
            lambda_arn=function_arn,
            events=events,
        )

        # Remove async task SNS
        if self.stage_config.get('async_source', None) == 'sns' \
            and self.stage_config.get('async_resources', True):
            removed_arns = self.zappa.remove_async_sns_topic(self.lambda_name)
            click.echo('SNS Topic removed: %s' % ', '.join(removed_arns))

    def invoke(self, function_name, raw_python=False, command=None, no_color=False):
        """
        Invoke a remote function.
        """

        # There are three likely scenarios for 'command' here:
        #   command, which is a modular function path
        #   raw_command, which is a string of python to execute directly
        #   manage, which is a Django-specific management command invocation
        key = command if command is not None else 'command'
        if raw_python:
            command = {'raw_command': function_name}
        else:
            command = {key: function_name}

        # Can't use hjson
        import json as json

        response = self.zappa.invoke_lambda_function(
            self.lambda_name,
            json.dumps(command),
            invocation_type='RequestResponse',
        )

        if 'LogResult' in response:
            if no_color:
                print(base64.b64decode(response['LogResult']))
            else:
                decoded = base64.b64decode(response['LogResult']).decode()
                formatted = self.format_invoke_command(decoded)
                colorized = self.colorize_invoke_command(formatted)
                print(colorized)
        else:
            print(response)

        # For a successful request FunctionError is not in response.
        # https://github.com/Miserlou/Zappa/pull/1254/
        if 'FunctionError' in response:
            raise ClickException(
                "{} error occurred while invoking command.".format(response['FunctionError'])
)

    def format_invoke_command(self, string):
        """
        Formats correctly the string output from the invoke() method,
        replacing line breaks and tabs when necessary.
        """

        # Lambda log output escapes newlines; restore them first.
        string = string.replace('\\n', '\n')

        formated_response = ''
        for line in string.splitlines():
            # REPORT lines pack their fields with tabs; split onto lines.
            if line.startswith('REPORT'):
                line = line.replace('\t', '\n')
            if line.startswith('[DEBUG]'):
                line = line.replace('\t', ' ')
            formated_response += line + '\n'
        formated_response = formated_response.replace('\n\n', '\n')

        return formated_response

    def colorize_invoke_command(self, string):
        """
        Apply various heuristics to return a colorized version the invoke
        command string. If these fail, simply return the string in plaintext.

        Inspired by colorize_log_entry().
+ """
+
+ final_string = string
+
+ try:
+
+ # Line headers
+ try:
+ for token in ['START', 'END', 'REPORT', '[DEBUG]']:
+ if token in final_string:
+ format_string = '[{}]'
+ # match whole words only
+ pattern = r'\b{}\b'
+ if token == '[DEBUG]':
+ format_string = '{}'
+ pattern = re.escape(token)
+ repl = click.style(
+ format_string.format(token),
+ bold=True,
+ fg='cyan'
+ )
+ final_string = re.sub(
+ pattern.format(token), repl, final_string
+ )
+ except Exception: # pragma: no cover
+ pass
+
+ # Green bold Tokens
+ try:
+ for token in [
+ 'Zappa Event:',
+ 'RequestId:',
+ 'Version:',
+ 'Duration:',
+ 'Billed',
+ 'Memory Size:',
+ 'Max Memory Used:'
+ ]:
+ if token in final_string:
+ final_string = final_string.replace(token, click.style(
+ token,
+ bold=True,
+ fg='green'
+ ))
+ except Exception: # pragma: no cover
+ pass
+
+ # UUIDs
+ for token in final_string.replace('\t', ' ').split(' '):
+ try:
+ if token.count('-') == 4 and token.replace('-', '').isalnum():
+ final_string = final_string.replace(
+ token,
+ click.style(token, fg='magenta')
+ )
+ except Exception: # pragma: no cover
+ pass
+
+ return final_string
+ except Exception:
+ return string
+
+ def status(self, return_json=False):
+ """
+ Describe the status of the current deployment.
+ """
+
+ def tabular_print(title, value):
+ """
+ Convenience function for printing formatted table items.
+ """
+ click.echo('%-*s%s' % (32, click.style("\t" + title, fg='green') + ':', str(value)))
+ return
+
+ # Lambda Env Details
+ lambda_versions = self.zappa.get_lambda_function_versions(self.lambda_name)
+
+ if not lambda_versions:
+ raise ClickException(click.style("No Lambda %s detected in %s - have you deployed yet?" 
% + (self.lambda_name, self.zappa.aws_region), fg='red')) + + status_dict = collections.OrderedDict() + status_dict["Lambda Versions"] = len(lambda_versions) + function_response = self.zappa.lambda_client.get_function(FunctionName=self.lambda_name) + conf = function_response['Configuration'] + self.lambda_arn = conf['FunctionArn'] + status_dict["Lambda Name"] = self.lambda_name + status_dict["Lambda ARN"] = self.lambda_arn + status_dict["Lambda Role ARN"] = conf['Role'] + status_dict["Lambda Handler"] = conf['Handler'] + status_dict["Lambda Code Size"] = conf['CodeSize'] + status_dict["Lambda Version"] = conf['Version'] + status_dict["Lambda Last Modified"] = conf['LastModified'] + status_dict["Lambda Memory Size"] = conf['MemorySize'] + status_dict["Lambda Timeout"] = conf['Timeout'] + status_dict["Lambda Runtime"] = conf['Runtime'] + if 'VpcConfig' in conf.keys(): + status_dict["Lambda VPC ID"] = conf.get('VpcConfig', {}).get('VpcId', 'Not assigned') + else: + status_dict["Lambda VPC ID"] = None + + # Calculated statistics + try: + function_invocations = self.zappa.cloudwatch.get_metric_statistics( + Namespace='AWS/Lambda', + MetricName='Invocations', + StartTime=datetime.utcnow()-timedelta(days=1), + EndTime=datetime.utcnow(), + Period=1440, + Statistics=['Sum'], + Dimensions=[{'Name': 'FunctionName', + 'Value': '{}'.format(self.lambda_name)}] + )['Datapoints'][0]['Sum'] + except Exception as e: + function_invocations = 0 + try: + function_errors = self.zappa.cloudwatch.get_metric_statistics( + Namespace='AWS/Lambda', + MetricName='Errors', + StartTime=datetime.utcnow()-timedelta(days=1), + EndTime=datetime.utcnow(), + Period=1440, + Statistics=['Sum'], + Dimensions=[{'Name': 'FunctionName', + 'Value': '{}'.format(self.lambda_name)}] + )['Datapoints'][0]['Sum'] + except Exception as e: + function_errors = 0 + + try: + error_rate = "{0:.2f}%".format(function_errors / function_invocations * 100) + except: + error_rate = "Error calculating" + 
status_dict["Invocations (24h)"] = int(function_invocations) + status_dict["Errors (24h)"] = int(function_errors) + status_dict["Error Rate (24h)"] = error_rate + + # URLs + if self.use_apigateway: + api_url = self.zappa.get_api_url( + self.lambda_name, + self.api_stage) + + status_dict["API Gateway URL"] = api_url + + # Api Keys + api_id = self.zappa.get_api_id(self.lambda_name) + for api_key in self.zappa.get_api_keys(api_id, self.api_stage): + status_dict["API Gateway x-api-key"] = api_key + + # There literally isn't a better way to do this. + # AWS provides no way to tie a APIGW domain name to its Lambda function. + domain_url = self.stage_config.get('domain', None) + base_path = self.stage_config.get('base_path', None) + if domain_url: + status_dict["Domain URL"] = 'https://' + domain_url + if base_path: + status_dict["Domain URL"] += '/' + base_path + else: + status_dict["Domain URL"] = "None Supplied" + + # Scheduled Events + event_rules = self.zappa.get_event_rules_for_lambda(lambda_arn=self.lambda_arn) + status_dict["Num. Event Rules"] = len(event_rules) + if len(event_rules) > 0: + status_dict['Events'] = [] + for rule in event_rules: + event_dict = {} + rule_name = rule['Name'] + event_dict["Event Rule Name"] = rule_name + event_dict["Event Rule Schedule"] = rule.get('ScheduleExpression', None) + event_dict["Event Rule State"] = rule.get('State', None).title() + event_dict["Event Rule ARN"] = rule.get('Arn', None) + status_dict['Events'].append(event_dict) + + if return_json: + # Putting the status in machine readable format + # https://github.com/Miserlou/Zappa/issues/407 + print(json.dumpsJSON(status_dict)) + else: + click.echo("Status for " + click.style(self.lambda_name, bold=True) + ": ") + for k, v in status_dict.items(): + if k == 'Events': + # Events are a list of dicts + for event in v: + for item_k, item_v in event.items(): + tabular_print(item_k, item_v) + else: + tabular_print(k, v) + + # TODO: S3/SQS/etc. type events? 
+
+ return True
+
+ def check_stage_name(self, stage_name):
+ """
+ Make sure the stage name matches the AWS-allowed pattern
+
+ (calls to apigateway_client.create_deployment, will fail with error
+ message "ClientError: An error occurred (BadRequestException) when
+ calling the CreateDeployment operation: Stage name only allows
+ a-zA-Z0-9_" if the pattern does not match)
+ """
+ if self.stage_name_env_pattern.match(stage_name):
+ return True
+ raise ValueError("AWS requires stage name to match a-zA-Z0-9_")
+
+ def check_environment(self, environment):
+ """
+ Make sure the environment contains only strings
+
+ (since putenv needs a string)
+ """
+
+ non_strings = []
+ for (k,v) in environment.items():
+ if not isinstance(v, basestring):
+ non_strings.append(k)
+ if non_strings:
+ raise ValueError("The following environment variables are not strings: {}".format(", ".join(non_strings)))
+ else:
+ return True
+
+ def init(self, settings_file="zappa_settings.json"):
+ """
+ Initialize a new Zappa project by creating a new zappa_settings.json in a guided process.
+
+ This should probably be broken up into a few separate components once it's stable.
+ Testing these inputs requires monkeypatching with mock, which isn't pretty.
+
+ """
+
+ # Make sure we're in a venv.
+ self.check_venv()
+
+ # Ensure that we don't already have a zappa_settings file.
+ if os.path.isfile(settings_file):
+ raise ClickException("This project already has a " + click.style("{0!s} file".format(settings_file), fg="red", bold=True) + "!")
+
+ # Explain system. 
+ click.echo(click.style("""\n███████╗ █████╗ ██████╗ ██████╗ █████╗ +╚══███╔╝██╔══██╗██╔══██╗██╔══██╗██╔══██╗ + ███╔╝ ███████║██████╔╝██████╔╝███████║ + ███╔╝ ██╔══██║██╔═══╝ ██╔═══╝ ██╔══██║ +███████╗██║ ██║██║ ██║ ██║ ██║ +╚══════╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═╝ ╚═╝\n""", fg='green', bold=True)) + + click.echo(click.style("Welcome to ", bold=True) + click.style("Zappa", fg='green', bold=True) + click.style("!\n", bold=True)) + click.echo(click.style("Zappa", bold=True) + " is a system for running server-less Python web applications" + " on AWS Lambda and AWS API Gateway.") + click.echo("This `init` command will help you create and configure your new Zappa deployment.") + click.echo("Let's get started!\n") + + # Create Env + while True: + click.echo("Your Zappa configuration can support multiple production stages, like '" + + click.style("dev", bold=True) + "', '" + click.style("staging", bold=True) + "', and '" + + click.style("production", bold=True) + "'.") + env = input("What do you want to call this environment (default 'dev'): ") or "dev" + try: + self.check_stage_name(env) + break + except ValueError: + click.echo(click.style("Stage names must match a-zA-Z0-9_", fg="red")) + + # Detect AWS profiles and regions + # If anyone knows a more straightforward way to easily detect and parse AWS profiles I'm happy to change this, feels like a hack + session = botocore.session.Session() + config = session.full_config + profiles = config.get("profiles", {}) + profile_names = list(profiles.keys()) + + click.echo("\nAWS Lambda and API Gateway are only available in certain regions. "\ + "Let's check to make sure you have a profile set up in one that will work.") + + if not profile_names: + profile_name, profile = None, None + click.echo("We couldn't find an AWS profile to use. Before using Zappa, you'll need to set one up. 
See here for more info: {}" + .format(click.style(BOTO3_CONFIG_DOCS_URL, fg="blue", underline=True))) + elif len(profile_names) == 1: + profile_name = profile_names[0] + profile = profiles[profile_name] + click.echo("Okay, using profile {}!".format(click.style(profile_name, bold=True))) + else: + if "default" in profile_names: + default_profile = [p for p in profile_names if p == "default"][0] + else: + default_profile = profile_names[0] + + while True: + profile_name = input("We found the following profiles: {}, and {}. "\ + "Which would you like us to use? (default '{}'): " + .format( + ', '.join(profile_names[:-1]), + profile_names[-1], + default_profile + )) or default_profile + if profile_name in profiles: + profile = profiles[profile_name] + break + else: + click.echo("Please enter a valid name for your AWS profile.") + + profile_region = profile.get("region") if profile else None + + # Create Bucket + click.echo("\nYour Zappa deployments will need to be uploaded to a " + click.style("private S3 bucket", bold=True) + ".") + click.echo("If you don't have a bucket yet, we'll create one for you too.") + default_bucket = "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9)) + while True: + bucket = input("What do you want to call your bucket? (default '%s'): " % default_bucket) or default_bucket + + if is_valid_bucket_name(bucket): + break + + click.echo(click.style("Invalid bucket name!", bold=True)) + click.echo("S3 buckets must be named according to the following rules:") + click.echo("""* Bucket names must be unique across all existing bucket names in Amazon S3. +* Bucket names must comply with DNS naming conventions. +* Bucket names must be at least 3 and no more than 63 characters long. +* Bucket names must not contain uppercase characters or underscores. +* Bucket names must start with a lowercase letter or number. +* Bucket names must be a series of one or more labels. 
Adjacent labels are separated + by a single period (.). Bucket names can contain lowercase letters, numbers, and + hyphens. Each label must start and end with a lowercase letter or a number. +* Bucket names must not be formatted as an IP address (for example, 192.168.5.4). +* When you use virtual hosted–style buckets with Secure Sockets Layer (SSL), the SSL + wildcard certificate only matches buckets that don't contain periods. To work around + this, use HTTP or write your own certificate verification logic. We recommend that + you do not use periods (".") in bucket names when using virtual hosted–style buckets. +""") + + + # Detect Django/Flask + try: # pragma: no cover + import django + has_django = True + except ImportError as e: + has_django = False + + try: # pragma: no cover + import flask + has_flask = True + except ImportError as e: + has_flask = False + + print('') + # App-specific + if has_django: # pragma: no cover + click.echo("It looks like this is a " + click.style("Django", bold=True) + " application!") + click.echo("What is the " + click.style("module path", bold=True) + " to your projects's Django settings?") + django_settings = None + + matches = detect_django_settings() + while django_settings in [None, '']: + if matches: + click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True)) + django_settings = input("Where are your project's settings? 
(default '%s'): " % matches[0]) or matches[0] + else: + click.echo("(This will likely be something like 'your_project.settings')") + django_settings = input("Where are your project's settings?: ") + django_settings = django_settings.replace("'", "") + django_settings = django_settings.replace('"', "") + else: + matches = None + if has_flask: + click.echo("It looks like this is a " + click.style("Flask", bold=True) + " application.") + matches = detect_flask_apps() + click.echo("What's the " + click.style("modular path", bold=True) + " to your app's function?") + click.echo("This will likely be something like 'your_module.app'.") + app_function = None + while app_function in [None, '']: + if matches: + click.echo("We discovered: " + click.style(', '.join('{}'.format(i) for v, i in enumerate(matches)), bold=True)) + app_function = input("Where is your app's function? (default '%s'): " % matches[0]) or matches[0] + else: + app_function = input("Where is your app's function?: ") + app_function = app_function.replace("'", "") + app_function = app_function.replace('"', "") + + # TODO: Create VPC? + # Memory size? Time limit? + # Domain? LE keys? Region? + # 'Advanced Settings' mode? + + # Globalize + click.echo("\nYou can optionally deploy to " + click.style("all available regions", bold=True) + " in order to provide fast global service.") + click.echo("If you are using Zappa for the first time, you probably don't want to do this!") + global_deployment = False + while True: + global_type = input("Would you like to deploy this application " + click.style("globally", bold=True) + "? 
(default 'n') [y/n/(p)rimary]: ") + if not global_type: + break + if global_type.lower() in ["y", "yes", "p", "primary"]: + global_deployment = True + break + if global_type.lower() in ["n", "no"]: + global_deployment = False + break + + # The given environment name + zappa_settings = { + env: { + 'profile_name': profile_name, + 's3_bucket': bucket, + 'runtime': get_venv_from_python_version(), + 'project_name': self.get_project_name() + } + } + + if profile_region: + zappa_settings[env]['aws_region'] = profile_region + + if has_django: + zappa_settings[env]['django_settings'] = django_settings + else: + zappa_settings[env]['app_function'] = app_function + + # Global Region Deployment + if global_deployment: + additional_regions = [r for r in API_GATEWAY_REGIONS if r != profile_region] + # Create additional stages + if global_type.lower() in ["p", "primary"]: + additional_regions = [r for r in additional_regions if '-1' in r] + + for region in additional_regions: + env_name = env + '_' + region.replace('-', '_') + g_env = { + env_name: { + 'extends': env, + 'aws_region': region + } + } + zappa_settings.update(g_env) + + import json as json # hjson is fine for loading, not fine for writing. + zappa_settings_json = json.dumps(zappa_settings, sort_keys=True, indent=4) + + click.echo("\nOkay, here's your " + click.style("zappa_settings.json", bold=True) + ":\n") + click.echo(click.style(zappa_settings_json, fg="yellow", bold=False)) + + confirm = input("\nDoes this look " + click.style("okay", bold=True, fg="green") + "? (default 'y') [y/n]: ") or 'yes' + if confirm[0] not in ['y', 'Y', 'yes', 'YES']: + click.echo("" + click.style("Sorry", bold=True, fg='red') + " to hear that! Please init again.") + return + + # Write + with open("zappa_settings.json", "w") as zappa_settings_file: + zappa_settings_file.write(zappa_settings_json) + + if global_deployment: + click.echo("\n" + click.style("Done", bold=True) + "! 
You can also " + click.style("deploy all", bold=True) + " by executing:\n") + click.echo(click.style("\t$ zappa deploy --all", bold=True)) + + click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n") + click.echo(click.style("\t$ zappa update --all", bold=True)) + else: + click.echo("\n" + click.style("Done", bold=True) + "! Now you can " + click.style("deploy", bold=True) + " your Zappa application by executing:\n") + click.echo(click.style("\t$ zappa deploy %s" % env, bold=True)) + + click.echo("\nAfter that, you can " + click.style("update", bold=True) + " your application code with:\n") + click.echo(click.style("\t$ zappa update %s" % env, bold=True)) + + click.echo("\nTo learn more, check out our project page on " + click.style("GitHub", bold=True) + + " here: " + click.style("https://github.com/Miserlou/Zappa", fg="cyan", bold=True)) + click.echo("and stop by our " + click.style("Slack", bold=True) + " channel here: " + + click.style("https://zappateam.slack.com", fg="cyan", bold=True)) + click.echo("\nEnjoy!,") + click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!") + + return + + def certify(self, no_confirm=True, manual=False): + """ + Register or update a domain certificate for this env. + """ + + if not self.domain: + raise ClickException("Can't certify a domain without " + click.style("domain", fg="red", bold=True) + " configured!") + + if not no_confirm: # pragma: no cover + confirm = input("Are you sure you want to certify? [y/n] ") + if confirm != 'y': + return + + # Make sure this isn't already deployed. 
+ deployed_versions = self.zappa.get_lambda_function_versions(self.lambda_name) + if len(deployed_versions) == 0: + raise ClickException("This application " + click.style("isn't deployed yet", fg="red") + + " - did you mean to call " + click.style("deploy", bold=True) + "?") + + + account_key_location = self.stage_config.get('lets_encrypt_key', None) + cert_location = self.stage_config.get('certificate', None) + cert_key_location = self.stage_config.get('certificate_key', None) + cert_chain_location = self.stage_config.get('certificate_chain', None) + cert_arn = self.stage_config.get('certificate_arn', None) + base_path = self.stage_config.get('base_path', None) + + # These are sensitive + certificate_body = None + certificate_private_key = None + certificate_chain = None + + # Prepare for custom Let's Encrypt + if not cert_location and not cert_arn: + if not account_key_location: + raise ClickException("Can't certify a domain without " + click.style("lets_encrypt_key", fg="red", bold=True) + + " or " + click.style("certificate", fg="red", bold=True)+ + " or " + click.style("certificate_arn", fg="red", bold=True) + " configured!") + + # Get install account_key to /tmp/account_key.pem + from .letsencrypt import gettempdir + if account_key_location.startswith('s3://'): + bucket, key_name = parse_s3_url(account_key_location) + self.zappa.s3_client.download_file(bucket, key_name, os.path.join(gettempdir(), 'account.key')) + else: + from shutil import copyfile + copyfile(account_key_location, os.path.join(gettempdir(), 'account.key')) + + # Prepare for Custom SSL + elif not account_key_location and not cert_arn: + if not cert_location or not cert_key_location or not cert_chain_location: + raise ClickException("Can't certify a domain without " + + click.style("certificate, certificate_key and certificate_chain", fg="red", bold=True) + " configured!") + + # Read the supplied certificates. 
+ with open(cert_location) as f: + certificate_body = f.read() + + with open(cert_key_location) as f: + certificate_private_key = f.read() + + with open(cert_chain_location) as f: + certificate_chain = f.read() + + + click.echo("Certifying domain " + click.style(self.domain, fg="green", bold=True) + "..") + + # Get cert and update domain. + + # Let's Encrypt + if not cert_location and not cert_arn: + from .letsencrypt import get_cert_and_update_domain + cert_success = get_cert_and_update_domain( + self.zappa, + self.lambda_name, + self.api_stage, + self.domain, + manual + ) + + # Custom SSL / ACM + else: + route53 = self.stage_config.get('route53_enabled', True) + if not self.zappa.get_domain_name(self.domain, route53=route53): + dns_name = self.zappa.create_domain_name( + domain_name=self.domain, + certificate_name=self.domain + "-Zappa-Cert", + certificate_body=certificate_body, + certificate_private_key=certificate_private_key, + certificate_chain=certificate_chain, + certificate_arn=cert_arn, + lambda_name=self.lambda_name, + stage=self.api_stage, + base_path=base_path + ) + if route53: + self.zappa.update_route53_records(self.domain, dns_name) + print("Created a new domain name with supplied certificate. Please note that it can take up to 40 minutes for this domain to be " + "created and propagated through AWS, but it requires no further work on your part.") + else: + self.zappa.update_domain_name( + domain_name=self.domain, + certificate_name=self.domain + "-Zappa-Cert", + certificate_body=certificate_body, + certificate_private_key=certificate_private_key, + certificate_chain=certificate_chain, + certificate_arn=cert_arn, + lambda_name=self.lambda_name, + stage=self.api_stage, + route53=route53, + base_path=base_path + ) + + cert_success = True + + if cert_success: + click.echo("Certificate " + click.style("updated", fg="green", bold=True) + "!") + else: + click.echo(click.style("Failed", fg="red", bold=True) + " to generate or install certificate! 
:(") + click.echo("\n==============\n") + shamelessly_promote() + + ## + # Shell + ## + def shell(self): + """ + Spawn a debug shell. + """ + click.echo(click.style("NOTICE!", fg="yellow", bold=True) + " This is a " + click.style("local", fg="green", bold=True) + " shell, inside a " + click.style("Zappa", bold=True) + " object!") + self.zappa.shell() + return + + ## + # Utility + ## + + def callback(self, position): + """ + Allows the execution of custom code between creation of the zip file and deployment to AWS. + + :return: None + """ + + callbacks = self.stage_config.get('callbacks', {}) + callback = callbacks.get(position) + + if callback: + (mod_path, cb_func_name) = callback.rsplit('.', 1) + + try: # Prefer callback in working directory + if mod_path.count('.') >= 1: # Callback function is nested in a folder + (mod_folder_path, mod_name) = mod_path.rsplit('.', 1) + mod_folder_path_fragments = mod_folder_path.split('.') + working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments) + else: + mod_name = mod_path + working_dir = os.getcwd() + + working_dir_importer = pkgutil.get_importer(working_dir) + module_ = working_dir_importer.find_module(mod_name).load_module(mod_name) + + except (ImportError, AttributeError): + + try: # Callback func might be in virtualenv + module_ = importlib.import_module(mod_path) + except ImportError: # pragma: no cover + raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( + "import {position} callback ".format(position=position), + bold=True) + 'module: "{mod_path}"'.format(mod_path=click.style(mod_path, bold=True))) + + if not hasattr(module_, cb_func_name): # pragma: no cover + raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( + "find {position} callback ".format(position=position), bold=True) + 'function: "{cb_func_name}" '.format( + cb_func_name=click.style(cb_func_name, bold=True)) + 'in module "{mod_path}"'.format(mod_path=mod_path)) + + + cb_func = getattr(module_, 
cb_func_name) + cb_func(self) # Call the function passing self + + def check_for_update(self): + """ + Print a warning if there's a new Zappa version available. + """ + try: + version = pkg_resources.require("zappa")[0].version + updateable = check_new_version_available(version) + if updateable: + click.echo(click.style("Important!", fg="yellow", bold=True) + + " A new version of " + click.style("Zappa", bold=True) + " is available!") + click.echo("Upgrade with: " + click.style("pip install zappa --upgrade", bold=True)) + click.echo("Visit the project page on GitHub to see the latest changes: " + + click.style("https://github.com/Miserlou/Zappa", bold=True)) + except Exception as e: # pragma: no cover + print(e) + return + + def load_settings(self, settings_file=None, session=None): + """ + Load the local zappa_settings file. + + An existing boto session can be supplied, though this is likely for testing purposes. + + Returns the loaded Zappa object. + """ + + # Ensure we're passed a valid settings file. + if not settings_file: + settings_file = self.get_json_or_yaml_settings() + if not os.path.isfile(settings_file): + raise ClickException("Please configure your zappa_settings file.") + + # Load up file + self.load_settings_file(settings_file) + + # Make sure that the stages are valid names: + for stage_name in self.zappa_settings.keys(): + try: + self.check_stage_name(stage_name) + except ValueError: + raise ValueError("API stage names must match a-zA-Z0-9_ ; '{0!s}' does not.".format(stage_name)) + + # Make sure that this stage is our settings + if self.api_stage not in self.zappa_settings.keys(): + raise ClickException("Please define stage '{0!s}' in your Zappa settings.".format(self.api_stage)) + + # We need a working title for this project. Use one if supplied, else cwd dirname. 
+ if 'project_name' in self.stage_config: # pragma: no cover + # If the name is invalid, this will throw an exception with message up stack + self.project_name = validate_name(self.stage_config['project_name']) + else: + self.project_name = self.get_project_name() + + # The name of the actual AWS Lambda function, ex, 'helloworld-dev' + # Assume that we already have have validated the name beforehand. + # Related: https://github.com/Miserlou/Zappa/pull/664 + # https://github.com/Miserlou/Zappa/issues/678 + # And various others from Slack. + self.lambda_name = slugify.slugify(self.project_name + '-' + self.api_stage) + + # Load stage-specific settings + self.s3_bucket_name = self.stage_config.get('s3_bucket', "zappa-" + ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(9))) + self.vpc_config = self.stage_config.get('vpc_config', {}) + self.memory_size = self.stage_config.get('memory_size', 512) + self.app_function = self.stage_config.get('app_function', None) + self.exception_handler = self.stage_config.get('exception_handler', None) + self.aws_region = self.stage_config.get('aws_region', None) + self.debug = self.stage_config.get('debug', True) + self.prebuild_script = self.stage_config.get('prebuild_script', None) + self.profile_name = self.stage_config.get('profile_name', None) + self.log_level = self.stage_config.get('log_level', "DEBUG") + self.domain = self.stage_config.get('domain', None) + self.base_path = self.stage_config.get('base_path', None) + self.timeout_seconds = self.stage_config.get('timeout_seconds', 30) + dead_letter_arn = self.stage_config.get('dead_letter_arn', '') + self.dead_letter_config = {'TargetArn': dead_letter_arn} if dead_letter_arn else {} + self.cognito = self.stage_config.get('cognito', None) + self.num_retained_versions = self.stage_config.get('num_retained_versions',None) + + # Check for valid values of num_retained_versions + if self.num_retained_versions is not None and type(self.num_retained_versions) 
is not int: + raise ClickException("Please supply either an integer or null for num_retained_versions in the zappa_settings.json. Found %s" % type(self.num_retained_versions)) + elif type(self.num_retained_versions) is int and self.num_retained_versions<1: + raise ClickException("The value for num_retained_versions in the zappa_settings.json should be greater than 0.") + + # Provide legacy support for `use_apigateway`, now `apigateway_enabled`. + # https://github.com/Miserlou/Zappa/issues/490 + # https://github.com/Miserlou/Zappa/issues/493 + self.use_apigateway = self.stage_config.get('use_apigateway', True) + if self.use_apigateway: + self.use_apigateway = self.stage_config.get('apigateway_enabled', True) + self.apigateway_description = self.stage_config.get('apigateway_description', None) + + self.lambda_handler = self.stage_config.get('lambda_handler', 'handler.lambda_handler') + # DEPRECATED. https://github.com/Miserlou/Zappa/issues/456 + self.remote_env_bucket = self.stage_config.get('remote_env_bucket', None) + self.remote_env_file = self.stage_config.get('remote_env_file', None) + self.remote_env = self.stage_config.get('remote_env', None) + self.settings_file = self.stage_config.get('settings_file', None) + self.django_settings = self.stage_config.get('django_settings', None) + self.manage_roles = self.stage_config.get('manage_roles', True) + self.binary_support = self.stage_config.get('binary_support', True) + self.api_key_required = self.stage_config.get('api_key_required', False) + self.api_key = self.stage_config.get('api_key') + self.endpoint_configuration = self.stage_config.get('endpoint_configuration', None) + self.iam_authorization = self.stage_config.get('iam_authorization', False) + self.cors = self.stage_config.get("cors", False) + self.lambda_description = self.stage_config.get('lambda_description', "Zappa Deployment") + self.lambda_concurrency = self.stage_config.get('lambda_concurrency', None) + self.environment_variables = 
self.stage_config.get('environment_variables', {}) + self.aws_environment_variables = self.stage_config.get('aws_environment_variables', {}) + self.check_environment(self.environment_variables) + self.authorizer = self.stage_config.get('authorizer', {}) + self.runtime = self.stage_config.get('runtime', get_runtime_from_python_version()) + self.aws_kms_key_arn = self.stage_config.get('aws_kms_key_arn', '') + self.context_header_mappings = self.stage_config.get('context_header_mappings', {}) + self.xray_tracing = self.stage_config.get('xray_tracing', False) + self.desired_role_arn = self.stage_config.get('role_arn') + self.layers = self.stage_config.get('layers', None) + + # Load ALB-related settings + self.use_alb = self.stage_config.get('alb_enabled', False) + self.alb_vpc_config = self.stage_config.get('alb_vpc_config', {}) + + # Additional tags + self.tags = self.stage_config.get('tags', {}) + + desired_role_name = self.lambda_name + "-ZappaLambdaExecutionRole" + self.zappa = Zappa( boto_session=session, + profile_name=self.profile_name, + aws_region=self.aws_region, + load_credentials=self.load_credentials, + desired_role_name=desired_role_name, + desired_role_arn=self.desired_role_arn, + runtime=self.runtime, + tags=self.tags, + endpoint_urls=self.stage_config.get('aws_endpoint_urls',{}), + xray_tracing=self.xray_tracing + ) + + for setting in CUSTOM_SETTINGS: + if setting in self.stage_config: + setting_val = self.stage_config[setting] + # Read the policy file contents. + if setting.endswith('policy'): + with open(setting_val, 'r') as f: + setting_val = f.read() + setattr(self.zappa, setting, setting_val) + + if self.app_function: + self.collision_warning(self.app_function) + if self.app_function[-3:] == '.py': + click.echo(click.style("Warning!", fg="red", bold=True) + + " Your app_function is pointing to a " + click.style("file and not a function", bold=True) + + "! 
It should probably be something like 'my_file.app', not 'my_file.py'!") + + return self.zappa + + def get_json_or_yaml_settings(self, settings_name="zappa_settings"): + """ + Return zappa_settings path as JSON or YAML (or TOML), as appropriate. + """ + zs_json = settings_name + ".json" + zs_yml = settings_name + ".yml" + zs_yaml = settings_name + ".yaml" + zs_toml = settings_name + ".toml" + + # Must have at least one + if not os.path.isfile(zs_json) \ + and not os.path.isfile(zs_yml) \ + and not os.path.isfile(zs_yaml) \ + and not os.path.isfile(zs_toml): + raise ClickException("Please configure a zappa_settings file or call `zappa init`.") + + # Prefer JSON + if os.path.isfile(zs_json): + settings_file = zs_json + elif os.path.isfile(zs_toml): + settings_file = zs_toml + elif os.path.isfile(zs_yml): + settings_file = zs_yml + else: + settings_file = zs_yaml + + return settings_file + + def load_settings_file(self, settings_file=None): + """ + Load our settings file. + """ + + if not settings_file: + settings_file = self.get_json_or_yaml_settings() + if not os.path.isfile(settings_file): + raise ClickException("Please configure your zappa_settings file or call `zappa init`.") + + path, ext = os.path.splitext(settings_file) + if ext == '.yml' or ext == '.yaml': + with open(settings_file) as yaml_file: + try: + self.zappa_settings = yaml.safe_load(yaml_file) + except ValueError: # pragma: no cover + raise ValueError("Unable to load the Zappa settings YAML. It may be malformed.") + elif ext == '.toml': + with open(settings_file) as toml_file: + try: + self.zappa_settings = toml.load(toml_file) + except ValueError: # pragma: no cover + raise ValueError("Unable to load the Zappa settings TOML. It may be malformed.") + else: + with open(settings_file) as json_file: + try: + self.zappa_settings = json.load(json_file) + except ValueError: # pragma: no cover + raise ValueError("Unable to load the Zappa settings JSON. 
It may be malformed.") + + def create_package(self, output=None): + """ + Ensure that the package can be properly configured, + and then create it. + + """ + + # Create the Lambda zip package (includes project and virtualenvironment) + # Also define the path the handler file so it can be copied to the zip + # root for Lambda. + current_file = os.path.dirname(os.path.abspath( + inspect.getfile(inspect.currentframe()))) + handler_file = os.sep.join(current_file.split(os.sep)[0:]) + os.sep + 'handler.py' + + # Create the zip file(s) + if self.stage_config.get('slim_handler', False): + # Create two zips. One with the application and the other with just the handler. + # https://github.com/Miserlou/Zappa/issues/510 + self.zip_path = self.zappa.create_lambda_zip( + prefix=self.lambda_name, + use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True), + exclude=self.stage_config.get('exclude', []), + exclude_glob=self.stage_config.get('exclude_glob', []), + disable_progress=self.disable_progress, + archive_format='tarball' + ) + + # Make sure the normal venv is not included in the handler's zip + exclude = self.stage_config.get('exclude', []) + cur_venv = self.zappa.get_current_venv() + exclude.append(cur_venv.split('/')[-1]) + self.handler_path = self.zappa.create_lambda_zip( + prefix='handler_{0!s}'.format(self.lambda_name), + venv=self.zappa.create_handler_venv(), + handler_file=handler_file, + slim_handler=True, + exclude=exclude, + exclude_glob=self.stage_config.get('exclude_glob', []), + output=output, + disable_progress=self.disable_progress + ) + else: + # This could be python3.6 optimized. 
+ exclude = self.stage_config.get( + 'exclude', [ + "boto3", + "dateutil", + "botocore", + "s3transfer", + "concurrent" + ]) + + # Create a single zip that has the handler and application + self.zip_path = self.zappa.create_lambda_zip( + prefix=self.lambda_name, + handler_file=handler_file, + use_precompiled_packages=self.stage_config.get('use_precompiled_packages', True), + exclude=exclude, + exclude_glob=self.stage_config.get('exclude_glob', []), + output=output, + disable_progress=self.disable_progress + ) + + # Warn if this is too large for Lambda. + file_stats = os.stat(self.zip_path) + if file_stats.st_size > 52428800: # pragma: no cover + print('\n\nWarning: Application zip package is likely to be too large for AWS Lambda. ' + 'Try setting "slim_handler" to true in your Zappa settings file.\n\n') + + # Throw custom settings into the zip that handles requests + if self.stage_config.get('slim_handler', False): + handler_zip = self.handler_path + else: + handler_zip = self.zip_path + + with zipfile.ZipFile(handler_zip, 'a') as lambda_zip: + + settings_s = "# Generated by Zappa\n" + + if self.app_function: + if '.' not in self.app_function: # pragma: no cover + raise ClickException("Your " + click.style("app_function", fg='red', bold=True) + " value is not a modular path." 
+ + " It needs to be in the format `" + click.style("your_module.your_app_object", bold=True) + "`.") + app_module, app_function = self.app_function.rsplit('.', 1) + settings_s = settings_s + "APP_MODULE='{0!s}'\nAPP_FUNCTION='{1!s}'\n".format(app_module, app_function) + + if self.exception_handler: + settings_s += "EXCEPTION_HANDLER='{0!s}'\n".format(self.exception_handler) + else: + settings_s += "EXCEPTION_HANDLER=None\n" + + if self.debug: + settings_s = settings_s + "DEBUG=True\n" + else: + settings_s = settings_s + "DEBUG=False\n" + + settings_s = settings_s + "LOG_LEVEL='{0!s}'\n".format((self.log_level)) + + if self.binary_support: + settings_s = settings_s + "BINARY_SUPPORT=True\n" + else: + settings_s = settings_s + "BINARY_SUPPORT=False\n" + + head_map_dict = {} + head_map_dict.update(dict(self.context_header_mappings)) + settings_s = settings_s + "CONTEXT_HEADER_MAPPINGS={0}\n".format( + head_map_dict + ) + + # If we're on a domain, we don't need to define the /<> in + # the WSGI PATH + if self.domain: + settings_s = settings_s + "DOMAIN='{0!s}'\n".format((self.domain)) + else: + settings_s = settings_s + "DOMAIN=None\n" + + if self.base_path: + settings_s = settings_s + "BASE_PATH='{0!s}'\n".format((self.base_path)) + else: + settings_s = settings_s + "BASE_PATH=None\n" + + # Pass through remote config bucket and path + if self.remote_env: + settings_s = settings_s + "REMOTE_ENV='{0!s}'\n".format( + self.remote_env + ) + # DEPRECATED. 
use remote_env instead
+            elif self.remote_env_bucket and self.remote_env_file:
+                settings_s = settings_s + "REMOTE_ENV='s3://{0!s}/{1!s}'\n".format(
+                    self.remote_env_bucket, self.remote_env_file
+                )
+
+            # Local envs
+            env_dict = {}
+            if self.aws_region:
+                env_dict['AWS_REGION'] = self.aws_region
+            env_dict.update(dict(self.environment_variables))
+
+            # Environment variable keys must be ascii
+            # https://github.com/Miserlou/Zappa/issues/604
+            # https://github.com/Miserlou/Zappa/issues/998
+            try:
+                env_dict = dict((k.encode('ascii').decode('ascii'), v) for (k, v) in env_dict.items())
+            except Exception:
+                raise ValueError("Environment variable keys must be ascii.")
+
+            settings_s = settings_s + "ENVIRONMENT_VARIABLES={0}\n".format(
+                env_dict
+            )
+
+            # We can be environment-aware
+            settings_s = settings_s + "API_STAGE='{0!s}'\n".format((self.api_stage))
+            settings_s = settings_s + "PROJECT_NAME='{0!s}'\n".format((self.project_name))
+
+            if self.settings_file:
+                settings_s = settings_s + "SETTINGS_FILE='{0!s}'\n".format((self.settings_file))
+            else:
+                settings_s = settings_s + "SETTINGS_FILE=None\n"
+
+            if self.django_settings:
+                settings_s = settings_s + "DJANGO_SETTINGS='{0!s}'\n".format((self.django_settings))
+            else:
+                settings_s = settings_s + "DJANGO_SETTINGS=None\n"
+
+            # If slim handler, path to project zip
+            if self.stage_config.get('slim_handler', False):
+                settings_s += "ARCHIVE_PATH='s3://{0!s}/{1!s}_{2!s}_current_project.tar.gz'\n".format(
+                    self.s3_bucket_name, self.api_stage, self.project_name)
+
+                # since includes are for slim handler add the setting here by joining arbitrary list from zappa_settings file
+                # and tell the handler we are the slim_handler
+                # https://github.com/Miserlou/Zappa/issues/776
+                settings_s += "SLIM_HANDLER=True\n"
+
+                include = self.stage_config.get('include', [])
+                if len(include) >= 1:
+                    settings_s += "INCLUDE=" + str(include) + '\n'
+
+            # AWS Events function mapping
+            event_mapping = {}
+            events = 
self.stage_config.get('events', [])
+            for event in events:
+                arn = event.get('event_source', {}).get('arn')
+                function = event.get('function')
+                if arn and function:
+                    event_mapping[arn] = function
+            settings_s = settings_s + "AWS_EVENT_MAPPING={0!s}\n".format(event_mapping)
+
+            # Map Lex bot events
+            bot_events = self.stage_config.get('bot_events', [])
+            bot_events_mapping = {}
+            for bot_event in bot_events:
+                event_source = bot_event.get('event_source', {})
+                intent = event_source.get('intent')
+                invocation_source = event_source.get('invocation_source')
+                function = bot_event.get('function')
+                if intent and invocation_source and function:
+                    bot_events_mapping[str(intent) + ':' + str(invocation_source)] = function
+
+            settings_s = settings_s + "AWS_BOT_EVENT_MAPPING={0!s}\n".format(bot_events_mapping)
+
+            # Map cognito triggers
+            cognito_trigger_mapping = {}
+            cognito_config = self.stage_config.get('cognito', {})
+            triggers = cognito_config.get('triggers', [])
+            for trigger in triggers:
+                source = trigger.get('source')
+                function = trigger.get('function')
+                if source and function:
+                    cognito_trigger_mapping[source] = function
+            settings_s = settings_s + "COGNITO_TRIGGER_MAPPING={0!s}\n".format(cognito_trigger_mapping)
+
+            # Authorizer config
+            authorizer_function = self.authorizer.get('function', None)
+            if authorizer_function:
+                settings_s += "AUTHORIZER_FUNCTION='{0!s}'\n".format(authorizer_function)
+
+            # Copy our Django app into root of our package.
+            # It doesn't work otherwise. 
+ if self.django_settings: + base = __file__.rsplit(os.sep, 1)[0] + django_py = ''.join(os.path.join(base, 'ext', 'django_zappa.py')) + lambda_zip.write(django_py, 'django_zappa_app.py') + + # async response + async_response_table = self.stage_config.get('async_response_table', '') + settings_s += "ASYNC_RESPONSE_TABLE='{0!s}'\n".format(async_response_table) + + # Lambda requires a specific chmod + temp_settings = tempfile.NamedTemporaryFile(delete=False) + os.chmod(temp_settings.name, 0o644) + temp_settings.write(bytes(settings_s, "utf-8")) + temp_settings.close() + lambda_zip.write(temp_settings.name, 'zappa_settings.py') + os.unlink(temp_settings.name) + + def remove_local_zip(self): + """ + Remove our local zip file. + """ + + if self.stage_config.get('delete_local_zip', True): + try: + if os.path.isfile(self.zip_path): + os.remove(self.zip_path) + if self.handler_path and os.path.isfile(self.handler_path): + os.remove(self.handler_path) + except Exception as e: # pragma: no cover + sys.exit(-1) + + def remove_uploaded_zip(self): + """ + Remove the local and S3 zip file after uploading and updating. + """ + + # Remove the uploaded zip from S3, because it is now registered.. + if self.stage_config.get('delete_s3_zip', True): + self.zappa.remove_from_s3(self.zip_path, self.s3_bucket_name) + if self.stage_config.get('slim_handler', False): + # Need to keep the project zip as the slim handler uses it. + self.zappa.remove_from_s3(self.handler_path, self.s3_bucket_name) + + def on_exit(self): + """ + Cleanup after the command finishes. + Always called: SystemExit, KeyboardInterrupt and any other Exception that occurs. + """ + if self.zip_path: + # Only try to remove uploaded zip if we're running a command that has loaded credentials + if self.load_credentials: + self.remove_uploaded_zip() + + self.remove_local_zip() + + def print_logs(self, logs, colorize=True, http=False, non_http=False, force_colorize=None): + """ + Parse, filter and print logs to the console. 
+ + """ + + for log in logs: + timestamp = log['timestamp'] + message = log['message'] + if "START RequestId" in message: + continue + if "REPORT RequestId" in message: + continue + if "END RequestId" in message: + continue + + if not colorize and not force_colorize: + if http: + if self.is_http_log_entry(message.strip()): + print("[" + str(timestamp) + "] " + message.strip()) + elif non_http: + if not self.is_http_log_entry(message.strip()): + print("[" + str(timestamp) + "] " + message.strip()) + else: + print("[" + str(timestamp) + "] " + message.strip()) + else: + if http: + if self.is_http_log_entry(message.strip()): + click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()), color=force_colorize) + elif non_http: + if not self.is_http_log_entry(message.strip()): + click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()), color=force_colorize) + else: + click.echo(click.style("[", fg='cyan') + click.style(str(timestamp), bold=True) + click.style("]", fg='cyan') + self.colorize_log_entry(message.strip()), color=force_colorize) + + def is_http_log_entry(self, string): + """ + Determines if a log entry is an HTTP-formatted log string or not. + """ + # Debug event filter + if 'Zappa Event' in string: + return False + + # IP address filter + for token in string.replace('\t', ' ').split(' '): + try: + if (token.count('.') == 3 and token.replace('.', '').isnumeric()): + return True + except Exception: # pragma: no cover + pass + + return False + + def get_project_name(self): + return slugify.slugify(os.getcwd().split(os.sep)[-1])[:15] + + def colorize_log_entry(self, string): + """ + Apply various heuristics to return a colorized version of a string. + If these fail, simply return the string in plaintext. 
+ """ + + final_string = string + try: + + # First, do stuff in square brackets + inside_squares = re.findall(r'\[([^]]*)\]', string) + for token in inside_squares: + if token in ['CRITICAL', 'ERROR', 'WARNING', 'DEBUG', 'INFO', 'NOTSET']: + final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, fg='cyan', bold=True) + click.style("]", fg='cyan')) + else: + final_string = final_string.replace('[' + token + ']', click.style("[", fg='cyan') + click.style(token, bold=True) + click.style("]", fg='cyan')) + + # Then do quoted strings + quotes = re.findall(r'"[^"]*"', string) + for token in quotes: + final_string = final_string.replace(token, click.style(token, fg="yellow")) + + # And UUIDs + for token in final_string.replace('\t', ' ').split(' '): + try: + if token.count('-') == 4 and token.replace('-', '').isalnum(): + final_string = final_string.replace(token, click.style(token, fg="magenta")) + except Exception: # pragma: no cover + pass + + # And IP addresses + try: + if token.count('.') == 3 and token.replace('.', '').isnumeric(): + final_string = final_string.replace(token, click.style(token, fg="red")) + except Exception: # pragma: no cover + pass + + # And status codes + try: + if token in ['200']: + final_string = final_string.replace(token, click.style(token, fg="green")) + if token in ['400', '401', '403', '404', '405', '500']: + final_string = final_string.replace(token, click.style(token, fg="red")) + except Exception: # pragma: no cover + pass + + # And Zappa Events + try: + if "Zappa Event:" in final_string: + final_string = final_string.replace("Zappa Event:", click.style("Zappa Event:", bold=True, fg="green")) + except Exception: # pragma: no cover + pass + + # And dates + for token in final_string.split('\t'): + try: + is_date = parser.parse(token) + final_string = final_string.replace(token, click.style(token, fg="green")) + except Exception: # pragma: no cover + pass + + final_string = 
final_string.replace('\t', ' ').replace(' ', ' ') + if final_string[0] != ' ': + final_string = ' ' + final_string + return final_string + except Exception as e: # pragma: no cover + return string + + def execute_prebuild_script(self): + """ + Parse and execute the prebuild_script from the zappa_settings. + + """ + + (pb_mod_path, pb_func) = self.prebuild_script.rsplit('.', 1) + + try: # Prefer prebuild script in working directory + if pb_mod_path.count('.') >= 1: # Prebuild script func is nested in a folder + (mod_folder_path, mod_name) = pb_mod_path.rsplit('.', 1) + mod_folder_path_fragments = mod_folder_path.split('.') + working_dir = os.path.join(os.getcwd(), *mod_folder_path_fragments) + else: + mod_name = pb_mod_path + working_dir = os.getcwd() + + working_dir_importer = pkgutil.get_importer(working_dir) + module_ = working_dir_importer.find_module(mod_name).load_module(mod_name) + + except (ImportError, AttributeError): + + try: # Prebuild func might be in virtualenv + module_ = importlib.import_module(pb_mod_path) + except ImportError: # pragma: no cover + raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( + "import prebuild script ", bold=True) + 'module: "{pb_mod_path}"'.format( + pb_mod_path=click.style(pb_mod_path, bold=True))) + + if not hasattr(module_, pb_func): # pragma: no cover + raise ClickException(click.style("Failed ", fg="red") + 'to ' + click.style( + "find prebuild script ", bold=True) + 'function: "{pb_func}" '.format( + pb_func=click.style(pb_func, bold=True)) + 'in module "{pb_mod_path}"'.format( + pb_mod_path=pb_mod_path)) + + prebuild_function = getattr(module_, pb_func) + prebuild_function() # Call the function + + def collision_warning(self, item): + """ + Given a string, print a warning if this could + collide with a Zappa core package module. + + Use for app functions and events. + """ + + namespace_collisions = [ + "zappa.", "wsgi.", "middleware.", "handler.", "util.", "letsencrypt.", "cli." 
+ ] + for namespace_collision in namespace_collisions: + if item.startswith(namespace_collision): + click.echo(click.style("Warning!", fg="red", bold=True) + + " You may have a namespace collision between " + + click.style(item, bold=True) + + " and " + + click.style(namespace_collision, bold=True) + + "! You may want to rename that file.") + + def deploy_api_gateway(self, api_id): + cache_cluster_enabled = self.stage_config.get('cache_cluster_enabled', False) + cache_cluster_size = str(self.stage_config.get('cache_cluster_size', .5)) + endpoint_url = self.zappa.deploy_api_gateway( + api_id=api_id, + stage_name=self.api_stage, + cache_cluster_enabled=cache_cluster_enabled, + cache_cluster_size=cache_cluster_size, + cloudwatch_log_level=self.stage_config.get('cloudwatch_log_level', 'OFF'), + cloudwatch_data_trace=self.stage_config.get('cloudwatch_data_trace', False), + cloudwatch_metrics_enabled=self.stage_config.get('cloudwatch_metrics_enabled', False), + cache_cluster_ttl=self.stage_config.get('cache_cluster_ttl', 300), + cache_cluster_encrypted=self.stage_config.get('cache_cluster_encrypted', False) + ) + return endpoint_url + + def check_venv(self): + """ Ensure we're inside a virtualenv. """ + if self.vargs and self.vargs.get("no_venv"): + return + if self.zappa: + venv = self.zappa.get_current_venv() + else: + # Just for `init`, when we don't have settings yet. + venv = Zappa.get_current_venv() + if not venv: + raise ClickException( + click.style("Zappa", bold=True) + " requires an " + click.style("active virtual environment", bold=True, fg="red") + "!\n" + + "Learn more about virtual environments here: " + click.style("http://docs.python-guide.org/en/latest/dev/virtualenvs/", bold=False, fg="cyan")) + + def silence(self): + """ + Route all stdout to null. + """ + + sys.stdout = open(os.devnull, 'w') + sys.stderr = open(os.devnull, 'w') + + def touch_endpoint(self, endpoint_url): + """ + Test the deployed endpoint with a GET request. 
+ """ + + # Private APIGW endpoints most likely can't be reached by a deployer + # unless they're connected to the VPC by VPN. Instead of trying + # connect to the service, print a warning and let the user know + # to check it manually. + # See: https://github.com/Miserlou/Zappa/pull/1719#issuecomment-471341565 + if 'PRIVATE' in self.stage_config.get('endpoint_configuration', []): + print( + click.style("Warning!", fg="yellow", bold=True) + + " Since you're deploying a private API Gateway endpoint," + " Zappa cannot determine if your function is returning " + " a correct status code. You should check your API's response" + " manually before considering this deployment complete." + ) + return + + touch_path = self.stage_config.get('touch_path', '/') + req = requests.get(endpoint_url + touch_path) + + # Sometimes on really large packages, it can take 60-90 secs to be + # ready and requests will return 504 status_code until ready. + # So, if we get a 504 status code, rerun the request up to 4 times or + # until we don't get a 504 error + if req.status_code == 504: + i = 0 + status_code = 504 + while status_code == 504 and i <= 4: + req = requests.get(endpoint_url + touch_path) + status_code = req.status_code + i += 1 + + if req.status_code >= 500: + raise ClickException(click.style("Warning!", fg="red", bold=True) + + " Status check on the deployed lambda failed." + + " A GET request to '" + touch_path + "' yielded a " + + click.style(str(req.status_code), fg="red", bold=True) + " response code.") + +#################################################################### +# Main +#################################################################### + +def shamelessly_promote(): + """ + Shamelessly promote our little community. + """ + + click.echo("Need " + click.style("help", fg='green', bold=True) + + "? Found a " + click.style("bug", fg='green', bold=True) + + "? Let us " + click.style("know", fg='green', bold=True) + "! 
:D") + click.echo("File bug reports on " + click.style("GitHub", bold=True) + " here: " + + click.style("https://github.com/Miserlou/Zappa", fg='cyan', bold=True)) + click.echo("And join our " + click.style("Slack", bold=True) + " channel here: " + + click.style("https://zappateam.slack.com", fg='cyan', bold=True)) + click.echo("Love!,") + click.echo(" ~ Team " + click.style("Zappa", bold=True) + "!") + +def disable_click_colors(): + """ + Set a Click context where colors are disabled. Creates a throwaway BaseCommand + to play nicely with the Context constructor. + The intended side-effect here is that click.echo() checks this context and will + suppress colors. + https://github.com/pallets/click/blob/e1aa43a3/click/globals.py#L39 + """ + + ctx = Context(BaseCommand('AllYourBaseAreBelongToUs')) + ctx.color = False + push_context(ctx) + +def handle(): # pragma: no cover + """ + Main program execution handler. + """ + + try: + cli = ZappaCLI() + sys.exit(cli.handle()) + except SystemExit as e: # pragma: no cover + cli.on_exit() + sys.exit(e.code) + + except KeyboardInterrupt: # pragma: no cover + cli.on_exit() + sys.exit(130) + except Exception as e: + cli.on_exit() + + click.echo("Oh no! An " + click.style("error occurred", fg='red', bold=True) + "! :(") + click.echo("\n==============\n") + import traceback + traceback.print_exc() + click.echo("\n==============\n") + shamelessly_promote() + + sys.exit(-1) + +if __name__ == '__main__': # pragma: no cover + handle() diff --git a/zappa/handler.py b/zappa/handler.py new file mode 100644 index 000000000..78a31cfda --- /dev/null +++ b/zappa/handler.py @@ -0,0 +1,615 @@ +import base64 +import boto3 +import collections +import datetime +import importlib +import inspect +import json +import logging +import os +import sys +import traceback +import tarfile + +from builtins import str +from werkzeug.wrappers import Response + +# This file may be copied into a project's root, +# so handle both scenarios. 
+try: + from zappa.middleware import ZappaWSGIMiddleware + from zappa.wsgi import create_wsgi_request, common_log + from zappa.utilities import merge_headers, parse_s3_url +except ImportError as e: # pragma: no cover + from .middleware import ZappaWSGIMiddleware + from .wsgi import create_wsgi_request, common_log + from .utilities import merge_headers, parse_s3_url + + +# Set up logging +logging.basicConfig() +logger = logging.getLogger() +logger.setLevel(logging.INFO) + + +class LambdaHandler: + """ + Singleton for avoiding duplicate setup. + + Pattern provided by @benbangert. + """ + + __instance = None + settings = None + settings_name = None + session = None + + # Application + app_module = None + wsgi_app = None + trailing_slash = False + + def __new__(cls, settings_name="zappa_settings", session=None): + """Singleton instance to avoid repeat setup""" + if LambdaHandler.__instance is None: + print("Instancing..") + LambdaHandler.__instance = object.__new__(cls) + return LambdaHandler.__instance + + def __init__(self, settings_name="zappa_settings", session=None): + + # We haven't cached our settings yet, load the settings and app. 
+ if not self.settings: + # Loading settings from a python module + self.settings = importlib.import_module(settings_name) + self.settings_name = settings_name + self.session = session + + # Custom log level + if self.settings.LOG_LEVEL: + level = logging.getLevelName(self.settings.LOG_LEVEL) + logger.setLevel(level) + + remote_env = getattr(self.settings, 'REMOTE_ENV', None) + remote_bucket, remote_file = parse_s3_url(remote_env) + + if remote_bucket and remote_file: + self.load_remote_settings(remote_bucket, remote_file) + + # Let the system know that this will be a Lambda/Zappa/Stack + os.environ["SERVERTYPE"] = "AWS Lambda" + os.environ["FRAMEWORK"] = "Zappa" + try: + os.environ["PROJECT"] = self.settings.PROJECT_NAME + os.environ["STAGE"] = self.settings.API_STAGE + except Exception: # pragma: no cover + pass + + # Set any locally defined env vars + # Environment variable keys can't be Unicode + # https://github.com/Miserlou/Zappa/issues/604 + for key in self.settings.ENVIRONMENT_VARIABLES.keys(): + os.environ[str(key)] = self.settings.ENVIRONMENT_VARIABLES[key] + + # Pulling from S3 if given a zip path + project_archive_path = getattr(self.settings, 'ARCHIVE_PATH', None) + if project_archive_path: + self.load_remote_project_archive(project_archive_path) + + + # Load compiled library to the PythonPath + # checks if we are the slim_handler since this is not needed otherwise + # https://github.com/Miserlou/Zappa/issues/776 + is_slim_handler = getattr(self.settings, 'SLIM_HANDLER', False) + if is_slim_handler: + included_libraries = getattr(self.settings, 'INCLUDE', ['libmysqlclient.so.18']) + try: + from ctypes import cdll, util + for library in included_libraries: + try: + cdll.LoadLibrary(os.path.join(os.getcwd(), library)) + except OSError: + print("Failed to find library: {}...right filename?".format(library)) + except ImportError: + print ("Failed to import cytpes library") + + # This is a non-WSGI application + # https://github.com/Miserlou/Zappa/pull/748 
+ if not hasattr(self.settings, 'APP_MODULE') and not self.settings.DJANGO_SETTINGS: + self.app_module = None + wsgi_app_function = None + # This is probably a normal WSGI app (Or django with overloaded wsgi application) + # https://github.com/Miserlou/Zappa/issues/1164 + elif hasattr(self.settings, 'APP_MODULE'): + if self.settings.DJANGO_SETTINGS: + sys.path.append('/var/task') + from django.conf import ENVIRONMENT_VARIABLE as SETTINGS_ENVIRONMENT_VARIABLE + # add the Lambda root path into the sys.path + self.trailing_slash = True + os.environ[SETTINGS_ENVIRONMENT_VARIABLE] = self.settings.DJANGO_SETTINGS + else: + self.trailing_slash = False + + # The app module + self.app_module = importlib.import_module(self.settings.APP_MODULE) + + # The application + wsgi_app_function = getattr(self.app_module, self.settings.APP_FUNCTION) + # Django gets special treatment. + else: + try: # Support both for tests + from zappa.ext.django_zappa import get_django_wsgi + except ImportError: # pragma: no cover + from django_zappa_app import get_django_wsgi + + # Get the Django WSGI app from our extension + wsgi_app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS) + self.trailing_slash = True + + self.wsgi_app = ZappaWSGIMiddleware(wsgi_app_function) + + def load_remote_project_archive(self, project_zip_path): + """ + Puts the project files from S3 in /tmp and adds to path + """ + project_folder = '/tmp/{0!s}'.format(self.settings.PROJECT_NAME) + if not os.path.isdir(project_folder): + # The project folder doesn't exist in this cold lambda, get it from S3 + if not self.session: + boto_session = boto3.Session() + else: + boto_session = self.session + + # Download zip file from S3 + remote_bucket, remote_file = parse_s3_url(project_zip_path) + s3 = boto_session.resource('s3') + archive_on_s3 = s3.Object(remote_bucket, remote_file).get() + + with tarfile.open(fileobj=archive_on_s3['Body'], mode="r|gz") as t: + t.extractall(project_folder) + + # Add to project path + 
sys.path.insert(0, project_folder)
+
+            # Change working directory to project folder
+            # Related: https://github.com/Miserlou/Zappa/issues/702
+            os.chdir(project_folder)
+        return True
+
+    def load_remote_settings(self, remote_bucket, remote_file):
+        """
+        Attempt to read a file from s3 containing a flat json object. Adds each
+        key->value pair as environment variables. Helpful for keeping
+        sensitive or stage-specific configuration variables in s3 instead of
+        version control.
+        """
+        if not self.session:
+            boto_session = boto3.Session()
+        else:
+            boto_session = self.session
+
+        s3 = boto_session.resource('s3')
+        try:
+            remote_env_object = s3.Object(remote_bucket, remote_file).get()
+        except Exception as e: # pragma: no cover
+            # catch everything aws might decide to raise
+            print('Could not load remote settings file.', e)
+            return
+
+        try:
+            content = remote_env_object['Body'].read()
+        except Exception as e: # pragma: no cover
+            # catch everything aws might decide to raise
+            print('Exception while reading remote settings file.', e)
+            return
+
+        try:
+            settings_dict = json.loads(content)
+        except (ValueError, TypeError): # pragma: no cover
+            print('Failed to parse remote settings!')
+            return
+
+        # add each key-value to environment - overwrites existing keys!
+        for key, value in settings_dict.items():
+            if self.settings.LOG_LEVEL == "DEBUG":
+                print('Adding {} -> {} to environment'.format(
+                    key,
+                    value
+                ))
+            # Environment variable keys can't be Unicode
+            # https://github.com/Miserlou/Zappa/issues/604
+            try:
+                os.environ[str(key)] = value
+            except Exception:
+                if self.settings.LOG_LEVEL == "DEBUG":
+                    print("Environment variable keys must be non-unicode!")
+
+    @staticmethod
+    def import_module_and_get_function(whole_function):
+        """
+        Given a modular path to a function, import that module
+        and return the function. 
+ """ + module, function = whole_function.rsplit('.', 1) + app_module = importlib.import_module(module) + app_function = getattr(app_module, function) + return app_function + + @classmethod + def lambda_handler(cls, event, context): # pragma: no cover + handler = cls() + exception_handler = handler.settings.EXCEPTION_HANDLER + try: + return handler.handler(event, context) + except Exception as ex: + exception_processed = cls._process_exception(exception_handler=exception_handler, + event=event, context=context, exception=ex) + if not exception_processed: + # Only re-raise exception if handler directed so. Allows handler to control if lambda has to retry + # an event execution in case of failure. + raise + + @classmethod + def _process_exception(cls, exception_handler, event, context, exception): + exception_processed = False + if exception_handler: + try: + handler_function = cls.import_module_and_get_function(exception_handler) + exception_processed = handler_function(exception, event, context) + except Exception as cex: + logger.error(msg='Failed to process exception via custom handler.') + print(cex) + return exception_processed + + @staticmethod + def run_function(app_function, event, context): + """ + Given a function and event context, + detect signature and execute, returning any result. 
+ """ + # getargspec does not support python 3 method with type hints + # Related issue: https://github.com/Miserlou/Zappa/issues/1452 + if hasattr(inspect, "getfullargspec"): # Python 3 + args, varargs, keywords, defaults, _, _, _ = inspect.getfullargspec(app_function) + else: # Python 2 + args, varargs, keywords, defaults = inspect.getargspec(app_function) + num_args = len(args) + if num_args == 0: + result = app_function(event, context) if varargs else app_function() + elif num_args == 1: + result = app_function(event, context) if varargs else app_function(event) + elif num_args == 2: + result = app_function(event, context) + else: + raise RuntimeError("Function signature is invalid. Expected a function that accepts at most " + "2 arguments or varargs.") + return result + + def get_function_for_aws_event(self, record): + """ + Get the associated function to execute for a triggered AWS event + + Support S3, SNS, DynamoDB, kinesis and SQS events + """ + if 's3' in record: + if ':' in record['s3']['configurationId']: + return record['s3']['configurationId'].split(':')[-1] + + arn = None + if 'Sns' in record: + try: + message = json.loads(record['Sns']['Message']) + if message.get('command'): + return message['command'] + except ValueError: + pass + arn = record['Sns'].get('TopicArn') + elif 'dynamodb' in record or 'kinesis' in record: + arn = record.get('eventSourceARN') + elif 'eventSource' in record and record.get('eventSource') == 'aws:sqs': + arn = record.get('eventSourceARN') + elif 's3' in record: + arn = record['s3']['bucket']['arn'] + + if arn: + return self.settings.AWS_EVENT_MAPPING.get(arn) + + return None + + def get_function_from_bot_intent_trigger(self, event): + """ + For the given event build ARN and return the configured function + """ + intent = event.get('currentIntent') + if intent: + intent = intent.get('name') + if intent: + return self.settings.AWS_BOT_EVENT_MAPPING.get( + "{}:{}".format(intent, event.get('invocationSource')) + ) + + def 
get_function_for_cognito_trigger(self, trigger): + """ + Get the associated function to execute for a cognito trigger + """ + print("get_function_for_cognito_trigger", self.settings.COGNITO_TRIGGER_MAPPING, trigger, self.settings.COGNITO_TRIGGER_MAPPING.get(trigger)) + return self.settings.COGNITO_TRIGGER_MAPPING.get(trigger) + + def handler(self, event, context): + """ + An AWS Lambda function which parses specific API Gateway input into a + WSGI request, feeds it to our WSGI app, processes the response, and returns + that back to the API Gateway. + + """ + settings = self.settings + + # If in DEBUG mode, log all raw incoming events. + if settings.DEBUG: + logger.debug('Zappa Event: {}'.format(event)) + + # Set any API Gateway defined Stage Variables + # as env vars + if event.get('stageVariables'): + for key in event['stageVariables'].keys(): + os.environ[str(key)] = event['stageVariables'][key] + + # This is the result of a keep alive, recertify + # or scheduled event. + if event.get('detail-type') == 'Scheduled Event': + + whole_function = event['resources'][0].split('/')[-1].split('-')[-1] + + # This is a scheduled function. + if '.' in whole_function: + app_function = self.import_module_and_get_function(whole_function) + + # Execute the function! + return self.run_function(app_function, event, context) + + # Else, let this execute as it were. + + # This is a direct command invocation. + elif event.get('command', None): + + whole_function = event['command'] + app_function = self.import_module_and_get_function(whole_function) + result = self.run_function(app_function, event, context) + print("Result of %s:" % whole_function) + print(result) + return result + + # This is a direct, raw python invocation. + # It's _extremely_ important we don't allow this event source + # to be overridden by unsanitized, non-admin user input. 
+ elif event.get('raw_command', None): + + raw_command = event['raw_command'] + exec(raw_command) + return + + # This is a Django management command invocation. + elif event.get('manage', None): + + from django.core import management + + try: # Support both for tests + from zappa.ext.django_zappa import get_django_wsgi + except ImportError as e: # pragma: no cover + from django_zappa_app import get_django_wsgi + + # Get the Django WSGI app from our extension + # We don't actually need the function, + # but we do need to do all of the required setup for it. + app_function = get_django_wsgi(self.settings.DJANGO_SETTINGS) + + # Couldn't figure out how to get the value into stdout with StringIO.. + # Read the log for now. :[] + management.call_command(*event['manage'].split(' ')) + return {} + + # This is an AWS-event triggered invocation. + elif event.get('Records', None): + + records = event.get('Records') + result = None + whole_function = self.get_function_for_aws_event(records[0]) + if whole_function: + app_function = self.import_module_and_get_function(whole_function) + result = self.run_function(app_function, event, context) + logger.debug(result) + else: + logger.error("Cannot find a function to process the triggered event.") + return result + + # this is an AWS-event triggered from Lex bot's intent + elif event.get('bot'): + result = None + whole_function = self.get_function_from_bot_intent_trigger(event) + if whole_function: + app_function = self.import_module_and_get_function(whole_function) + result = self.run_function(app_function, event, context) + logger.debug(result) + else: + logger.error("Cannot find a function to process the triggered event.") + return result + + # This is an API Gateway authorizer event + elif event.get('type') == 'TOKEN': + whole_function = self.settings.AUTHORIZER_FUNCTION + if whole_function: + app_function = self.import_module_and_get_function(whole_function) + policy = self.run_function(app_function, event, context) + return 
policy + else: + logger.error("Cannot find a function to process the authorization request.") + raise Exception('Unauthorized') + + # This is an AWS Cognito Trigger Event + elif event.get('triggerSource', None): + triggerSource = event.get('triggerSource') + whole_function = self.get_function_for_cognito_trigger(triggerSource) + result = event + if whole_function: + app_function = self.import_module_and_get_function(whole_function) + result = self.run_function(app_function, event, context) + logger.debug(result) + else: + logger.error("Cannot find a function to handle cognito trigger {}".format(triggerSource)) + return result + + # This is a CloudWatch event + # Related: https://github.com/Miserlou/Zappa/issues/1924 + elif event.get('awslogs', None): + result = None + whole_function = '{}.{}'.format(settings.APP_MODULE, settings.APP_FUNCTION) + app_function = self.import_module_and_get_function(whole_function) + if app_function: + result = self.run_function(app_function, event, context) + logger.debug("Result of %s:" % whole_function) + logger.debug(result) + else: + logger.error("Cannot find a function to process the triggered event.") + return result + + # Normal web app flow + try: + # Timing + time_start = datetime.datetime.now() + + # This is a normal HTTP request + if event.get('httpMethod', None): + script_name = '' + is_elb_context = False + headers = merge_headers(event) + if event.get('requestContext', None) and event['requestContext'].get('elb', None): + # Related: https://github.com/Miserlou/Zappa/issues/1715 + # inputs/outputs for lambda loadbalancer + # https://docs.aws.amazon.com/elasticloadbalancing/latest/application/lambda-functions.html + is_elb_context = True + # host is lower-case when forwarded from ELB + host = headers.get('host') + # TODO: pathParameters is a first-class citizen in apigateway but not available without + # some parsing work for ELB (is this parameter used for anything?) 
+ event['pathParameters'] = '' + else: + if headers: + host = headers.get('Host') + else: + host = None + logger.debug('host found: [{}]'.format(host)) + + if host: + if 'amazonaws.com' in host: + logger.debug('amazonaws found in host') + # The path provided in th event doesn't include the + # stage, so we must tell Flask to include the API + # stage in the url it calculates. See https://github.com/Miserlou/Zappa/issues/1014 + script_name = '/' + settings.API_STAGE + else: + # This is a test request sent from the AWS console + if settings.DOMAIN: + # Assume the requests received will be on the specified + # domain. No special handling is required + pass + else: + # Assume the requests received will be to the + # amazonaws.com endpoint, so tell Flask to include the + # API stage + script_name = '/' + settings.API_STAGE + + base_path = getattr(settings, 'BASE_PATH', None) + + # Create the environment for WSGI and handle the request + environ = create_wsgi_request( + event, + script_name=script_name, + base_path=base_path, + trailing_slash=self.trailing_slash, + binary_support=settings.BINARY_SUPPORT, + context_header_mappings=settings.CONTEXT_HEADER_MAPPINGS + ) + + # We are always on https on Lambda, so tell our wsgi app that. + environ['HTTPS'] = 'on' + environ['wsgi.url_scheme'] = 'https' + environ['lambda.context'] = context + environ['lambda.event'] = event + + # Execute the application + with Response.from_app(self.wsgi_app, environ) as response: + # This is the object we're going to return. + # Pack the WSGI response into our special dictionary. + zappa_returndict = dict() + + # Issue #1715: ALB support. 
ALB responses must always include + # base64 encoding and status description + if is_elb_context: + zappa_returndict.setdefault('isBase64Encoded', False) + zappa_returndict.setdefault('statusDescription', response.status) + + if response.data: + if settings.BINARY_SUPPORT and \ + not response.mimetype.startswith("text/") \ + and response.mimetype != "application/json": + zappa_returndict['body'] = base64.b64encode(response.data).decode('utf-8') + zappa_returndict["isBase64Encoded"] = True + else: + zappa_returndict['body'] = response.get_data(as_text=True) + + zappa_returndict['statusCode'] = response.status_code + if 'headers' in event: + zappa_returndict['headers'] = {} + for key, value in response.headers: + zappa_returndict['headers'][key] = value + if 'multiValueHeaders' in event: + zappa_returndict['multiValueHeaders'] = {} + for key, value in response.headers: + zappa_returndict['multiValueHeaders'][key] = response.headers.getlist(key) + + # Calculate the total response time, + # and log it in the Common Log format. + time_end = datetime.datetime.now() + delta = time_end - time_start + response_time_ms = delta.total_seconds() * 1000 + response.content = response.data + common_log(environ, response, response_time=response_time_ms) + + return zappa_returndict + except Exception as e: # pragma: no cover + # Print statements are visible in the logs either way + print(e) + exc_info = sys.exc_info() + message = ('An uncaught exception happened while servicing this request. ' + 'You can investigate this with the `zappa tail` command.') + + # If we didn't even build an app_module, just raise. 
+ if not settings.DJANGO_SETTINGS: + try: + self.app_module + except NameError as ne: + message = 'Failed to import module: {}'.format(ne.message) + + # Call exception handler for unhandled exceptions + exception_handler = self.settings.EXCEPTION_HANDLER + self._process_exception(exception_handler=exception_handler, + event=event, context=context, exception=e) + + # Return this unspecified exception as a 500, using template that API Gateway expects. + content = collections.OrderedDict() + content['statusCode'] = 500 + body = {'message': message} + if settings.DEBUG: # only include traceback if debug is on. + body['traceback'] = traceback.format_exception(*exc_info) # traceback as a list for readability. + content['body'] = json.dumps(str(body), sort_keys=True, indent=4) + return content + + +def lambda_handler(event, context): # pragma: no cover + return LambdaHandler.lambda_handler(event, context) + + +def keep_warm_callback(event, context): + """Method is triggered by the CloudWatch event scheduled when keep_warm setting is set to true.""" + lambda_handler(event={}, context=context) # overriding event with an empty one so that web app initialization will + # be triggered. diff --git a/zappa/utilities.py b/zappa/utilities.py new file mode 100644 index 000000000..49c3b3351 --- /dev/null +++ b/zappa/utilities.py @@ -0,0 +1,583 @@ +import botocore +import calendar +import datetime +import durationpy +import fnmatch +import io +import json +import logging +import os +import re +import shutil +import stat +import sys + +from past.builtins import basestring + +from urllib.parse import urlparse + +LOG = logging.getLogger(__name__) + +## +# Settings / Packaging +## + +def copytree(src, dst, metadata=True, symlinks=False, ignore=None): + """ + This is a contributed re-implementation of 'copytree' that + should work with the exact same behavior on multiple platforms. + + When `metadata` is False, file metadata such as permissions and modification + times are not copied. 
+ """ + + def copy_file(src, dst, item): + s = os.path.join(src, item) + d = os.path.join(dst, item) + + if symlinks and os.path.islink(s): # pragma: no cover + if os.path.lexists(d): + os.remove(d) + os.symlink(os.readlink(s), d) + if metadata: + try: + st = os.lstat(s) + mode = stat.S_IMODE(st.st_mode) + os.lchmod(d, mode) + except: + pass # lchmod not available + elif os.path.isdir(s): + copytree(s, d, metadata, symlinks, ignore) + else: + shutil.copy2(s, d) if metadata else shutil.copy(s, d) + try: + lst = os.listdir(src) + if not os.path.exists(dst): + os.makedirs(dst) + if metadata: + shutil.copystat(src, dst) + except NotADirectoryError: # egg-link files + copy_file(os.path.dirname(src), os.path.dirname(dst), os.path.basename(src)) + return + + if ignore: + excl = ignore(src, lst) + lst = [x for x in lst if x not in excl] + + for item in lst: + copy_file(src, dst, item) + + +def parse_s3_url(url): + """ + Parses S3 URL. + + Returns bucket (domain) and file (full path). + """ + bucket = '' + path = '' + if url: + result = urlparse(url) + bucket = result.netloc + path = result.path.strip('/') + return bucket, path + +def human_size(num, suffix='B'): + """ + Convert bytes length to a human-readable version + """ + for unit in ('', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi'): + if abs(num) < 1024.0: + return "{0:3.1f}{1!s}{2!s}".format(num, unit, suffix) + num /= 1024.0 + return "{0:.1f}{1!s}{2!s}".format(num, 'Yi', suffix) + +def string_to_timestamp(timestring): + """ + Accepts a str, returns an int timestamp. + """ + + ts = None + + # Uses an extended version of Go's duration string. 
+ try: + delta = durationpy.from_str(timestring); + past = datetime.datetime.utcnow() - delta + ts = calendar.timegm(past.timetuple()) + return ts + except Exception as e: + pass + + if ts: + return ts + # else: + # print("Unable to parse timestring.") + return 0 + +## +# `init` related +## + +def detect_django_settings(): + """ + Automatically try to discover Django settings files, + return them as relative module paths. + """ + + matches = [] + for root, dirnames, filenames in os.walk(os.getcwd()): + for filename in fnmatch.filter(filenames, '*settings.py'): + full = os.path.join(root, filename) + if 'site-packages' in full: + continue + full = os.path.join(root, filename) + package_path = full.replace(os.getcwd(), '') + package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '') + + matches.append(package_module) + return matches + +def detect_flask_apps(): + """ + Automatically try to discover Flask apps files, + return them as relative module paths. + """ + + matches = [] + for root, dirnames, filenames in os.walk(os.getcwd()): + for filename in fnmatch.filter(filenames, '*.py'): + full = os.path.join(root, filename) + if 'site-packages' in full: + continue + + full = os.path.join(root, filename) + + with io.open(full, 'r', encoding='utf-8') as f: + lines = f.readlines() + for line in lines: + app = None + + # Kind of janky.. + if '= Flask(' in line: + app = line.split('= Flask(')[0].strip() + if '=Flask(' in line: + app = line.split('=Flask(')[0].strip() + + if not app: + continue + + package_path = full.replace(os.getcwd(), '') + package_module = package_path.replace(os.sep, '.').split('.', 1)[1].replace('.py', '') + app_module = package_module + '.' 
+ app + + matches.append(app_module) + + return matches + +def get_venv_from_python_version(): + return 'python{}.{}'.format(*sys.version_info) + +def get_runtime_from_python_version(): + """ + """ + if sys.version_info[0] < 3: + raise ValueError("Python 2.x is no longer supported.") + else: + if sys.version_info[1] <= 6: + return 'python3.6' + elif sys.version_info[1] <= 7: + return 'python3.7' + else: + return 'python3.8' + +## +# Async Tasks +## + +def get_topic_name(lambda_name): + """ Topic name generation """ + return '%s-zappa-async' % lambda_name + +## +# Event sources / Kappa +## + +def get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False): + """ + + Given an event_source dictionary item, a session and a lambda_arn, + hack into Kappa's Gibson, create out an object we can call + to schedule this event, and return the event source. + + """ + import kappa.function + import kappa.restapi + import kappa.event_source.base + import kappa.event_source.dynamodb_stream + import kappa.event_source.kinesis + import kappa.event_source.s3 + import kappa.event_source.sns + import kappa.event_source.cloudwatch + import kappa.policy + import kappa.role + import kappa.awsclient + + class PseudoContext: + def __init__(self): + return + + class PseudoFunction: + def __init__(self): + return + + # Mostly adapted from kappa - will probably be replaced by kappa support + class SqsEventSource(kappa.event_source.base.EventSource): + + def __init__(self, context, config): + super().__init__(context, config) + self._lambda = kappa.awsclient.create_client( + 'lambda', context.session) + + def _get_uuid(self, function): + uuid = None + response = self._lambda.call( + 'list_event_source_mappings', + FunctionName=function.name, + EventSourceArn=self.arn) + LOG.debug(response) + if len(response['EventSourceMappings']) > 0: + uuid = response['EventSourceMappings'][0]['UUID'] + return uuid + + def add(self, function): + try: + response = self._lambda.call( + 
'create_event_source_mapping', + FunctionName=function.name, + EventSourceArn=self.arn, + BatchSize=self.batch_size, + Enabled=self.enabled + ) + LOG.debug(response) + except Exception: + LOG.exception('Unable to add event source') + + def enable(self, function): + self._config['enabled'] = True + try: + response = self._lambda.call( + 'update_event_source_mapping', + UUID=self._get_uuid(function), + Enabled=self.enabled + ) + LOG.debug(response) + except Exception: + LOG.exception('Unable to enable event source') + + def disable(self, function): + self._config['enabled'] = False + try: + response = self._lambda.call( + 'update_event_source_mapping', + FunctionName=function.name, + Enabled=self.enabled + ) + LOG.debug(response) + except Exception: + LOG.exception('Unable to disable event source') + + def update(self, function): + response = None + uuid = self._get_uuid(function) + if uuid: + try: + response = self._lambda.call( + 'update_event_source_mapping', + BatchSize=self.batch_size, + Enabled=self.enabled, + FunctionName=function.arn) + LOG.debug(response) + except Exception: + LOG.exception('Unable to update event source') + + def remove(self, function): + response = None + uuid = self._get_uuid(function) + if uuid: + response = self._lambda.call( + 'delete_event_source_mapping', + UUID=uuid) + LOG.debug(response) + return response + + def status(self, function): + response = None + LOG.debug('getting status for event source %s', self.arn) + uuid = self._get_uuid(function) + if uuid: + try: + response = self._lambda.call( + 'get_event_source_mapping', + UUID=self._get_uuid(function)) + LOG.debug(response) + except botocore.exceptions.ClientError: + LOG.debug('event source %s does not exist', self.arn) + response = None + else: + LOG.debug('No UUID for event source %s', self.arn) + return response + + class ExtendedSnsEventSource(kappa.event_source.sns.SNSEventSource): + @property + def filters(self): + return self._config.get('filters') + + def 
add_filters(self, function): + try: + subscription = self.exists(function) + if subscription: + response = self._sns.call( + 'set_subscription_attributes', + SubscriptionArn=subscription['SubscriptionArn'], + AttributeName='FilterPolicy', + AttributeValue=json.dumps(self.filters) + ) + kappa.event_source.sns.LOG.debug(response) + except Exception: + kappa.event_source.sns.LOG.exception('Unable to add filters for SNS topic %s', self.arn) + + def add(self, function): + super().add(function) + if self.filters: + self.add_filters(function) + + event_source_map = { + 'dynamodb': kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource, + 'kinesis': kappa.event_source.kinesis.KinesisEventSource, + 's3': kappa.event_source.s3.S3EventSource, + 'sns': ExtendedSnsEventSource, + 'sqs': SqsEventSource, + 'events': kappa.event_source.cloudwatch.CloudWatchEventSource + } + + arn = event_source['arn'] + _, _, svc, _ = arn.split(':', 3) + + event_source_func = event_source_map.get(svc, None) + if not event_source_func: + raise ValueError('Unknown event source: {0}'.format(arn)) + + def autoreturn(self, function_name): + return function_name + + event_source_func._make_notification_id = autoreturn + + ctx = PseudoContext() + ctx.session = boto_session + + funk = PseudoFunction() + funk.name = lambda_arn + + # Kappa 0.6.0 requires this nasty hacking, + # hopefully we can remove at least some of this soon. + # Kappa 0.7.0 introduces a whole host over other changes we don't + # really want, so we're stuck here for a little while. 
+ + # Related: https://github.com/Miserlou/Zappa/issues/684 + # https://github.com/Miserlou/Zappa/issues/688 + # https://github.com/Miserlou/Zappa/commit/3216f7e5149e76921ecdf9451167846b95616313 + if svc == 's3': + split_arn = lambda_arn.split(':') + arn_front = ':'.join(split_arn[:-1]) + arn_back = split_arn[-1] + ctx.environment = arn_back + funk.arn = arn_front + funk.name = ':'.join([arn_back, target_function]) + else: + funk.arn = lambda_arn + + funk._context = ctx + + event_source_obj = event_source_func(ctx, event_source) + + return event_source_obj, ctx, funk + +def add_event_source(event_source, lambda_arn, target_function, boto_session, dry=False): + """ + Given an event_source dictionary, create the object and add the event source. + """ + + event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) + # TODO: Detect changes in config and refine exists algorithm + if not dry: + if not event_source_obj.status(funk): + event_source_obj.add(funk) + return 'successful' if event_source_obj.status(funk) else 'failed' + else: + return 'exists' + + return 'dryrun' + +def remove_event_source(event_source, lambda_arn, target_function, boto_session, dry=False): + """ + Given an event_source dictionary, create the object and remove the event source. + """ + + event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) + + # This is slightly dirty, but necessary for using Kappa this way. + funk.arn = lambda_arn + if not dry: + rule_response = event_source_obj.remove(funk) + return rule_response + else: + return event_source_obj + +def get_event_source_status(event_source, lambda_arn, target_function, boto_session, dry=False): + """ + Given an event_source dictionary, create the object and get the event source status. 
+ """ + + event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) + return event_source_obj.status(funk) + +## +# Analytics / Surveillance / Nagging +## + +def check_new_version_available(this_version): + """ + Checks if a newer version of Zappa is available. + + Returns True is updateable, else False. + + """ + import requests + + pypi_url = 'https://pypi.org/pypi/Zappa/json' + resp = requests.get(pypi_url, timeout=1.5) + top_version = resp.json()['info']['version'] + + return this_version != top_version + + +class InvalidAwsLambdaName(Exception): + """Exception: proposed AWS Lambda name is invalid""" + pass + + +def validate_name(name, maxlen=80): + """Validate name for AWS Lambda function. + name: actual name (without `arn:aws:lambda:...:` prefix and without + `:$LATEST`, alias or version suffix. + maxlen: max allowed length for name without prefix and suffix. + + The value 80 was calculated from prefix with longest known region name + and assuming that no alias or version would be longer than `$LATEST`. + + Based on AWS Lambda spec + http://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html + + Return: the name + Raise: InvalidAwsLambdaName, if the name is invalid. + """ + if not isinstance(name, basestring): + msg = "Name must be of type string" + raise InvalidAwsLambdaName(msg) + if len(name) > maxlen: + msg = "Name is longer than {maxlen} characters." + raise InvalidAwsLambdaName(msg.format(maxlen=maxlen)) + if len(name) == 0: + msg = "Name must not be empty string." 
+ raise InvalidAwsLambdaName(msg) + if not re.match("^[a-zA-Z0-9-_]+$", name): + msg = "Name can only contain characters from a-z, A-Z, 0-9, _ and -" + raise InvalidAwsLambdaName(msg) + return name + + +def contains_python_files_or_subdirs(folder): + """ + Checks (recursively) if the directory contains .py or .pyc files + """ + for root, dirs, files in os.walk(folder): + if [filename for filename in files if filename.endswith('.py') or filename.endswith('.pyc')]: + return True + + for d in dirs: + for _, subdirs, subfiles in os.walk(d): + if [filename for filename in subfiles if filename.endswith('.py') or filename.endswith('.pyc')]: + return True + + return False + + +def conflicts_with_a_neighbouring_module(directory_path): + """ + Checks if a directory lies in the same directory as a .py file with the same name. + """ + parent_dir_path, current_dir_name = os.path.split(os.path.normpath(directory_path)) + neighbours = os.listdir(parent_dir_path) + conflicting_neighbour_filename = current_dir_name+'.py' + return conflicting_neighbour_filename in neighbours + + +# https://github.com/Miserlou/Zappa/issues/1188 +def titlecase_keys(d): + """ + Takes a dict with keys of type str and returns a new dict with all keys titlecased. + """ + return {k.title(): v for k, v in d.items()} + + +# https://github.com/Miserlou/Zappa/issues/1688 +def is_valid_bucket_name(name): + """ + Checks if an S3 bucket name is valid according to https://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html#bucketnamingrules + """ + # Bucket names must be at least 3 and no more than 63 characters long. + if (len(name) < 3 or len(name) > 63): + return False + # Bucket names must not contain uppercase characters or underscores. + if (any(x.isupper() for x in name)): + return False + if "_" in name: + return False + # Bucket names must start with a lowercase letter or number. 
+ if not (name[0].islower() or name[0].isdigit()): + return False + # Bucket names must be a series of one or more labels. Adjacent labels are separated by a single period (.). + for label in name.split("."): + # Each label must start and end with a lowercase letter or a number. + if len(label) < 1: + return False + if not (label[0].islower() or label[0].isdigit()): + return False + if not (label[-1].islower() or label[-1].isdigit()): + return False + # Bucket names must not be formatted as an IP address (for example, 192.168.5.4). + looks_like_IP = True + for label in name.split("."): + if not label.isdigit(): + looks_like_IP = False + break + if looks_like_IP: + return False + + return True + + +def merge_headers(event): + """ + Merge the values of headers and multiValueHeaders into a single dict. + Opens up support for multivalue headers via API Gateway and ALB. + See: https://github.com/Miserlou/Zappa/pull/1756 + """ + headers = event.get('headers') or {} + multi_headers = (event.get('multiValueHeaders') or {}).copy() + for h in set(headers.keys()): + if h not in multi_headers: + multi_headers[h] = [headers[h]] + for h in multi_headers.keys(): + multi_headers[h] = ', '.join(multi_headers[h]) + return multi_headers From 23df4378d8530c17b033563d39f6d8e44f238063 Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Mon, 22 Jun 2020 17:40:43 -0500 Subject: [PATCH 20/27] working for multiple stages --- zappa/utilities.py | 85 +++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/zappa/utilities.py b/zappa/utilities.py index 49c3b3351..5df3facd0 100644 --- a/zappa/utilities.py +++ b/zappa/utilities.py @@ -210,6 +210,9 @@ def get_event_source(event_source, lambda_arn, target_function, boto_session, dr to schedule this event, and return the event source. 
""" + + # TODO: Jerry - fix for S3 + import kappa.function import kappa.restapi import kappa.event_source.base @@ -351,10 +354,84 @@ def add(self, function): if self.filters: self.add_filters(function) + # reimplementing this entire class rather than extending the Kappa version because only the constructor is reused + # and I didn't want any future usages or extentions to accidentally fall through to Kappa's object via one of the + # 'aliases' it sets up + class ImprovedS3EventSource(kappa.event_source.base.EventSource): + def __init__(self, context, config): + super(ImprovedS3EventSource, self).__init__(context, config) + self._s3 = kappa.awsclient.create_client('s3', context.session) + + def _get_bucket_name(self): + return self.arn.split(':')[-1] + + # this method is overridden / implemented below but I wanted the placeholder here for clarity and to make IDEs + # less cranky + def _make_notification_id(self, function_name): + raise NotImplementedError + + def add(self, function): + event_config = { + 'Id': self._make_notification_id(function.name), + 'Events': [e for e in self._config['events']], + 'LambdaFunctionArn': '%s:%s' % (function.arn, function._context.environment), + } + + # Add S3 key filters + if 'key_filters' in self._config: + filters_spec = {'Key': {'FilterRules': []}} + for key_filter in self._config['key_filters']: + if 'type' in key_filter and 'value' in key_filter and key_filter['type'] in ('prefix', 'suffix'): + rule = {'Name': key_filter['type'], 'Value': key_filter['value']} + filters_spec['Key']['FilterRules'].append(rule) + event_config['Filter'] = filters_spec + + try: + bucket_config = self._s3.call('get_bucket_notification_configuration', Bucket=self._get_bucket_name()) + del bucket_config['ResponseMetadata'] + if 'LambdaFunctionConfigurations' in bucket_config: + bucket_config['LambdaFunctionConfigurations'].append(event_config) + else: + bucket_config['LambdaFunctionConfigurations'] = [event_config] + response = self._s3.call( + 
'put_bucket_notification_configuration', + Bucket=self._get_bucket_name(), + NotificationConfiguration=bucket_config) + LOG.debug(response) + except Exception as exc: + LOG.debug(exc.response) + LOG.exception('Unable to add S3 event source') + + def remove(self, function): + LOG.debug('removing s3 notification') + bucket_config = self._s3.call('get_bucket_notification_configuration', Bucket=self._get_bucket_name()) + LOG.debug(bucket_config) + + new_config = [] + if 'LambdaFunctionConfigurations' in bucket_config: + for configuration in bucket_config['LambdaFunctionConfigurations']: + if configuration['Id'] != self._make_notification_id(function.name): + new_config.append(configuration) + response = self._s3.call('put_bucket_notification_configuration', Bucket=self._get_bucket_name(), + NotificationConfiguration={'LambdaFunctionConfigurations': new_config}) + LOG.debug(response) + + def status(self, function): + LOG.debug('status for s3 notification for %s', function.name) + bucket_config = self._s3.call('get_bucket_notification_configuration', Bucket=self._get_bucket_name()) + LOG.debug(bucket_config) + if 'LambdaFunctionConfigurations' in bucket_config: + for configuration in bucket_config['LambdaFunctionConfigurations']: + if configuration['Id'] == self._make_notification_id(function.name): + return { + 'LambdaFunctionConfiguration': configuration, + 'State': 'Enabled' + } + event_source_map = { 'dynamodb': kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource, 'kinesis': kappa.event_source.kinesis.KinesisEventSource, - 's3': kappa.event_source.s3.S3EventSource, + 's3': ImprovedS3EventSource, 'sns': ExtendedSnsEventSource, 'sqs': SqsEventSource, 'events': kappa.event_source.cloudwatch.CloudWatchEventSource @@ -407,6 +484,8 @@ def add_event_source(event_source, lambda_arn, target_function, boto_session, dr Given an event_source dictionary, create the object and add the event source. 
""" + # TODO: Jerry - fix for S3 - need .status and .add + event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) # TODO: Detect changes in config and refine exists algorithm if not dry: @@ -423,6 +502,8 @@ def remove_event_source(event_source, lambda_arn, target_function, boto_session, Given an event_source dictionary, create the object and remove the event source. """ + # TODO: Jerry - fix for S3 - need .status and .remove + event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) # This is slightly dirty, but necessary for using Kappa this way. @@ -438,6 +519,8 @@ def get_event_source_status(event_source, lambda_arn, target_function, boto_sess Given an event_source dictionary, create the object and get the event source status. """ + # TODO: Jerry - fix for S3 - need .status (actually, this method appears unused...) + event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) return event_source_obj.status(funk) From 680937fabb747f6d00a052361237c07210ec3b47 Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Mon, 22 Jun 2020 17:43:55 -0500 Subject: [PATCH 21/27] working for multiple stages --- zappa/utilities.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/zappa/utilities.py b/zappa/utilities.py index 5df3facd0..46c26df80 100644 --- a/zappa/utilities.py +++ b/zappa/utilities.py @@ -210,9 +210,6 @@ def get_event_source(event_source, lambda_arn, target_function, boto_session, dr to schedule this event, and return the event source. """ - - # TODO: Jerry - fix for S3 - import kappa.function import kappa.restapi import kappa.event_source.base @@ -484,8 +481,6 @@ def add_event_source(event_source, lambda_arn, target_function, boto_session, dr Given an event_source dictionary, create the object and add the event source. 
""" - # TODO: Jerry - fix for S3 - need .status and .add - event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) # TODO: Detect changes in config and refine exists algorithm if not dry: @@ -502,8 +497,6 @@ def remove_event_source(event_source, lambda_arn, target_function, boto_session, Given an event_source dictionary, create the object and remove the event source. """ - # TODO: Jerry - fix for S3 - need .status and .remove - event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) # This is slightly dirty, but necessary for using Kappa this way. @@ -519,8 +512,6 @@ def get_event_source_status(event_source, lambda_arn, target_function, boto_sess Given an event_source dictionary, create the object and get the event source status. """ - # TODO: Jerry - fix for S3 - need .status (actually, this method appears unused...) - event_source_obj, ctx, funk = get_event_source(event_source, lambda_arn, target_function, boto_session, dry=False) return event_source_obj.status(funk) From 329e74f106c10db270c266f2ca08b2b04d6baf1f Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Tue, 23 Jun 2020 08:51:01 -0500 Subject: [PATCH 22/27] cleanup --- zappa/utilities.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/zappa/utilities.py b/zappa/utilities.py index 46c26df80..541f842a9 100644 --- a/zappa/utilities.py +++ b/zappa/utilities.py @@ -351,9 +351,13 @@ def add(self, function): if self.filters: self.add_filters(function) - # reimplementing this entire class rather than extending the Kappa version because only the constructor is reused + # This class allows for correct addition, updates, and deletes of S3 trigger events when there are multiple events + # on a bucket that may be tied to different stages with different filters by limiting changes only to events tied + # to the targeted method and stage unlike the handler 
in Kappa. + # Related: https://github.com/Miserlou/Zappa/issues/2111 + # This entire class is reimplemented rather than extending the Kappa version because only the constructor is reused # and I didn't want any future usages or extentions to accidentally fall through to Kappa's object via one of the - # 'aliases' it sets up + # 'aliases' it sets up. class ImprovedS3EventSource(kappa.event_source.base.EventSource): def __init__(self, context, config): super(ImprovedS3EventSource, self).__init__(context, config) @@ -405,10 +409,9 @@ def remove(self, function): LOG.debug(bucket_config) new_config = [] - if 'LambdaFunctionConfigurations' in bucket_config: - for configuration in bucket_config['LambdaFunctionConfigurations']: - if configuration['Id'] != self._make_notification_id(function.name): - new_config.append(configuration) + for configuration in bucket_config.get('LambdaFunctionConfigurations', []): + if configuration['Id'] != self._make_notification_id(function.name): + new_config.append(configuration) response = self._s3.call('put_bucket_notification_configuration', Bucket=self._get_bucket_name(), NotificationConfiguration={'LambdaFunctionConfigurations': new_config}) LOG.debug(response) @@ -417,13 +420,9 @@ def status(self, function): LOG.debug('status for s3 notification for %s', function.name) bucket_config = self._s3.call('get_bucket_notification_configuration', Bucket=self._get_bucket_name()) LOG.debug(bucket_config) - if 'LambdaFunctionConfigurations' in bucket_config: - for configuration in bucket_config['LambdaFunctionConfigurations']: - if configuration['Id'] == self._make_notification_id(function.name): - return { - 'LambdaFunctionConfiguration': configuration, - 'State': 'Enabled' - } + for configuration in bucket_config.get('LambdaFunctionConfigurations', []): + if configuration['Id'] == self._make_notification_id(function.name): + return {'LambdaFunctionConfiguration': configuration, 'State': 'Enabled'} event_source_map = { 'dynamodb': 
kappa.event_source.dynamodb_stream.DynamoDBStreamEventSource, From 37b172b56426e58eec66c9e96d7eea07bb63c967 Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Tue, 23 Jun 2020 09:44:35 -0500 Subject: [PATCH 23/27] new tests --- ....GetBucketNotificationConfiguration_1.json | 18 +++++++++++++ ....GetBucketNotificationConfiguration_2.json | 18 +++++++++++++ ....GetBucketNotificationConfiguration_3.json | 27 +++++++++++++++++++ ....GetBucketNotificationConfiguration_4.json | 27 +++++++++++++++++++ ....PutBucketNotificationConfiguration_1.json | 18 +++++++++++++ ....PutBucketNotificationConfiguration_2.json | 18 +++++++++++++ tests/tests_placebo.py | 4 +-- 7 files changed, 128 insertions(+), 2 deletions(-) create mode 100644 tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json create mode 100644 tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_2.json create mode 100644 tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_3.json create mode 100644 tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_4.json create mode 100644 tests/placebo/TestZappa.test_add_event_source/s3.PutBucketNotificationConfiguration_1.json create mode 100644 tests/placebo/TestZappa.test_add_event_source/s3.PutBucketNotificationConfiguration_2.json diff --git a/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json new file mode 100644 index 000000000..f51bf8641 --- /dev/null +++ b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_1.json @@ -0,0 +1,18 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RequestId": "6GER0T0H4R7XAV8T", + "HostId": "6EEJhlP3r8RO7cOo21OEiNjvg998YnDsnctx/hZ3ZWYMuylW2+TPTaVEW00nzTECWfb91K9jA1c=", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amz-id-2": 
"6EEJhlP3r8RO7cOo21OEiNjvg998YnDsnctx/hZ3ZWYMuylW2+TPTaVEW00nzTECWfb91K9jA1c=", + "x-amz-request-id": "6GER0T0H4R7XAV8T", + "date": "Tue, 23 Jun 2020 14:33:30 GMT", + "transfer-encoding": "chunked", + "server": "AmazonS3" + }, + "RetryAttempts": 0 + } + } +} \ No newline at end of file diff --git a/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_2.json b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_2.json new file mode 100644 index 000000000..a414d54f8 --- /dev/null +++ b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_2.json @@ -0,0 +1,18 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RequestId": "A1147AFA4BEF07BA", + "HostId": "Fxv8kpOjAeISFoOayckNGqtMKqYci9oe/Z2GZ3Sqoc+cGxRpmdpNhdPr8SZWmKH39HvNqDbX1qg=", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amz-id-2": "Fxv8kpOjAeISFoOayckNGqtMKqYci9oe/Z2GZ3Sqoc+cGxRpmdpNhdPr8SZWmKH39HvNqDbX1qg=", + "x-amz-request-id": "A1147AFA4BEF07BA", + "date": "Tue, 23 Jun 2020 14:33:30 GMT", + "transfer-encoding": "chunked", + "server": "AmazonS3" + }, + "RetryAttempts": 0 + } + } +} \ No newline at end of file diff --git a/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_3.json b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_3.json new file mode 100644 index 000000000..cec02b4cd --- /dev/null +++ b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_3.json @@ -0,0 +1,27 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RequestId": "68727B2ED4A5E5DB", + "HostId": "h2Tim4RpazYTXTo+NzdC8vH5gqlYtEGYHhHxjqTKRoUyFCgetU6gGc17mg/nNOF7lVxMJgBSB5s=", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amz-id-2": "h2Tim4RpazYTXTo+NzdC8vH5gqlYtEGYHhHxjqTKRoUyFCgetU6gGc17mg/nNOF7lVxMJgBSB5s=", + "x-amz-request-id": "68727B2ED4A5E5DB", + "date": "Tue, 23 Jun 2020 14:33:30 GMT", + 
"transfer-encoding": "chunked", + "server": "AmazonS3" + }, + "RetryAttempts": 0 + }, + "LambdaFunctionConfigurations": [ + { + "Id": "lambda:test_settings.callback", + "LambdaFunctionArn": "arn:aws:lambda:lambda:lambda:lambda:lambda", + "Events": [ + "s3:ObjectCreated:*" + ] + } + ] + } +} \ No newline at end of file diff --git a/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_4.json b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_4.json new file mode 100644 index 000000000..86075387d --- /dev/null +++ b/tests/placebo/TestZappa.test_add_event_source/s3.GetBucketNotificationConfiguration_4.json @@ -0,0 +1,27 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RequestId": "AB1396B752AF96FA", + "HostId": "paLR5Q18m5aW8gKTFSWY17Cw8oZWnuANlsCGFEnzTb1Vb/Ja/+vyK5GT1l+KOK0n8+ieM6MvuaM=", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amz-id-2": "paLR5Q18m5aW8gKTFSWY17Cw8oZWnuANlsCGFEnzTb1Vb/Ja/+vyK5GT1l+KOK0n8+ieM6MvuaM=", + "x-amz-request-id": "AB1396B752AF96FA", + "date": "Tue, 23 Jun 2020 14:33:58 GMT", + "transfer-encoding": "chunked", + "server": "AmazonS3" + }, + "RetryAttempts": 0 + }, + "LambdaFunctionConfigurations": [ + { + "Id": "lambda:test_settings.callback", + "LambdaFunctionArn": "arn:aws:lambda:lambda:lambda:lambda:lambda", + "Events": [ + "s3:ObjectCreated:*" + ] + } + ] + } +} \ No newline at end of file diff --git a/tests/placebo/TestZappa.test_add_event_source/s3.PutBucketNotificationConfiguration_1.json b/tests/placebo/TestZappa.test_add_event_source/s3.PutBucketNotificationConfiguration_1.json new file mode 100644 index 000000000..ce6fa9908 --- /dev/null +++ b/tests/placebo/TestZappa.test_add_event_source/s3.PutBucketNotificationConfiguration_1.json @@ -0,0 +1,18 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RequestId": "7R9WFV4Q3WFWFH2W", + "HostId": "yKZnYszkf/RBcrnCHwT566JbRjTYanGk/pmujo9dXLLIMdxofz+z1srIfyFxo2mMQzPzkuH8Ybg=", + 
"HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amz-id-2": "yKZnYszkf/RBcrnCHwT566JbRjTYanGk/pmujo9dXLLIMdxofz+z1srIfyFxo2mMQzPzkuH8Ybg=", + "x-amz-request-id": "7R9WFV4Q3WFWFH2W", + "date": "Tue, 23 Jun 2020 14:33:30 GMT", + "content-length": "0", + "server": "AmazonS3" + }, + "RetryAttempts": 0 + } + } +} \ No newline at end of file diff --git a/tests/placebo/TestZappa.test_add_event_source/s3.PutBucketNotificationConfiguration_2.json b/tests/placebo/TestZappa.test_add_event_source/s3.PutBucketNotificationConfiguration_2.json new file mode 100644 index 000000000..4447a4060 --- /dev/null +++ b/tests/placebo/TestZappa.test_add_event_source/s3.PutBucketNotificationConfiguration_2.json @@ -0,0 +1,18 @@ +{ + "status_code": 200, + "data": { + "ResponseMetadata": { + "RequestId": "61E24EFFB461E8E9", + "HostId": "sB3PWivILvgM5clr+VFuq0ymED90/wd76dk7DoRbC0u/7XsxlC9sWHT7Jvi8fF/bksgjEWiUT10=", + "HTTPStatusCode": 200, + "HTTPHeaders": { + "x-amz-id-2": "sB3PWivILvgM5clr+VFuq0ymED90/wd76dk7DoRbC0u/7XsxlC9sWHT7Jvi8fF/bksgjEWiUT10=", + "x-amz-request-id": "61E24EFFB461E8E9", + "date": "Tue, 23 Jun 2020 14:33:58 GMT", + "content-length": "0", + "server": "AmazonS3" + }, + "RetryAttempts": 0 + } + } +} \ No newline at end of file diff --git a/tests/tests_placebo.py b/tests/tests_placebo.py index 47e477173..a863f198f 100644 --- a/tests/tests_placebo.py +++ b/tests/tests_placebo.py @@ -484,8 +484,8 @@ def test_add_event_source(self, session): event_source = {'arn': 's3:s3:s3:s3', 'events': [ "s3:ObjectCreated:*" ]} - add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) - remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) + add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session) + remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session) # get_event_source_status(event_source, 
'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) @placebo_session From cda35d71cca9fcd339ea375d4afeaf40cb104010 Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Tue, 23 Jun 2020 10:21:59 -0500 Subject: [PATCH 24/27] better tests --- tests/tests_placebo.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tests/tests_placebo.py b/tests/tests_placebo.py index a863f198f..28b78525d 100644 --- a/tests/tests_placebo.py +++ b/tests/tests_placebo.py @@ -481,9 +481,7 @@ def test_add_event_source(self, session): except ValueError: pass - event_source = {'arn': 's3:s3:s3:s3', 'events': [ - "s3:ObjectCreated:*" - ]} + event_source = {'arn': 's3:s3:s3:s3', 'events': ["s3:ObjectCreated:*"], 'key_filters': [{'type': 'prefix', 'value': 'value'}]} add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session) remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session) # get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) From 5cc892fbdc997c6cd249a1f19a531077a9c0152b Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Tue, 23 Jun 2020 11:29:04 -0500 Subject: [PATCH 25/27] explicit tests --- tests/tests_placebo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/tests_placebo.py b/tests/tests_placebo.py index 28b78525d..2d769746d 100644 --- a/tests/tests_placebo.py +++ b/tests/tests_placebo.py @@ -482,8 +482,8 @@ def test_add_event_source(self, session): pass event_source = {'arn': 's3:s3:s3:s3', 'events': ["s3:ObjectCreated:*"], 'key_filters': [{'type': 'prefix', 'value': 'value'}]} - add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session) - remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session) + add_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', 
session, dry=False) + remove_event_source(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=False) # get_event_source_status(event_source, 'lambda:lambda:lambda:lambda', 'test_settings.callback', session, dry=True) @placebo_session From a29e385e9f8fe7cf6dd84af8420d63d003fe3a0b Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Tue, 23 Jun 2020 11:46:36 -0500 Subject: [PATCH 26/27] Trigger travis From 8c91fd3d1306083f27d67507130c943133ec60cd Mon Sep 17 00:00:00 2001 From: Jerry Gregoire Date: Tue, 23 Jun 2020 11:58:26 -0500 Subject: [PATCH 27/27] Trigger travis