Source code for airflow.providers.amazon.aws.operators.ecs
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
import time
from collections import deque
from datetime import datetime, timedelta
from logging import Logger
from threading import Event, Thread
from typing import Dict, Generator, Optional, Sequence

from botocore.exceptions import ClientError
from botocore.waiter import Waiter

from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, XCom
from airflow.providers.amazon.aws.exceptions import ECSOperatorError
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.typing_compat import Protocol, runtime_checkable
from airflow.utils.session import provide_session
def should_retry(exception: Exception):
    """Check if exception is related to ECS resource quota (CPU, MEM)."""
    if isinstance(exception, ECSOperatorError):
        return any(
            quota_reason in failure['reason']
            for quota_reason in ['RESOURCE:MEMORY', 'RESOURCE:CPU']
            for failure in exception.failures
        )
    return False
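# A minimal sketch of how ``should_retry`` classifies errors. The failure
# payload below is hypothetical, but mirrors the ``failures`` entries that
# ``ECS.Client.run_task`` returns when the cluster lacks memory or CPU.
def _example_should_retry():
    failures = [
        {
            'arn': 'arn:aws:ecs:us-east-1:123456789012:container-instance/example',
            'reason': 'RESOURCE:MEMORY',
            'detail': '',
        }
    ]
    exc = ECSOperatorError(failures, 'ECS task failed to start')
    assert should_retry(exc)  # resource quota errors are retryable
    assert not should_retry(ValueError('boom'))  # unrelated errors are not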
@runtime_checkable
class ECSProtocol(Protocol):
    """
    A structured Protocol for ``boto3.client('ecs')``. This is used for type hints on
    :py:meth:`.ECSOperator.client`.

    .. seealso::

        - https://mypy.readthedocs.io/en/latest/protocols.html
        - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
    """
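# A short sketch of why a structural ``Protocol`` works as a type hint for a
# boto3 client: any object with the right methods matches, no inheritance
# needed. ``_HasRunTask`` and ``_StubECSClient`` are hypothetical names used
# only for this illustration.
def _example_protocol_check():
    @runtime_checkable
    class _HasRunTask(Protocol):
        def run_task(self, **kwargs) -> Dict:
            ...

    class _StubECSClient:
        def run_task(self, **kwargs) -> Dict:
            return {'tasks': [], 'failures': []}

    # A runtime_checkable protocol checks for the method's presence only.
    assert isinstance(_StubECSClient(), _HasRunTask)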
class ECSTaskLogFetcher(Thread):
    """
    Fetches Cloudwatch log events with specific interval as a thread
    and sends the log events to the info channel of the provided logger.
    """

    def __init__(
        self,
        *,
        aws_conn_id: Optional[str] = 'aws_default',
        region_name: Optional[str] = None,
        log_group: str,
        log_stream_name: str,
        fetch_interval: timedelta,
        logger: Logger,
    ):
        super().__init__()
        self._event = Event()
        self.fetch_interval = fetch_interval
        self.logger = logger
        self.log_group = log_group
        self.log_stream_name = log_stream_name
        self.hook = AwsLogsHook(aws_conn_id=aws_conn_id, region_name=region_name)
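# A usage sketch for running the fetcher on its own; the log group, stream
# name, and region are placeholders. ``stop()`` and ``join()`` are the same
# shutdown sequence ``ECSOperator.execute`` uses below.
def _example_log_fetcher(logger: Logger):
    fetcher = ECSTaskLogFetcher(
        aws_conn_id='aws_default',
        region_name='us-east-1',
        log_group='/ecs/hello-world',
        log_stream_name='ecs/hello-world-container/0123456789abcdef',
        fetch_interval=timedelta(seconds=30),
        logger=logger,
    )
    fetcher.start()
    time.sleep(60)  # the real operator waits on the ECS task instead
    fetcher.stop()
    fetcher.join()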
class ECSOperator(BaseOperator):
    """
    Execute a task on AWS ECS (Elastic Container Service)

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:ECSOperator`

    :param task_definition: the task definition name on Elastic Container Service
    :type task_definition: str
    :param cluster: the cluster name on Elastic Container Service
    :type cluster: str
    :param overrides: the same parameter that boto3 will receive (templated):
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
    :type overrides: dict
    :param aws_conn_id: connection id of AWS credentials / region name. If None,
        credential boto3 strategy will be used
        (http://boto3.readthedocs.io/en/latest/guide/configuration.html).
    :type aws_conn_id: str
    :param region_name: region name to use in AWS Hook.
        Override the region_name in connection (if provided)
    :type region_name: str
    :param launch_type: the launch type on which to run your task ('EC2' or 'FARGATE')
    :type launch_type: str
    :param capacity_provider_strategy: the capacity provider strategy to use for the task.
        When capacity_provider_strategy is specified, the launch_type parameter is omitted.
        If no capacity_provider_strategy or launch_type is specified,
        the default capacity provider strategy for the cluster is used.
    :type capacity_provider_strategy: list
    :param group: the name of the task group associated with the task
    :type group: str
    :param placement_constraints: an array of placement constraint objects to use for
        the task
    :type placement_constraints: list
    :param placement_strategy: an array of placement strategy objects to use for
        the task
    :type placement_strategy: list
    :param platform_version: the platform version on which your task is running
    :type platform_version: str
    :param network_configuration: the network configuration for the task
    :type network_configuration: dict
    :param tags: a dictionary of tags in the form of {'tagKey': 'tagValue'}.
    :type tags: dict
    :param awslogs_group: the CloudWatch group where your ECS container logs are stored.
        Only required if you want logs to be shown in the Airflow UI after your job has
        finished.
    :type awslogs_group: str
    :param awslogs_region: the region in which your CloudWatch logs are stored.
        If None, this is the same as the `region_name` parameter. If that is also None,
        this is the default AWS region based on your connection settings.
    :type awslogs_region: str
    :param awslogs_stream_prefix: the stream prefix that is used for the CloudWatch logs.
        This is usually based on some custom name combined with the name of the container.
        Only required if you want logs to be shown in the Airflow UI after your job has
        finished.
    :type awslogs_stream_prefix: str
    :param awslogs_fetch_interval: the interval that the ECS task log fetcher should wait
        in between each Cloudwatch logs fetches.
    :type awslogs_fetch_interval: timedelta
    :param quota_retry: Config if and how to retry the launch of a new ECS task, to handle
        transient errors.
    :type quota_retry: dict
    :param reattach: If set to True, will check if the task previously launched by the task_instance
        is already running. If so, the operator will attach to it instead of starting a new task.
        This is to avoid relaunching a new task when the connection drops between Airflow and ECS
        while the task is running (when the Airflow worker is restarted for example).
    :type reattach: bool
    :param number_logs_exception: Number of lines from the last Cloudwatch logs to return in the
        AirflowException if an ECS task is stopped (to receive Airflow alerts with the logs of what
        failed in the code running in ECS).
    :type number_logs_exception: int
    """
    @provide_session
    def execute(self, context, session=None):
        self.log.info(
            'Running ECS Task - Task definition: %s - on cluster %s', self.task_definition, self.cluster
        )
        self.log.info('ECSOperator overrides: %s', self.overrides)

        self.client = self.get_hook().get_conn()

        if self.reattach:
            self._try_reattach_task(context)

        if not self.arn:
            self._start_task(context)

        if self._aws_logs_enabled():
            self.log.info('Starting ECS Task Log Fetcher')
            self.task_log_fetcher = self._get_task_log_fetcher()
            self.task_log_fetcher.start()

            try:
                self._wait_for_task_ended()
            finally:
                self.task_log_fetcher.stop()

            self.task_log_fetcher.join()
        else:
            self._wait_for_task_ended()

        self._check_success_task()

        self.log.info('ECS Task has been successfully executed')

        if self.reattach:
            # Clear the XCom value storing the ECS task ARN if the task has completed
            # as we can't reattach it anymore
            self._xcom_del(session, self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id))

        if self.do_xcom_push and self.task_log_fetcher:
            return self.task_log_fetcher.get_last_log_message()

        return None
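    # When ``do_xcom_push`` is enabled and logs are fetched, ``execute`` returns
    # the last CloudWatch log line, which Airflow stores as the task's XCom
    # return value. A downstream consumer could look roughly like this sketch
    # (the ``hello_world`` task id is a placeholder):
    #
    #     def report(ti):
    #         last_line = ti.xcom_pull(task_ids='hello_world')
    #         print(f'ECS task finished, last log line: {last_line}')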
    def _xcom_del(self, session, task_id):
        session.query(XCom).filter(XCom.dag_id == self.dag_id, XCom.task_id == task_id).delete()

    def _start_task(self, context):
        run_opts = {
            'cluster': self.cluster,
            'taskDefinition': self.task_definition,
            'overrides': self.overrides,
            'startedBy': self.owner,
        }

        if self.capacity_provider_strategy:
            run_opts['capacityProviderStrategy'] = self.capacity_provider_strategy
        elif self.launch_type:
            run_opts['launchType'] = self.launch_type
        if self.platform_version is not None:
            run_opts['platformVersion'] = self.platform_version
        if self.group is not None:
            run_opts['group'] = self.group
        if self.placement_constraints is not None:
            run_opts['placementConstraints'] = self.placement_constraints
        if self.placement_strategy is not None:
            run_opts['placementStrategy'] = self.placement_strategy
        if self.network_configuration is not None:
            run_opts['networkConfiguration'] = self.network_configuration
        if self.tags is not None:
            run_opts['tags'] = [{'key': k, 'value': v} for (k, v) in self.tags.items()]
        if self.propagate_tags is not None:
            run_opts['propagateTags'] = self.propagate_tags

        response = self.client.run_task(**run_opts)

        failures = response['failures']
        if len(failures) > 0:
            raise ECSOperatorError(failures, response)
        self.log.info('ECS Task started: %s', response)

        self.arn = response['tasks'][0]['taskArn']
        self.ecs_task_id = self.arn.split("/")[-1]
        self.log.info(f"ECS task ID is: {self.ecs_task_id}")

        if self.reattach:
            # Save the task ARN in XCom to be able to reattach it if needed
            self._xcom_set(
                context,
                key=self.REATTACH_XCOM_KEY,
                value=self.arn,
                task_id=self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id),
            )

    def _xcom_set(self, context, key, value, task_id):
        XCom.set(
            key=key,
            value=value,
            task_id=task_id,
            dag_id=self.dag_id,
            run_id=context["run_id"],
        )

    def _try_reattach_task(self, context):
        task_def_resp = self.client.describe_task_definition(taskDefinition=self.task_definition)
        ecs_task_family = task_def_resp['taskDefinition']['family']

        list_tasks_resp = self.client.list_tasks(
            cluster=self.cluster, desiredStatus='RUNNING', family=ecs_task_family
        )
        running_tasks = list_tasks_resp['taskArns']

        # Check if the ECS task previously launched is already running
        previous_task_arn = self.xcom_pull(
            context,
            task_ids=self.REATTACH_XCOM_TASK_ID_TEMPLATE.format(task_id=self.task_id),
            key=self.REATTACH_XCOM_KEY,
        )
        if previous_task_arn in running_tasks:
            self.arn = previous_task_arn
            self.log.info("Reattaching previously launched task: %s", self.arn)
        else:
            self.log.info("No active previously launched task found to reattach")

    def _wait_for_task_ended(self) -> None:
        if not self.client or not self.arn:
            return

        waiter = self.client.get_waiter('tasks_stopped')
        waiter.config.max_attempts = sys.maxsize  # timeout is managed by airflow
        waiter.wait(cluster=self.cluster, tasks=[self.arn])

        return

    def _aws_logs_enabled(self):
        return self.awslogs_group and self.awslogs_stream_prefix

    def _get_task_log_fetcher(self) -> ECSTaskLogFetcher:
        if not self.awslogs_group:
            raise ValueError("must specify awslogs_group to fetch task logs")
        log_stream_name = f"{self.awslogs_stream_prefix}/{self.ecs_task_id}"

        return ECSTaskLogFetcher(
            aws_conn_id=self.aws_conn_id,
            region_name=self.awslogs_region,
            log_group=self.awslogs_group,
            log_stream_name=log_stream_name,
            fetch_interval=self.awslogs_fetch_interval,
            logger=self.log,
        )

    def _check_success_task(self) -> None:
        if not self.client or not self.arn:
            return

        response = self.client.describe_tasks(cluster=self.cluster, tasks=[self.arn])
        self.log.info('ECS Task stopped, check status: %s', response)

        if len(response.get('failures', [])) > 0:
            raise AirflowException(response)

        for task in response['tasks']:

            if task.get('stopCode', '') == 'TaskFailedToStart':
                raise AirflowException(f"The task failed to start due to: {task.get('stoppedReason', '')}")

            # This is a `stoppedReason` that indicates a task has not
            # successfully finished, but there is no other indication of failure
            # in the response.
            # https://docs.aws.amazon.com/AmazonECS/latest/developerguide/stopped-task-errors.html
            if re.match(r'Host EC2 \(instance .+?\) (stopped|terminated)\.', task.get('stoppedReason', '')):
                raise AirflowException(
                    'The task was stopped because the host instance terminated: {}'.format(
                        task.get('stoppedReason', '')
                    )
                )
            containers = task['containers']
            for container in containers:
                if container.get('lastStatus') == 'STOPPED' and container.get('exitCode', 1) != 0:
                    if self.task_log_fetcher:
                        last_logs = "\n".join(
                            self.task_log_fetcher.get_last_log_messages(self.number_logs_exception)
                        )
                        raise AirflowException(
                            f"This task is not in success state - last {self.number_logs_exception} "
                            f"logs from Cloudwatch:\n{last_logs}"
                        )
                    else:
                        raise AirflowException(f'This task is not in success state {task}')
                elif container.get('lastStatus') == 'PENDING':
                    raise AirflowException(f'This task is still pending {task}')
                elif 'error' in container.get('reason', '').lower():
                    raise AirflowException(
                        'This container encountered an error during launch: {}'.format(
                            container.get('reason', '').lower()
                        )
                    )
    def get_hook(self) -> AwsBaseHook:
        """Create and return an AwsHook."""
        if self.hook:
            return self.hook

        self.hook = AwsBaseHook(aws_conn_id=self.aws_conn_id, client_type='ecs', region_name=self.region_name)
        return self.hook
    def on_kill(self) -> None:
        if not self.client or not self.arn:
            return

        if self.task_log_fetcher:
            self.task_log_fetcher.stop()

        response = self.client.stop_task(
            cluster=self.cluster, task=self.arn, reason='Task killed by the user'
        )
        self.log.info(response)
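# A minimal DAG sketch using the operator (this would live in a DAG file, not
# in this module); the cluster, task definition, container, subnet, and log
# names below are placeholders.
from airflow import DAG

with DAG(dag_id='ecs_fargate_example', start_date=datetime(2021, 1, 1), schedule_interval=None) as dag:
    hello_world = ECSOperator(
        task_id='hello_world',
        cluster='my-ecs-cluster',
        task_definition='hello-world',
        launch_type='FARGATE',
        overrides={
            'containerOverrides': [
                {'name': 'hello-world-container', 'command': ['echo', 'hello']},
            ],
        },
        network_configuration={
            'awsvpcConfiguration': {'subnets': ['subnet-12345678'], 'assignPublicIp': 'ENABLED'},
        },
        # With both awslogs settings present, container logs appear in the
        # Airflow UI, and the last log line is pushed to XCom.
        awslogs_group='/ecs/hello-world',
        awslogs_stream_prefix='ecs/hello-world-container',
        do_xcom_push=True,
    )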