Source code for airflow.providers.amazon.aws.operators.batch
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""AWS Batch services.

.. seealso::

    - https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
    - https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html
    - https://docs.aws.amazon.com/batch/latest/APIReference/Welcome.html
"""
from __future__ import annotations

import warnings
from datetime import timedelta
from functools import cached_property
from typing import TYPE_CHECKING, Any, Sequence

from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.batch_client import BatchClientHook
from airflow.providers.amazon.aws.links.batch import (
    BatchJobDefinitionLink,
    BatchJobDetailsLink,
    BatchJobQueueLink,
)
from airflow.providers.amazon.aws.links.logs import CloudWatchEventsLink
from airflow.providers.amazon.aws.triggers.batch import BatchOperatorTrigger
from airflow.providers.amazon.aws.utils import trim_none_values
from airflow.providers.amazon.aws.utils.task_log_fetcher import AwsTaskLogFetcher

if TYPE_CHECKING:
    from airflow.utils.context import Context
[docs]class BatchOperator(BaseOperator):
    """Execute a job on AWS Batch.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BatchOperator`

    :param job_name: the name for the job that will run on AWS Batch (templated)
    :param job_definition: the job definition name on AWS Batch
    :param job_queue: the queue name on AWS Batch
    :param overrides: DEPRECATED, use ``container_overrides`` instead with the same value.
    :param container_overrides: the ``containerOverrides`` parameter for boto3 (templated)
    :param node_overrides: the ``nodeOverrides`` parameter for boto3 (templated)
    :param share_identifier: The share identifier for the job. Don't specify this parameter
        if the job queue doesn't have a scheduling policy.
    :param scheduling_priority_override: The scheduling priority for the job. Jobs with a
        higher scheduling priority are scheduled before jobs with a lower scheduling priority.
        This overrides any scheduling priority in the job definition.
    :param array_properties: the ``arrayProperties`` parameter for boto3
    :param parameters: the ``parameters`` for boto3 (templated)
    :param job_id: the job ID, usually unknown (None) until the submit_job operation gets
        the jobId defined by AWS Batch
    :param waiters: a :py:class:`.BatchWaiters` object (see note below);
        if None, polling is used with max_retries and status_retries.
    :param max_retries: exponential back-off retries, 4200 = 48 hours;
        polling is only used when waiters is None
    :param status_retries: number of HTTP retries to get job status, 10;
        polling is only used when waiters is None
    :param aws_conn_id: connection id of AWS credentials / region name. If None,
        a credential boto3 strategy will be used.
    :param region_name: region name to use in AWS Hook.
        Overrides the region_name in connection (if provided)
    :param tags: collection of tags to apply to the AWS Batch job submission;
        if None, no tags are submitted
    :param deferrable: Run operator in the deferrable mode.
    :param awslogs_enabled: Specifies whether logs from CloudWatch should be printed or not;
        defaults to False. If it is an array job, only the logs of the first task will be printed.
    :param awslogs_fetch_interval: The interval with which CloudWatch logs are to be fetched;
        defaults to 30 seconds.
    :param poll_interval: (Deferrable mode only) Time in seconds to wait between polling.

    .. note::
        Any custom waiters must return a waiter for these calls:

        .. code-block:: python

            waiter = waiters.get_waiter("JobExists")
            waiter = waiters.get_waiter("JobRunning")
            waiter = waiters.get_waiter("JobComplete")
    """
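    # A minimal usage sketch (illustrative, not part of this module): the task id,
    # job name, job definition, and queue below are hypothetical placeholder names.
    #
    #     from datetime import datetime
    #     from airflow import DAG
    #     from airflow.providers.amazon.aws.operators.batch import BatchOperator
    #
    #     with DAG(dag_id="example_batch", start_date=datetime(2023, 1, 1), schedule=None):
    #         submit_batch_job = BatchOperator(
    #             task_id="submit_batch_job",
    #             job_name="example-job",
    #             job_definition="my-job-def",
    #             job_queue="my-queue",
    #             container_overrides={"command": ["echo", "hello world"]},
    #             wait_for_completion=True,
    #         )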
    @property
[docs]    def operator_extra_links(self):
        op_extra_links = [BatchJobDetailsLink()]
        if self.wait_for_completion:
            op_extra_links.extend([BatchJobDefinitionLink(), BatchJobQueueLink()])
        if not self.array_properties:
            # There is no CloudWatch Link to the parent Batch Job available.
            op_extra_links.append(CloudWatchEventsLink())
        return tuple(op_extra_links)
    def __init__(
        self,
        *,
        job_name: str,
        job_definition: str,
        job_queue: str,
        overrides: dict | None = None,  # deprecated
        container_overrides: dict | None = None,
        array_properties: dict | None = None,
        node_overrides: dict | None = None,
        share_identifier: str | None = None,
        scheduling_priority_override: int | None = None,
        parameters: dict | None = None,
        job_id: str | None = None,
        waiters: Any | None = None,
        max_retries: int | None = None,
        status_retries: int | None = None,
        aws_conn_id: str | None = None,
        region_name: str | None = None,
        tags: dict | None = None,
        wait_for_completion: bool = True,
        deferrable: bool = False,
        poll_interval: int = 30,
        awslogs_enabled: bool = False,
        awslogs_fetch_interval: timedelta = timedelta(seconds=30),
        **kwargs,
    ) -> None:
        BaseOperator.__init__(self, **kwargs)
        self.job_id = job_id
        self.job_name = job_name
        self.job_definition = job_definition
        self.job_queue = job_queue
        self.container_overrides = container_overrides
        # handle `overrides` deprecation in favor of `container_overrides`
        if overrides:
            if container_overrides:
                # disallow setting both old and new params
                raise AirflowException(
                    "'container_overrides' replaces the 'overrides' parameter. "
                    "You cannot specify both. Please remove the assignment to the deprecated 'overrides'."
                )
            self.container_overrides = overrides
            warnings.warn(
                "Parameter `overrides` is deprecated. Please use `container_overrides` instead.",
                AirflowProviderDeprecationWarning,
                stacklevel=2,
            )

        self.node_overrides = node_overrides
        self.share_identifier = share_identifier
        self.scheduling_priority_override = scheduling_priority_override
        self.array_properties = array_properties
        self.parameters = parameters or {}
        self.waiters = waiters
        self.tags = tags or {}
        self.wait_for_completion = wait_for_completion
        self.deferrable = deferrable
        self.poll_interval = poll_interval
        self.awslogs_enabled = awslogs_enabled
        self.awslogs_fetch_interval = awslogs_fetch_interval

        # params for hook
        self.max_retries = max_retries
        self.status_retries = status_retries
        self.aws_conn_id = aws_conn_id
        self.region_name = region_name

    @cached_property
    def hook(self) -> BatchClientHook:
        return BatchClientHook(
            max_retries=self.max_retries,
            status_retries=self.status_retries,
            aws_conn_id=self.aws_conn_id,
            region_name=self.region_name,
        )
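    # Illustrative shape of ``container_overrides`` (boto3's ``containerOverrides``
    # structure for ``submit_job``); the command, variable name, and values below
    # are placeholders:
    #
    #     container_overrides = {
    #         "command": ["python", "my_script.py"],
    #         "environment": [{"name": "MY_ENV_VAR", "value": "some-value"}],
    #         "resourceRequirements": [{"type": "VCPU", "value": "2"}],
    #     }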
[docs]    def execute(self, context: Context):
        """Submit and monitor an AWS Batch job.

        :raises: AirflowException
        """
        self.submit_job(context)

        if self.deferrable:
            self.defer(
                timeout=self.execution_timeout,
                trigger=BatchOperatorTrigger(
                    job_id=self.job_id,
                    max_retries=self.max_retries or 10,
                    aws_conn_id=self.aws_conn_id,
                    region_name=self.region_name,
                    poll_interval=self.poll_interval,
                ),
                method_name="execute_complete",
            )

        if self.wait_for_completion:
            self.monitor_job(context)

        return self.job_id
[docs]    def execute_complete(self, context, event=None):
        if event["status"] != "success":
            raise AirflowException(f"Error while running job: {event}")
        else:
            self.log.info("Job completed.")
        return event["job_id"]
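    # The ``event`` handled above is emitted by BatchOperatorTrigger; based on the
    # keys read here, a successful event has the form (job id is a placeholder):
    #
    #     {"status": "success", "job_id": "1b2c3d4e-example"}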
[docs]    def on_kill(self):
        response = self.hook.client.terminate_job(jobId=self.job_id, reason="Task killed by the user")
        self.log.info("AWS Batch job (%s) terminated: %s", self.job_id, response)
[docs]    def monitor_job(self, context: Context):
        """Monitor an AWS Batch job.

        This can raise an exception or an AirflowTaskTimeout if the task was
        created with ``execution_timeout``.
        """
        if not self.job_id:
            raise AirflowException("AWS Batch job - job_id was not found")

        try:
            job_desc = self.hook.get_job_description(self.job_id)
            job_definition_arn = job_desc["jobDefinition"]
            job_queue_arn = job_desc["jobQueue"]
            self.log.info(
                "AWS Batch job (%s) Job Definition ARN: %r, Job Queue ARN: %r",
                self.job_id,
                job_definition_arn,
                job_queue_arn,
            )
        except KeyError:
            self.log.warning("AWS Batch job (%s) can't get Job Definition ARN and Job Queue ARN", self.job_id)
        else:
            BatchJobDefinitionLink.persist(
                context=context,
                operator=self,
                region_name=self.hook.conn_region_name,
                aws_partition=self.hook.conn_partition,
                job_definition_arn=job_definition_arn,
            )
            BatchJobQueueLink.persist(
                context=context,
                operator=self,
                region_name=self.hook.conn_region_name,
                aws_partition=self.hook.conn_partition,
                job_queue_arn=job_queue_arn,
            )

        if self.awslogs_enabled:
            if self.waiters:
                self.waiters.wait_for_job(self.job_id, get_batch_log_fetcher=self._get_batch_log_fetcher)
            else:
                self.hook.wait_for_job(self.job_id, get_batch_log_fetcher=self._get_batch_log_fetcher)
        else:
            if self.waiters:
                self.waiters.wait_for_job(self.job_id)
            else:
                self.hook.wait_for_job(self.job_id)

        awslogs = self.hook.get_job_all_awslogs_info(self.job_id)
        if awslogs:
            self.log.info("AWS Batch job (%s) CloudWatch Events details found. Links to logs:", self.job_id)
            link_builder = CloudWatchEventsLink()
            for log in awslogs:
                self.log.info(link_builder.format_link(**log))
            if len(awslogs) > 1:
                # there can be several log streams on multi-node jobs
                self.log.warning(
                    "out of all those logs, we can only link to one in the UI. Using the first one."
                )

            CloudWatchEventsLink.persist(
                context=context,
                operator=self,
                region_name=self.hook.conn_region_name,
                aws_partition=self.hook.conn_partition,
                **awslogs[0],
            )

        self.hook.check_job_success(self.job_id)
        self.log.info("AWS Batch job (%s) succeeded", self.job_id)
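# A minimal sketch of a custom ``waiters`` object, assuming only what this module
# calls: ``monitor_job`` above invokes ``wait_for_job`` (optionally passing a
# ``get_batch_log_fetcher`` callable), and the class docstring requires
# ``get_waiter`` to support "JobExists", "JobRunning", and "JobComplete". This is
# an illustration, not the provider's :py:class:`.BatchWaiters` implementation:
#
#     class MyBatchWaiters:
#         def __init__(self, client):
#             self.client = client  # boto3 Batch client with custom waiter models loaded
#
#         def get_waiter(self, waiter_name: str):
#             return self.client.get_waiter(waiter_name)
#
#         def wait_for_job(self, job_id: str, get_batch_log_fetcher=None) -> None:
#             self.get_waiter("JobExists").wait(jobs=[job_id])
#             self.get_waiter("JobRunning").wait(jobs=[job_id])
#             self.get_waiter("JobComplete").wait(jobs=[job_id])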
[docs]class BatchCreateComputeEnvironmentOperator(BaseOperator):
    """Create an AWS Batch compute environment.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:BatchCreateComputeEnvironmentOperator`

    :param compute_environment_name: Name of the AWS Batch compute environment (templated).
    :param environment_type: Type of the compute environment.
    :param state: State of the compute environment.
    :param compute_resources: Details about the resources managed by the compute environment (templated).
        More details:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/batch.html#Batch.Client.create_compute_environment
    :param unmanaged_v_cpus: Maximum number of vCPUs for an unmanaged compute environment.
        This parameter is only supported when the ``type`` parameter is set to ``UNMANAGED``.
    :param service_role: IAM role that allows Batch to make calls to other AWS services on your
        behalf (templated).
    :param tags: Tags that you apply to the compute environment to help you categorize and
        organize your resources.
    :param max_retries: Exponential back-off retries, 4200 = 48 hours; polling is only used
        when waiters is None.
    :param status_retries: Number of HTTP retries to get job status, 10; polling is only used
        when waiters is None.
    :param aws_conn_id: Connection ID of AWS credentials / region name. If None, a credential
        boto3 strategy will be used.
    :param region_name: Region name to use in AWS Hook. Overrides the ``region_name`` in
        connection if provided.
    """

    def __init__(
        self,
        compute_environment_name: str,
        environment_type: str,
        state: str,
        compute_resources: dict,
        unmanaged_v_cpus: int | None = None,
        service_role: str | None = None,
        tags: dict | None = None,
        max_retries: int | None = None,
        status_retries: int | None = None,
        aws_conn_id: str | None = None,
        region_name: str | None = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.compute_environment_name = compute_environment_name
        self.environment_type = environment_type
        self.state = state
        self.unmanaged_v_cpus = unmanaged_v_cpus
        self.compute_resources = compute_resources
        self.service_role = service_role
        self.tags = tags or {}
        self.max_retries = max_retries
        self.status_retries = status_retries
        self.aws_conn_id = aws_conn_id
        self.region_name = region_name
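    # Usage sketch (illustrative): creating a managed Fargate environment; the
    # environment name, subnet id, and security group id are placeholders:
    #
    #     create_compute_environment = BatchCreateComputeEnvironmentOperator(
    #         task_id="create_batch_compute_environment",
    #         compute_environment_name="my-fargate-env",
    #         environment_type="MANAGED",
    #         state="ENABLED",
    #         compute_resources={
    #             "type": "FARGATE",
    #             "maxvCpus": 16,
    #             "subnets": ["subnet-0123456789abcdef0"],
    #             "securityGroupIds": ["sg-0123456789abcdef0"],
    #         },
    #     )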
    @cached_property
[docs]    def hook(self):
        """Create and return a BatchClientHook."""
        return BatchClientHook(
            max_retries=self.max_retries,
            status_retries=self.status_retries,
            aws_conn_id=self.aws_conn_id,
            region_name=self.region_name,
        )
[docs]    def execute(self, context: Context):
        """Create an AWS Batch compute environment."""
        kwargs: dict[str, Any] = {
            "computeEnvironmentName": self.compute_environment_name,
            "type": self.environment_type,
            "state": self.state,
            "unmanagedvCpus": self.unmanaged_v_cpus,
            "computeResources": self.compute_resources,
            "serviceRole": self.service_role,
            "tags": self.tags,
        }
        self.hook.client.create_compute_environment(**trim_none_values(kwargs))
        self.log.info("AWS Batch compute environment created successfully")
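# ``trim_none_values`` (from ``airflow.providers.amazon.aws.utils``) drops the
# ``None``-valued keys from the kwargs above, so optional parameters that were
# not set are omitted from the boto3 call entirely. Illustration:
#
#     >>> trim_none_values({"state": "ENABLED", "unmanagedvCpus": None, "serviceRole": None})
#     {'state': 'ENABLED'}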