# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import copy
import functools
import logging
import os
import pathlib
import pickle
import re
import sys
import traceback
import warnings
from collections import OrderedDict
from datetime import datetime, timedelta, tzinfo
from inspect import signature
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Collection,
    Dict,
    FrozenSet,
    Iterable,
    List,
    Optional,
    Set,
    Tuple,
    Type,
    Union,
    cast,
    overload,
)

import jinja2
import pendulum
from dateutil.relativedelta import relativedelta
from jinja2.nativetypes import NativeEnvironment
from sqlalchemy import Boolean, Column, ForeignKey, Index, Integer, String, Text, func, or_
from sqlalchemy.orm import backref, joinedload, relationship
from sqlalchemy.orm.session import Session
from sqlalchemy.sql import expression

import airflow.templates
from airflow import settings, utils
from airflow.compat.functools import cached_property
from airflow.configuration import conf
from airflow.exceptions import AirflowException, DuplicateTaskIdFound, TaskNotFound
from airflow.models.base import ID_LEN, Base
from airflow.models.baseoperator import BaseOperator
from airflow.models.dagbag import DagBag
from airflow.models.dagcode import DagCode
from airflow.models.dagpickle import DagPickle
from airflow.models.dagrun import DagRun
from airflow.models.param import DagParam, ParamsDict
from airflow.models.taskinstance import Context, TaskInstance, TaskInstanceKey, clear_task_instances
from airflow.security import permissions
from airflow.stats import Stats
from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable
from airflow.timetables.interval import CronDataIntervalTimetable, DeltaDataIntervalTimetable
from airflow.timetables.simple import NullTimetable, OnceTimetable
from airflow.typing_compat import Literal, RePatternType
from airflow.utils import timezone
from airflow.utils.dag_cycle_tester import check_cycle
from airflow.utils.dates import cron_presets, date_range as utils_date_range
from airflow.utils.file import correct_maybe_zipped
from airflow.utils.helpers import validate_key
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import Interval, UtcDateTime, skip_locked, with_row_locks
from airflow.utils.state import DagRunState, State
from airflow.utils.types import DagRunType, EdgeInfoType

if TYPE_CHECKING:
    from airflow.utils.task_group import TaskGroup
class InconsistentDataInterval(AirflowException):
    """Exception raised when a model populates data interval fields incorrectly.

    The data interval fields should either both be None (for runs scheduled
    prior to AIP-39), or both be datetime (for runs scheduled after AIP-39 is
    implemented). This is raised if exactly one of the fields is None.
    """

    _template = (
        "Inconsistent {cls}: {start[0]}={start[1]!r}, {end[0]}={end[1]!r}, "
        "they must be either both None or both datetime"
    )

    def __init__(self, instance: Any, start_field_name: str, end_field_name: str) -> None:
        self._class_name = type(instance).__name__
        self._start_field = (start_field_name, getattr(instance, start_field_name))
        self._end_field = (end_field_name, getattr(instance, end_field_name))
def create_timetable(interval: ScheduleIntervalArg, timezone: tzinfo) -> Timetable:
    """Create a Timetable instance from a ``schedule_interval`` argument."""
    if interval is ScheduleIntervalArgNotSet:
        return DeltaDataIntervalTimetable(DEFAULT_SCHEDULE_INTERVAL)
    if interval is None:
        return NullTimetable()
    if interval == "@once":
        return OnceTimetable()
    if isinstance(interval, (timedelta, relativedelta)):
        return DeltaDataIntervalTimetable(interval)
    if isinstance(interval, str):
        return CronDataIntervalTimetable(interval, timezone)
    raise ValueError(f"{interval!r} is not a valid schedule_interval.")
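# Illustrative sketch (not part of this module): how common ``schedule_interval``
# values map to timetables, following the branches above.
#
#   create_timetable(None, settings.TIMEZONE)                # NullTimetable()
#   create_timetable("@once", settings.TIMEZONE)             # OnceTimetable()
#   create_timetable("0 0 * * *", settings.TIMEZONE)         # CronDataIntervalTimetable(...)
#   create_timetable(timedelta(days=1), settings.TIMEZONE)   # DeltaDataIntervalTimetable(...)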
def get_last_dagrun(dag_id, session, include_externally_triggered=False):
    """
    Returns the last dag run for a dag, None if there was none.
    Last dag run can be any type of run, e.g. scheduled or backfilled.
    Overridden DagRuns are ignored.
    """
    DR = DagRun
    query = session.query(DR).filter(DR.dag_id == dag_id)
    if not include_externally_triggered:
        query = query.filter(DR.external_trigger == expression.false())
    query = query.order_by(DR.execution_date.desc())
    return query.first()
@functools.total_ordering
class DAG(LoggingMixin):
    """
    A dag (directed acyclic graph) is a collection of tasks with directional
    dependencies. A dag also has a schedule, a start date and an end date
    (optional). For each schedule (say daily or hourly), the DAG needs to run
    each individual task as its dependencies are met. Certain tasks have
    the property of depending on their own past, meaning that they can't run
    until their previous schedule (and upstream tasks) are completed.

    DAGs essentially act as namespaces for tasks. A task_id can only be
    added once to a DAG.

    :param dag_id: The id of the DAG; must consist exclusively of alphanumeric
        characters, dashes, dots and underscores (all ASCII)
    :type dag_id: str
    :param description: The description for the DAG to e.g. be shown on the webserver
    :type description: str
    :param schedule_interval: Defines how often that DAG runs, this
        timedelta object gets added to your latest task instance's
        execution_date to figure out the next schedule
    :type schedule_interval: datetime.timedelta or
        dateutil.relativedelta.relativedelta or str that acts as a cron
        expression
    :param timetable: Specify which timetable to use (in which case schedule_interval
        must not be set). See :doc:`/howto/timetable` for more information
    :type timetable: airflow.timetables.base.Timetable
    :param start_date: The timestamp from which the scheduler will
        attempt to backfill
    :type start_date: datetime.datetime
    :param end_date: A date beyond which your DAG won't run, leave to None
        for open-ended scheduling
    :type end_date: datetime.datetime
    :param template_searchpath: This list of folders (non-relative)
        defines where jinja will look for your templates. Order matters.
        Note that jinja/airflow includes the path of your DAG file by
        default
    :type template_searchpath: str or list[str]
    :param template_undefined: Template undefined type.
    :type template_undefined: jinja2.StrictUndefined
    :param user_defined_macros: a dictionary of macros that will be exposed
        in your jinja templates. For example, passing ``dict(foo='bar')``
        to this argument allows you to ``{{ foo }}`` in all jinja
        templates related to this DAG. Note that you can pass any
        type of object here.
    :type user_defined_macros: dict
    :param user_defined_filters: a dictionary of filters that will be exposed
        in your jinja templates. For example, passing
        ``dict(hello=lambda name: 'Hello %s' % name)`` to this argument allows
        you to ``{{ 'world' | hello }}`` in all jinja templates related to
        this DAG.
    :type user_defined_filters: dict
    :param default_args: A dictionary of default parameters to be used
        as constructor keyword parameters when initialising operators.
        Note that operators have the same hook, and precede those defined
        here, meaning that if your dict contains `'depends_on_past': True`
        here and `'depends_on_past': False` in the operator's call
        `default_args`, the actual value will be `False`.
    :type default_args: dict
    :param params: a dictionary of DAG level parameters that are made
        accessible in templates, namespaced under `params`. These
        params can be overridden at the task level.
    :type params: dict
    :param max_active_tasks: the number of task instances allowed to run
        concurrently
    :type max_active_tasks: int
    :param max_active_runs: maximum number of active DAG runs, beyond this
        number of DAG runs in a running state, the scheduler won't create
        new active DAG runs
    :type max_active_runs: int
    :param dagrun_timeout: specify how long a DagRun should be up before
        timing out / failing, so that new DagRuns can be created. The timeout
        is only enforced for scheduled DagRuns.
    :type dagrun_timeout: datetime.timedelta
    :param sla_miss_callback: specify a function to call when reporting SLA
        timeouts. See :ref:`sla_miss_callback<concepts:sla_miss_callback>` for
        more information about the function signature and parameters that are
        passed to the callback.
    :type sla_miss_callback: callable
    :param default_view: Specify DAG default view (tree, graph, duration,
        gantt, landing_times), default tree
    :type default_view: str
    :param orientation: Specify DAG orientation in graph view (LR, TB, RL, BT), default LR
    :type orientation: str
    :param catchup: Perform scheduler catchup (or only run latest)? Defaults to True
    :type catchup: bool
    :param on_failure_callback: A function to be called when a DagRun of this dag fails.
        A context dictionary is passed as a single parameter to this function.
    :type on_failure_callback: callable
    :param on_success_callback: Much like the ``on_failure_callback`` except
        that it is executed when the dag succeeds.
    :type on_success_callback: callable
    :param access_control: Specify optional DAG-level actions, e.g.,
        "{'role1': {'can_read'}, 'role2': {'can_read', 'can_edit'}}"
    :type access_control: dict
    :param is_paused_upon_creation: Specifies if the dag is paused when created for the first time.
        If the dag exists already, this flag will be ignored. If this optional parameter
        is not specified, the global config setting will be used.
    :type is_paused_upon_creation: bool or None
    :param jinja_environment_kwargs: additional configuration options to be passed to Jinja
        ``Environment`` for template rendering

        **Example**: to avoid Jinja from removing a trailing newline from template strings ::

            DAG(dag_id='my-dag',
                jinja_environment_kwargs={
                    'keep_trailing_newline': True,
                    # some other jinja2 Environment options here
                }
            )

        **See**: `Jinja Environment documentation
        <https://jinja.palletsprojects.com/en/2.11.x/api/#jinja2.Environment>`_
    :type jinja_environment_kwargs: dict
    :param render_template_as_native_obj: If True, uses a Jinja ``NativeEnvironment``
        to render templates as native Python types. If False, a Jinja
        ``Environment`` is used to render templates as string values.
    :type render_template_as_native_obj: bool
    :param tags: List of tags to help filtering DAGs in the UI.
    :type tags: List[str]
    """

    _comps = {
        'dag_id',
        'task_ids',
        'parent_dag',
        'start_date',
        'schedule_interval',
        'fileloc',
        'template_searchpath',
        'last_loaded',
    }

    __serialized_fields: Optional[FrozenSet[str]] = None
""" File path that needs to be imported to load this DAG or subdag. This may not be an actual file on disk in the case when this DAG is loaded from a ZIP file or other DAG distribution format. """def__init__(self,dag_id:str,description:Optional[str]=None,schedule_interval:ScheduleIntervalArg=ScheduleIntervalArgNotSet,timetable:Optional[Timetable]=None,start_date:Optional[datetime]=None,end_date:Optional[datetime]=None,full_filepath:Optional[str]=None,template_searchpath:Optional[Union[str,Iterable[str]]]=None,template_undefined:Type[jinja2.StrictUndefined]=jinja2.StrictUndefined,user_defined_macros:Optional[Dict]=None,user_defined_filters:Optional[Dict]=None,default_args:Optional[Dict]=None,concurrency:Optional[int]=None,max_active_tasks:int=conf.getint('core','max_active_tasks_per_dag'),max_active_runs:int=conf.getint('core','max_active_runs_per_dag'),dagrun_timeout:Optional[timedelta]=None,sla_miss_callback:Optional[Callable[["DAG",str,str,List[str],List[TaskInstance]],None]]=None,default_view:str=conf.get('webserver','dag_default_view').lower(),orientation:str=conf.get('webserver','dag_orientation'),catchup:bool=conf.getboolean('scheduler','catchup_by_default'),on_success_callback:Optional[DagStateChangeCallback]=None,on_failure_callback:Optional[DagStateChangeCallback]=None,doc_md:Optional[str]=None,params:Optional[Dict]=None,access_control:Optional[Dict]=None,is_paused_upon_creation:Optional[bool]=None,jinja_environment_kwargs:Optional[Dict]=None,render_template_as_native_obj:bool=False,tags:Optional[List[str]]=None,):fromairflow.utils.task_groupimportTaskGroupself.user_defined_macros=user_defined_macrosself.user_defined_filters=user_defined_filtersself.default_args=copy.deepcopy(default_argsor{})self.params=paramsor{}# merging potentially conflicting default_args['params'] into paramsif'params'inself.default_args:self.params.update(self.default_args['params'])delself.default_args['params']# check self.params and convert them into ParamsDictself.params=ParamsDict(self.params)iffull_filepath:warnings.warn("Passing full_filepath to DAG() is deprecated and has no effect",DeprecationWarning,stacklevel=2,)validate_key(dag_id)self._dag_id=dag_idifconcurrency:# TODO: Remove in Airflow 3.0warnings.warn("The 'concurrency' parameter is deprecated. 
Please use 'max_active_tasks'.",DeprecationWarning,stacklevel=2,)max_active_tasks=concurrencyself._max_active_tasks=max_active_tasksself._pickle_id:Optional[int]=Noneself._description=description# set file location to caller source pathback=sys._getframe().f_backself.fileloc=back.f_code.co_filenameifbackelse""self.task_dict:Dict[str,BaseOperator]={}# set timezone from start_dateifstart_dateandstart_date.tzinfo:self.timezone=start_date.tzinfoelif'start_date'inself.default_argsandself.default_args['start_date']:ifisinstance(self.default_args['start_date'],str):self.default_args['start_date']=timezone.parse(self.default_args['start_date'])self.timezone=self.default_args['start_date'].tzinfoifnothasattr(self,'timezone')ornotself.timezone:self.timezone=settings.TIMEZONE# Apply the timezone we settled on to end_date if it wasn't suppliedif'end_date'inself.default_argsandself.default_args['end_date']:ifisinstance(self.default_args['end_date'],str):self.default_args['end_date']=timezone.parse(self.default_args['end_date'],timezone=self.timezone)self.start_date=timezone.convert_to_utc(start_date)self.end_date=timezone.convert_to_utc(end_date)# also convert tasksif'start_date'inself.default_args:self.default_args['start_date']=timezone.convert_to_utc(self.default_args['start_date'])if'end_date'inself.default_args:self.default_args['end_date']=timezone.convert_to_utc(self.default_args['end_date'])# Calculate the DAG's timetable.iftimetableisNone:self.timetable=create_timetable(schedule_interval,self.timezone)ifschedule_intervalisScheduleIntervalArgNotSet:schedule_interval=DEFAULT_SCHEDULE_INTERVALself.schedule_interval:ScheduleInterval=schedule_intervalelifschedule_intervalisScheduleIntervalArgNotSet:self.timetable=timetableself.schedule_interval=self.timetable.summaryelse:raiseTypeError("cannot specify both 'schedule_interval' and 'timetable'")ifisinstance(template_searchpath,str):template_searchpath=[template_searchpath]self.template_searchpath=template_searchpathself.template_undefined=template_undefinedself.parent_dag:Optional[DAG]=None# Gets set when DAGs are loadedself.last_loaded=timezone.utcnow()self.safe_dag_id=dag_id.replace('.','__dot__')self.max_active_runs=max_active_runsself.dagrun_timeout=dagrun_timeoutself.sla_miss_callback=sla_miss_callbackifdefault_viewinDEFAULT_VIEW_PRESETS:self._default_view:str=default_viewelse:raiseAirflowException(f'Invalid values of dag.default_view: only support 'f'{DEFAULT_VIEW_PRESETS}, but get {default_view}')iforientationinORIENTATION_PRESETS:self.orientation=orientationelse:raiseAirflowException(f'Invalid values of dag.orientation: only support 'f'{ORIENTATION_PRESETS}, but get {orientation}')self.catchup=catchupself.is_subdag=False# DagBag.bag_dag() will set this to True if appropriateself.partial=Falseself.on_success_callback=on_success_callbackself.on_failure_callback=on_failure_callback# Keeps track of any extra edge metadata (sparse; will not contain all# edges, so do not iterate over it for that). 
Outer key is upstream# task ID, inner key is downstream task ID.self.edge_info:Dict[str,Dict[str,EdgeInfoType]]={}# To keep it in parity with Serialized DAGs# and identify if DAG has on_*_callback without actually storing them in Serialized JSONself.has_on_success_callback=self.on_success_callbackisnotNoneself.has_on_failure_callback=self.on_failure_callbackisnotNoneself.doc_md=doc_mdself._access_control=DAG._upgrade_outdated_dag_access_control(access_control)self.is_paused_upon_creation=is_paused_upon_creationself.jinja_environment_kwargs=jinja_environment_kwargsself.render_template_as_native_obj=render_template_as_native_objself.tags=tagsself._task_group=TaskGroup.create_root(self)self.validate_schedule_and_params()
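    # Illustrative sketch (not part of this module): a minimal DAG definition
    # exercising the constructor above. The operator import and task id are
    # assumptions for demonstration only.
    #
    #   from airflow.operators.dummy import DummyOperator
    #
    #   with DAG(
    #       dag_id="example_dag",
    #       schedule_interval="@daily",
    #       start_date=datetime(2021, 1, 1),
    #       catchup=False,
    #       tags=["example"],
    #   ) as dag:
    #       DummyOperator(task_id="start")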
    def __eq__(self, other):
        if type(self) == type(other):
            # Use getattr() instead of __dict__ as __dict__ doesn't return
            # correct values for properties.
            return all(getattr(self, c, None) == getattr(other, c, None) for c in self._comps)
        return False
    def __hash__(self):
        hash_components = [type(self)]
        for c in self._comps:
            # task_ids returns a list and lists can't be hashed
            if c == 'task_ids':
                val = tuple(self.task_dict.keys())
            else:
                val = getattr(self, c, None)
            try:
                hash(val)
                hash_components.append(val)
            except TypeError:
                hash_components.append(repr(val))
        return hash(tuple(hash_components))
    # /Context Manager ----------------------------------------------

    @staticmethod
    def _upgrade_outdated_dag_access_control(access_control=None):
        """
        Looks for outdated dag level actions (can_dag_read and can_dag_edit) in DAG
        access_controls (for example, {'role1': {'can_dag_read'}, 'role2': {'can_dag_read', 'can_dag_edit'}})
        and replaces them with updated actions (can_read and can_edit).
        """
        if not access_control:
            return None
        new_perm_mapping = {
            permissions.DEPRECATED_ACTION_CAN_DAG_READ: permissions.ACTION_CAN_READ,
            permissions.DEPRECATED_ACTION_CAN_DAG_EDIT: permissions.ACTION_CAN_EDIT,
        }
        updated_access_control = {}
        for role, perms in access_control.items():
            updated_access_control[role] = {new_perm_mapping.get(perm, perm) for perm in perms}

        if access_control != updated_access_control:
            warnings.warn(
                "The 'can_dag_read' and 'can_dag_edit' permissions are deprecated. "
                "Please use 'can_read' and 'can_edit', respectively.",
                DeprecationWarning,
                stacklevel=3,
            )

        return updated_access_control
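    # Illustrative sketch: the upgrade above rewrites deprecated per-DAG actions
    # and emits a DeprecationWarning when anything changed.
    #
    #   DAG._upgrade_outdated_dag_access_control({'viewer': {'can_dag_read'}})
    #   # -> {'viewer': {'can_read'}}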
    def date_range(
        self,
        start_date: datetime,
        num: Optional[int] = None,
        end_date: Optional[datetime] = timezone.utcnow(),
    ) -> List[datetime]:
        message = "`DAG.date_range()` is deprecated."
        if num is not None:
            result = utils_date_range(start_date=start_date, num=num)
        else:
            message += " Please use `DAG.iter_dagrun_infos_between(..., align=False)` instead."
            result = [
                info.logical_date
                for info in self.iter_dagrun_infos_between(start_date, end_date, align=False)
            ]
        warnings.warn(message, category=DeprecationWarning, stacklevel=2)
        return result
    def is_fixed_time_schedule(self):
        warnings.warn(
            "`DAG.is_fixed_time_schedule()` is deprecated.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        try:
            return not self.timetable._should_fix_dst
        except AttributeError:
            return True
    def following_schedule(self, dttm):
        """
        Calculates the following schedule for this dag in UTC.

        :param dttm: utc datetime
        :return: utc datetime
        """
        warnings.warn(
            "`DAG.following_schedule()` is deprecated. Use `DAG.next_dagrun_info(restricted=False)` instead.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        data_interval = self.infer_automated_data_interval(timezone.coerce_datetime(dttm))
        next_info = self.next_dagrun_info(data_interval, restricted=False)
        if next_info is None:
            return None
        return next_info.data_interval.start
    def previous_schedule(self, dttm):
        from airflow.timetables.interval import _DataIntervalTimetable

        warnings.warn(
            "`DAG.previous_schedule()` is deprecated.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        if not isinstance(self.timetable, _DataIntervalTimetable):
            return None
        return self.timetable._get_prev(timezone.coerce_datetime(dttm))
    def get_next_data_interval(self, dag_model: "DagModel") -> Optional[DataInterval]:
        """Get the data interval of the next scheduled run.

        For compatibility, this method infers the data interval from the DAG's
        schedule if the run does not have an explicit one set, which is possible
        for runs created prior to AIP-39.

        This function is private to Airflow core and should not be depended on
        as part of the Python API.

        :meta private:
        """
        if self.dag_id != dag_model.dag_id:
            raise ValueError(f"Arguments refer to different DAGs: {self.dag_id} != {dag_model.dag_id}")
        if dag_model.next_dagrun is None:  # Next run not scheduled.
            return None
        data_interval = dag_model.next_dagrun_data_interval
        if data_interval is not None:
            return data_interval
        # Compatibility: A run was scheduled without an explicit data interval.
        # This means the run was scheduled before AIP-39 implementation. Try to
        # infer from the logical date.
        return self.infer_automated_data_interval(dag_model.next_dagrun)
    def get_run_data_interval(self, run: DagRun) -> DataInterval:
        """Get the data interval of this run.

        For compatibility, this method infers the data interval from the DAG's
        schedule if the run does not have an explicit one set, which is possible
        for runs created prior to AIP-39.

        This function is private to Airflow core and should not be depended on
        as part of the Python API.

        :meta private:
        """
        if run.dag_id is not None and run.dag_id != self.dag_id:
            raise ValueError(f"Arguments refer to different DAGs: {self.dag_id} != {run.dag_id}")
        data_interval = _get_model_data_interval(run, "data_interval_start", "data_interval_end")
        if data_interval is not None:
            return data_interval
        # Compatibility: runs created before AIP-39 implementation don't have an
        # explicit data interval. Try to infer from the logical date.
        return self.infer_automated_data_interval(run.execution_date)
    def infer_automated_data_interval(self, logical_date: datetime) -> DataInterval:
        """Infer a data interval for a run against this DAG.

        This method is used to bridge runs created prior to AIP-39
        implementation, which do not have an explicit data interval. Therefore,
        this method only considers ``schedule_interval`` values valid prior to
        Airflow 2.2.

        DO NOT use this method if there is a known data interval.
        """
        timetable_type = type(self.timetable)
        if issubclass(timetable_type, (NullTimetable, OnceTimetable)):
            return DataInterval.exact(timezone.coerce_datetime(logical_date))
        start = timezone.coerce_datetime(logical_date)
        if issubclass(timetable_type, CronDataIntervalTimetable):
            end = cast(CronDataIntervalTimetable, self.timetable)._get_next(start)
        elif issubclass(timetable_type, DeltaDataIntervalTimetable):
            end = cast(DeltaDataIntervalTimetable, self.timetable)._get_next(start)
        else:
            raise ValueError(f"Not a valid timetable: {self.timetable!r}")
        return DataInterval(start, end)
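    # Illustrative sketch: for a daily cron DAG in UTC, a pre-AIP-39 logical
    # date maps to a one-day interval starting at that date. Dates and dag id
    # are assumptions for demonstration.
    #
    #   dag = DAG("demo", schedule_interval="0 0 * * *", start_date=datetime(2021, 1, 1))
    #   dag.infer_automated_data_interval(pendulum.datetime(2021, 6, 1, tz="UTC"))
    #   # -> DataInterval(start=2021-06-01T00:00:00+00:00, end=2021-06-02T00:00:00+00:00)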
    def next_dagrun_info(
        self,
        last_automated_dagrun: Union[None, datetime, DataInterval],
        *,
        restricted: bool = True,
    ) -> Optional[DagRunInfo]:
        """Get information about the next DagRun of this dag after ``last_automated_dagrun``.

        This calculates what time interval the next DagRun should operate on
        (its execution date) and when it can be scheduled, according to the
        dag's timetable, start_date, end_date, etc. This doesn't check max
        active run or any other "max_active_tasks" type limits, but only
        performs calculations based on the various date and interval fields of
        this dag and its tasks.

        :param last_automated_dagrun: The ``max(execution_date)`` of
            existing "automated" DagRuns for this dag (scheduled or backfill,
            but not manual).
        :param restricted: If set to *False* (default is *True*), ignore
            ``start_date``, ``end_date``, and ``catchup`` specified on the DAG
            or tasks.
        :return: DagRunInfo of the next dagrun, or None if a dagrun is not
            going to be scheduled.
        """
        # Never schedule a subdag. It will be scheduled by its parent dag.
        if self.is_subdag:
            return None

        if isinstance(last_automated_dagrun, datetime):
            warnings.warn(
                "Passing a datetime to DAG.next_dagrun_info is deprecated. Use a DataInterval instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            data_interval = self.infer_automated_data_interval(
                timezone.coerce_datetime(last_automated_dagrun)
            )
        else:
            data_interval = last_automated_dagrun
        if restricted:
            restriction = self._time_restriction
        else:
            restriction = TimeRestriction(earliest=None, latest=None, catchup=True)
        try:
            info = self.timetable.next_dagrun_info(
                last_automated_data_interval=data_interval,
                restriction=restriction,
            )
        except Exception:
            self.log.exception(
                "Failed to fetch run info after data interval %s for DAG %r",
                data_interval,
                self.dag_id,
            )
            info = None
        return info
    def next_dagrun_after_date(self, date_last_automated_dagrun: Optional[pendulum.DateTime]):
        warnings.warn(
            "`DAG.next_dagrun_after_date()` is deprecated. Please use `DAG.next_dagrun_info()` instead.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        if date_last_automated_dagrun is None:
            data_interval = None
        else:
            data_interval = self.infer_automated_data_interval(date_last_automated_dagrun)
        info = self.next_dagrun_info(data_interval)
        if info is None:
            return None
        return info.run_after
    def iter_dagrun_infos_between(
        self,
        earliest: Optional[pendulum.DateTime],
        latest: pendulum.DateTime,
        *,
        align: bool = True,
    ) -> Iterable[DagRunInfo]:
        """Yield DagRunInfo using this DAG's timetable between given interval.

        DagRunInfo instances are yielded if their ``logical_date`` is not
        earlier than ``earliest``, nor later than ``latest``. The instances
        are ordered by their ``logical_date`` from earliest to latest.

        If ``align`` is ``False``, the first run will happen immediately on
        ``earliest``, even if it does not fall on the logical timetable
        schedule. The default is ``True``, but subdags will ignore this value
        and always behave as if this is set to ``False`` for backward
        compatibility.

        Example: A DAG is scheduled to run every midnight (``0 0 * * *``). If
        ``earliest`` is ``2021-06-03 23:00:00``, the first DagRunInfo would be
        ``2021-06-03 23:00:00`` if ``align=False``, and ``2021-06-04 00:00:00``
        if ``align=True``.
        """
        if earliest is None:
            earliest = self._time_restriction.earliest
        earliest = timezone.coerce_datetime(earliest)
        latest = timezone.coerce_datetime(latest)

        restriction = TimeRestriction(earliest, latest, catchup=True)

        # HACK: Sub-DAGs are currently scheduled differently. For example, say
        # the schedule is @daily and start is 2021-06-03 22:16:00, a top-level
        # DAG should be first scheduled to run on midnight 2021-06-04, but a
        # sub-DAG should be first scheduled to run RIGHT NOW. We can change
        # this, but since sub-DAGs are going away in 3.0 anyway, let's keep
        # compatibility for now and remove this entirely later.
        if self.is_subdag:
            align = False

        try:
            info = self.timetable.next_dagrun_info(
                last_automated_data_interval=None,
                restriction=restriction,
            )
        except Exception:
            self.log.exception(
                "Failed to fetch run info after data interval %s for DAG %r",
                None,
                self.dag_id,
            )
            info = None

        if info is None:
            # No runs to be scheduled between the user-supplied timeframe. But
            # if align=False, "invent" a data interval for the timeframe itself.
            if not align:
                yield DagRunInfo.interval(earliest, latest)
            return

        # If align=False and earliest does not fall on the timetable's logical
        # schedule, "invent" a data interval for it.
        if not align and info.logical_date != earliest:
            yield DagRunInfo.interval(earliest, info.data_interval.start)

        # Generate naturally according to schedule.
        while info is not None:
            yield info
            try:
                info = self.timetable.next_dagrun_info(
                    last_automated_data_interval=info.data_interval,
                    restriction=restriction,
                )
            except Exception:
                self.log.exception(
                    "Failed to fetch run info after data interval %s for DAG %r",
                    info.data_interval,
                    self.dag_id,
                )
                break
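    # Illustrative sketch: enumerating run points over a window. Dates are
    # assumptions for demonstration.
    #
    #   for info in dag.iter_dagrun_infos_between(
    #       pendulum.datetime(2021, 6, 1, tz="UTC"),
    #       pendulum.datetime(2021, 6, 4, tz="UTC"),
    #   ):
    #       print(info.logical_date, info.data_interval)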
    def get_run_dates(self, start_date, end_date=None):
        """
        Returns a list of dates within the interval received as parameters,
        using this dag's schedule interval. Returned dates can be used for
        execution dates.

        :param start_date: The start date of the interval.
        :type start_date: datetime
        :param end_date: The end date of the interval. Defaults to ``timezone.utcnow()``.
        :type end_date: datetime
        :return: A list of dates within the interval following the dag's schedule.
        :rtype: list
        """
        warnings.warn(
            "`DAG.get_run_dates()` is deprecated. Please use `DAG.iter_dagrun_infos_between()` instead.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        earliest = timezone.coerce_datetime(start_date)
        if end_date is None:
            latest = pendulum.now(timezone.utc)
        else:
            latest = timezone.coerce_datetime(end_date)
        return [info.logical_date for info in self.iter_dagrun_infos_between(earliest, latest)]
    def normalize_schedule(self, dttm):
        warnings.warn(
            "`DAG.normalize_schedule()` is deprecated.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            following = self.following_schedule(dttm)
        if not following:  # in case of @once
            return dttm
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", DeprecationWarning)
            previous_of_following = self.previous_schedule(following)
        if previous_of_following != dttm:
            return following
        return dttm
    @property
    def full_filepath(self) -> str:
        """:meta private:"""
        warnings.warn(
            "DAG.full_filepath is deprecated in favour of fileloc",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.fileloc

    @full_filepath.setter
    def full_filepath(self, value) -> None:
        warnings.warn(
            "DAG.full_filepath is deprecated in favour of fileloc",
            DeprecationWarning,
            stacklevel=2,
        )
        self.fileloc = value

    @property
    def concurrency(self) -> int:
        # TODO: Remove in Airflow 3.0
        warnings.warn(
            "The 'DAG.concurrency' attribute is deprecated. Please use 'DAG.max_active_tasks'.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self._max_active_tasks
    def param(self, name: str, default=None) -> DagParam:
        """
        Return a DagParam object for current dag.

        :param name: dag parameter name.
        :param default: fallback value for dag parameter.
        :return: DagParam instance for specified name and current dag.
        """
        return DagParam(current_dag=self, name=name, default=default)
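    # Illustrative sketch: ``my_param`` is an assumed name. The returned
    # DagParam is a deferred reference that resolves at runtime, e.g. when
    # rendered inside a templated operator argument.
    #
    #   value = dag.param("my_param", default=42)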
    @property
    def filepath(self) -> str:
        """:meta private:"""
        warnings.warn(
            "filepath is deprecated, use relative_fileloc instead",
            DeprecationWarning,
            stacklevel=2,
        )
        return str(self.relative_fileloc)
    @property
    def relative_fileloc(self) -> pathlib.Path:
        """File location of the importable dag 'file' relative to the configured DAGs folder."""
        path = pathlib.Path(self.fileloc)
        try:
            return path.relative_to(settings.DAGS_FOLDER)
        except ValueError:
            # Not relative to DAGS_FOLDER.
            return path
    @property
    def folder(self) -> str:
        """Folder location of where the DAG object is instantiated."""
        return os.path.dirname(self.fileloc)
    @property
    def owner(self) -> str:
        """
        Return a comma-separated list of all owners found in DAG tasks.

        :return: Comma separated list of owners in DAG tasks
        :rtype: str
        """
        return ", ".join({t.owner for t in self.tasks})
    @provide_session
    def get_concurrency_reached(self, session=None) -> bool:
        """Returns a boolean indicating whether the max_active_tasks limit for this DAG has been reached."""
        TI = TaskInstance
        qry = session.query(func.count(TI.task_id)).filter(
            TI.dag_id == self.dag_id,
            TI.state == State.RUNNING,
        )
        return qry.scalar() >= self.max_active_tasks
    @property
    def concurrency_reached(self):
        """This attribute is deprecated. Please use `airflow.models.DAG.get_concurrency_reached` method."""
        warnings.warn(
            "This attribute is deprecated. Please use `airflow.models.DAG.get_concurrency_reached` method.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.get_concurrency_reached()
    @provide_session
    def get_is_active(self, session=None) -> Optional[bool]:
        """Returns a boolean indicating whether this DAG is active."""
        qry = session.query(DagModel).filter(DagModel.dag_id == self.dag_id)
        return qry.value(DagModel.is_active)
    @provide_session
    def get_is_paused(self, session=None) -> Optional[bool]:
        """Returns a boolean indicating whether this DAG is paused."""
        qry = session.query(DagModel).filter(DagModel.dag_id == self.dag_id)
        return qry.value(DagModel.is_paused)
    @property
    def is_paused(self):
        """This attribute is deprecated. Please use `airflow.models.DAG.get_is_paused` method."""
        warnings.warn(
            "This attribute is deprecated. Please use `airflow.models.DAG.get_is_paused` method.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.get_is_paused()
    @property
    def normalized_schedule_interval(self) -> Optional[ScheduleInterval]:
        warnings.warn(
            "DAG.normalized_schedule_interval() is deprecated.",
            category=DeprecationWarning,
            stacklevel=2,
        )
        if isinstance(self.schedule_interval, str) and self.schedule_interval in cron_presets:
            _schedule_interval = cron_presets.get(self.schedule_interval)  # type: Optional[ScheduleInterval]
        elif self.schedule_interval == '@once':
            _schedule_interval = None
        else:
            _schedule_interval = self.schedule_interval
        return _schedule_interval
    @provide_session
    def handle_callback(self, dagrun, success=True, reason=None, session=None):
        """
        Triggers the appropriate callback depending on the value of success, namely the
        on_failure_callback or on_success_callback. This method gets the context of a
        single TaskInstance part of this DagRun and passes that to the callable along
        with a 'reason', primarily to differentiate DagRun failures.

        .. note: The logs end up in
            ``$AIRFLOW_HOME/logs/scheduler/latest/PROJECT/DAG_FILE.py.log``

        :param dagrun: DagRun object
        :param success: Flag to specify if failure or success callback should be called
        :param reason: Completion reason
        :param session: Database session
        """
        callback = self.on_success_callback if success else self.on_failure_callback
        if callback:
            self.log.info('Executing dag callback function: %s', callback)
            tis = dagrun.get_task_instances(session=session)
            ti = tis[-1]  # get the last TaskInstance of DagRun
            ti.task = self.get_task(ti.task_id)
            context = ti.get_template_context(session=session)
            context.update({'reason': reason})
            try:
                callback(context)
            except Exception:
                self.log.exception("failed to invoke dag state update callback")
                Stats.incr("dag.callback_exceptions")
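    # Illustrative sketch: a callback wired to receive the context built above;
    # the ``reason`` key is set by handle_callback(). The callback body is an
    # assumption for demonstration.
    #
    #   def notify(context):
    #       print("DAG run ended:", context["reason"])
    #
    #   dag = DAG("demo", on_failure_callback=notify)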
    def get_active_runs(self):
        """
        Returns a list of execution dates of the currently running DAG runs.

        :return: List of execution dates
        """
        runs = DagRun.find(dag_id=self.dag_id, state=State.RUNNING)

        active_dates = []
        for run in runs:
            active_dates.append(run.execution_date)

        return active_dates
    @provide_session
    def get_num_active_runs(self, external_trigger=None, only_running=True, session=None):
        """
        Returns the number of active "running" dag runs.

        :param external_trigger: True for externally triggered active dag runs
        :type external_trigger: bool
        :param session:
        :return: number greater than 0 for active dag runs
        """
        # .count() is inefficient
        query = session.query(func.count()).filter(DagRun.dag_id == self.dag_id)
        if only_running:
            query = query.filter(DagRun.state == State.RUNNING)
        else:
            query = query.filter(DagRun.state.in_({State.RUNNING, State.QUEUED}))

        if external_trigger is not None:
            query = query.filter(
                DagRun.external_trigger == (expression.true() if external_trigger else expression.false())
            )

        return query.scalar()
    @provide_session
    def get_dagrun(
        self,
        execution_date: Optional[str] = None,
        run_id: Optional[str] = None,
        session: Optional[Session] = None,
    ):
        """
        Returns the dag run for a given execution date or run_id if it exists, otherwise None.

        :param execution_date: The execution date of the DagRun to find.
        :param run_id: The run_id of the DagRun to find.
        :param session:
        :return: The DagRun if found, otherwise None.
        """
        if not (execution_date or run_id):
            raise TypeError("You must provide either the execution_date or the run_id")
        query = session.query(DagRun)
        if execution_date:
            query = query.filter(DagRun.dag_id == self.dag_id, DagRun.execution_date == execution_date)
        if run_id:
            query = query.filter(DagRun.dag_id == self.dag_id, DagRun.run_id == run_id)
        return query.first()
    @provide_session
    def get_dagruns_between(self, start_date, end_date, session=None):
        """
        Returns the list of dag runs between start_date (inclusive) and end_date (inclusive).

        :param start_date: The starting execution date of the DagRun to find.
        :param end_date: The ending execution date of the DagRun to find.
        :param session:
        :return: The list of DagRuns found.
        """
        dagruns = (
            session.query(DagRun)
            .filter(
                DagRun.dag_id == self.dag_id,
                DagRun.execution_date >= start_date,
                DagRun.execution_date <= end_date,
            )
            .all()
        )

        return dagruns
    @provide_session
    def get_latest_execution_date(self, session: Session) -> Optional[datetime]:
        """Returns the latest date for which at least one dag run exists."""
        return session.query(func.max(DagRun.execution_date)).filter(DagRun.dag_id == self.dag_id).scalar()
    @property
    def latest_execution_date(self):
        """This attribute is deprecated. Please use `airflow.models.DAG.get_latest_execution_date` method."""
        warnings.warn(
            "This attribute is deprecated. Please use `airflow.models.DAG.get_latest_execution_date` method.",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.get_latest_execution_date()
    @property
    def subdags(self):
        """Returns a list of the subdag objects associated to this DAG."""
        # Check SubDag for class but don't check class directly
        from airflow.operators.subdag import SubDagOperator

        subdag_lst = []
        for task in self.tasks:
            if (
                isinstance(task, SubDagOperator)
                or
                # TODO remove in Airflow 2.0
                type(task).__name__ == 'SubDagOperator'
                or task.task_type == 'SubDagOperator'
            ):
                subdag_lst.append(task.subdag)
                subdag_lst += task.subdag.subdags
        return subdag_lst
    def get_template_env(self) -> jinja2.Environment:
        """Build a Jinja2 environment."""
        # Collect directories to search for template files
        searchpath = [self.folder]
        if self.template_searchpath:
            searchpath += self.template_searchpath

        # Default values (for backward compatibility)
        jinja_env_options = {
            'loader': jinja2.FileSystemLoader(searchpath),
            'undefined': self.template_undefined,
            'extensions': ["jinja2.ext.do"],
            'cache_size': 0,
        }
        if self.jinja_environment_kwargs:
            jinja_env_options.update(self.jinja_environment_kwargs)
        if self.render_template_as_native_obj:
            env = NativeEnvironment(**jinja_env_options)
        else:
            env = airflow.templates.SandboxedEnvironment(**jinja_env_options)  # type: ignore

        # Add any user defined items. Safe to edit globals as long as no templates are rendered yet.
        # http://jinja.pocoo.org/docs/2.10/api/#jinja2.Environment.globals
        if self.user_defined_macros:
            env.globals.update(self.user_defined_macros)
        if self.user_defined_filters:
            env.filters.update(self.user_defined_filters)

        return env
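    # Illustrative sketch: user macros registered on the DAG surface as globals
    # in the environment built above. The dag id and macro name are assumptions.
    #
    #   dag = DAG("demo", user_defined_macros={"project": "analytics"})
    #   env = dag.get_template_env()
    #   env.from_string("{{ project }}").render()  # -> "analytics"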
    def set_dependency(self, upstream_task_id, downstream_task_id):
        """
        Simple utility method to set dependency between two tasks that
        already have been added to the DAG using add_task()
        """
        self.get_task(upstream_task_id).set_downstream(self.get_task(downstream_task_id))
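    # Illustrative sketch: with assumed task ids "extract" and "load",
    #
    #   dag.set_dependency("extract", "load")
    #
    # is equivalent to:
    #
    #   dag.get_task("extract").set_downstream(dag.get_task("load"))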
    @provide_session
    def get_task_instances_before(
        self,
        base_date: datetime,
        num: int,
        *,
        session: Session,
    ) -> List[TaskInstance]:
        """Get ``num`` task instances before (including) ``base_date``.

        The returned list may contain exactly ``num`` task instances. It can
        have fewer if there are fewer than ``num`` scheduled DAG runs before
        ``base_date``, or more if there are manual task runs within the
        requested period, which do not count toward ``num``.
        """
        min_date: Optional[datetime] = (
            session.query(DagRun.execution_date)
            .filter(
                DagRun.dag_id == self.dag_id,
                DagRun.execution_date <= base_date,
                DagRun.run_type != DagRunType.MANUAL,
            )
            .order_by(DagRun.execution_date.desc())
            .offset(num)
            .limit(1)
            .scalar()
        )
        if min_date is None:
            min_date = timezone.utc_epoch()
        return self.get_task_instances(start_date=min_date, end_date=base_date, session=session)
    @overload
    def _get_task_instances(
        self,
        *,
        task_ids,
        start_date: Optional[datetime],
        end_date: Optional[datetime],
        run_id: None,
        state: Union[str, List[str]],
        include_subdags: bool,
        include_parentdag: bool,
        include_dependent_dags: bool,
        exclude_task_ids: Collection[str],
        as_pk_tuple: Literal[True],
        session: Session,
        dag_bag: "DagBag" = None,
        recursion_depth: int = 0,
        max_recursion_depth: int = None,
        visited_external_tis: Set[Tuple[str, str, datetime]] = None,
    ) -> Set["TaskInstanceKey"]:
        ...  # pragma: no cover

    @overload
    def _get_task_instances(
        self,
        *,
        task_ids,
        start_date: Optional[datetime],
        end_date: Optional[datetime],
        run_id: Optional[str],
        state: Union[str, List[str]],
        include_subdags: bool,
        include_parentdag: bool,
        include_dependent_dags: bool,
        as_pk_tuple: Literal[False],
        exclude_task_ids: Collection[str],
        session: Session,
        dag_bag: "DagBag" = None,
        recursion_depth: int = 0,
        max_recursion_depth: int = None,
        visited_external_tis: Set[Tuple[str, str, datetime]] = None,
    ) -> Iterable[TaskInstance]:
        ...  # pragma: no cover

    def _get_task_instances(
        self,
        *,
        task_ids,
        start_date: Optional[datetime],
        end_date: Optional[datetime],
        run_id: Optional[str],
        state: Union[str, List[str]],
        include_subdags: bool,
        include_parentdag: bool,
        include_dependent_dags: bool,
        as_pk_tuple: bool,
        exclude_task_ids: Collection[str],
        session: Session,
        dag_bag: "DagBag" = None,
        recursion_depth: int = 0,
        max_recursion_depth: int = None,
        visited_external_tis: Set[Tuple[str, str, datetime]] = None,
    ) -> Union[Iterable[TaskInstance], Set[TaskInstanceKey]]:
        TI = TaskInstance

        # If we are looking at subdags/dependent dags we want to avoid UNION calls
        # in SQL (it doesn't play nice with fields that have no equality operator,
        # like JSON types), we instead build our result set separately.
        #
        # This will be empty if we are only looking at one dag, in which case
        # we can return the filtered TI query object directly.
        result: Set[TaskInstanceKey] = set()

        # Do we want full objects, or just the primary columns?
        if as_pk_tuple:
            tis = session.query(TI.dag_id, TI.task_id, TI.run_id)
        else:
            tis = session.query(TaskInstance)
        tis = tis.join(TaskInstance.dag_run)

        if include_subdags:
            # Crafting the right filter for dag_id and task_ids combo
            conditions = []
            for dag in self.subdags + [self]:
                conditions.append(
                    (TaskInstance.dag_id == dag.dag_id) & TaskInstance.task_id.in_(dag.task_ids)
                )
            tis = tis.filter(or_(*conditions))
        else:
            tis = tis.filter(TaskInstance.dag_id == self.dag_id, TaskInstance.task_id.in_(self.task_ids))
        if run_id:
            tis = tis.filter(TaskInstance.run_id == run_id)
        if start_date:
            tis = tis.filter(DagRun.execution_date >= start_date)
        if task_ids:
            tis = tis.filter(TaskInstance.task_id.in_(task_ids))

        # This allows the allow_trigger_in_future config to take effect, rather than mandating exec_date <= UTC
        if end_date or not self.allow_future_exec_dates:
            end_date = end_date or timezone.utcnow()
            tis = tis.filter(DagRun.execution_date <= end_date)

        if state:
            if isinstance(state, str):
                tis = tis.filter(TaskInstance.state == state)
            elif len(state) == 1:
                tis = tis.filter(TaskInstance.state == state[0])
            else:
                # this is required to deal with NULL values
                if None in state:
                    if all(x is None for x in state):
                        tis = tis.filter(TaskInstance.state.is_(None))
                    else:
                        not_none_state = [s for s in state if s]
                        tis = tis.filter(
                            or_(TaskInstance.state.in_(not_none_state), TaskInstance.state.is_(None))
                        )
                else:
                    tis = tis.filter(TaskInstance.state.in_(state))

        # Next, get any of them from our parent DAG (if there is one)
        if include_parentdag and self.is_subdag and self.parent_dag is not None:
            p_dag = self.parent_dag.partial_subset(
                task_ids_or_regex=r"^{}$".format(self.dag_id.split('.')[1]),
                include_upstream=False,
                include_downstream=True,
            )
            result.update(
                p_dag._get_task_instances(
                    task_ids=task_ids,
                    start_date=start_date,
                    end_date=end_date,
                    run_id=None,
                    state=state,
                    include_subdags=include_subdags,
                    include_parentdag=False,
                    include_dependent_dags=include_dependent_dags,
                    as_pk_tuple=True,
                    exclude_task_ids=exclude_task_ids,
                    session=session,
                    dag_bag=dag_bag,
                    recursion_depth=recursion_depth,
                    max_recursion_depth=max_recursion_depth,
                    visited_external_tis=visited_external_tis,
                )
            )

        if include_dependent_dags:
            # Recursively find external tasks indicated by ExternalTaskMarker
            from airflow.sensors.external_task import ExternalTaskMarker

            query = tis
            if as_pk_tuple:
                condition = TI.filter_for_tis(TaskInstanceKey(*cols) for cols in tis.all())
                if condition is not None:
                    query = session.query(TI).filter(condition)

            if visited_external_tis is None:
                visited_external_tis = set()

            for ti in query.filter(TI.operator == ExternalTaskMarker.__name__):
                ti_key = ti.key.primary
                if ti_key in visited_external_tis:
                    continue

                visited_external_tis.add(ti_key)

                task: ExternalTaskMarker = cast(ExternalTaskMarker, copy.copy(self.get_task(ti.task_id)))
                ti.task = task

                if max_recursion_depth is None:
                    # Maximum recursion depth allowed is the recursion_depth of the first
                    # ExternalTaskMarker in the tasks to be visited.
                    max_recursion_depth = task.recursion_depth

                if recursion_depth + 1 > max_recursion_depth:
                    # Prevent cycles or accidents.
                    raise AirflowException(
                        "Maximum recursion depth {} reached for {} {}. "
                        "Attempted to clear too many tasks "
                        "or there may be a cyclic dependency.".format(
                            max_recursion_depth, ExternalTaskMarker.__name__, ti.task_id
                        )
                    )
                ti.render_templates()
                external_tis = (
                    session.query(TI)
                    .join(TI.dag_run)
                    .filter(
                        TI.dag_id == task.external_dag_id,
                        TI.task_id == task.external_task_id,
                        DagRun.execution_date == pendulum.parse(task.execution_date),
                    )
                )

                for tii in external_tis:
                    if not dag_bag:
                        dag_bag = DagBag(read_dags_from_db=True)
                    external_dag = dag_bag.get_dag(tii.dag_id, session=session)
                    if not external_dag:
                        raise AirflowException(f"Could not find dag {tii.dag_id}")
                    downstream = external_dag.partial_subset(
                        task_ids_or_regex=[tii.task_id],
                        include_upstream=False,
                        include_downstream=True,
                    )
                    result.update(
                        downstream._get_task_instances(
                            task_ids=None,
                            run_id=tii.run_id,
                            start_date=None,
                            end_date=None,
                            state=state,
                            include_subdags=include_subdags,
                            include_dependent_dags=include_dependent_dags,
                            include_parentdag=False,
                            as_pk_tuple=True,
                            exclude_task_ids=exclude_task_ids,
                            dag_bag=dag_bag,
                            session=session,
                            recursion_depth=recursion_depth + 1,
                            max_recursion_depth=max_recursion_depth,
                            visited_external_tis=visited_external_tis,
                        )
                    )

        if result or as_pk_tuple:
            # Only execute the `ti` query if we have also collected some other results (i.e. subdags etc.)
            if as_pk_tuple:
                result.update(TaskInstanceKey(*cols) for cols in tis.all())
            else:
                result.update(ti.key for ti in tis.all())

            if exclude_task_ids:
                result = set(
                    filter(
                        lambda key: key.task_id not in exclude_task_ids,
                        result,
                    )
                )

            if as_pk_tuple:
                return result
            elif result:
                # We've been asked for objects, let's combine it all back in to a result set
                tis = tis.with_entities(TI.dag_id, TI.task_id, TI.run_id)

                tis = session.query(TI).filter(TI.filter_for_tis(result))
        elif exclude_task_ids:
            tis = tis.filter(TI.task_id.notin_(list(exclude_task_ids)))

        return tis

    @provide_session
    def set_task_instance_state(
        self,
        task_id: str,
        execution_date: datetime,
        state: State,
        upstream: Optional[bool] = False,
        downstream: Optional[bool] = False,
        future: Optional[bool] = False,
        past: Optional[bool] = False,
        commit: Optional[bool] = True,
        session=None,
    ) -> List[TaskInstance]:
        """
        Set the state of a TaskInstance to the given state, and clear its downstream
        tasks that are in failed or upstream_failed state.

        :param task_id: Task ID of the TaskInstance
        :type task_id: str
        :param execution_date: execution_date of the TaskInstance
        :type execution_date: datetime
        :param state: State to set the TaskInstance to
        :type state: State
        :param upstream: Include all upstream tasks of the given task_id
        :type upstream: bool
        :param downstream: Include all downstream tasks of the given task_id
        :type downstream: bool
        :param future: Include all future TaskInstances of the given task_id
        :type future: bool
        :param commit: Commit changes
        :type commit: bool
        :param past: Include all past TaskInstances of the given task_id
        :type past: bool
        """
        from airflow.api.common.experimental.mark_tasks import set_state

        task = self.get_task(task_id)
        task.dag = self

        altered = set_state(
            tasks=[task],
            execution_date=execution_date,
            upstream=upstream,
            downstream=downstream,
            future=future,
            past=past,
            state=state,
            commit=commit,
            session=session,
        )

        if not commit:
            return altered

        # Clear downstream tasks that are in failed/upstream_failed state to resume them.
        # Flush the session so that the tasks marked success are reflected in the db.
        session.flush()
        subdag = self.partial_subset(
            task_ids_or_regex={task_id},
            include_downstream=True,
            include_upstream=False,
        )

        end_date = execution_date if not future else None
        start_date = execution_date if not past else None

        subdag.clear(
            start_date=start_date,
            end_date=end_date,
            include_subdags=True,
            include_parentdag=True,
            only_failed=True,
            session=session,
            # Exclude the task itself from being cleared
            exclude_task_ids={task_id},
        )

        return altered
    @property
    def roots(self) -> List[BaseOperator]:
        """Return nodes with no parents. These are first to execute and are called roots or root nodes."""
        return [task for task in self.tasks if not task.upstream_list]
    @property
    def leaves(self) -> List[BaseOperator]:
        """Return nodes with no children. These are last to execute and are called leaves or leaf nodes."""
        return [task for task in self.tasks if not task.downstream_list]
    def topological_sort(self, include_subdag_tasks: bool = False):
        """
        Sorts tasks in topological order, such that a task comes after any of its
        upstream dependencies.

        Heavily inspired by:
        http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/

        :param include_subdag_tasks: whether to include tasks in subdags, default to False
        :return: list of tasks in topological order
        """
        from airflow.operators.subdag import SubDagOperator  # Avoid circular import

        # convert into an OrderedDict to speedup lookup while keeping order the same
        graph_unsorted = OrderedDict((task.task_id, task) for task in self.tasks)

        graph_sorted = []  # type: List[BaseOperator]

        # special case
        if len(self.tasks) == 0:
            return tuple(graph_sorted)

        # Run until the unsorted graph is empty.
        while graph_unsorted:
            # Go through each of the node/edges pairs in the unsorted graph.
            # If a set of edges doesn't contain any nodes that haven't been
            # resolved, that is, that are still in the unsorted graph, remove
            # the pair from the unsorted graph, and append it to the sorted
            # graph. Note here that by using the items() method for iterating,
            # a copy of the unsorted graph is used, allowing us to modify the
            # unsorted graph as we move through it. We also keep a flag for
            # checking that graph is acyclic, which is true if any nodes are
            # resolved during each pass through the graph. If not, we need to
            # exit as the graph therefore can't be sorted.
            acyclic = False
            for node in list(graph_unsorted.values()):
                for edge in node.upstream_list:
                    if edge.task_id in graph_unsorted:
                        break
                # no edges in upstream tasks
                else:
                    acyclic = True
                    del graph_unsorted[node.task_id]
                    graph_sorted.append(node)
                    if include_subdag_tasks and isinstance(node, SubDagOperator):
                        graph_sorted.extend(node.subdag.topological_sort(include_subdag_tasks=True))

            if not acyclic:
                raise AirflowException(f"A cyclic dependency occurred in dag: {self.dag_id}")

        return tuple(graph_sorted)
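    # Illustrative sketch: for an assumed diamond-shaped DAG a >> [b, c] >> d,
    #
    #   [t.task_id for t in dag.topological_sort()]
    #   # e.g. ['a', 'b', 'c', 'd'] -- 'a' first, 'd' last; b/c order may vary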
    @provide_session
    def set_dag_runs_state(
        self,
        state: str = State.RUNNING,
        session: Session = None,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None,
        dag_ids: List[str] = None,
    ) -> None:
        warnings.warn(
            "This method is deprecated and will be removed in a future version.",
            DeprecationWarning,
            stacklevel=3,
        )
        dag_ids = dag_ids or [self.dag_id]
        query = session.query(DagRun).filter(DagRun.dag_id.in_(dag_ids))
        if start_date:
            query = query.filter(DagRun.execution_date >= start_date)
        if end_date:
            query = query.filter(DagRun.execution_date <= end_date)
        query.update({DagRun.state: state}, synchronize_session='fetch')
    @provide_session
    def clear(
        self,
        task_ids=None,
        start_date=None,
        end_date=None,
        only_failed=False,
        only_running=False,
        confirm_prompt=False,
        include_subdags=True,
        include_parentdag=True,
        dag_run_state: DagRunState = DagRunState.QUEUED,
        dry_run=False,
        session=None,
        get_tis=False,
        recursion_depth=0,
        max_recursion_depth=None,
        dag_bag=None,
        exclude_task_ids: FrozenSet[str] = frozenset({}),
    ):
        """
        Clears a set of task instances associated with the current dag for
        a specified date range.

        :param task_ids: List of task ids to clear
        :type task_ids: List[str]
        :param start_date: The minimum execution_date to clear
        :type start_date: datetime.datetime or None
        :param end_date: The maximum execution_date to clear
        :type end_date: datetime.datetime or None
        :param only_failed: Only clear failed tasks
        :type only_failed: bool
        :param only_running: Only clear running tasks.
        :type only_running: bool
        :param confirm_prompt: Ask for confirmation
        :type confirm_prompt: bool
        :param include_subdags: Clear tasks in subdags and clear external tasks
            indicated by ExternalTaskMarker
        :type include_subdags: bool
        :param include_parentdag: Clear tasks in the parent dag of the subdag.
        :type include_parentdag: bool
        :param dag_run_state: state to set DagRun to. If set to False, dagrun state will not
            be changed.
        :param dry_run: Find the tasks to clear but don't clear them.
        :type dry_run: bool
        :param session: The sqlalchemy session to use
        :type session: sqlalchemy.orm.session.Session
        :param dag_bag: The DagBag used to find the dags subdags (Optional)
        :type dag_bag: airflow.models.dagbag.DagBag
        :param exclude_task_ids: A set of ``task_id`` that should not be cleared
        :type exclude_task_ids: frozenset
        """
        if get_tis:
            warnings.warn(
                "Passing `get_tis` to dag.clear() is deprecated. Use `dry_run` parameter instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            dry_run = True

        if recursion_depth:
            warnings.warn(
                "Passing `recursion_depth` to dag.clear() is deprecated.",
                DeprecationWarning,
                stacklevel=2,
            )
        if max_recursion_depth:
            warnings.warn(
                "Passing `max_recursion_depth` to dag.clear() is deprecated.",
                DeprecationWarning,
                stacklevel=2,
            )

        state = []
        if only_failed:
            state += [State.FAILED, State.UPSTREAM_FAILED]
            only_failed = None
        if only_running:
            # Yes, having `+=` doesn't make sense, but this was the existing behaviour
            state += [State.RUNNING]
            only_running = None

        tis = self._get_task_instances(
            task_ids=task_ids,
            start_date=start_date,
            end_date=end_date,
            run_id=None,
            state=state,
            include_subdags=include_subdags,
            include_parentdag=include_parentdag,
            include_dependent_dags=include_subdags,  # compat, yes this is not a typo
            as_pk_tuple=False,
            session=session,
            dag_bag=dag_bag,
            exclude_task_ids=exclude_task_ids,
        )

        if dry_run:
            return tis

        tis = tis.all()

        count = len(tis)
        do_it = True
        if count == 0:
            return 0
        if confirm_prompt:
            ti_list = "\n".join(str(t) for t in tis)
            question = (
                "You are about to delete these {count} tasks:\n{ti_list}\n\nAre you sure? (yes/no): "
            ).format(count=count, ti_list=ti_list)
            do_it = utils.helpers.ask_yesno(question)

        if do_it:
            clear_task_instances(
                tis,
                session,
                dag=self,
                dag_run_state=dag_run_state,
            )
        else:
            count = 0
            print("Cancelled, nothing was cleared.")

        session.flush()
        return count
    @classmethod
    def clear_dags(
        cls,
        dags,
        start_date=None,
        end_date=None,
        only_failed=False,
        only_running=False,
        confirm_prompt=False,
        include_subdags=True,
        include_parentdag=False,
        dag_run_state=DagRunState.QUEUED,
        dry_run=False,
    ):
        all_tis = []
        for dag in dags:
            tis = dag.clear(
                start_date=start_date,
                end_date=end_date,
                only_failed=only_failed,
                only_running=only_running,
                confirm_prompt=False,
                include_subdags=include_subdags,
                include_parentdag=include_parentdag,
                dag_run_state=dag_run_state,
                dry_run=True,
            )
            all_tis.extend(tis)

        if dry_run:
            return all_tis

        count = len(all_tis)
        do_it = True
        if count == 0:
            print("Nothing to clear.")
            return 0
        if confirm_prompt:
            ti_list = "\n".join(str(t) for t in all_tis)
            question = f"You are about to delete these {count} tasks:\n{ti_list}\n\nAre you sure? (yes/no): "
            do_it = utils.helpers.ask_yesno(question)

        if do_it:
            for dag in dags:
                dag.clear(
                    start_date=start_date,
                    end_date=end_date,
                    only_failed=only_failed,
                    only_running=only_running,
                    confirm_prompt=False,
                    include_subdags=include_subdags,
                    dag_run_state=dag_run_state,
                    dry_run=False,
                )
        else:
            count = 0
            print("Cancelled, nothing was cleared.")
        return count
    def __deepcopy__(self, memo):
        # Switcharoo to go around deepcopying objects coming through the
        # backdoor
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            if k not in ('user_defined_macros', 'user_defined_filters', 'params', '_log'):
                setattr(result, k, copy.deepcopy(v, memo))

        result.user_defined_macros = self.user_defined_macros
        result.user_defined_filters = self.user_defined_filters
        result.params = self.params
        if hasattr(self, '_log'):
            result._log = self._log
        return result
    def sub_dag(self, *args, **kwargs):
        """This method is deprecated in favor of partial_subset."""
        warnings.warn(
            "This method is deprecated and will be removed in a future version. Please use partial_subset",
            DeprecationWarning,
            stacklevel=2,
        )
        return self.partial_subset(*args, **kwargs)
    def partial_subset(
        self,
        task_ids_or_regex: Union[str, RePatternType, Iterable[str]],
        include_downstream=False,
        include_upstream=True,
        include_direct_upstream=False,
    ):
        """
        Returns a subset of the current dag as a deep copy, based on a regex that
        should match one or many tasks, and includes upstream and downstream
        neighbours based on the flags passed.

        :param task_ids_or_regex: Either a list of task_ids, or a regex to
            match against task ids (as a string, or compiled regex pattern).
        :type task_ids_or_regex: [str] or str or re.Pattern
        :param include_downstream: Include all downstream tasks of matched
            tasks, in addition to matched tasks.
        :param include_upstream: Include all upstream tasks of matched tasks,
            in addition to matched tasks.
        """
        # deep-copying self.task_dict and self._task_group takes a long time, and we don't want all
        # the tasks anyway, so we copy the tasks manually later
        memo = {id(self.task_dict): None, id(self._task_group): None}
        dag = copy.deepcopy(self, memo)  # type: ignore

        if isinstance(task_ids_or_regex, (str, RePatternType)):
            matched_tasks = [t for t in self.tasks if re.findall(task_ids_or_regex, t.task_id)]
        else:
            matched_tasks = [t for t in self.tasks if t.task_id in task_ids_or_regex]

        also_include = []
        for t in matched_tasks:
            if include_downstream:
                also_include += t.get_flat_relatives(upstream=False)
            if include_upstream:
                also_include += t.get_flat_relatives(upstream=True)
            elif include_direct_upstream:
                also_include += t.upstream_list

        # Compiling the unique list of tasks that made the cut
        # Make sure to not recursively deepcopy the dag while copying the task
        dag.task_dict = {
            t.task_id: copy.deepcopy(t, {id(t.dag): dag})  # type: ignore
            for t in matched_tasks + also_include
        }

        def filter_task_group(group, parent_group):
            """Exclude tasks not included in the subdag from the given TaskGroup."""
            copied = copy.copy(group)
            copied.used_group_ids = set(copied.used_group_ids)
            copied._parent_group = parent_group

            copied.children = {}

            for child in group.children.values():
                if isinstance(child, BaseOperator):
                    if child.task_id in dag.task_dict:
                        copied.children[child.task_id] = dag.task_dict[child.task_id]
                    else:
                        copied.used_group_ids.discard(child.task_id)
                else:
                    filtered_child = filter_task_group(child, copied)

                    # Only include this child TaskGroup if it is non-empty.
                    if filtered_child.children:
                        copied.children[child.group_id] = filtered_child

            return copied

        dag._task_group = filter_task_group(self._task_group, None)

        # Removing upstream/downstream references to tasks and TaskGroups that did not make
        # the cut.
        subdag_task_groups = dag.task_group.get_task_group_dict()
        for group in subdag_task_groups.values():
            group.upstream_group_ids = group.upstream_group_ids.intersection(subdag_task_groups.keys())
            group.downstream_group_ids = group.downstream_group_ids.intersection(subdag_task_groups.keys())
            group.upstream_task_ids = group.upstream_task_ids.intersection(dag.task_dict.keys())
            group.downstream_task_ids = group.downstream_task_ids.intersection(dag.task_dict.keys())

        for t in dag.tasks:
            # Removing upstream/downstream references to tasks that did not
            # make the cut
            t._upstream_task_ids = t.upstream_task_ids.intersection(dag.task_dict.keys())
            t._downstream_task_ids = t.downstream_task_ids.intersection(dag.task_dict.keys())

        if len(dag.tasks) < len(self.tasks):
            dag.partial = True

        return dag
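    # Illustrative sketch: carving out a sub-DAG around one assumed task id.
    #
    #   subset = dag.partial_subset(
    #       task_ids_or_regex=["transform"],
    #       include_upstream=True,
    #       include_downstream=False,
    #   )
    #   # `subset` contains "transform" plus everything upstream of it.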
    def get_task(self, task_id: str, include_subdags: bool = False) -> BaseOperator:
        if task_id in self.task_dict:
            return self.task_dict[task_id]
        if include_subdags:
            for dag in self.subdags:
                if task_id in dag.task_dict:
                    return dag.task_dict[task_id]
        raise TaskNotFound(f"Task {task_id} not found")
    def tree_view(self) -> None:
        """Print an ASCII tree representation of the DAG."""

        def get_downstream(task, level=0):
            print((" " * level * 4) + str(task))
            level += 1
            for t in task.downstream_list:
                get_downstream(t, level)

        for t in self.roots:
            get_downstream(t)
    def add_task(self, task):
        """
        Add a task to the DAG

        :param task: the task you want to add
        :type task: airflow.models.baseoperator.BaseOperator
        """
        if not self.start_date and not task.start_date:
            raise AirflowException("Task is missing the start_date parameter")
        # if the task has no start date, assign it the same as the DAG
        elif not task.start_date:
            task.start_date = self.start_date
        # otherwise, the task will start on the later of its own start date and
        # the DAG's start date
        elif self.start_date:
            task.start_date = max(task.start_date, self.start_date)

        # if the task has no end date, assign it the same as the dag
        if not task.end_date:
            task.end_date = self.end_date
        # otherwise, the task will end on the earlier of its own end date and
        # the DAG's end date
        elif task.end_date and self.end_date:
            task.end_date = min(task.end_date, self.end_date)

        if (
            task.task_id in self.task_dict and self.task_dict[task.task_id] is not task
        ) or task.task_id in self._task_group.used_group_ids:
            raise DuplicateTaskIdFound(f"Task id '{task.task_id}' has already been added to the DAG")
        else:
            self.task_dict[task.task_id] = task
            task.dag = self
            # Add task_id to used_group_ids to prevent group_id and task_id collisions.
            self._task_group.used_group_ids.add(task.task_id)

        self.task_count = len(self.task_dict)
    def add_tasks(self, tasks):
        """
        Add a list of tasks to the DAG

        :param tasks: a list of tasks you want to add
        :type tasks: list of tasks
        """
        for task in tasks:
            self.add_task(task)
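    # Sketch of the date reconciliation performed by add_task (ids and dates are
    # hypothetical; DummyOperator is the 2.x no-op operator):
    #
    #     from datetime import datetime
    #
    #     from airflow.models.dag import DAG
    #     from airflow.operators.dummy import DummyOperator
    #
    #     dag = DAG(dag_id="dates_demo", start_date=datetime(2021, 1, 1))
    #     task = DummyOperator(task_id="t1", dag=dag)  # assigning `dag` calls add_task()
    #     assert task.start_date == dag.start_date  # task inherited the DAG's start_date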
    def run(
        self,
        start_date=None,
        end_date=None,
        mark_success=False,
        local=False,
        executor=None,
        donot_pickle=conf.getboolean('core', 'donot_pickle'),
        ignore_task_deps=False,
        ignore_first_depends_on_past=True,
        pool=None,
        delay_on_limit_secs=1.0,
        verbose=False,
        conf=None,
        rerun_failed_tasks=False,
        run_backwards=False,
        run_at_least_once=False,
    ):
        """
        Runs the DAG.

        :param start_date: the start date of the range to run
        :type start_date: datetime.datetime
        :param end_date: the end date of the range to run
        :type end_date: datetime.datetime
        :param mark_success: True to mark jobs as succeeded without running them
        :type mark_success: bool
        :param local: True to run the tasks using the LocalExecutor
        :type local: bool
        :param executor: The executor instance to run the tasks
        :type executor: airflow.executor.base_executor.BaseExecutor
        :param donot_pickle: True to avoid pickling DAG object and send to workers
        :type donot_pickle: bool
        :param ignore_task_deps: True to skip upstream tasks
        :type ignore_task_deps: bool
        :param ignore_first_depends_on_past: True to ignore depends_on_past
            dependencies for the first set of tasks only
        :type ignore_first_depends_on_past: bool
        :param pool: Resource pool to use
        :type pool: str
        :param delay_on_limit_secs: Time in seconds to wait before next attempt to run
            dag run when max_active_runs limit has been reached
        :type delay_on_limit_secs: float
        :param verbose: Make logging output more verbose
        :type verbose: bool
        :param conf: user defined dictionary passed from CLI
        :type conf: dict
        :param rerun_failed_tasks: if set, previously failed task instances in the
            backfill range are cleared and re-run
        :type rerun_failed_tasks: bool
        :param run_backwards: if set, the backfill iterates from the most recent
            date in the range to the earliest
        :type run_backwards: bool
        :param run_at_least_once: If true, always run the DAG at least once even
            if no logical run exists within the time range.
        :type run_at_least_once: bool
        """
        from airflow.jobs.backfill_job import BackfillJob

        if not executor and local:
            from airflow.executors.local_executor import LocalExecutor

            executor = LocalExecutor()
        elif not executor:
            from airflow.executors.executor_loader import ExecutorLoader

            executor = ExecutorLoader.get_default_executor()
        job = BackfillJob(
            self,
            start_date=start_date,
            end_date=end_date,
            mark_success=mark_success,
            executor=executor,
            donot_pickle=donot_pickle,
            ignore_task_deps=ignore_task_deps,
            ignore_first_depends_on_past=ignore_first_depends_on_past,
            pool=pool,
            delay_on_limit_secs=delay_on_limit_secs,
            verbose=verbose,
            conf=conf,
            rerun_failed_tasks=rerun_failed_tasks,
            run_backwards=run_backwards,
            run_at_least_once=run_at_least_once,
        )
        job.run()
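    # Sketch: backfilling one week locally (assumes an initialized Airflow
    # metadata database; the `dag` object and the dates are hypothetical):
    #
    #     from datetime import datetime
    #
    #     dag.run(
    #         start_date=datetime(2021, 1, 1),
    #         end_date=datetime(2021, 1, 7),
    #         local=True,  # use LocalExecutor instead of the configured default
    #     )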
    def cli(self):
        """Exposes a CLI specific to this DAG"""
        check_cycle(self)

        from airflow.cli import cli_parser

        parser = cli_parser.get_parser(dag_parser=True)
        args = parser.parse_args()
        args.func(args, self)
    @provide_session
    def create_dagrun(
        self,
        state: DagRunState,
        execution_date: Optional[datetime] = None,
        run_id: Optional[str] = None,
        start_date: Optional[datetime] = None,
        external_trigger: Optional[bool] = False,
        conf: Optional[dict] = None,
        run_type: Optional[DagRunType] = None,
        session=None,
        dag_hash: Optional[str] = None,
        creating_job_id: Optional[int] = None,
        data_interval: Optional[Tuple[datetime, datetime]] = None,
    ):
        """
        Creates a dag run from this dag including the tasks associated with this dag.
        Returns the dag run.

        :param run_id: defines the run id for this dag run
        :type run_id: str
        :param run_type: type of DagRun
        :type run_type: airflow.utils.types.DagRunType
        :param execution_date: the execution date of this dag run
        :type execution_date: datetime.datetime
        :param state: the state of the dag run
        :type state: airflow.utils.state.DagRunState
        :param start_date: the date this dag run should be evaluated
        :type start_date: datetime
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param conf: Dict containing configuration/parameters to pass to the DAG
        :type conf: dict
        :param creating_job_id: id of the job creating this DagRun
        :type creating_job_id: int
        :param session: database session
        :type session: sqlalchemy.orm.session.Session
        :param dag_hash: Hash of Serialized DAG
        :type dag_hash: str
        :param data_interval: Data interval of the DagRun
        :type data_interval: tuple[datetime, datetime] | None
        """
        if run_id:  # Infer run_type from run_id if needed.
            if not isinstance(run_id, str):
                raise ValueError(f"`run_id` expected to be a str, got {type(run_id)}")
            if not run_type:
                run_type = DagRunType.from_run_id(run_id)
        elif run_type and execution_date is not None:  # Generate run_id from run_type and execution_date.
            if not isinstance(run_type, DagRunType):
                raise ValueError(f"`run_type` expected to be a DagRunType, got {type(run_type)}")
            run_id = DagRun.generate_run_id(run_type, execution_date)
        else:
            raise AirflowException(
                "Creating DagRun needs either `run_id` or both `run_type` and `execution_date`"
            )

        logical_date = timezone.coerce_datetime(execution_date)
        if data_interval is None and logical_date is not None:
            warnings.warn(
                "Calling `DAG.create_dagrun()` without an explicit data interval is deprecated",
                DeprecationWarning,
                stacklevel=3,
            )
            if run_type == DagRunType.MANUAL:
                data_interval = self.timetable.infer_manual_data_interval(run_after=logical_date)
            else:
                data_interval = self.infer_automated_data_interval(logical_date)

        # create a copy of params before validating
        copied_params = copy.deepcopy(self.params)
        copied_params.update(conf or {})
        copied_params.validate()

        run = DagRun(
            dag_id=self.dag_id,
            run_id=run_id,
            execution_date=logical_date,
            start_date=start_date,
            external_trigger=external_trigger,
            conf=conf,
            state=state,
            run_type=run_type,
            dag_hash=dag_hash,
            creating_job_id=creating_job_id,
            data_interval=data_interval,
        )
        session.add(run)
        session.flush()

        run.dag = self

        # create the associated task instances
        # state is None at the moment of creation
        run.verify_integrity(session=session)

        return run
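    # Sketch: creating a manual run with an explicit data interval, which avoids
    # the deprecation warning above (assumes an initialized metadata database;
    # the `dag` object and identifiers are hypothetical):
    #
    #     import pendulum
    #
    #     from airflow.utils.state import DagRunState
    #     from airflow.utils.types import DagRunType
    #
    #     when = pendulum.datetime(2021, 1, 1, tz="UTC")
    #     run = dag.create_dagrun(
    #         state=DagRunState.QUEUED,
    #         execution_date=when,
    #         run_type=DagRunType.MANUAL,
    #         external_trigger=True,
    #         data_interval=(when, when),
    #     )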
    @classmethod
    @provide_session
    def bulk_sync_to_db(cls, dags: Collection["DAG"], session=None):
        """This method is deprecated in favor of bulk_write_to_db"""
        warnings.warn(
            "This method is deprecated and will be removed in a future version. Please use bulk_write_to_db",
            DeprecationWarning,
            stacklevel=2,
        )
        return cls.bulk_write_to_db(dags, session)
    @classmethod
    @provide_session
    def bulk_write_to_db(cls, dags: Collection["DAG"], session=None):
        """
        Ensure the DagModel rows for the given dags are up-to-date in the dag table in the DB, including
        calculated fields.

        Note that this method can be called for both DAGs and SubDAGs. A SubDag is actually a SubDagOperator.

        :param dags: the DAG objects to save to the DB
        :type dags: List[airflow.models.dag.DAG]
        :return: None
        """
        if not dags:
            return

        log.info("Sync %s DAGs", len(dags))
        dag_by_ids = {dag.dag_id: dag for dag in dags}
        dag_ids = set(dag_by_ids.keys())
        query = (
            session.query(DagModel)
            .options(joinedload(DagModel.tags, innerjoin=False))
            .filter(DagModel.dag_id.in_(dag_ids))
        )
        orm_dags: List[DagModel] = with_row_locks(query, of=DagModel, session=session).all()

        existing_dag_ids = {orm_dag.dag_id for orm_dag in orm_dags}
        missing_dag_ids = dag_ids.difference(existing_dag_ids)

        for missing_dag_id in missing_dag_ids:
            orm_dag = DagModel(dag_id=missing_dag_id)
            dag = dag_by_ids[missing_dag_id]
            if dag.is_paused_upon_creation is not None:
                orm_dag.is_paused = dag.is_paused_upon_creation
            orm_dag.tags = []
            log.info("Creating ORM DAG for %s", dag.dag_id)
            session.add(orm_dag)
            orm_dags.append(orm_dag)

        # Get the latest dag run for each existing dag as a single query (avoid n+1 query)
        most_recent_subq = (
            session.query(DagRun.dag_id, func.max(DagRun.execution_date).label("max_execution_date"))
            .filter(
                DagRun.dag_id.in_(existing_dag_ids),
                or_(DagRun.run_type == DagRunType.BACKFILL_JOB, DagRun.run_type == DagRunType.SCHEDULED),
            )
            .group_by(DagRun.dag_id)
            .subquery()
        )
        most_recent_runs_iter = session.query(DagRun).filter(
            DagRun.dag_id == most_recent_subq.c.dag_id,
            DagRun.execution_date == most_recent_subq.c.max_execution_date,
        )
        most_recent_runs = {run.dag_id: run for run in most_recent_runs_iter}

        # Get number of active dagruns for all dags we are processing as a single query.
        num_active_runs = DagRun.active_runs_of_dags(dag_ids=existing_dag_ids, session=session)

        filelocs = []

        for orm_dag in sorted(orm_dags, key=lambda d: d.dag_id):
            dag = dag_by_ids[orm_dag.dag_id]
            filelocs.append(dag.fileloc)
            if dag.is_subdag:
                orm_dag.is_subdag = True
                orm_dag.fileloc = dag.parent_dag.fileloc  # type: ignore
                orm_dag.root_dag_id = dag.parent_dag.dag_id  # type: ignore
                orm_dag.owners = dag.parent_dag.owner  # type: ignore
            else:
                orm_dag.is_subdag = False
                orm_dag.fileloc = dag.fileloc
                orm_dag.owners = dag.owner
            orm_dag.is_active = True
            orm_dag.last_parsed_time = timezone.utcnow()
            orm_dag.default_view = dag.default_view
            orm_dag.description = dag.description
            orm_dag.schedule_interval = dag.schedule_interval
            orm_dag.max_active_tasks = dag.max_active_tasks
            orm_dag.max_active_runs = dag.max_active_runs
            orm_dag.has_task_concurrency_limits = any(t.max_active_tis_per_dag is not None for t in dag.tasks)

            run: Optional[DagRun] = most_recent_runs.get(dag.dag_id)
            if run is None:
                data_interval = None
            else:
                data_interval = dag.get_run_data_interval(run)
            if num_active_runs.get(dag.dag_id, 0) >= orm_dag.max_active_runs:
                orm_dag.next_dagrun_create_after = None
            else:
                orm_dag.calculate_dagrun_date_fields(dag, data_interval)

            for orm_tag in list(orm_dag.tags):
                if orm_tag.name not in set(dag.tags):
                    session.delete(orm_tag)
                    orm_dag.tags.remove(orm_tag)
            if dag.tags:
                orm_tag_names = [t.name for t in orm_dag.tags]
                for dag_tag in set(dag.tags):
                    if dag_tag not in orm_tag_names:
                        dag_tag_orm = DagTag(name=dag_tag, dag_id=dag.dag_id)
                        orm_dag.tags.append(dag_tag_orm)
                        session.add(dag_tag_orm)

        DagCode.bulk_sync_to_db(filelocs, session=session)

        # Issue SQL/finish "Unit of Work", but let @provide_session commit (or, if
        # passed a session, let the caller decide when to commit)
        session.flush()

        for dag in dags:
            cls.bulk_write_to_db(dag.subdags, session=session)
    @provide_session
    def sync_to_db(self, session=None):
        """
        Save attributes about this DAG to the DB. Note that this method
        can be called for both DAGs and SubDAGs. A SubDag is actually a
        SubDagOperator.

        :return: None
        """
        self.bulk_write_to_db([self], session)
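    # Sketch: persisting one DAG's ORM row; prefer bulk_write_to_db() when syncing
    # many DAGs at once (assumes an initialized metadata database and a `dag`
    # object built elsewhere):
    #
    #     dag.sync_to_db()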
    def get_default_view(self):
        """This is only here for backward-compatible Jinja2 templates"""
        if self.default_view is None:
            return conf.get('webserver', 'dag_default_view').lower()
        else:
            return self.default_view
    @staticmethod
    @provide_session
    def deactivate_unknown_dags(active_dag_ids, session=None):
        """
        Given a list of known DAGs, deactivate any other DAGs that are
        marked as active in the ORM

        :param active_dag_ids: list of DAG IDs that are active
        :type active_dag_ids: list[str]
        :return: None
        """
        if len(active_dag_ids) == 0:
            return
        for dag in session.query(DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all():
            dag.is_active = False
            session.merge(dag)
        session.commit()
    @staticmethod
    @provide_session
    def deactivate_stale_dags(expiration_date, session=None):
        """
        Deactivate any DAGs that were last touched by the scheduler before
        the expiration date. These DAGs were likely deleted.

        :param expiration_date: set inactive DAGs that were touched before this time
        :type expiration_date: datetime
        :return: None
        """
        for dag in (
            session.query(DagModel)
            .filter(DagModel.last_parsed_time < expiration_date, DagModel.is_active)
            .all()
        ):
            log.info(
                "Deactivating DAG ID %s since it was last touched by the scheduler at %s",
                dag.dag_id,
                dag.last_parsed_time.isoformat(),
            )
            dag.is_active = False
            session.merge(dag)
            session.commit()
    @staticmethod
    @provide_session
    def get_num_task_instances(dag_id, task_ids=None, states=None, session=None):
        """
        Returns the number of task instances in the given DAG.

        :param session: ORM session
        :param dag_id: ID of the DAG to get the task concurrency of
        :type dag_id: str
        :param task_ids: A list of valid task IDs for the given DAG
        :type task_ids: list[str]
        :param states: A list of states to filter by if supplied
        :type states: list[state]
        :return: The number of matching task instances
        :rtype: int
        """
        qry = session.query(func.count(TaskInstance.task_id)).filter(
            TaskInstance.dag_id == dag_id,
        )
        if task_ids:
            qry = qry.filter(
                TaskInstance.task_id.in_(task_ids),
            )

        if states:
            if None in states:
                if all(x is None for x in states):
                    qry = qry.filter(TaskInstance.state.is_(None))
                else:
                    not_none_states = [state for state in states if state]
                    qry = qry.filter(
                        or_(TaskInstance.state.in_(not_none_states), TaskInstance.state.is_(None))
                    )
            else:
                qry = qry.filter(TaskInstance.state.in_(states))
        return qry.scalar()
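    # Sketch: counting currently running task instances of a hypothetical DAG
    # (assumes an initialized metadata database):
    #
    #     from airflow.utils.state import State
    #
    #     running = DAG.get_num_task_instances("example_dag", states=[State.RUNNING])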
    @classmethod
    def get_serialized_fields(cls):
        """Stringified DAGs and operators contain exactly these fields."""
        if not cls.__serialized_fields:
            cls.__serialized_fields = frozenset(vars(DAG(dag_id='test')).keys()) - {
                'parent_dag',
                '_old_context_manager_dags',
                'safe_dag_id',
                'last_loaded',
                'user_defined_filters',
                'user_defined_macros',
                'partial',
                'params',
                '_pickle_id',
                '_log',
                'is_subdag',
                'task_dict',
                'template_searchpath',
                'sla_miss_callback',
                'on_success_callback',
                'on_failure_callback',
                'template_undefined',
                'jinja_environment_kwargs',
                # has_on_*_callback are only stored if the value is True, as the default is False
                'has_on_success_callback',
                'has_on_failure_callback',
            }
        return cls.__serialized_fields
    def get_edge_info(self, upstream_task_id: str, downstream_task_id: str) -> EdgeInfoType:
        """
        Returns edge information for the given pair of tasks if present, and
        an empty dict if there is no information.
        """
        # Note - older serialized DAGs may not have edge_info being a dict at all
        if self.edge_info:
            return self.edge_info.get(upstream_task_id, {}).get(downstream_task_id, {})
        else:
            return {}
    def set_edge_info(self, upstream_task_id: str, downstream_task_id: str, info: EdgeInfoType):
        """
        Sets the given edge information on the DAG. Note that this will overwrite,
        rather than merge with, existing info.
        """
        self.edge_info.setdefault(upstream_task_id, {})[downstream_task_id] = info
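    # Sketch: labelling the edge between two hypothetical tasks; the "label" key
    # of EdgeInfoType is what the graph view renders on the edge:
    #
    #     dag.set_edge_info("extract", "load", {"label": "rows > 0"})
    #     assert dag.get_edge_info("extract", "load") == {"label": "rows > 0"}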
    def validate_schedule_and_params(self):
        """
        Validates the DAG and raises an exception if it has a non-None
        schedule_interval while any of its Params neither has a default value
        nor allows ``null`` in its ``schema['type']`` list.
        """
        if not self.timetable.can_run:
            return

        for k, v in self.params.items():
            # As type can be an array, we would check if `null` is an allowed type or not
            if not v.has_value and ("type" not in v.schema or "null" not in v.schema["type"]):
                raise AirflowException(
                    "DAG Schedule must be None, if there are any required params without default values"
                )
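# Sketch: a scheduled DAG with a required Param (no default, `null` not allowed)
# fails the validation above (names and dates are hypothetical):
#
#     from datetime import datetime
#
#     from airflow.models.dag import DAG
#     from airflow.models.param import Param
#
#     bad = DAG(
#         dag_id="needs_value",
#         start_date=datetime(2021, 1, 1),
#         schedule_interval="@daily",
#         params={"target": Param(type="string")},  # required: no default
#     )
#     bad.validate_schedule_and_params()  # raises AirflowException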
class DagTag(Base):
    """A tag name per dag, to allow quick filtering in the DAG view."""

    __tablename__ = "dag_tag"
class DagModel(Base):
    """Table containing DAG properties"""

    __tablename__ = "dag"

    # The location of the file containing the DAG object
    # Note: Do not depend on fileloc pointing to a file; in the case of a
    # packaged DAG, it will point to the subpath of the DAG within the
    # associated zip.
    fileloc = Column(String(2000))
    def __init__(self, concurrency=None, **kwargs):
        super().__init__(**kwargs)
        if self.max_active_tasks is None:
            if concurrency:
                warnings.warn(
                    "The 'DagModel.concurrency' parameter is deprecated. Please use 'max_active_tasks'.",
                    DeprecationWarning,
                    stacklevel=2,
                )
                self.max_active_tasks = concurrency
            else:
                self.max_active_tasks = conf.getint('core', 'max_active_tasks_per_dag')

        if self.max_active_runs is None:
            self.max_active_runs = conf.getint('core', 'max_active_runs_per_dag')

        if self.has_task_concurrency_limits is None:
            # Be safe -- this will be updated later once the DAG is parsed
            self.has_task_concurrency_limits = True
    @staticmethod
    @provide_session
    def get_paused_dag_ids(dag_ids: List[str], session: Session = None) -> Set[str]:
        """
        Given a list of dag_ids, get a set of paused dag ids

        :param dag_ids: List of dag ids
        :param session: ORM Session
        :return: Paused dag_ids
        """
        paused_dag_ids = (
            session.query(DagModel.dag_id)
            .filter(DagModel.is_paused == expression.true())
            .filter(DagModel.dag_id.in_(dag_ids))
            .all()
        )

        paused_dag_ids = {paused_dag_id for paused_dag_id, in paused_dag_ids}
        return paused_dag_ids
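    # Sketch (assumes an initialized metadata database; dag ids are hypothetical):
    #
    #     paused = DagModel.get_paused_dag_ids(["example_dag", "other_dag"])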
    def get_default_view(self) -> str:
        """
        Get the default DAG view; returns the default config value if this
        DagModel does not have a value
        """
        # This is for backwards-compatibility with old dags that don't have None as default_view
        return self.default_view or conf.get('webserver', 'dag_default_view').lower()
    @property
    def relative_fileloc(self) -> Optional[pathlib.Path]:
        """File location of the importable dag 'file' relative to the configured DAGs folder."""
        if self.fileloc is None:
            return None
        path = pathlib.Path(self.fileloc)
        try:
            return path.relative_to(settings.DAGS_FOLDER)
        except ValueError:
            # Not relative to DAGS_FOLDER.
            return path
    @provide_session
    def set_is_paused(self, is_paused: bool, including_subdags: bool = True, session=None) -> None:
        """
        Pause/Un-pause a DAG.

        :param is_paused: Is the DAG paused
        :param including_subdags: whether to include the DAG's subdags
        :param session: session
        """
        filter_query = [
            DagModel.dag_id == self.dag_id,
        ]
        if including_subdags:
            filter_query.append(DagModel.root_dag_id == self.dag_id)
        session.query(DagModel).filter(or_(*filter_query)).update(
            {DagModel.is_paused: is_paused}, synchronize_session='fetch'
        )
        session.commit()
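    # Sketch: pausing a hypothetical DAG (and its subdags) through its ORM row
    # (assumes an initialized metadata database):
    #
    #     model = DagModel.get_current("example_dag")
    #     if model is not None:
    #         model.set_is_paused(is_paused=True)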
    @classmethod
    @provide_session
    def deactivate_deleted_dags(cls, alive_dag_filelocs: List[str], session=None):
        """
        Set ``is_active=False`` on the DAGs for which the DAG files have been removed.

        :param alive_dag_filelocs: file paths of alive DAGs
        :param session: ORM Session
        """
        log.debug("Deactivating DAGs (for which DAG files are deleted) from %s table ", cls.__tablename__)

        for dag_model in session.query(cls).all():
            if dag_model.fileloc is None:
                continue
            if correct_maybe_zipped(dag_model.fileloc) not in alive_dag_filelocs:
                dag_model.is_active = False
    @classmethod
    def dags_needing_dagruns(cls, session: Session):
        """
        Return (and lock) a list of Dag objects that are due to create a new DagRun.

        This will return a resultset of rows that is row-level-locked with a "SELECT ... FOR UPDATE" query;
        you should ensure that any scheduling decisions are made in a single transaction -- as soon as the
        transaction is committed it will be unlocked.
        """

        # TODO[HA]: Bake this query, it is run _A lot_
        # We limit so that _one_ scheduler doesn't try to do all the creation
        # of dag runs
        query = (
            session.query(cls)
            .filter(
                cls.is_paused == expression.false(),
                cls.is_active == expression.true(),
                cls.next_dagrun_create_after <= func.now(),
            )
            .order_by(cls.next_dagrun_create_after)
            .limit(cls.NUM_DAGS_PER_DAGRUN_QUERY)
        )

        return with_row_locks(query, of=cls, session=session, **skip_locked(session=session))
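    # Sketch of how a scheduler-style loop might consume the locked query
    # (simplified; assumes an initialized metadata database):
    #
    #     from airflow.utils.session import create_session
    #
    #     with create_session() as session:
    #         for dag_model in DagModel.dags_needing_dagruns(session).all():
    #             print(dag_model.dag_id, dag_model.next_dagrun_create_after)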
    def calculate_dagrun_date_fields(
        self,
        dag: DAG,
        most_recent_dag_run: Union[None, datetime, DataInterval],
    ) -> None:
        """
        Calculate ``next_dagrun`` and ``next_dagrun_create_after``

        :param dag: The DAG object
        :param most_recent_dag_run: The data interval (or, deprecated, the execution
            date) of the most recent run of this dag, or None if not yet scheduled.
        """
        if isinstance(most_recent_dag_run, datetime):
            warnings.warn(
                "Passing a datetime to `DagModel.calculate_dagrun_date_fields` is deprecated. "
                "Provide a data interval instead.",
                DeprecationWarning,
                stacklevel=2,
            )
            most_recent_data_interval = dag.infer_automated_data_interval(most_recent_dag_run)
        else:
            most_recent_data_interval = most_recent_dag_run
        next_dagrun_info = dag.next_dagrun_info(most_recent_data_interval)
        if next_dagrun_info is None:
            self.next_dagrun_data_interval = self.next_dagrun = self.next_dagrun_create_after = None
        else:
            self.next_dagrun_data_interval = next_dagrun_info.data_interval
            self.next_dagrun = next_dagrun_info.logical_date
            self.next_dagrun_create_after = next_dagrun_info.run_after

        log.info("Setting next_dagrun for %s to %s", dag.dag_id, self.next_dagrun)
def dag(*dag_args, **dag_kwargs):
    """
    Python dag decorator. Wraps a function into an Airflow DAG.
    Accepts the same kwargs as the DAG constructor. Can be used to parametrize DAGs.

    :param dag_args: Arguments for DAG object
    :type dag_args: Any
    :param dag_kwargs: Kwargs for DAG object.
    :type dag_kwargs: Any
    """

    def wrapper(f: Callable):
        # Get dag initializer signature and bind it to validate that dag_args, and dag_kwargs are correct
        dag_sig = signature(DAG.__init__)
        dag_bound_args = dag_sig.bind_partial(*dag_args, **dag_kwargs)

        @functools.wraps(f)
        def factory(*args, **kwargs):
            # Generate signature for decorated function and bind the arguments when called
            # we do this to extract parameters so we can annotate them on the DAG object.
            # In addition, this fails if we are missing any args/kwargs with TypeError as expected.
            f_sig = signature(f).bind(*args, **kwargs)
            # Apply defaults to capture default values if set.
            f_sig.apply_defaults()

            # Set function name as dag_id if not set
            dag_id = dag_bound_args.arguments.get('dag_id', f.__name__)
            dag_bound_args.arguments['dag_id'] = dag_id

            # Initialize DAG with bound arguments
            with DAG(*dag_bound_args.args, **dag_bound_args.kwargs) as dag_obj:
                # Set DAG documentation from function documentation.
                if f.__doc__:
                    dag_obj.doc_md = f.__doc__

                # Generate DAGParam for each function arg/kwarg and replace it for calling the function.
                # All args/kwargs for function will be DAGParam object and replaced on execution time.
                f_kwargs = {}
                for name, value in f_sig.arguments.items():
                    f_kwargs[name] = dag_obj.param(name, value)

                # set file location to caller source path
                back = sys._getframe().f_back
                dag_obj.fileloc = back.f_code.co_filename if back else ""

                # Invoke function to create operators in the DAG scope.
                f(**f_kwargs)

            # Return dag object such that it's accessible in Globals.
            return dag_obj

        return factory

    return wrapper
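# Sketch: the decorator in use; the function name becomes the dag_id and the
# docstring becomes doc_md (ids and dates are hypothetical):
#
#     from datetime import datetime
#
#     from airflow.models.dag import dag
#     from airflow.operators.python import PythonOperator
#
#     @dag(start_date=datetime(2021, 1, 1), schedule_interval="@daily")
#     def my_pipeline():
#         """Markdown shown on the DAG's detail page."""
#         PythonOperator(task_id="hello", python_callable=lambda: print("hello"))
#
#     my_dag = my_pipeline()  # calling the factory builds and returns the DAG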
class DagContext:
    """
    DAG context is used to keep the current DAG when DAG is used as ContextManager.

    You can use DAG as context:

    .. code-block:: python

        with DAG(
            dag_id="example_dag",
            default_args=default_args,
            schedule_interval="0 0 * * *",
            dagrun_timeout=timedelta(minutes=60),
        ) as dag:
            ...

    If you do this, the context stores the DAG, and whenever a new task is created,
    it will use the stored DAG as its parent DAG.

    """

    _context_managed_dag: Optional[DAG] = None
    _previous_context_managed_dags: List[DAG] = []

    @classmethod