#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Serialized DAG table in database."""
from __future__ import annotations

import hashlib
import logging
import zlib
from datetime import datetime, timedelta
from typing import Any

import sqlalchemy_jsonfield
from sqlalchemy import BigInteger, Column, Index, LargeBinary, String, and_, or_
from sqlalchemy.orm import Session, backref, foreign, relationship
from sqlalchemy.sql.expression import func, literal

from airflow.models.base import ID_LEN, Base
from airflow.models.dag import DAG, DagModel
from airflow.models.dagcode import DagCode
from airflow.models.dagrun import DagRun
from airflow.serialization.serialized_objects import DagDependency, SerializedDAG
from airflow.settings import COMPRESS_SERIALIZED_DAGS, MIN_SERIALIZED_DAG_UPDATE_INTERVAL, json
from airflow.utils import timezone
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime

# Module-level logger used by the methods below.
log = logging.getLogger(__name__)
class SerializedDagModel(Base):
    """A table for serialized DAGs.

    The serialized_dag table is a snapshot of DAG files synchronized by the scheduler.
    This feature is controlled by:

    * ``[core] min_serialized_dag_update_interval = 30`` (s):
      serialized DAGs are updated in the DB when a file gets processed by the scheduler;
      to reduce the DB write rate, there is a minimal interval between updates of a serialized DAG.

    * ``[scheduler] dag_dir_list_interval = 300`` (s):
      interval at which serialized DAGs are deleted from the DB when their files are deleted;
      a smaller interval, such as 60 seconds, is suggested.

    * ``[core] compress_serialized_dags``:
      whether to compress the DAG data written to the database.

    The webserver loads DAGs from this table instead of importing them from files;
    reading from the database is lightweight compared to importing from files,
    which solves the webserver scalability issue.
    """
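    # NOTE: a minimal sketch of the table metadata for this model, inferred from the imports
    # above and the attributes used below; exact column types, lengths, and relationship
    # options are assumptions, not an authoritative schema.
    __tablename__ = "serialized_dag"

    dag_id = Column(String(ID_LEN), primary_key=True)
    fileloc = Column(String(2000), nullable=False)
    # fileloc can exceed index length limits, so a hash of it is indexed instead.
    fileloc_hash = Column(BigInteger(), nullable=False)
    _data = Column("data", sqlalchemy_jsonfield.JSONField(json=json), nullable=True)
    _data_compressed = Column("data_compressed", LargeBinary, nullable=True)
    last_updated = Column(UtcDateTime, nullable=False)
    dag_hash = Column(String(32), nullable=False)
    processor_subdir = Column(String(2000), nullable=True)

    __table_args__ = (Index("idx_fileloc_hash", fileloc_hash, unique=False),)

    dag_runs = relationship(
        DagRun,
        primaryjoin=dag_id == foreign(DagRun.dag_id),  # type: ignore
        backref=backref("serialized_dag", uselist=False, innerjoin=True),
    )

    dag_model = relationship(
        DagModel,
        primaryjoin=dag_id == DagModel.dag_id,  # type: ignore
        foreign_keys=dag_id,
        uselist=False,
        innerjoin=True,
        backref=backref("serialized_dag", uselist=False, innerjoin=True),
    )

    # Whether operator extra links are loaded when deserializing (see the ``dag`` property).
    load_op_links = True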
    def __init__(self, dag: DAG, processor_subdir: str | None = None):
        self.dag_id = dag.dag_id
        self.fileloc = dag.fileloc
        self.fileloc_hash = DagCode.dag_fileloc_hash(self.fileloc)
        self.last_updated = timezone.utcnow()
        self.processor_subdir = processor_subdir

        dag_data = SerializedDAG.to_dict(dag)
        dag_data_json = json.dumps(dag_data, sort_keys=True).encode("utf-8")

        self.dag_hash = hashlib.md5(dag_data_json).hexdigest()

        if COMPRESS_SERIALIZED_DAGS:
            self._data = None
            self._data_compressed = zlib.compress(dag_data_json)
        else:
            self._data = dag_data
            self._data_compressed = None

        # Serves as a cache so there is no need to decompress and load when accessing the
        # data field while COMPRESS_SERIALIZED_DAGS is True.
        self.__data_cache = dag_data
    @classmethod
    @provide_session
    def write_dag(
        cls,
        dag: DAG,
        min_update_interval: int | None = None,
        processor_subdir: str | None = None,
        session: Session = None,
    ) -> bool:
        """Serializes a DAG and writes it into the database.

        If the record already exists, this checks whether the serialized DAG has changed;
        if so, the record is updated, otherwise it is left unchanged.

        :param dag: a DAG to be written into database
        :param min_update_interval: minimal interval in seconds to update serialized DAG
        :param session: ORM Session

        :returns: Boolean indicating if the DAG was written to the DB
        """
        # Checks if (Current Time - Time when the DAG was written to DB) < min_update_interval
        # If Yes, does nothing
        # If No or the DAG does not exist, updates / writes Serialized DAG to DB
        if min_update_interval is not None:
            if (
                session.query(literal(True))
                .filter(
                    and_(
                        cls.dag_id == dag.dag_id,
                        (timezone.utcnow() - timedelta(seconds=min_update_interval)) < cls.last_updated,
                    )
                )
                .first()
                is not None
            ):
                # TODO: .first() is not None can be changed to .scalar() once we update to sqlalchemy 1.4+
                # as the associated sqlalchemy bug for MySQL was fixed
                # related issue : https://github.com/sqlalchemy/sqlalchemy/issues/5481
                return False

        log.debug("Checking if DAG (%s) changed", dag.dag_id)
        new_serialized_dag = cls(dag, processor_subdir)
        serialized_dag_db = (
            session.query(cls.dag_hash, cls.processor_subdir).filter(cls.dag_id == dag.dag_id).first()
        )

        if (
            serialized_dag_db is not None
            and serialized_dag_db.dag_hash == new_serialized_dag.dag_hash
            and serialized_dag_db.processor_subdir == new_serialized_dag.processor_subdir
        ):
            log.debug("Serialized DAG (%s) is unchanged. Skipping writing to DB", dag.dag_id)
            return False

        log.debug("Writing Serialized DAG: %s to the DB", dag.dag_id)
        session.merge(new_serialized_dag)
        log.debug("DAG: %s written to the DB", dag.dag_id)
        return True
    @classmethod
    @provide_session
    def read_all_dags(cls, session: Session = None) -> dict[str, SerializedDAG]:
        """Reads all DAGs in the serialized_dag table.

        :param session: ORM Session
        :returns: a dict of DAGs read from database
        """
        serialized_dags = session.query(cls)

        dags = {}
        for row in serialized_dags:
            log.debug("Deserializing DAG: %s", row.dag_id)
            dag = row.dag

            # Coherence check
            if dag.dag_id == row.dag_id:
                dags[row.dag_id] = dag
            else:
                log.warning(
                    "dag_id Mismatch in DB: Row with dag_id '%s' has Serialised DAG with '%s' dag_id",
                    row.dag_id,
                    dag.dag_id,
                )
        return dags
    @property
    def data(self):
        # Use __data_cache to avoid repeated decompression and JSON loading.
        if not hasattr(self, "__data_cache") or self.__data_cache is None:
            if self._data_compressed:
                self.__data_cache = json.loads(zlib.decompress(self._data_compressed))
            else:
                self.__data_cache = self._data

        return self.__data_cache
    @property
    def dag(self):
        """The DAG deserialized from the ``data`` column."""
        SerializedDAG._load_operator_extra_links = self.load_op_links

        if isinstance(self.data, dict):
            dag = SerializedDAG.from_dict(self.data)  # type: Any
        else:
            dag = SerializedDAG.from_json(self.data)
        return dag
    @classmethod
    @provide_session
    def remove_dag(cls, dag_id: str, session: Session = None):
        """Deletes a DAG with the given dag_id.

        :param dag_id: dag_id to be deleted
        :param session: ORM Session
        """
        session.execute(cls.__table__.delete().where(cls.dag_id == dag_id))
    @classmethod
    @provide_session
    def remove_deleted_dags(
        cls,
        alive_dag_filelocs: list[str],
        processor_subdir: str | None = None,
        session=None,
    ):
        """Deletes DAGs not included in alive_dag_filelocs.

        :param alive_dag_filelocs: file paths of alive DAGs
        :param session: ORM Session
        """
        alive_fileloc_hashes = [DagCode.dag_fileloc_hash(fileloc) for fileloc in alive_dag_filelocs]

        log.debug(
            "Deleting Serialized DAGs (for which DAG files are deleted) from %s table ", cls.__tablename__
        )

        session.execute(
            cls.__table__.delete().where(
                and_(
                    cls.fileloc_hash.notin_(alive_fileloc_hashes),
                    cls.fileloc.notin_(alive_dag_filelocs),
                    or_(
                        # Use .is_(None) so SQLAlchemy renders "IS NULL" instead of evaluating
                        # a plain Python "is None" comparison to False.
                        cls.processor_subdir.is_(None),
                        cls.processor_subdir == processor_subdir,
                    ),
                )
            )
        )

    @classmethod
    @provide_session
    def has_dag(cls, dag_id: str, session: Session = None) -> bool:
        """Checks whether a DAG exists in the serialized_dag table.

        :param dag_id: the DAG to check
        :param session: ORM Session
        """
        return session.query(literal(True)).filter(cls.dag_id == dag_id).first() is not None
    @classmethod
    @provide_session
    def get(cls, dag_id: str, session: Session = None) -> SerializedDagModel | None:
        """
        Get the SerializedDAG for the given dag ID.

        It will cope with being passed the ID of a subdag by looking up the
        root dag_id from the DAG table.

        :param dag_id: the DAG to fetch
        :param session: ORM Session
        """
        row = session.query(cls).filter(cls.dag_id == dag_id).one_or_none()
        if row:
            return row

        # If we didn't find a matching DAG id then ask the DAG table to find
        # out the root dag
        root_dag_id = session.query(DagModel.root_dag_id).filter(DagModel.dag_id == dag_id).scalar()

        return session.query(cls).filter(cls.dag_id == root_dag_id).one_or_none()
    @staticmethod
    @provide_session
    def bulk_sync_to_db(
        dags: list[DAG],
        processor_subdir: str | None = None,
        session: Session = None,
    ):
        """
        Saves DAGs as Serialized DAG objects in the database. Each
        DAG is saved in a separate database query.

        :param dags: the DAG objects to save to the DB
        :param session: ORM Session
        :return: None
        """
        for dag in dags:
            if not dag.is_subdag:
                SerializedDagModel.write_dag(
                    dag=dag,
                    min_update_interval=MIN_SERIALIZED_DAG_UPDATE_INTERVAL,
                    processor_subdir=processor_subdir,
                    session=session,
                )

    @classmethod
    @provide_session
    def get_last_updated_datetime(cls, dag_id: str, session: Session = None) -> datetime | None:
        """
        Get the date when the serialized DAG associated with the given DAG was last updated
        in the serialized_dag table.

        :param dag_id: DAG ID
        :param session: ORM Session
        """
        return session.query(cls.last_updated).filter(cls.dag_id == dag_id).scalar()
    @classmethod
    @provide_session
    def get_max_last_updated_datetime(cls, session: Session = None) -> datetime | None:
        """
        Get the maximum date when any DAG was last updated in the serialized_dag table.

        :param session: ORM Session
        """
        return session.query(func.max(cls.last_updated)).scalar()
    @classmethod
    @provide_session
    def get_latest_version_hash(cls, dag_id: str, session: Session = None) -> str | None:
        """
        Get the latest DAG version for a given DAG ID.

        :param dag_id: DAG ID
        :param session: ORM Session
        :return: DAG Hash, or None if the DAG is not found
        :rtype: str | None
        """
        return session.query(cls.dag_hash).filter(cls.dag_id == dag_id).scalar()
    @classmethod
    @provide_session
    def get_dag_dependencies(cls, session: Session = None) -> dict[str, list[DagDependency]]:
        """
        Get the dependencies between DAGs.

        :param session: ORM Session
        """
        if session.bind.dialect.name in ["sqlite", "mysql"]:
            query = session.query(cls.dag_id, func.json_extract(cls._data, "$.dag.dag_dependencies"))
            iterator = ((dag_id, json.loads(deps_data) if deps_data else []) for dag_id, deps_data in query)
        elif session.bind.dialect.name == "mssql":
            query = session.query(cls.dag_id, func.json_query(cls._data, "$.dag.dag_dependencies"))
            iterator = ((dag_id, json.loads(deps_data) if deps_data else []) for dag_id, deps_data in query)
        else:
            iterator = session.query(
                cls.dag_id, func.json_extract_path(cls._data, "dag", "dag_dependencies")
            )

        return {dag_id: [DagDependency(**d) for d in (deps_data or [])] for dag_id, deps_data in iterator}
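
# A minimal usage sketch (not part of the model itself). It assumes an initialized Airflow
# metadata database and an already-parsed ``DAG`` object named ``example_dag`` (a hypothetical
# name for illustration); ``@provide_session`` supplies the session when none is passed.
#
#     from airflow.models.serialized_dag import SerializedDagModel
#
#     SerializedDagModel.write_dag(example_dag)             # insert or update the serialized row
#     if SerializedDagModel.has_dag(example_dag.dag_id):
#         record = SerializedDagModel.get(example_dag.dag_id)
#         dag = record.dag                                   # DAG deserialized from the data column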