Source code for airflow.providers.google.cloud.transfers.gcs_to_bigquery
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Google Cloud Storage to BigQuery operator."""
from __future__ import annotations

import json
from typing import TYPE_CHECKING, Any, Sequence

from google.api_core.exceptions import BadRequest, Conflict
from google.cloud.bigquery import (
    DEFAULT_RETRY,
    CopyJob,
    ExternalConfig,
    ExtractJob,
    LoadJob,
    QueryJob,
    SchemaField,
    UnknownJob,
)
from google.cloud.bigquery.table import EncryptionConfiguration, Table, TableReference

from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook, BigQueryJob
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.google.cloud.links.bigquery import BigQueryTableLink
from airflow.providers.google.cloud.triggers.bigquery import BigQueryInsertJobTrigger
from airflow.utils.helpers import merge_dicts

if TYPE_CHECKING:
    from google.api_core.retry import Retry

    from airflow.utils.context import Context

ALLOWED_FORMATS = [
    "CSV",
    "NEWLINE_DELIMITED_JSON",
    "AVRO",
    "GOOGLE_SHEETS",
    "DATASTORE_BACKUP",
    "PARQUET",
]
class GCSToBigQueryOperator(BaseOperator):
    """
    Loads files from Google Cloud Storage into BigQuery.

    The schema to be used for the BigQuery table may be specified in one of
    two ways. You may either directly pass the schema fields in, or you may
    point the operator to a Google Cloud Storage object name. The object in
    Google Cloud Storage must be a JSON file with the schema fields in it.

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:GCSToBigQueryOperator`

    :param bucket: The bucket to load from. (templated)
    :param source_objects: String or List of Google Cloud Storage URIs to load from. (templated)
        If source_format is 'DATASTORE_BACKUP', the list must only contain a single URI.
    :param destination_project_dataset_table: The dotted
        ``(<project>.|<project>:)<dataset>.<table>`` BigQuery table to load data into.
        If ``<project>`` is not included, project will be the project defined in
        the connection json. (templated)
    :param schema_fields: If set, the schema field list as defined here:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
        Should not be set when source_format is 'DATASTORE_BACKUP'.
        Parameter must be defined if 'schema_object' is null and autodetect is False.
    :param schema_object: If set, a GCS object path pointing to a .json file that
        contains the schema for the table. (templated)
        Parameter must be defined if 'schema_fields' is null and autodetect is False.
    :param schema_object_bucket: [Optional] If set, the GCS bucket where the schema object
        template is stored. (templated) (Default: the value of ``bucket``)
    :param source_format: File format to export.
    :param compression: [Optional] The compression type of the data source.
        Possible values include GZIP and NONE.
        The default value is NONE.
        This setting is ignored for Google Cloud Bigtable, Google Cloud Datastore backups
        and Avro formats.
    :param create_disposition: The create disposition if the table doesn't exist.
    :param skip_leading_rows: The number of rows at the top of a CSV file that BigQuery
        will skip when loading the data.
        When autodetect is on, the behavior is the following:
        skip_leading_rows unspecified - Autodetect tries to detect headers in the first row.
        If they are not detected, the row is read as data. Otherwise, data is read starting
        from the second row.
        skip_leading_rows is 0 - Instructs autodetect that there are no headers and data
        should be read starting from the first row.
        skip_leading_rows = N > 0 - Autodetect skips N-1 rows and tries to detect headers
        in row N. If headers are not detected, row N is just skipped. Otherwise, row N is
        used to extract column names for the detected schema.
        Default value set to None so that autodetect option can detect schema fields.
    :param write_disposition: The write disposition if the table already exists.
    :param field_delimiter: The delimiter to use when loading from a CSV.
    :param max_bad_records: The maximum number of bad records that BigQuery can
        ignore when running the job.
    :param quote_character: The value that is used to quote data sections in a CSV file.
    :param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
        extra values that are not represented in the table schema.
        If true, the extra values are ignored. If false, records with extra columns
        are treated as bad records, and if there are too many bad records, an
        invalid error is returned in the job result.
    :param allow_quoted_newlines: Whether to allow quoted newlines (true) or not (false).
    :param allow_jagged_rows: Accept rows that are missing trailing optional columns.
        The missing values are treated as nulls. If false, records with missing trailing
        columns are treated as bad records, and if there are too many bad records, an
        invalid error is returned in the job result. Only applicable to CSV, ignored
        for other formats.
    :param encoding: The character encoding of the data. See:
        https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).csvOptions.encoding
        https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.csvOptions.encoding
    :param max_id_key: If set, the name of a column in the BigQuery table
        that's to be loaded. This will be used to select the MAX value from
        BigQuery after the load occurs. The results will be returned by the
        execute() command, which in turn gets stored in XCom for future
        operators to use. This can be helpful with incremental loads--during
        future executions, you can pick up from the max ID.
    :param schema_update_options: Allows the schema of the destination
        table to be updated as a side effect of the load job.
    :param src_fmt_configs: configure optional fields specific to the source format
    :param external_table: Flag to specify if the destination table should be
        a BigQuery external table. Default value is False.
    :param time_partitioning: configure optional time partitioning fields i.e.
        partition by field, type and expiration as per API specifications.
        Note that 'field' is not available in concurrency with
        dataset.table$partition.
    :param cluster_fields: Request that the result of this load be stored sorted
        by one or more columns. BigQuery supports clustering for both partitioned and
        non-partitioned tables. The order of columns given determines the sort order.
        Not applicable for external tables.
    :param autodetect: [Optional] Indicates if we should automatically infer the
        options and schema for CSV and JSON sources. (Default: ``True``).
        Parameter must be set to True if 'schema_fields' and 'schema_object' are undefined.
        It is suggested to set to True if the table is created outside of Airflow.
        If autodetect is None and no schema is provided (neither via schema_fields
        nor a schema_object), assume the table already exists.
    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).

        .. code-block:: python

            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key",
            }
    :param location: [Optional] The geographic location of the job. Required except for US and EU.
        See details at https://cloud.google.com/bigquery/docs/locations#specifying_your_location
    :param impersonation_chain: Optional service account to impersonate using short-term
        credentials, or chained list of accounts required to get the access_token
        of the last account in the list, which will be impersonated in the request.
        If set as a string, the account must grant the originating account
        the Service Account Token Creator IAM role.
        If set as a sequence, the identities from the list must grant
        Service Account Token Creator IAM role to the directly preceding identity, with first
        account from the list granting this role to the originating account (templated).
    :param labels: [Optional] Labels for the BigQuery table.
    :param description: [Optional] Description for the BigQuery table. This will only be used if the
        destination table is newly created.
        If the table already exists and a value different than the current description is provided,
        the job will fail.
    :param deferrable: Run operator in the deferrable mode
    """
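
    # A minimal usage sketch (bucket, object, and table names are hypothetical;
    # only ``bucket``, ``source_objects``, and
    # ``destination_project_dataset_table`` are required):
    #
    #     load_csv = GCSToBigQueryOperator(
    #         task_id="gcs_to_bq_example",
    #         bucket="my-bucket",
    #         source_objects=["data/sales_*.csv"],
    #         destination_project_dataset_table="my_dataset.sales",
    #         write_disposition="WRITE_TRUNCATE",
    #     )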
    template_fields: Sequence[str] = (
        "bucket",
        "source_objects",
        "schema_object",
        "schema_object_bucket",
        "destination_project_dataset_table",
        "impersonation_chain",
    )
    operator_extra_links = (BigQueryTableLink(),)

    def __init__(
        self,
        *,
        bucket,
        source_objects,
        destination_project_dataset_table,
        schema_fields=None,
        schema_object=None,
        schema_object_bucket=None,
        source_format="CSV",
        compression="NONE",
        create_disposition="CREATE_IF_NEEDED",
        skip_leading_rows=None,
        write_disposition="WRITE_EMPTY",
        field_delimiter=",",
        max_bad_records=0,
        quote_character=None,
        ignore_unknown_values=False,
        allow_quoted_newlines=False,
        allow_jagged_rows=False,
        encoding="UTF-8",
        max_id_key=None,
        gcp_conn_id="google_cloud_default",
        schema_update_options=(),
        src_fmt_configs=None,
        external_table=False,
        time_partitioning=None,
        cluster_fields=None,
        autodetect=True,
        encryption_configuration=None,
        location=None,
        impersonation_chain: str | Sequence[str] | None = None,
        labels=None,
        description=None,
        deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
        result_retry: Retry = DEFAULT_RETRY,
        result_timeout: float | None = None,
        cancel_on_kill: bool = True,
        job_id: str | None = None,
        force_rerun: bool = True,
        reattach_states: set[str] | None = None,
        project_id: str | None = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hook: BigQueryHook | None = None
        self.configuration: dict[str, Any] = {}

        # GCS config
        if src_fmt_configs is None:
            src_fmt_configs = {}
        if time_partitioning is None:
            time_partitioning = {}
        self.bucket = bucket
        self.source_objects = source_objects
        self.schema_object = schema_object

        if schema_object_bucket is None:
            schema_object_bucket = bucket
        self.schema_object_bucket = schema_object_bucket

        # BQ config
        self.destination_project_dataset_table = destination_project_dataset_table
        self.project_id = project_id
        self.schema_fields = schema_fields
        if source_format.upper() not in ALLOWED_FORMATS:
            raise ValueError(
                f"{source_format} is not a valid source format. "
                f"Please use one of the following types: {ALLOWED_FORMATS}."
            )
        else:
            self.source_format = source_format.upper()
        self.compression = compression
        self.create_disposition = create_disposition
        self.skip_leading_rows = skip_leading_rows
        self.write_disposition = write_disposition
        self.field_delimiter = field_delimiter
        self.max_bad_records = max_bad_records
        self.quote_character = quote_character
        self.ignore_unknown_values = ignore_unknown_values
        self.allow_quoted_newlines = allow_quoted_newlines
        self.allow_jagged_rows = allow_jagged_rows
        self.external_table = external_table
        self.encoding = encoding
        self.max_id_key = max_id_key
        self.gcp_conn_id = gcp_conn_id
        self.schema_update_options = schema_update_options
        self.src_fmt_configs = src_fmt_configs
        self.time_partitioning = time_partitioning
        self.cluster_fields = cluster_fields
        self.autodetect = autodetect
        self.encryption_configuration = encryption_configuration
        self.location = location
        self.impersonation_chain = impersonation_chain
        self.labels = labels
        self.description = description
        self.job_id = job_id
        self.deferrable = deferrable
        self.result_retry = result_retry
        self.result_timeout = result_timeout
        self.force_rerun = force_rerun
        self.reattach_states: set[str] = reattach_states or set()
        self.cancel_on_kill = cancel_on_kill
        self.source_uris: list[str] = []

    def _submit_job(
        self,
        hook: BigQueryHook,
        job_id: str,
    ) -> BigQueryJob:
        # Submit a new job without waiting for it to complete.
        return hook.insert_job(
            configuration=self.configuration,
            project_id=self.project_id or hook.project_id,
            location=self.location,
            job_id=job_id,
            timeout=self.result_timeout,
            retry=self.result_retry,
            nowait=True,
        )

    @staticmethod
    def _handle_job_error(job: BigQueryJob | UnknownJob) -> None:
        if job.error_result:
            raise AirflowException(f"BigQuery job {job.job_id} failed: {job.error_result}")
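
    # For illustration: the ``configuration`` dict submitted above follows the
    # BigQuery Jobs API representation. A minimal load configuration might look
    # like this (values are hypothetical; ``_use_existing_table`` below shows
    # how the operator actually builds it):
    #
    #     {
    #         "load": {
    #             "sourceUris": ["gs://my-bucket/data.csv"],
    #             "destinationTable": {
    #                 "projectId": "my-project",
    #                 "datasetId": "my_dataset",
    #                 "tableId": "my_table",
    #             },
    #             "sourceFormat": "CSV",
    #             "writeDisposition": "WRITE_EMPTY",
    #         }
    #     }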
    def execute(self, context: Context):
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        self.hook = hook
        self.source_format = self.source_format.upper()

        job_id = self.hook.generate_job_id(
            job_id=self.job_id,
            dag_id=self.dag_id,
            task_id=self.task_id,
            logical_date=context["logical_date"],
            configuration=self.configuration,
            force_rerun=self.force_rerun,
        )

        self.source_objects = (
            self.source_objects if isinstance(self.source_objects, list) else [self.source_objects]
        )
        self.source_uris = [f"gs://{self.bucket}/{source_object}" for source_object in self.source_objects]

        if not self.schema_fields:
            # Check for self.autodetect explicitly False. self.autodetect equal to None
            # entails we do not want to detect schema from files. Instead, it means we
            # rely on an already existing table's schema
            if not self.schema_object and self.autodetect is False:
                raise AirflowException(
                    "Table schema was not found. Neither schema object nor schema fields were specified"
                )

            if self.schema_object and self.source_format != "DATASTORE_BACKUP":
                gcs_hook = GCSHook(
                    gcp_conn_id=self.gcp_conn_id,
                    impersonation_chain=self.impersonation_chain,
                )
                self.schema_fields = json.loads(
                    gcs_hook.download(self.schema_object_bucket, self.schema_object).decode("utf-8")
                )
                self.log.info("Loaded fields from schema object: %s", self.schema_fields)
            else:
                self.schema_fields = None

        if self.external_table:
            self.log.info("Creating a new BigQuery table for storing data...")
            table_obj_api_repr = self._create_external_table()

            BigQueryTableLink.persist(
                context=context,
                task_instance=self,
                dataset_id=table_obj_api_repr["tableReference"]["datasetId"],
                project_id=table_obj_api_repr["tableReference"]["projectId"],
                table_id=table_obj_api_repr["tableReference"]["tableId"],
            )
            if self.max_id_key:
                max_id = self._find_max_value_in_column()
                return max_id
        else:
            self.log.info("Using existing BigQuery table for storing data...")
            self.configuration = self._use_existing_table()

            try:
                self.log.info("Executing: %s", self.configuration)
                job: BigQueryJob | UnknownJob = self._submit_job(self.hook, job_id)
            except Conflict:
                # If the job already exists retrieve it
                job = self.hook.get_job(
                    project_id=self.project_id or self.hook.project_id,
                    location=self.location,
                    job_id=job_id,
                )
                if job.state in self.reattach_states:
                    # We are reattaching to a job
                    job._begin()
                    self._handle_job_error(job)
                else:
                    # Same job configuration, so we need force_rerun
                    raise AirflowException(
                        f"Job with id: {job_id} already exists and is in {job.state} state. If you "
                        f"want to force rerun it consider setting `force_rerun=True`. "
                        f"Or, if you want to reattach in this scenario add {job.state} to `reattach_states`"
                    )

            job_types = {
                LoadJob._JOB_TYPE: ["sourceTable", "destinationTable"],
                CopyJob._JOB_TYPE: ["sourceTable", "destinationTable"],
                ExtractJob._JOB_TYPE: ["sourceTable"],
                QueryJob._JOB_TYPE: ["destinationTable"],
            }

            if self.hook.project_id:
                for job_type, tables_prop in job_types.items():
                    job_configuration = job.to_api_repr()["configuration"]
                    if job_type in job_configuration:
                        for table_prop in tables_prop:
                            if table_prop in job_configuration[job_type]:
                                table = job_configuration[job_type][table_prop]
                                persist_kwargs = {
                                    "context": context,
                                    "task_instance": self,
                                    "table_id": table,
                                }
                                if not isinstance(table, str):
                                    persist_kwargs["table_id"] = table["tableId"]
                                    persist_kwargs["dataset_id"] = table["datasetId"]
                                    persist_kwargs["project_id"] = table["projectId"]
                                BigQueryTableLink.persist(**persist_kwargs)

            self.job_id = job.job_id
            context["ti"].xcom_push(key="job_id", value=self.job_id)
            if self.deferrable:
                self.defer(
                    timeout=self.execution_timeout,
                    trigger=BigQueryInsertJobTrigger(
                        conn_id=self.gcp_conn_id,
                        job_id=self.job_id,
                        project_id=self.project_id or self.hook.project_id,
                    ),
                    method_name="execute_complete",
                )
            else:
                job.result(timeout=self.result_timeout, retry=self.result_retry)
                self._handle_job_error(job)
                if self.max_id_key:
                    return self._find_max_value_in_column()
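
    # For illustration: ``reattach_states`` lets a retried task pick up a job
    # that is still pending or running instead of failing on the Conflict
    # handled above. A hedged sketch (state names follow BigQuery job states;
    # other arguments elided):
    #
    #     GCSToBigQueryOperator(
    #         task_id="load",
    #         ...,
    #         force_rerun=False,
    #         reattach_states={"PENDING", "RUNNING"},
    #     )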
    def execute_complete(self, context: Context, event: dict[str, Any]):
        """
        Callback for when the trigger fires - returns immediately.

        Relies on trigger to throw an exception, otherwise it assumes execution was successful.
        """
        if event["status"] == "error":
            raise AirflowException(event["message"])
        self.log.info(
            "%s completed with response %s",
            self.task_id,
            event["message"],
        )
        return self._find_max_value_in_column()
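
    # For illustration: the trigger's event payload is a plain dict; this
    # callback only inspects the two keys used above, e.g. (values are
    # hypothetical):
    #
    #     {"status": "success", "message": "Job completed"}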
    def _find_max_value_in_column(self):
        hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        if self.max_id_key:
            self.log.info(f"Selecting the MAX value from BigQuery column '{self.max_id_key}'...")
            select_command = (
                f"SELECT MAX({self.max_id_key}) AS max_value "
                f"FROM {self.destination_project_dataset_table}"
            )

            self.configuration = {
                "query": {
                    "query": select_command,
                    "useLegacySql": False,
                    "schemaUpdateOptions": [],
                }
            }
            try:
                job_id = hook.insert_job(
                    configuration=self.configuration, project_id=self.project_id or hook.project_id
                )
                rows = list(hook.get_job(job_id=job_id, location=self.location).result())
            except BadRequest as e:
                if "Unrecognized name:" in e.message:
                    raise AirflowException(
                        f"Could not determine MAX value in column {self.max_id_key} "
                        f"since the default value of 'string_field_n' was set by BQ"
                    )
                else:
                    raise AirflowException(e.message)
            if rows:
                for row in rows:
                    max_id = row[0] if row[0] else 0
                    self.log.info(
                        "Loaded BQ data with MAX value of column %s.%s: %s",
                        self.destination_project_dataset_table,
                        self.max_id_key,
                        max_id,
                    )
                return str(max_id)
            else:
                raise RuntimeError(f"The {select_command} returned no rows!")

    def _create_external_table(self):
        external_config_api_repr = {
            "autodetect": self.autodetect,
            "sourceFormat": self.source_format,
            "sourceUris": self.source_uris,
            "compression": self.compression.upper(),
            "ignoreUnknownValues": self.ignore_unknown_values,
        }
        # if following fields are not specified in src_fmt_configs,
        # honor the top-level params for backward-compatibility
        backward_compatibility_configs = {
            "skipLeadingRows": self.skip_leading_rows,
            "fieldDelimiter": self.field_delimiter,
            "quote": self.quote_character,
            "allowQuotedNewlines": self.allow_quoted_newlines,
            "allowJaggedRows": self.allow_jagged_rows,
            "encoding": self.encoding,
        }
        src_fmt_to_param_mapping = {"CSV": "csvOptions", "GOOGLE_SHEETS": "googleSheetsOptions"}
        src_fmt_to_configs_mapping = {
            "csvOptions": [
                "allowJaggedRows",
                "allowQuotedNewlines",
                "fieldDelimiter",
                "skipLeadingRows",
                "quote",
                "encoding",
                "preserveAsciiControlCharacters",
            ],
            "googleSheetsOptions": ["skipLeadingRows"],
        }
        if self.source_format in src_fmt_to_param_mapping:
            valid_configs = src_fmt_to_configs_mapping[src_fmt_to_param_mapping[self.source_format]]
            self.src_fmt_configs = self._validate_src_fmt_configs(
                self.source_format, self.src_fmt_configs, valid_configs, backward_compatibility_configs
            )
            external_config_api_repr[src_fmt_to_param_mapping[self.source_format]] = self.src_fmt_configs

        external_config = ExternalConfig.from_api_repr(external_config_api_repr)
        if self.schema_fields:
            external_config.schema = [SchemaField.from_api_repr(f) for f in self.schema_fields]
        if self.max_bad_records:
            external_config.max_bad_records = self.max_bad_records

        # build table definition
        table = Table(
            table_ref=TableReference.from_string(self.destination_project_dataset_table, self.hook.project_id)
        )
        table.external_data_configuration = external_config
        if self.labels:
            table.labels = self.labels
        if self.description:
            table.description = self.description
        if self.encryption_configuration:
            table.encryption_configuration = EncryptionConfiguration.from_api_repr(
                self.encryption_configuration
            )
        table_obj_api_repr = table.to_api_repr()

        self.log.info("Creating external table: %s", self.destination_project_dataset_table)
        self.hook.create_empty_table(
            table_resource=table_obj_api_repr,
            project_id=self.project_id or self.hook.project_id,
            location=self.location,
            exists_ok=True,
        )
        self.log.info("External table created successfully: %s", self.destination_project_dataset_table)
        return table_obj_api_repr

    def _use_existing_table(self):
        destination_project_id, destination_dataset, destination_table = self.hook.split_tablename(
            table_input=self.destination_project_dataset_table,
            default_project_id=self.hook.project_id,
            var_name="destination_project_dataset_table",
        )

        # bigquery also allows you to define how you want a table's schema to change
        # as a side effect of a load
        # for more details:
        # https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
        allowed_schema_update_options = ["ALLOW_FIELD_ADDITION", "ALLOW_FIELD_RELAXATION"]
        if not set(allowed_schema_update_options).issuperset(set(self.schema_update_options)):
            raise ValueError(
                f"{self.schema_update_options} contains invalid schema update options. "
                f"Please only use one or more of the following options: {allowed_schema_update_options}"
            )

        self.configuration = {
            "load": {
                "autodetect": self.autodetect,
                "createDisposition": self.create_disposition,
                "destinationTable": {
                    "projectId": destination_project_id,
                    "datasetId": destination_dataset,
                    "tableId": destination_table,
                },
                "sourceFormat": self.source_format,
                "sourceUris": self.source_uris,
                "writeDisposition": self.write_disposition,
                "ignoreUnknownValues": self.ignore_unknown_values,
            },
        }
        self.time_partitioning = self._cleanse_time_partitioning(
            self.destination_project_dataset_table, self.time_partitioning
        )
        if self.time_partitioning:
            self.configuration["load"].update({"timePartitioning": self.time_partitioning})

        if self.cluster_fields:
            self.configuration["load"].update({"clustering": {"fields": self.cluster_fields}})

        if self.schema_fields:
            self.configuration["load"]["schema"] = {"fields": self.schema_fields}

        if self.schema_update_options:
            if self.write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
                raise ValueError(
                    "schema_update_options is only "
                    "allowed if write_disposition is "
                    "'WRITE_APPEND' or 'WRITE_TRUNCATE'."
                )
            else:
                # To provide backward compatibility
                self.schema_update_options = list(self.schema_update_options or [])
                self.log.info("Adding experimental 'schemaUpdateOptions': %s", self.schema_update_options)
                self.configuration["load"]["schemaUpdateOptions"] = self.schema_update_options

        if self.max_bad_records:
            self.configuration["load"]["maxBadRecords"] = self.max_bad_records

        if self.encryption_configuration:
            self.configuration["load"]["destinationEncryptionConfiguration"] = self.encryption_configuration

        if self.labels or self.description:
            self.configuration["load"].update({"destinationTableProperties": {}})
            if self.labels:
                self.configuration["load"]["destinationTableProperties"]["labels"] = self.labels
            if self.description:
                self.configuration["load"]["destinationTableProperties"]["description"] = self.description

        src_fmt_to_configs_mapping = {
            "CSV": [
                "allowJaggedRows",
                "allowQuotedNewlines",
                "autodetect",
                "fieldDelimiter",
                "skipLeadingRows",
                "ignoreUnknownValues",
                "nullMarker",
                "quote",
                "encoding",
            ],
            "DATASTORE_BACKUP": ["projectionFields"],
            "NEWLINE_DELIMITED_JSON": ["autodetect", "ignoreUnknownValues"],
            "PARQUET": ["autodetect", "ignoreUnknownValues"],
            "AVRO": ["useAvroLogicalTypes"],
        }

        valid_configs = src_fmt_to_configs_mapping[self.source_format]

        # if following fields are not specified in src_fmt_configs,
        # honor the top-level params for backward-compatibility
        backward_compatibility_configs = {
            "skipLeadingRows": self.skip_leading_rows,
            "fieldDelimiter": self.field_delimiter,
            "ignoreUnknownValues": self.ignore_unknown_values,
            "quote": self.quote_character,
            "allowQuotedNewlines": self.allow_quoted_newlines,
            "encoding": self.encoding,
        }

        self.src_fmt_configs = self._validate_src_fmt_configs(
            self.source_format, self.src_fmt_configs, valid_configs, backward_compatibility_configs
        )

        self.configuration["load"].update(self.src_fmt_configs)

        if self.allow_jagged_rows:
            self.configuration["load"]["allowJaggedRows"] = self.allow_jagged_rows
        return self.configuration

    def _validate_src_fmt_configs(
        self,
        source_format: str,
        src_fmt_configs: dict,
        valid_configs: list[str],
        backward_compatibility_configs: dict | None = None,
    ) -> dict:
        """
        Validates the given src_fmt_configs against a valid configuration for the source format.

        Adds the backward compatibility config to the src_fmt_configs.

        :param source_format: File format to export.
        :param src_fmt_configs: Configure optional fields specific to the source format.
        :param valid_configs: Valid configuration specific to the source format
        :param backward_compatibility_configs: The top-level params for backward-compatibility
        """
        if backward_compatibility_configs is None:
            backward_compatibility_configs = {}

        for k, v in backward_compatibility_configs.items():
            if k not in src_fmt_configs and k in valid_configs:
                src_fmt_configs[k] = v

        for k, v in src_fmt_configs.items():
            if k not in valid_configs:
                raise ValueError(f"{k} is not a valid src_fmt_configs for type {source_format}.")

        return src_fmt_configs

    def _cleanse_time_partitioning(
        self, destination_dataset_table: str | None, time_partitioning_in: dict | None
    ) -> dict:
        # if it is a partitioned table ($ is in the table name) add partition load option
        if time_partitioning_in is None:
            time_partitioning_in = {}

        time_partitioning_out = {}
        if destination_dataset_table and "$" in destination_dataset_table:
            time_partitioning_out["type"] = "DAY"
        time_partitioning_out.update(time_partitioning_in)
        return time_partitioning_out
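
    # For illustration: loading into a partition decorator such as
    # ``my_dataset.my_table$20240101`` (hypothetical name) makes
    # ``_cleanse_time_partitioning`` default the partitioning type to DAY,
    # while explicit ``time_partitioning`` keys still win:
    #
    #     _cleanse_time_partitioning("proj.ds.tbl$20240101", None)
    #     # -> {"type": "DAY"}
    #     _cleanse_time_partitioning("proj.ds.tbl$20240101", {"type": "HOUR"})
    #     # -> {"type": "HOUR"}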
    def on_kill(self) -> None:
        if self.job_id and self.cancel_on_kill:
            self.hook.cancel_job(job_id=self.job_id, location=self.location)  # type: ignore[union-attr]
        else:
            self.log.info("Skipping cancellation of job: %s.%s", self.location, self.job_id)
    def get_openlineage_facets_on_complete(self, task_instance):
        """Implementing on_complete as we will include final BQ job id."""
        from pathlib import Path

        from openlineage.client.facet import (
            ExternalQueryRunFacet,
            SymlinksDatasetFacet,
            SymlinksDatasetFacetIdentifiers,
        )
        from openlineage.client.run import Dataset

        from airflow.providers.google.cloud.hooks.gcs import _parse_gcs_url
        from airflow.providers.google.cloud.utils.openlineage import (
            get_facets_from_bq_table,
            get_identity_column_lineage_facet,
        )
        from airflow.providers.openlineage.extractors import OperatorLineage

        table_object = self.hook.get_client(self.hook.project_id).get_table(
            self.destination_project_dataset_table
        )

        output_dataset_facets = get_facets_from_bq_table(table_object)

        input_dataset_facets = {
            "schema": output_dataset_facets["schema"],
        }
        input_datasets = []
        for uri in sorted(self.source_uris):
            bucket, blob = _parse_gcs_url(uri)
            additional_facets = {}

            if "*" in blob:
                # If wildcard ("*") is used in gcs path, we want the name of dataset to be directory name,
                # but we create a symlink to the full object path with wildcard.
                additional_facets = {
                    "symlink": SymlinksDatasetFacet(
                        identifiers=[
                            SymlinksDatasetFacetIdentifiers(
                                namespace=f"gs://{bucket}", name=blob, type="file"
                            )
                        ]
                    ),
                }
                blob = Path(blob).parent.as_posix()
                if blob == ".":
                    # blob path does not have leading slash, but we need root dataset name to be "/"
                    blob = "/"

            dataset = Dataset(
                namespace=f"gs://{bucket}",
                name=blob,
                facets=merge_dicts(input_dataset_facets, additional_facets),
            )
            input_datasets.append(dataset)

        output_dataset_facets["columnLineage"] = get_identity_column_lineage_facet(
            field_names=[field.name for field in table_object.schema], input_datasets=input_datasets
        )

        output_dataset = Dataset(
            namespace="bigquery",
            name=str(table_object.reference),
            facets=output_dataset_facets,
        )

        run_facets = {}
        if self.job_id:
            run_facets = {
                "externalQuery": ExternalQueryRunFacet(externalQueryId=self.job_id, source="bigquery"),
            }

        return OperatorLineage(inputs=input_datasets, outputs=[output_dataset], run_facets=run_facets)