Commit bb0336c

yoshi-automation authored and sofisl committed
feat(bigquery): update the API
#### bigquery:v2

The following keys were added:
- schemas.Dataset.properties.linkedDatasetMetadata.$ref
- schemas.Dataset.properties.linkedDatasetMetadata.description
- schemas.Dataset.properties.linkedDatasetMetadata.readOnly
- schemas.LinkedDatasetMetadata.description
- schemas.LinkedDatasetMetadata.id
- schemas.LinkedDatasetMetadata.properties.linkState.description
- schemas.LinkedDatasetMetadata.properties.linkState.enum
- schemas.LinkedDatasetMetadata.properties.linkState.enumDescriptions
- schemas.LinkedDatasetMetadata.properties.linkState.readOnly
- schemas.LinkedDatasetMetadata.properties.linkState.type
- schemas.LinkedDatasetMetadata.type
- schemas.PartitionedColumn.description
- schemas.PartitionedColumn.id
- schemas.PartitionedColumn.properties.field.description
- schemas.PartitionedColumn.properties.field.readOnly
- schemas.PartitionedColumn.properties.field.type
- schemas.PartitionedColumn.type
- schemas.PartitioningDefinition.description
- schemas.PartitioningDefinition.id
- schemas.PartitioningDefinition.properties.partitionedColumn.description
- schemas.PartitioningDefinition.properties.partitionedColumn.items.$ref
- schemas.PartitioningDefinition.properties.partitionedColumn.readOnly
- schemas.PartitioningDefinition.properties.partitionedColumn.type
- schemas.PartitioningDefinition.type
- schemas.Table.properties.partitionDefinition.$ref
- schemas.Table.properties.partitionDefinition.description
- schemas.Table.properties.partitionDefinition.readOnly

The following keys were changed:
- schemas.SparkStatistics.properties.gcsStagingBucket.description
- schemas.SparkStatistics.properties.kmsKeyName.description
- schemas.TableFieldSchema.properties.rangeElementType.properties.type.description
- schemas.TableFieldSchema.properties.type.description
1 parent 7912c1c commit bb0336c

File tree

2 files changed: +102 additions, -8 deletions


discovery/bigquery-v2.json

Lines changed: 64 additions & 5 deletions
@@ -1844,7 +1844,7 @@
        }
      }
    },
-  "revision": "20240211",
+  "revision": "20240229",
  "rootUrl": "https://bigquery.googleapis.com/",
  "schemas": {
    "AggregateClassificationMetrics": {
@@ -3140,6 +3140,11 @@
          "readOnly": true,
          "type": "string"
        },
+        "linkedDatasetMetadata": {
+          "$ref": "LinkedDatasetMetadata",
+          "description": "Output only. Metadata about the LinkedDataset. Filled out when the dataset type is LINKED.",
+          "readOnly": true
+        },
        "linkedDatasetSource": {
          "$ref": "LinkedDatasetSource",
          "description": "Optional. The source dataset reference when the dataset is of type LINKED. For all other dataset types it is not set. This field cannot be updated once it is set. Any attempt to update this field using Update and Patch API Operations will be ignored."
@@ -5776,6 +5781,28 @@
      "id": "JsonValue",
      "type": "any"
    },
+    "LinkedDatasetMetadata": {
+      "description": "Metadata about the Linked Dataset.",
+      "id": "LinkedDatasetMetadata",
+      "properties": {
+        "linkState": {
+          "description": "Output only. Specifies whether Linked Dataset is currently in a linked state or not.",
+          "enum": [
+            "LINK_STATE_UNSPECIFIED",
+            "LINKED",
+            "UNLINKED"
+          ],
+          "enumDescriptions": [
+            "The default value. Default to the LINKED state.",
+            "Normal Linked Dataset state. Data is queryable via the Linked Dataset.",
+            "Data publisher or owner has unlinked this Linked Dataset. It means you can no longer query or see the data in the Linked Dataset."
+          ],
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
    "LinkedDatasetSource": {
      "description": "A dataset source type which refers to another BigQuery dataset.",
      "id": "LinkedDatasetSource",
@@ -6407,6 +6434,33 @@
      },
      "type": "object"
    },
+    "PartitionedColumn": {
+      "description": "The partitioning column information.",
+      "id": "PartitionedColumn",
+      "properties": {
+        "field": {
+          "description": "Output only. The name of the partition column.",
+          "readOnly": true,
+          "type": "string"
+        }
+      },
+      "type": "object"
+    },
+    "PartitioningDefinition": {
+      "description": "The partitioning information, which includes managed table and external table partition information.",
+      "id": "PartitioningDefinition",
+      "properties": {
+        "partitionedColumn": {
+          "description": "Output only. Details about each partitioning column. BigQuery native tables only support 1 partitioning column. Other table types may support 0, 1 or more partitioning columns.",
+          "items": {
+            "$ref": "PartitionedColumn"
+          },
+          "readOnly": true,
+          "type": "array"
+        }
+      },
+      "type": "object"
+    },
    "PerformanceInsights": {
      "description": "Performance insights for the job.",
      "id": "PerformanceInsights",
@@ -7608,12 +7662,12 @@
          "type": "object"
        },
        "gcsStagingBucket": {
-          "description": "Output only. The Google Cloud Storage bucket that is used as the default filesystem by the Spark application. This fields is only filled when the Spark procedure uses the INVOKER security mode. It is inferred from the system variable @@spark_proc_properties.staging_bucket if it is provided. Otherwise, BigQuery creates a default staging bucket for the job and returns the bucket name in this field. Example: * `gs://[bucket_name]`",
+          "description": "Output only. The Google Cloud Storage bucket that is used as the default file system by the Spark application. This field is only filled when the Spark procedure uses the invoker security mode. The `gcsStagingBucket` bucket is inferred from the `@@spark_proc_properties.staging_bucket` system variable (if it is provided). Otherwise, BigQuery creates a default staging bucket for the job and returns the bucket name in this field. Example: * `gs://[bucket_name]`",
          "readOnly": true,
          "type": "string"
        },
        "kmsKeyName": {
-          "description": "Output only. The Cloud KMS encryption key that is used to protect the resources created by the Spark job. If the Spark procedure uses DEFINER security mode, the Cloud KMS key is inferred from the Spark connection associated with the procedure if it is provided. Otherwise the key is inferred from the default key of the Spark connection's project if the CMEK organization policy is enforced. If the Spark procedure uses INVOKER security mode, the Cloud KMS encryption key is inferred from the system variable @@spark_proc_properties.kms_key_name if it is provided. Otherwise, the key is inferred fromt he default key of the BigQuery job's project if the CMEK organization policy is enforced. Example: * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`",
+          "description": "Output only. The Cloud KMS encryption key that is used to protect the resources created by the Spark job. If the Spark procedure uses the invoker security mode, the Cloud KMS encryption key is either inferred from the provided system variable, `@@spark_proc_properties.kms_key_name`, or the default key of the BigQuery job's project (if the CMEK organization policy is enforced). Otherwise, the Cloud KMS key is either inferred from the Spark connection associated with the procedure (if it is provided), or from the default key of the Spark connection's project if the CMEK organization policy is enforced. Example: * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`",
          "readOnly": true,
          "type": "string"
        },
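
The reworded descriptions apply to `SparkStatistics`, which the generated client surfaces under a query job's statistics. A sketch of inspecting these fields after a Spark stored procedure runs; the job ID is a placeholder, and the `statistics.query.sparkStatistics` path follows the generated v2 types:

```ts
import {google} from 'googleapis';

async function inspectSparkStatistics(): Promise<void> {
  const auth = new google.auth.GoogleAuth({
    scopes: ['https://www.googleapis.com/auth/bigquery.readonly'],
  });
  const bigquery = google.bigquery({version: 'v2', auth});

  // Placeholder IDs: a job that invoked a Spark stored procedure.
  const res = await bigquery.jobs.get({
    projectId: 'my-project',
    jobId: 'my-spark-job-id',
  });

  const spark = res.data.statistics?.query?.sparkStatistics;
  // In invoker security mode the staging bucket comes from
  // @@spark_proc_properties.staging_bucket when set; otherwise BigQuery creates a
  // default bucket and reports its name here.
  console.log('Staging bucket:', spark?.gcsStagingBucket);
  // The Cloud KMS key is likewise inferred (system variable, Spark connection, or
  // project default under the CMEK org policy) and reported here.
  console.log('KMS key:', spark?.kmsKeyName);
}

inspectSparkStatistics().catch(console.error);
```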
@@ -8043,6 +8097,11 @@
          "readOnly": true,
          "type": "string"
        },
+        "partitionDefinition": {
+          "$ref": "PartitioningDefinition",
+          "description": "Output only. The partition information for all table formats, including managed partitioned tables, hive partitioned tables, and iceberg partitioned tables.",
+          "readOnly": true
+        },
        "rangePartitioning": {
          "$ref": "RangePartitioning",
          "description": "If specified, configures range partitioning for this table."
@@ -8370,7 +8429,7 @@
      "description": "Represents the type of a field element.",
      "properties": {
        "type": {
-          "description": "Required. The type of a field element. See TableFieldSchema.type.",
+          "description": "Required. The type of a field element. For more information, see TableFieldSchema.type.",
          "type": "string"
        }
      },
@@ -8396,7 +8455,7 @@
          "type": "string"
        },
        "type": {
-          "description": "Required. The field data type. Possible values include: * STRING * BYTES * INTEGER (or INT64) * FLOAT (or FLOAT64) * BOOLEAN (or BOOL) * TIMESTAMP * DATE * TIME * DATETIME * GEOGRAPHY * NUMERIC * BIGNUMERIC * JSON * RECORD (or STRUCT) Use of RECORD/STRUCT indicates that the field contains a nested schema.",
+          "description": "Required. The field data type. Possible values include: * STRING * BYTES * INTEGER (or INT64) * FLOAT (or FLOAT64) * BOOLEAN (or BOOL) * TIMESTAMP * DATE * TIME * DATETIME * GEOGRAPHY * NUMERIC * BIGNUMERIC * JSON * RECORD (or STRUCT) * RANGE ([Preview](/products/#product-launch-stages)) Use of RECORD/STRUCT indicates that the field contains a nested schema.",
          "type": "string"
        }
      },
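
The updated `type` description documents the RANGE preview type alongside the existing ones. A sketch of a table schema literal that uses it, typed against the generated interfaces (field names are illustrative; a RANGE field also needs a `rangeElementType` such as DATE, DATETIME, or TIMESTAMP):

```ts
import type {bigquery_v2} from 'googleapis';

// Illustrative schema: a STRING column plus a RANGE<DATE> column.
const schema: bigquery_v2.Schema$TableSchema = {
  fields: [
    {name: 'customer_id', type: 'STRING'},
    {
      name: 'subscription_period',
      // RANGE is listed as a Preview type in the updated description.
      type: 'RANGE',
      rangeElementType: {type: 'DATE'},
    },
  ],
};

console.log(JSON.stringify(schema, null, 2));
```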

src/apis/bigquery/v2.ts

Lines changed: 38 additions & 3 deletions
@@ -943,6 +943,10 @@ export namespace bigquery_v2 {
     * Output only. The date when this dataset was last modified, in milliseconds since the epoch.
     */
    lastModifiedTime?: string | null;
+    /**
+     * Output only. Metadata about the LinkedDataset. Filled out when the dataset type is LINKED.
+     */
+    linkedDatasetMetadata?: Schema$LinkedDatasetMetadata;
    /**
     * Optional. The source dataset reference when the dataset is of type LINKED. For all other dataset types it is not set. This field cannot be updated once it is set. Any attempt to update this field using Update and Patch API Operations will be ignored.
     */
@@ -2803,6 +2807,15 @@ export namespace bigquery_v2 {
    encoding?: string | null;
  }
  export interface Schema$JsonValue {}
+  /**
+   * Metadata about the Linked Dataset.
+   */
+  export interface Schema$LinkedDatasetMetadata {
+    /**
+     * Output only. Specifies whether Linked Dataset is currently in a linked state or not.
+     */
+    linkState?: string | null;
+  }
  /**
   * A dataset source type which refers to another BigQuery dataset.
   */
@@ -3147,6 +3160,24 @@ export namespace bigquery_v2 {
     */
    enumAsString?: boolean | null;
  }
+  /**
+   * The partitioning column information.
+   */
+  export interface Schema$PartitionedColumn {
+    /**
+     * Output only. The name of the partition column.
+     */
+    field?: string | null;
+  }
+  /**
+   * The partitioning information, which includes managed table and external table partition information.
+   */
+  export interface Schema$PartitioningDefinition {
+    /**
+     * Output only. Details about each partitioning column. BigQuery native tables only support 1 partitioning column. Other table types may support 0, 1 or more partitioning columns.
+     */
+    partitionedColumn?: Schema$PartitionedColumn[];
+  }
  /**
   * Performance insights for the job.
   */
@@ -3958,11 +3989,11 @@ export namespace bigquery_v2 {
     */
    endpoints?: {[key: string]: string} | null;
    /**
-     * Output only. The Google Cloud Storage bucket that is used as the default filesystem by the Spark application. This fields is only filled when the Spark procedure uses the INVOKER security mode. It is inferred from the system variable @@spark_proc_properties.staging_bucket if it is provided. Otherwise, BigQuery creates a default staging bucket for the job and returns the bucket name in this field. Example: * `gs://[bucket_name]`
+     * Output only. The Google Cloud Storage bucket that is used as the default file system by the Spark application. This field is only filled when the Spark procedure uses the invoker security mode. The `gcsStagingBucket` bucket is inferred from the `@@spark_proc_properties.staging_bucket` system variable (if it is provided). Otherwise, BigQuery creates a default staging bucket for the job and returns the bucket name in this field. Example: * `gs://[bucket_name]`
     */
    gcsStagingBucket?: string | null;
    /**
-     * Output only. The Cloud KMS encryption key that is used to protect the resources created by the Spark job. If the Spark procedure uses DEFINER security mode, the Cloud KMS key is inferred from the Spark connection associated with the procedure if it is provided. Otherwise the key is inferred from the default key of the Spark connection's project if the CMEK organization policy is enforced. If the Spark procedure uses INVOKER security mode, the Cloud KMS encryption key is inferred from the system variable @@spark_proc_properties.kms_key_name if it is provided. Otherwise, the key is inferred fromt he default key of the BigQuery job's project if the CMEK organization policy is enforced. Example: * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`
+     * Output only. The Cloud KMS encryption key that is used to protect the resources created by the Spark job. If the Spark procedure uses the invoker security mode, the Cloud KMS encryption key is either inferred from the provided system variable, `@@spark_proc_properties.kms_key_name`, or the default key of the BigQuery job's project (if the CMEK organization policy is enforced). Otherwise, the Cloud KMS key is either inferred from the Spark connection associated with the procedure (if it is provided), or from the default key of the Spark connection's project if the CMEK organization policy is enforced. Example: * `projects/[kms_project_id]/locations/[region]/keyRings/[key_region]/cryptoKeys/[key]`
     */
    kmsKeyName?: string | null;
    /**
@@ -4237,6 +4268,10 @@ export namespace bigquery_v2 {
     * Output only. The physical size of this table in bytes. This also includes storage used for time travel. This data is not kept in real time, and might be delayed by a few seconds to a few minutes.
     */
    numTotalPhysicalBytes?: string | null;
+    /**
+     * Output only. The partition information for all table formats, including managed partitioned tables, hive partitioned tables, and iceberg partitioned tables.
+     */
+    partitionDefinition?: Schema$PartitioningDefinition;
    /**
     * If specified, configures range partitioning for this table.
     */
@@ -4439,7 +4474,7 @@ export namespace bigquery_v2 {
     */
    scale?: string | null;
    /**
-     * Required. The field data type. Possible values include: * STRING * BYTES * INTEGER (or INT64) * FLOAT (or FLOAT64) * BOOLEAN (or BOOL) * TIMESTAMP * DATE * TIME * DATETIME * GEOGRAPHY * NUMERIC * BIGNUMERIC * JSON * RECORD (or STRUCT) Use of RECORD/STRUCT indicates that the field contains a nested schema.
+     * Required. The field data type. Possible values include: * STRING * BYTES * INTEGER (or INT64) * FLOAT (or FLOAT64) * BOOLEAN (or BOOL) * TIMESTAMP * DATE * TIME * DATETIME * GEOGRAPHY * NUMERIC * BIGNUMERIC * JSON * RECORD (or STRUCT) * RANGE ([Preview](/products/#product-launch-stages)) Use of RECORD/STRUCT indicates that the field contains a nested schema.
     */
    type?: string | null;
  }
