|
230 | 230 | "description": "Regional Endpoint",
|
231 | 231 | "endpointUrl": "https://dlp.us-west8.rep.googleapis.com/",
|
232 | 232 | "location": "us-west8"
|
233 |
| -}, |
234 |
| -{ |
235 |
| -"description": "Regional Endpoint", |
236 |
| -"endpointUrl": "https://dlp.us.rep.googleapis.com/", |
237 |
| -"location": "us" |
238 |
| -}, |
239 |
| -{ |
240 |
| -"description": "Regional Endpoint", |
241 |
| -"endpointUrl": "https://dlp.eu.rep.googleapis.com/", |
242 |
| -"location": "eu" |
243 | 233 | }
|
244 | 234 | ],
|
245 | 235 | "fullyEncodeReservedExpansion": true,
|
|
5025 | 5015 | }
|
5026 | 5016 | }
|
5027 | 5017 | },
|
5028 |
| -"revision": "20250105", |
| 5018 | +"revision": "20250119", |
5029 | 5019 | "rootUrl": "https://dlp.googleapis.com/",
|
5030 | 5020 | "schemas": {
|
5031 | 5021 | "GooglePrivacyDlpV2Action": {
|
|
7492 | 7482 | "enumDescriptions": [
|
7493 | 7483 | "Unused.",
|
7494 | 7484 | "Scan buckets regardless of the attribute.",
|
7495 |
| -"Buckets with autoclass disabled (https://cloud.google.com/storage/docs/autoclass). Only one of AUTOCLASS_DISABLED or AUTOCLASS_ENABLED should be set.", |
7496 |
| -"Buckets with autoclass enabled (https://cloud.google.com/storage/docs/autoclass). Only one of AUTOCLASS_DISABLED or AUTOCLASS_ENABLED should be set. Scanning Autoclass-enabled buckets can affect object storage classes." |
| 7485 | +"Buckets with [Autoclass](https://cloud.google.com/storage/docs/autoclass) disabled. Only one of AUTOCLASS_DISABLED or AUTOCLASS_ENABLED should be set.", |
| 7486 | +"Buckets with [Autoclass](https://cloud.google.com/storage/docs/autoclass) enabled. Only one of AUTOCLASS_DISABLED or AUTOCLASS_ENABLED should be set. Scanning Autoclass-enabled buckets can affect object storage classes." |
7497 | 7487 | ],
|
7498 | 7488 | "type": "string"
|
7499 | 7489 | },
|
|
8147 | 8137 | "properties": {
|
8148 | 8138 | "profileTable": {
|
8149 | 8139 | "$ref": "GooglePrivacyDlpV2BigQueryTable",
|
8150 |
| -"description": "Store all profiles to BigQuery. * The system will create a new dataset and table for you if none are are provided. The dataset will be named `sensitive_data_protection_discovery` and table will be named `discovery_profiles`. This table will be placed in the same project as the container project running the scan. The configuration will be updated with the fields set after the first profile is generated and the dataset and table are created. * See [Analyze data profiles stored in BigQuery](https://cloud.google.com/sensitive-data-protection/docs/analyze-data-profiles) * See [Sample queries for your BigQuery table](https://cloud.google.com/sensitive-data-protection/docs/analyze-data-profiles#sample_sql_queries). * Data is inserted using [streaming insert](https://cloud.google.com/blog/products/bigquery/life-of-a-bigquery-streaming-insert) and so data may be in the buffer for a period of time after the profile has finished. * The Pub/Sub notification is sent before the streaming buffer is guaranteed to be written, so data may not be instantly visible to queries by the time your topic receives the Pub/Sub notification. * The best practice is to use the same table for an entire organization so that you can take advantage of the provided Looker reports. If you use VPC Service Controls to define security perimeters, then you must use a separate table for each boundary." |
| 8140 | +"description": "Store all profiles to BigQuery. * The system will create a new dataset and table for you if none are provided. The dataset will be named `sensitive_data_protection_discovery` and table will be named `discovery_profiles`. This table will be placed in the same project as the container project running the scan. After the first profile is generated and the dataset and table are created, the discovery scan configuration will be updated with the dataset and table names. * See [Analyze data profiles stored in BigQuery](https://cloud.google.com/sensitive-data-protection/docs/analyze-data-profiles). * See [Sample queries for your BigQuery table](https://cloud.google.com/sensitive-data-protection/docs/analyze-data-profiles#sample_sql_queries). * Data is inserted using [streaming insert](https://cloud.google.com/blog/products/bigquery/life-of-a-bigquery-streaming-insert) and so data may be in the buffer for a period of time after the profile has finished. * The Pub/Sub notification is sent before the streaming buffer is guaranteed to be written, so data may not be instantly visible to queries by the time your topic receives the Pub/Sub notification. * The best practice is to use the same table for an entire organization so that you can take advantage of the [provided Looker reports](https://cloud.google.com/sensitive-data-protection/docs/analyze-data-profiles#use_a_premade_report). If you use VPC Service Controls to define security perimeters, then you must use a separate table for each boundary." |
8151 | 8141 | }
|
8152 | 8142 | },
|
8153 | 8143 | "type": "object"
|
|
9053 | 9043 | "type": "string"
|
9054 | 9044 | },
|
9055 | 9045 | "example": {
|
9056 |
| -"description": "A sample true positive for this infoType.", |
| 9046 | +"description": "A sample that is a true positive for this infoType.", |
9057 | 9047 | "type": "string"
|
9058 | 9048 | },
|
9059 | 9049 | "name": {
|
|
0 commit comments