@@ -387,6 +387,12 @@ export interface RedshiftMetadata {}
387
387
*/
388
388
export interface S3Metadata {}
389
389
390
+ export enum SalesforceDataTransferApi {
391
+ AUTOMATIC = "AUTOMATIC" ,
392
+ BULKV2 = "BULKV2" ,
393
+ REST_SYNC = "REST_SYNC" ,
394
+ }
395
+
390
396
/**
391
397
* <p> The connector metadata specific to Salesforce. </p>
392
398
*/
@@ -395,6 +401,12 @@ export interface SalesforceMetadata {
395
401
* <p> The desired authorization scope for the Salesforce account. </p>
396
402
*/
397
403
oAuthScopes ?: string [ ] ;
404
+
405
+ /**
406
+ * <p>The Salesforce APIs that you can have Amazon AppFlow use when your flows transfers
407
+ * data to or from Salesforce.</p>
408
+ */
409
+ dataTransferApis ?: ( SalesforceDataTransferApi | string ) [ ] ;
398
410
}
399
411
400
412
/**
@@ -2959,6 +2971,47 @@ export interface SalesforceDestinationProperties {
2959
2971
* is <code>UPSERT</code>, then <code>idFieldNames</code> is required. </p>
2960
2972
*/
2961
2973
writeOperationType ?: WriteOperationType | string ;
2974
+
2975
+ /**
2976
+ * <p>Specifies which Salesforce API is used by Amazon AppFlow when your flow transfers
2977
+ * data to Salesforce.</p>
2978
+ * <dl>
2979
+ * <dt>AUTOMATIC</dt>
2980
+ * <dd>
2981
+ * <p>The default. Amazon AppFlow selects which API to use based on the number of
2982
+ * records that your flow transfers to Salesforce. If your flow transfers fewer than 1,000
2983
+ * records, Amazon AppFlow uses Salesforce REST API. If your flow transfers 1,000
2984
+ * records or more, Amazon AppFlow uses Salesforce Bulk API 2.0.</p>
2985
+ * <p>Each of these Salesforce APIs structures data differently. If Amazon AppFlow
2986
+ * selects the API automatically, be aware that, for recurring flows, the data output might
2987
+ * vary from one flow run to the next. For example, if a flow runs daily, it might use REST
2988
+ * API on one day to transfer 900 records, and it might use Bulk API 2.0 on the next day to
2989
+ * transfer 1,100 records. For each of these flow runs, the respective Salesforce API
2990
+ * formats the data differently. Some of the differences include how dates are formatted
2991
+ * and null values are represented. Also, Bulk API 2.0 doesn't transfer Salesforce compound
2992
+ * fields.</p>
2993
+ * <p>By choosing this option, you optimize flow performance for both small and large data
2994
+ * transfers, but the tradeoff is inconsistent formatting in the output.</p>
2995
+ * </dd>
2996
+ * <dt>BULKV2</dt>
2997
+ * <dd>
2998
+ * <p>Amazon AppFlow uses only Salesforce Bulk API 2.0. This API runs asynchronous
2999
+ * data transfers, and it's optimal for large sets of data. By choosing this option, you
3000
+ * ensure that your flow writes consistent output, but you optimize performance only for
3001
+ * large data transfers.</p>
3002
+ * <p>Note that Bulk API 2.0 does not transfer Salesforce compound fields.</p>
3003
+ * </dd>
3004
+ * <dt>REST_SYNC</dt>
3005
+ * <dd>
3006
+ * <p>Amazon AppFlow uses only Salesforce REST API. By choosing this option, you
3007
+ * ensure that your flow writes consistent output, but you decrease performance for large
3008
+ * data transfers that are better suited for Bulk API 2.0. In some cases, if your flow
3009
+ * attempts to transfer a vary large set of data, it might fail with a timed out
3010
+ * error.</p>
3011
+ * </dd>
3012
+ * </dl>
3013
+ */
3014
+ dataTransferApi ?: SalesforceDataTransferApi | string ;
2962
3015
}
2963
3016
2964
3017
/**
@@ -3358,6 +3411,47 @@ export interface SalesforceSourceProperties {
3358
3411
* <p> Indicates whether Amazon AppFlow includes deleted files in the flow run. </p>
3359
3412
*/
3360
3413
includeDeletedRecords ?: boolean ;
3414
+
3415
+ /**
3416
+ * <p>Specifies which Salesforce API is used by Amazon AppFlow when your flow transfers
3417
+ * data from Salesforce.</p>
3418
+ * <dl>
3419
+ * <dt>AUTOMATIC</dt>
3420
+ * <dd>
3421
+ * <p>The default. Amazon AppFlow selects which API to use based on the number of
3422
+ * records that your flow transfers from Salesforce. If your flow transfers fewer than
3423
+ * 1,000,000 records, Amazon AppFlow uses Salesforce REST API. If your flow transfers
3424
+ * 1,000,000 records or more, Amazon AppFlow uses Salesforce Bulk API 2.0.</p>
3425
+ * <p>Each of these Salesforce APIs structures data differently. If Amazon AppFlow
3426
+ * selects the API automatically, be aware that, for recurring flows, the data output might
3427
+ * vary from one flow run to the next. For example, if a flow runs daily, it might use REST
3428
+ * API on one day to transfer 900,000 records, and it might use Bulk API 2.0 on the next
3429
+ * day to transfer 1,100,000 records. For each of these flow runs, the respective
3430
+ * Salesforce API formats the data differently. Some of the differences include how dates
3431
+ * are formatted and null values are represented. Also, Bulk API 2.0 doesn't transfer
3432
+ * Salesforce compound fields.</p>
3433
+ * <p>By choosing this option, you optimize flow performance for both small and large data
3434
+ * transfers, but the tradeoff is inconsistent formatting in the output.</p>
3435
+ * </dd>
3436
+ * <dt>BULKV2</dt>
3437
+ * <dd>
3438
+ * <p>Amazon AppFlow uses only Salesforce Bulk API 2.0. This API runs asynchronous
3439
+ * data transfers, and it's optimal for large sets of data. By choosing this option, you
3440
+ * ensure that your flow writes consistent output, but you optimize performance only for
3441
+ * large data transfers.</p>
3442
+ * <p>Note that Bulk API 2.0 does not transfer Salesforce compound fields.</p>
3443
+ * </dd>
3444
+ * <dt>REST_SYNC</dt>
3445
+ * <dd>
3446
+ * <p>Amazon AppFlow uses only Salesforce REST API. By choosing this option, you
3447
+ * ensure that your flow writes consistent output, but you decrease performance for large
3448
+ * data transfers that are better suited for Bulk API 2.0. In some cases, if your flow
3449
+ * attempts to transfer a vary large set of data, it might fail with a timed out
3450
+ * error.</p>
3451
+ * </dd>
3452
+ * </dl>
3453
+ */
3454
+ dataTransferApi ?: SalesforceDataTransferApi | string ;
3361
3455
}
3362
3456
3363
3457
/**
@@ -3576,6 +3670,7 @@ export enum OperatorPropertiesKeys {
3576
3670
DATA_TYPE = "DATA_TYPE" ,
3577
3671
DESTINATION_DATA_TYPE = "DESTINATION_DATA_TYPE" ,
3578
3672
EXCLUDE_SOURCE_FIELDS_LIST = "EXCLUDE_SOURCE_FIELDS_LIST" ,
3673
+ INCLUDE_NEW_FIELDS = "INCLUDE_NEW_FIELDS" ,
3579
3674
LOWER_BOUND = "LOWER_BOUND" ,
3580
3675
MASK_LENGTH = "MASK_LENGTH" ,
3581
3676
MASK_VALUE = "MASK_VALUE" ,
@@ -3670,11 +3765,13 @@ export interface ScheduledTriggerProperties {
3670
3765
3671
3766
/**
3672
3767
* <p>Specifies the time zone used when referring to the dates and times of a scheduled flow,
3673
- * such as <code>America/New_York</code>. This time zone is only a descriptive label. It doesn't affect how
3674
- * Amazon AppFlow interprets the timestamps that you specify to schedule the flow.</p>
3675
- * <p>If you want to schedule a flow by using times in a particular time zone, indicate the time zone as a UTC
3676
- * offset in your timestamps. For example, the UTC offsets for the <code>America/New_York</code> timezone are
3677
- * <code>-04:00</code> EDT and <code>-05:00 EST</code>.</p>
3768
+ * such as <code>America/New_York</code>. This time zone is only a descriptive label. It doesn't
3769
+ * affect how Amazon AppFlow interprets the timestamps that you specify to schedule the
3770
+ * flow.</p>
3771
+ * <p>If you want to schedule a flow by using times in a particular time zone, indicate the time
3772
+ * zone as a UTC offset in your timestamps. For example, the UTC offsets for the
3773
+ * <code>America/New_York</code> timezone are <code>-04:00</code> EDT and <code>-05:00
3774
+ * EST</code>.</p>
3678
3775
*/
3679
3776
timezone ?: string ;
3680
3777
0 commit comments