
Commit fa7223a

awstools committed
feat(client-appflow): With this update, you can choose which Salesforce API is used by Amazon AppFlow to transfer data to or from your Salesforce account. You can choose the Salesforce REST API or Bulk API 2.0. You can also choose for Amazon AppFlow to pick the API automatically.
1 parent 17b50b4 commit fa7223a
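
For orientation before the diff: a minimal sketch of what the new option looks like from the caller's side. The Salesforce object name is hypothetical, and the imports assume the published `@aws-sdk/client-appflow` package:

```ts
import {
  SalesforceDataTransferApi,
  SalesforceSourceProperties,
} from "@aws-sdk/client-appflow";

// Pin Bulk API 2.0 for reads from Salesforce instead of letting AppFlow decide.
const salesforceSource: SalesforceSourceProperties = {
  object: "Account", // hypothetical Salesforce object
  dataTransferApi: SalesforceDataTransferApi.BULKV2,
};
```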

File tree: 3 files changed (+2697, -1805 lines)


clients/client-appflow/src/models/models_0.ts

Lines changed: 102 additions & 5 deletions
```diff
@@ -387,6 +387,12 @@ export interface RedshiftMetadata {}
  */
 export interface S3Metadata {}
 
+export enum SalesforceDataTransferApi {
+  AUTOMATIC = "AUTOMATIC",
+  BULKV2 = "BULKV2",
+  REST_SYNC = "REST_SYNC",
+}
+
 /**
  * <p> The connector metadata specific to Salesforce. </p>
  */
```
```diff
@@ -395,6 +401,12 @@ export interface SalesforceMetadata {
    * <p> The desired authorization scope for the Salesforce account. </p>
    */
   oAuthScopes?: string[];
+
+  /**
+   * <p>The Salesforce APIs that you can have Amazon AppFlow use when your flow transfers
+   * data to or from Salesforce.</p>
+   */
+  dataTransferApis?: (SalesforceDataTransferApi | string)[];
 }
 
 /**
```
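
The new `dataTransferApis` metadata field surfaces which APIs the Salesforce connector supports. A minimal sketch of reading it via `DescribeConnectors`, assuming a configured client with valid credentials (the region is illustrative, and the lookup path follows the `ConnectorConfiguration` shape):

```ts
import { AppflowClient, DescribeConnectorsCommand } from "@aws-sdk/client-appflow";

const client = new AppflowClient({ region: "us-east-1" }); // region is an assumption

// Expected to return values like ["AUTOMATIC", "BULKV2", "REST_SYNC"]
// once the service populates the field.
async function supportedSalesforceApis(): Promise<string[] | undefined> {
  const { connectorConfigurations } = await client.send(
    new DescribeConnectorsCommand({ connectorTypes: ["Salesforce"] })
  );
  return connectorConfigurations?.Salesforce?.connectorMetadata?.Salesforce?.dataTransferApis;
}
```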
```diff
@@ -2959,6 +2971,47 @@ export interface SalesforceDestinationProperties {
    * is <code>UPSERT</code>, then <code>idFieldNames</code> is required. </p>
    */
   writeOperationType?: WriteOperationType | string;
+
+  /**
+   * <p>Specifies which Salesforce API is used by Amazon AppFlow when your flow transfers
+   * data to Salesforce.</p>
+   * <dl>
+   *   <dt>AUTOMATIC</dt>
+   *   <dd>
+   *     <p>The default. Amazon AppFlow selects which API to use based on the number of
+   *     records that your flow transfers to Salesforce. If your flow transfers fewer than
+   *     1,000 records, Amazon AppFlow uses Salesforce REST API. If your flow transfers
+   *     1,000 records or more, Amazon AppFlow uses Salesforce Bulk API 2.0.</p>
+   *     <p>Each of these Salesforce APIs structures data differently. If Amazon AppFlow
+   *     selects the API automatically, be aware that, for recurring flows, the data output
+   *     might vary from one flow run to the next. For example, if a flow runs daily, it
+   *     might use REST API on one day to transfer 900 records, and it might use Bulk API
+   *     2.0 on the next day to transfer 1,100 records. For each of these flow runs, the
+   *     respective Salesforce API formats the data differently. Some of the differences
+   *     include how dates are formatted and null values are represented. Also, Bulk API
+   *     2.0 doesn't transfer Salesforce compound fields.</p>
+   *     <p>By choosing this option, you optimize flow performance for both small and large
+   *     data transfers, but the tradeoff is inconsistent formatting in the output.</p>
+   *   </dd>
+   *   <dt>BULKV2</dt>
+   *   <dd>
+   *     <p>Amazon AppFlow uses only Salesforce Bulk API 2.0. This API runs asynchronous
+   *     data transfers, and it's optimal for large sets of data. By choosing this option,
+   *     you ensure that your flow writes consistent output, but you optimize performance
+   *     only for large data transfers.</p>
+   *     <p>Note that Bulk API 2.0 does not transfer Salesforce compound fields.</p>
+   *   </dd>
+   *   <dt>REST_SYNC</dt>
+   *   <dd>
+   *     <p>Amazon AppFlow uses only Salesforce REST API. By choosing this option, you
+   *     ensure that your flow writes consistent output, but you decrease performance for
+   *     large data transfers that are better suited for Bulk API 2.0. In some cases, if
+   *     your flow attempts to transfer a very large set of data, it might fail with a
+   *     timeout error.</p>
+   *   </dd>
+   * </dl>
+   */
+  dataTransferApi?: SalesforceDataTransferApi | string;
 }
 
 /**
```
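
To make the BULKV2 tradeoff concrete on the write side, a sketch of destination properties that pin Bulk API 2.0. The object and upsert key are hypothetical; `idFieldNames` is included because it is required when the write operation is `UPSERT`:

```ts
import {
  SalesforceDataTransferApi,
  SalesforceDestinationProperties,
  WriteOperationType,
} from "@aws-sdk/client-appflow";

// Consistent output formatting across runs, at the cost of small-batch performance.
const destination: SalesforceDestinationProperties = {
  object: "Contact", // hypothetical Salesforce object
  writeOperationType: WriteOperationType.UPSERT,
  idFieldNames: ["Email"], // hypothetical upsert key
  dataTransferApi: SalesforceDataTransferApi.BULKV2,
};
```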
```diff
@@ -3358,6 +3411,47 @@ export interface SalesforceSourceProperties {
    * <p> Indicates whether Amazon AppFlow includes deleted files in the flow run. </p>
    */
   includeDeletedRecords?: boolean;
+
+  /**
+   * <p>Specifies which Salesforce API is used by Amazon AppFlow when your flow transfers
+   * data from Salesforce.</p>
+   * <dl>
+   *   <dt>AUTOMATIC</dt>
+   *   <dd>
+   *     <p>The default. Amazon AppFlow selects which API to use based on the number of
+   *     records that your flow transfers from Salesforce. If your flow transfers fewer
+   *     than 1,000,000 records, Amazon AppFlow uses Salesforce REST API. If your flow
+   *     transfers 1,000,000 records or more, Amazon AppFlow uses Salesforce Bulk API 2.0.</p>
+   *     <p>Each of these Salesforce APIs structures data differently. If Amazon AppFlow
+   *     selects the API automatically, be aware that, for recurring flows, the data output
+   *     might vary from one flow run to the next. For example, if a flow runs daily, it
+   *     might use REST API on one day to transfer 900,000 records, and it might use Bulk
+   *     API 2.0 on the next day to transfer 1,100,000 records. For each of these flow
+   *     runs, the respective Salesforce API formats the data differently. Some of the
+   *     differences include how dates are formatted and null values are represented.
+   *     Also, Bulk API 2.0 doesn't transfer Salesforce compound fields.</p>
+   *     <p>By choosing this option, you optimize flow performance for both small and large
+   *     data transfers, but the tradeoff is inconsistent formatting in the output.</p>
+   *   </dd>
+   *   <dt>BULKV2</dt>
+   *   <dd>
+   *     <p>Amazon AppFlow uses only Salesforce Bulk API 2.0. This API runs asynchronous
+   *     data transfers, and it's optimal for large sets of data. By choosing this option,
+   *     you ensure that your flow writes consistent output, but you optimize performance
+   *     only for large data transfers.</p>
+   *     <p>Note that Bulk API 2.0 does not transfer Salesforce compound fields.</p>
+   *   </dd>
+   *   <dt>REST_SYNC</dt>
+   *   <dd>
+   *     <p>Amazon AppFlow uses only Salesforce REST API. By choosing this option, you
+   *     ensure that your flow writes consistent output, but you decrease performance for
+   *     large data transfers that are better suited for Bulk API 2.0. In some cases, if
+   *     your flow attempts to transfer a very large set of data, it might fail with a
+   *     timeout error.</p>
+   *   </dd>
+   * </dl>
+   */
+  dataTransferApi?: SalesforceDataTransferApi | string;
 }
 
 /**
```
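
And the mirror image on the read side: pinning REST_SYNC keeps date and null formatting identical across recurring runs, per the documentation above, at the risk of timeouts on very large pulls. The object name is hypothetical:

```ts
import {
  SalesforceDataTransferApi,
  SalesforceSourceProperties,
} from "@aws-sdk/client-appflow";

const source: SalesforceSourceProperties = {
  object: "Opportunity", // hypothetical Salesforce object
  includeDeletedRecords: true,
  dataTransferApi: SalesforceDataTransferApi.REST_SYNC, // consistent formatting every run
};
```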
```diff
@@ -3576,6 +3670,7 @@ export enum OperatorPropertiesKeys {
   DATA_TYPE = "DATA_TYPE",
   DESTINATION_DATA_TYPE = "DESTINATION_DATA_TYPE",
   EXCLUDE_SOURCE_FIELDS_LIST = "EXCLUDE_SOURCE_FIELDS_LIST",
+  INCLUDE_NEW_FIELDS = "INCLUDE_NEW_FIELDS",
   LOWER_BOUND = "LOWER_BOUND",
   MASK_LENGTH = "MASK_LENGTH",
   MASK_VALUE = "MASK_VALUE",
```
```diff
@@ -3670,11 +3765,13 @@ export interface ScheduledTriggerProperties {
 
   /**
    * <p>Specifies the time zone used when referring to the dates and times of a scheduled flow,
-   * such as <code>America/New_York</code>. This time zone is only a descriptive label. It doesn't affect how
-   * Amazon AppFlow interprets the timestamps that you specify to schedule the flow.</p>
-   * <p>If you want to schedule a flow by using times in a particular time zone, indicate the time zone as a UTC
-   * offset in your timestamps. For example, the UTC offsets for the <code>America/New_York</code> timezone are
-   * <code>-04:00</code> EDT and <code>-05:00 EST</code>.</p>
+   * such as <code>America/New_York</code>. This time zone is only a descriptive label. It doesn't
+   * affect how Amazon AppFlow interprets the timestamps that you specify to schedule the
+   * flow.</p>
+   * <p>If you want to schedule a flow by using times in a particular time zone, indicate the time
+   * zone as a UTC offset in your timestamps. For example, the UTC offsets for the
+   * <code>America/New_York</code> timezone are <code>-04:00</code> EDT and <code>-05:00
+   * EST</code>.</p>
    */
   timezone?: string;
 
```
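
The reworded timezone doc is worth a concrete reading: the offset lives in the timestamp itself, not in the `timezone` label. A minimal sketch, where the date and schedule expression are illustrative rather than taken from this diff:

```ts
import { ScheduledTriggerProperties } from "@aws-sdk/client-appflow";

// 09:00 New York time is expressed as a -04:00 (EDT) offset in the timestamp;
// the timezone field below is only a descriptive label.
const schedule: ScheduledTriggerProperties = {
  scheduleExpression: "rate(1days)", // illustrative expression
  scheduleStartTime: new Date("2022-09-15T09:00:00-04:00"),
  timezone: "America/New_York",
};
```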

clients/client-appflow/src/protocols/Aws_restJson1.ts

Lines changed: 24 additions & 0 deletions
```diff
@@ -174,6 +174,7 @@ import {
   S3SourceProperties,
   SalesforceConnectorProfileCredentials,
   SalesforceConnectorProfileProperties,
+  SalesforceDataTransferApi,
   SalesforceDestinationProperties,
   SalesforceMetadata,
   SalesforceSourceProperties,
```
```diff
@@ -2989,6 +2990,7 @@ const serializeAws_restJson1SalesforceDestinationProperties = (
   context: __SerdeContext
 ): any => {
   return {
+    ...(input.dataTransferApi != null && { dataTransferApi: input.dataTransferApi }),
     ...(input.errorHandlingConfig != null && {
       errorHandlingConfig: serializeAws_restJson1ErrorHandlingConfig(input.errorHandlingConfig, context),
     }),
```
```diff
@@ -3005,6 +3007,7 @@ const serializeAws_restJson1SalesforceSourceProperties = (
   context: __SerdeContext
 ): any => {
   return {
+    ...(input.dataTransferApi != null && { dataTransferApi: input.dataTransferApi }),
     ...(input.enableDynamicFieldUpdate != null && { enableDynamicFieldUpdate: input.enableDynamicFieldUpdate }),
     ...(input.includeDeletedRecords != null && { includeDeletedRecords: input.includeDeletedRecords }),
     ...(input.object != null && { object: input.object }),
```
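
Both serializers rely on the null-guarded spread idiom: spreading `false` into an object literal contributes nothing, so `...(x != null && { key: x })` emits the property only when it is set. A standalone illustration, with names invented for the demo:

```ts
const input = { dataTransferApi: "BULKV2", object: undefined as string | undefined };

const body = {
  ...(input.dataTransferApi != null && { dataTransferApi: input.dataTransferApi }),
  ...(input.object != null && { object: input.object }),
};

console.log(body); // { dataTransferApi: "BULKV2" }: unset fields are omitted
```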
```diff
@@ -4843,11 +4846,27 @@ const deserializeAws_restJson1SalesforceConnectorProfileProperties = (
   } as any;
 };
 
+const deserializeAws_restJson1SalesforceDataTransferApiList = (
+  output: any,
+  context: __SerdeContext
+): (SalesforceDataTransferApi | string)[] => {
+  const retVal = (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return __expectString(entry) as any;
+    });
+  return retVal;
+};
+
 const deserializeAws_restJson1SalesforceDestinationProperties = (
   output: any,
   context: __SerdeContext
 ): SalesforceDestinationProperties => {
   return {
+    dataTransferApi: __expectString(output.dataTransferApi),
     errorHandlingConfig:
       output.errorHandlingConfig != null
         ? deserializeAws_restJson1ErrorHandlingConfig(output.errorHandlingConfig, context)
```
```diff
@@ -4861,6 +4880,10 @@ const deserializeAws_restJson1SalesforceDestinationProperties = (
 
 const deserializeAws_restJson1SalesforceMetadata = (output: any, context: __SerdeContext): SalesforceMetadata => {
   return {
+    dataTransferApis:
+      output.dataTransferApis != null
+        ? deserializeAws_restJson1SalesforceDataTransferApiList(output.dataTransferApis, context)
+        : undefined,
     oAuthScopes:
       output.oAuthScopes != null ? deserializeAws_restJson1OAuthScopeList(output.oAuthScopes, context) : undefined,
   } as any;
```
```diff
@@ -4871,6 +4894,7 @@ const deserializeAws_restJson1SalesforceSourceProperties = (
   context: __SerdeContext
 ): SalesforceSourceProperties => {
   return {
+    dataTransferApi: __expectString(output.dataTransferApi),
     enableDynamicFieldUpdate: __expectBoolean(output.enableDynamicFieldUpdate),
     includeDeletedRecords: __expectBoolean(output.includeDeletedRecords),
     object: __expectString(output.object),
```
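
Closing the loop, the deserialized field can be read back from a flow description. A minimal sketch, where the flow name, region, and credentials are assumptions:

```ts
import { AppflowClient, DescribeFlowCommand } from "@aws-sdk/client-appflow";

const client = new AppflowClient({ region: "us-east-1" }); // region is an assumption

async function flowDataTransferApi(flowName: string): Promise<string | undefined> {
  const flow = await client.send(new DescribeFlowCommand({ flowName }));
  // Populated by deserializeAws_restJson1SalesforceSourceProperties above.
  return flow.sourceFlowConfig?.sourceConnectorProperties?.Salesforce?.dataTransferApi;
}
```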
