Skip to content

Commit a7ccb72

Browse files
eaplataniosrxwei
authored and committed
Regenerated bindings. (#25)
1 parent bd3692d commit a7ccb72

File tree

1 file changed

+211
-25
lines changed

1 file changed

+211
-25
lines changed

RawOpsGenerated.swift

Lines changed: 211 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
public enum Raw {
1818

1919
static let generatedTensorFlowVersion = "1.13.1"
20-
static let generatedTensorFlowGitVersion = "v1.12.0-11444-g8b2884c9cb"
20+
static let generatedTensorFlowGitVersion = "v1.12.0-12404-gd1db9860a2"
2121

2222
// @_frozen // SR-9739
2323
public enum A {
@@ -440,6 +440,21 @@ public enum RoundMode6 {
440440
}
441441
}
442442

443+
// @_frozen // SR-9739
444+
public enum SplitType {
445+
case inequality
446+
447+
@inlinable
448+
var cName: String {
449+
@inline(__always)
450+
get {
451+
switch self {
452+
case .inequality: return "inequality"
453+
}
454+
}
455+
}
456+
}
457+
443458
// @_frozen // SR-9739
444459
public enum Unit {
445460
case byte
@@ -1990,7 +2005,7 @@ public static func batchSvd<T: FloatingPoint & TensorFlowScalar>(
19902005
/// (2) For the following input of shape `[4, 1, 1, 3]` and block_size of 2:
19912006
///
19922007
/// ```
1993-
/// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
2008+
/// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
19942009
/// ```
19952010
///
19962011
/// The output tensor has shape `[1, 2, 2, 3]` and value:
@@ -2012,10 +2027,10 @@ public static func batchSvd<T: FloatingPoint & TensorFlowScalar>(
20122027
/// The output tensor has shape `[1, 4, 4, 1]` and value:
20132028
///
20142029
/// ```
2015-
/// x = [[[1], [2], [3], [4]],
2030+
/// x = [[[[1], [2], [3], [4]],
20162031
/// [[5], [6], [7], [8]],
20172032
/// [[9], [10], [11], [12]],
2018-
/// [[13], [14], [15], [16]]]
2033+
/// [[13], [14], [15], [16]]]]
20192034
/// ```
20202035
///
20212036
/// (4) For the following input of shape `[8, 1, 2, 1]` and block_size of 2:
@@ -2123,7 +2138,7 @@ public static func batchToSpace<T: TensorFlowScalar, Tidx: BinaryInteger & Tenso
21232138
/// `crops = [[0, 0], [0, 0]]`:
21242139
///
21252140
/// ```
2126-
/// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
2141+
/// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
21272142
/// ```
21282143
///
21292144
/// The output tensor has shape `[1, 2, 2, 3]` and value:
@@ -2146,10 +2161,10 @@ public static func batchToSpace<T: TensorFlowScalar, Tidx: BinaryInteger & Tenso
21462161
/// The output tensor has shape `[1, 4, 4, 1]` and value:
21472162
///
21482163
/// ```
2149-
/// x = [[[1], [2], [3], [4]],
2164+
/// x = [[[[1], [2], [3], [4]],
21502165
/// [[5], [6], [7], [8]],
21512166
/// [[9], [10], [11], [12]],
2152-
/// [[13], [14], [15], [16]]]
2167+
/// [[13], [14], [15], [16]]]]
21532168
/// ```
21542169
///
21552170
/// (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
@@ -2389,6 +2404,43 @@ public static func bincount<T: Numeric & TensorFlowScalar>(
23892404
/// dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
23902405
/// [..., sizeof(`type`)/sizeof(`T`)] to [...].
23912406
///
2407+
/// tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype
2408+
/// (e.g. tf.complex64 or tf.complex128) as tf.cast() makes the imaginary part 0 while tf.bitcast()
2409+
/// gives a module error.
2410+
/// For example,
2411+
///
2412+
/// Example 1:
2413+
/// ```python
2414+
/// >>> a = [1., 2., 3.]
2415+
/// >>> equality_bitcast = tf.bitcast(a,tf.complex128)
2416+
/// tensorflow.python.framework.errors_impl.InvalidArgumentError: Cannot bitcast from float to complex128: shape [3] [Op:Bitcast]
2417+
/// >>> equality_cast = tf.cast(a,tf.complex128)
2418+
/// >>> print(equality_cast)
2419+
/// tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)
2420+
/// ```
2421+
/// Example 2:
2422+
/// ```python
2423+
/// >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
2424+
/// <tf.Tensor: ... shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>
2425+
/// ```
2426+
/// Example 3:
2427+
/// ```python
2428+
/// >>> x = [1., 2., 3.]
2429+
/// >>> y = [0., 2., 3.]
2430+
/// >>> equality= tf.equal(x,y)
2431+
/// >>> equality_cast = tf.cast(equality,tf.float32)
2432+
/// >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8)
2433+
/// >>> print(equality)
2434+
/// tf.Tensor([False True True], shape=(3,), dtype=bool)
2435+
/// >>> print(equality_cast)
2436+
/// tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
2437+
/// >>> print(equality_bitcast)
2438+
/// tf.Tensor(
2439+
/// [[ 0 0 0 0]
2440+
/// [ 0 0 128 63]
2441+
/// [ 0 0 128 63]], shape=(3, 4), dtype=uint8)
2442+
/// ```
2443+
///
23922444
/// *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
23932445
/// endian orderings will give different results.
23942446
@inlinable @inline(__always)
@@ -2609,6 +2661,95 @@ public static func blockLSTMGrad<T: FloatingPoint & TensorFlowScalar>(
26092661
return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3), Tensor(handle: ret.4), Tensor(handle: ret.5), Tensor(handle: ret.6), Tensor(handle: ret.7))
26102662
}
26112663

2664+
/// Aggregates the summary of accumulated stats for the batch.
2665+
///
2666+
/// The summary stats contains gradients and hessians accumulated for each node, feature dimension id and bucket.
2667+
///
2668+
/// - Parameters:
2669+
/// - node_ids: int32; Rank 1 Tensor containing node ids for each example, shape [batch_size].
2670+
/// - gradients: float32; Rank 2 Tensor (shape=[batch_size, logits_dimension]) with gradients for each example.
2671+
/// - hessians: float32; Rank 2 Tensor (shape=[batch_size, hessian_dimension]) with hessians for each example.
2672+
/// - feature: int32; Rank 2 feature Tensors (shape=[batch_size, feature_dimension]).
2673+
///
2674+
/// - Attrs:
2675+
/// - max_splits: int; the maximum number of splits possible in the whole tree.
2676+
/// - num_buckets: int; equals to the maximum possible value of bucketized feature.
2677+
///
2678+
/// - Output stats_summary: output Rank 4 Tensor (shape=[splits, feature_dimension, buckets, logits_dimension + hessian_dimension])
2679+
/// containing accumulated stats for each node, feature dimension and bucket.
2680+
@inlinable @inline(__always)
2681+
public static func boostedTreesAggregateStats(
2682+
nodeIds: Tensor<Int32>,
2683+
gradients: Tensor<Float>,
2684+
hessians: Tensor<Float>,
2685+
feature: Tensor<Int32>,
2686+
maxSplits: Int64,
2687+
numBuckets: Int64
2688+
) -> Tensor<Float> {
2689+
let ret: TensorHandle<Float> = #tfop("BoostedTreesAggregateStats",
2690+
nodeIds,
2691+
gradients,
2692+
hessians,
2693+
feature,
2694+
max_splits: maxSplits,
2695+
num_buckets: numBuckets)
2696+
return Tensor(handle: ret)
2697+
}
2698+
2699+
/// Calculates gains for each feature and returns the best possible split information for the feature.
2700+
///
2701+
/// The split information is the best threshold (bucket id), gains and left/right node contributions per node for each feature.
2702+
///
2703+
/// It is possible that not all nodes can be split on each feature. Hence, the list of possible nodes can differ between the features. Therefore, we return `node_ids_list` for each feature, containing the list of nodes that this feature can be used to split.
2704+
///
2705+
/// In this manner, the output is the best split per features and per node, so that it needs to be combined later to produce the best split for each node (among all possible features).
2706+
///
2707+
/// The output shapes are compatible in a way that the first dimension of all tensors are the same and equal to the number of possible split nodes for each feature.
2708+
///
2709+
/// - Parameters:
2710+
/// - node_id_range: A Rank 1 tensor (shape=[2]) to specify the range [first, last) of node ids to process within `stats_summary_list`. The nodes are iterated between the two nodes specified by the tensor, as like `for node_id in range(node_id_range[0], node_id_range[1])` (Note that the last index node_id_range[1] is exclusive).
2711+
/// - stats_summary: A Rank 4 tensor (#shape=[max_splits, feature_dims, bucket, stats_dims]) for accumulated stats summary (gradient/hessian) per node, per dimension, per buckets for each feature.
2712+
/// The first dimension of the tensor is the maximum number of splits, and thus not all elements of it will be used, but only the indexes specified by node_ids will be used.
2713+
/// - l1: l1 regularization factor on leaf weights, per instance based.
2714+
/// - l2: l2 regularization factor on leaf weights, per instance based.
2715+
/// - tree_complexity: adjustment to the gain, per leaf based.
2716+
/// - min_node_weight: minimum avg of hessians in a node required before the node is considered for splitting.
2717+
///
2718+
/// - Attrs:
2719+
/// - logits_dimension: The dimension of logit, i.e., number of classes.
2720+
/// - split_type: A string indicating if this Op should perform inequality split or equality split.
2721+
///
2722+
/// - Outputs:
2723+
/// - node_ids: A Rank 1 tensors indicating possible split node ids for each feature. The length of the list is num_features, but each tensor has different size as each feature provides different possible nodes. See above for details like shapes and sizes.
2724+
/// - gains: A Rank 1 tensors indicating the best gains for each feature to split for certain nodes. See above for details like shapes and sizes.
2725+
/// - feature_dimensions: A Rank 1 tensors indicating the best feature dimension for each feature to split for certain nodes if the feature is multi-dimension. See above for details like shapes and sizes.
2726+
/// - thresholds: A Rank 1 tensors indicating the bucket id to compare with (as a threshold) for split in each node. See above for details like shapes and sizes.
2727+
/// - left_node_contribs: A Rank 2 tensors indicating the contribution of the left nodes when branching from parent nodes (given by the tensor element in the output node_ids_list) to the left direction by the given threshold for each feature. This value will be used to make the left node value by adding to the parent node value. Second dimension size is 1 for 1-dimensional logits, but would be larger for multi-class problems. See above for details like shapes and sizes.
2728+
/// - right_node_contribs: A Rank 2 tensors, with the same shape/conditions as left_node_contribs_list, but just that the value is for the right node.
2729+
/// - split_with_default_directions: A Rank 1 tensor indicating which direction to go if data is missing. See above for details like shapes and sizes.
2730+
@inlinable @inline(__always)
2731+
public static func boostedTreesCalculateBestFeatureSplit(
2732+
nodeIdRange: Tensor<Int32>,
2733+
statsSummary: Tensor<Float>,
2734+
l1: Tensor<Float>,
2735+
l2: Tensor<Float>,
2736+
treeComplexity: Tensor<Float>,
2737+
minNodeWeight: Tensor<Float>,
2738+
logitsDimension: Int64,
2739+
splitType: SplitType = .inequality
2740+
) -> (nodeIds: Tensor<Int32>, gains: Tensor<Float>, featureDimensions: Tensor<Int32>, thresholds: Tensor<Int32>, leftNodeContribs: Tensor<Float>, rightNodeContribs: Tensor<Float>, splitWithDefaultDirections: StringTensor) {
2741+
let ret: (TensorHandle<Int32>, TensorHandle<Float>, TensorHandle<Int32>, TensorHandle<Int32>, TensorHandle<Float>, TensorHandle<Float>, TensorHandle<String>) = #tfop("BoostedTreesCalculateBestFeatureSplit",
2742+
nodeIdRange,
2743+
statsSummary,
2744+
l1,
2745+
l2,
2746+
treeComplexity,
2747+
minNodeWeight,
2748+
logits_dimension: logitsDimension,
2749+
split_type: splitType.cName)
2750+
return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3), Tensor(handle: ret.4), Tensor(handle: ret.5), StringTensor(handle: ret.6))
2751+
}
2752+
26122753
/// Makes the summary of accumulated stats for the batch.
26132754
///
26142755
/// The summary stats contains gradients and hessians accumulated into the corresponding node and bucket for each example.
@@ -3045,11 +3186,15 @@ public static func collectiveReduce<T: Numeric & TensorFlowScalar>(
30453186
/// - score_threshold: A 0-D float tensor representing the threshold for deciding when to remove
30463187
/// boxes based on score.
30473188
///
3048-
/// - Attr pad_per_class: If false, the output nmsed boxes, scores and classes
3049-
/// are padded/clipped to `max_total_size`. If true, the
3050-
/// output nmsed boxes, scores and classes are padded to be of length
3051-
/// `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in
3052-
/// which case it is clipped to `max_total_size`. Defaults to false.
3189+
/// - Attrs:
3190+
/// - pad_per_class: If false, the output nmsed boxes, scores and classes
3191+
/// are padded/clipped to `max_total_size`. If true, the
3192+
/// output nmsed boxes, scores and classes are padded to be of length
3193+
/// `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in
3194+
/// which case it is clipped to `max_total_size`. Defaults to false.
3195+
/// - clip_boxes: If true, assume the box coordinates are between [0, 1] and clip the output boxes
3196+
/// if they fall beyond [0, 1]. If false, do not do clipping and output the box
3197+
/// coordinates as it is.
30533198
///
30543199
/// - Outputs:
30553200
/// - nmsed_boxes: A [batch_size, max_detections, 4] float32 tensor
@@ -3070,7 +3215,8 @@ public static func combinedNonMaxSuppression(
30703215
maxTotalSize: Tensor<Int32>,
30713216
iouThreshold: Tensor<Float>,
30723217
scoreThreshold: Tensor<Float>,
3073-
padPerClass: Bool = false
3218+
padPerClass: Bool = false,
3219+
clipBoxes: Bool = true
30743220
) -> (nmsedBoxes: Tensor<Float>, nmsedScores: Tensor<Float>, nmsedClasses: Tensor<Float>, validDetections: Tensor<Int32>) {
30753221
let ret: (TensorHandle<Float>, TensorHandle<Float>, TensorHandle<Float>, TensorHandle<Int32>) = #tfop("CombinedNonMaxSuppression",
30763222
boxes,
@@ -3079,7 +3225,8 @@ public static func combinedNonMaxSuppression(
30793225
maxTotalSize,
30803226
iouThreshold,
30813227
scoreThreshold,
3082-
pad_per_class: padPerClass)
3228+
pad_per_class: padPerClass,
3229+
clip_boxes: clipBoxes)
30833230
return (Tensor(handle: ret.0), Tensor(handle: ret.1), Tensor(handle: ret.2), Tensor(handle: ret.3))
30843231
}
30853232

@@ -6173,6 +6320,42 @@ public static func drawBoundingBoxes<T: FloatingPoint & TensorFlowScalar>(
61736320
return Tensor(handle: ret)
61746321
}
61756322

6323+
/// Draw bounding boxes on a batch of images.
6324+
///
6325+
/// Outputs a copy of `images` but draws on top of the pixels zero or more bounding
6326+
/// boxes specified by the locations in `boxes`. The coordinates of the each
6327+
/// bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The
6328+
/// bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and
6329+
/// height of the underlying image.
6330+
///
6331+
/// For example, if an image is 100 x 200 pixels (height x width) and the bounding
6332+
/// box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of
6333+
/// the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates).
6334+
///
6335+
/// Parts of the bounding box may fall outside the image.
6336+
///
6337+
/// - Parameters:
6338+
/// - images: 4-D with shape `[batch, height, width, depth]`. A batch of images.
6339+
/// - boxes: 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding
6340+
/// boxes.
6341+
/// - colors: 2-D. A list of RGBA colors to cycle through for the boxes.
6342+
///
6343+
/// - Output output: 4-D with the same shape as `images`. The batch of input images with
6344+
/// bounding boxes drawn on the images.
6345+
@inlinable @inline(__always)
6346+
public static func drawBoundingBoxesV2<T: FloatingPoint & TensorFlowScalar>(
6347+
images: Tensor<T>,
6348+
boxes: Tensor<Float>,
6349+
colors: Tensor<Float>
6350+
) -> Tensor<T> {
6351+
let ret: TensorHandle<T> = #tfop("DrawBoundingBoxesV2",
6352+
images,
6353+
boxes,
6354+
colors,
6355+
T$dtype: T.tensorFlowDataType)
6356+
return Tensor(handle: ret)
6357+
}
6358+
61766359
/// Interleave the values from the `data` tensors into a single tensor.
61776360
///
61786361
/// Builds a merged tensor such that
@@ -6741,7 +6924,8 @@ public static func enqueueTPUEmbeddingSparseTensorBatch<T1: BinaryInteger & Tens
67416924
modeOverride: StringTensor,
67426925
deviceOrdinal: Int64 = -1,
67436926
combiners: [String],
6744-
tableIds: [Int32]
6927+
tableIds: [Int32],
6928+
maxSequenceLengths: [Int32]
67456929
) {
67466930
return #tfop("EnqueueTPUEmbeddingSparseTensorBatch",
67476931
sampleIndices,
@@ -6753,7 +6937,8 @@ public static func enqueueTPUEmbeddingSparseTensorBatch<T1: BinaryInteger & Tens
67536937
T3$dtype: T3.tensorFlowDataType,
67546938
device_ordinal: deviceOrdinal,
67556939
combiners: combiners,
6756-
table_ids: tableIds)
6940+
table_ids: tableIds,
6941+
max_sequence_lengths: maxSequenceLengths)
67576942
}
67586943

67596944
/// Creates or finds a child frame, and makes `data` available to the child frame.
@@ -18604,7 +18789,7 @@ public static func softsignGrad<T: FloatingPoint & TensorFlowScalar>(
1860418789
/// The output tensor has shape `[4, 1, 1, 3]` and value:
1860518790
///
1860618791
/// ```
18607-
/// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
18792+
/// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
1860818793
/// ```
1860918794
///
1861018795
/// (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2:
@@ -18738,7 +18923,7 @@ public static func spaceToBatch<T: TensorFlowScalar, Tpaddings: BinaryInteger &
1873818923
/// The output tensor has shape `[4, 1, 1, 3]` and value:
1873918924
///
1874018925
/// ```
18741-
/// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
18926+
/// [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
1874218927
/// ```
1874318928
///
1874418929
/// (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and
@@ -22007,12 +22192,12 @@ public static func tensorScatterUpdate<T: TensorFlowScalar, Tindices: BinaryInte
2200722192

2200822193
/// Assign `value` to the sliced l-value reference of `input`.
2200922194
///
22010-
/// The values of `value` are assigned to the positions in the tensor
22011-
/// `input` that are selected by the slice parameters. The slice parameters
22012-
/// `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`.
22195+
/// The values of `value` are assigned to the positions in the tensor `input` that
22196+
/// are selected by the slice parameters. The slice parameters `begin` `end`
22197+
/// `strides` etc. work exactly as in `StridedSlice`.
2201322198
///
22014-
/// NOTE this op currently does not support broadcasting and so `value`'s
22015-
/// shape must be exactly the shape produced by the slice of `input`.
22199+
/// NOTE this op currently does not support broadcasting and so `value`'s shape
22200+
/// must be exactly the shape produced by the slice of `input`.
2201622201
@inlinable @inline(__always)
2201722202
public static func tensorStridedSliceUpdate<T: TensorFlowScalar, Index: BinaryInteger & TensorFlowScalar>(
2201822203
_ input: Tensor<T>,
@@ -22025,8 +22210,8 @@ public static func tensorStridedSliceUpdate<T: TensorFlowScalar, Index: BinaryIn
2202522210
ellipsisMask: Int64 = 0,
2202622211
newAxisMask: Int64 = 0,
2202722212
shrinkAxisMask: Int64 = 0
22028-
) {
22029-
return #tfop("TensorStridedSliceUpdate",
22213+
) -> Tensor<T> {
22214+
let ret: TensorHandle<T> = #tfop("TensorStridedSliceUpdate",
2203022215
input,
2203122216
begin,
2203222217
end,
@@ -22039,6 +22224,7 @@ public static func tensorStridedSliceUpdate<T: TensorFlowScalar, Index: BinaryIn
2203922224
ellipsis_mask: ellipsisMask,
2204022225
new_axis_mask: newAxisMask,
2204122226
shrink_axis_mask: shrinkAxisMask)
22227+
return Tensor(handle: ret)
2204222228
}
2204322229

2204422230
/// Outputs a `Summary` protocol buffer with a tensor.

0 commit comments

Comments
 (0)