
Added tf dtype generics support in eager mode. #19588


Merged (3 commits) on Sep 28, 2018

Changes from 2 commits
23 changes: 14 additions & 9 deletions include/swift/Runtime/RuntimeFunctions.def
@@ -1362,6 +1362,11 @@ FUNCTION(TFC_CreateScalarFloatTensor, swift_tfc_CreateScalarFloatTensor, C_CC,
ARGS(Int32Ty),
ATTRS(NoUnwind))

FUNCTION(TFC_CreateScalarIntTensor, swift_tfc_CreateScalarIntTensor, C_CC,
RETURNS(Int8PtrTy),
ARGS(Int64Ty, Int32Ty, Int8PtrTy),
ATTRS(NoUnwind))

FUNCTION(TFE_Execute, swift_tfc_TFE_Execute, C_CC,
RETURNS(),
ARGS(Int8PtrTy, Int8PtrPtrTy, Int32PtrTy, Int8PtrTy),
@@ -1373,15 +1378,15 @@ FUNCTION(TFC_GetGlobalEagerContext, _swift_tfc_GetGlobalEagerContext, C_CC,
ATTRS(NoUnwind))

// TODO: enable these decls once we have AnyTensorHandle.
// FUNCTION(TFC_ExtractFloatCTensorHandle, _swift_tfc_ExtractFloatCTensorHandle, C_CC,
// RETURNS(Int8PtrTy),
// ARGS(Int8PtrTy),
// ATTRS(NoUnwind))

// FUNCTION(TFC_CreateFloatTensorHandleFromCTensorHandle, _swift_tfc_CreateFloatTensorHandleFromCTensorHandle, C_CC,
// RETURNS(Int8PtrTy),
// ARGS(Int8PtrTy),
// ATTRS(NoUnwind))
FUNCTION(TFC_GetCTensorHandleFromSwift, _swift_tfc_GetCTensorHandleFromSwift, C_CC,
RETURNS(Int8PtrTy),
ARGS(Int8PtrTy),
ATTRS(NoUnwind))

FUNCTION(TFC_CreateTensorHandleFromC, _swift_tfc_CreateTensorHandleFromC, C_CC,
RETURNS(Int8PtrTy),
ARGS(Int8PtrTy),
ATTRS(NoUnwind))

FUNCTION(TFC_CheckOk, _swift_tfc_CheckOk, C_CC,
RETURNS(),
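
For context, each FUNCTION entry here is what makes the IGM.get...Fn() accessor used in IRGenSIL.cpp below available. In the new entry, Int64Ty carries the scalar payload, Int32Ty the TF_DataType, and Int8PtrTy stands in for the opaque pointers. A sketch of the C-level signature this describes, matching the header added later in this PR:

// Sketch only: the C view of the .def entry above.
//   RETURNS(Int8PtrTy)                 -> void*  (the created TF_Tensor*)
//   ARGS(Int64Ty, Int32Ty, Int8PtrTy)  -> int64_t val, int32_t dtype, TF_Status *status
void *swift_tfc_CreateScalarIntTensor(int64_t val, int32_t dtype, TF_Status *status);
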
47 changes: 31 additions & 16 deletions lib/IRGen/IRGenSIL.cpp
@@ -1968,8 +1968,6 @@ void IRGenSILFunction::visitGraphOperationInst(GraphOperationInst *i) {
// 2. Run the graph_op
// 3. Set the output tensor handles via setLoweredExplosion()

auto &silModule = CurSILFn->getModule();

auto *TFNewStatusFn = IGM.getTF_NewStatusFn();
auto status = Builder.CreateCall(TFNewStatusFn, {});

@@ -2015,8 +2013,7 @@ void IRGenSILFunction::visitGraphOperationInst(GraphOperationInst *i) {

auto tensorHandleValue =
getLoweredSingletonExplosion(tensorHandleSilValue);
llvm::Function *extractHandleFn =
findFunction("_swift_tfc_ExtractFloatCTensorHandle", silModule);
auto *extractHandleFn = IGM.getTFC_GetCTensorHandleFromSwiftFn();
auto cHandle = Builder.CreateCall(extractHandleFn, {tensorHandleValue});

// Add an op input as in:
@@ -2091,18 +2088,37 @@ void IRGenSILFunction::visitGraphOperationInst(GraphOperationInst *i) {
i->dump();
llvm_unreachable("dtype attr must have been processed!");
}
if (attr.value.getKind() != SymbolicValue::Float) {

llvm::Value *tensor = nullptr;
switch (attr.value.getKind()) {
case SymbolicValue::Float: {
auto apfloat = attr.value.getFloatValue();
// CreateScalarFloatTensor() takes an int instead of float, as runtime
// functions that take/return float values do not yet exist.
auto constVal =
llvm::ConstantInt::get(IGM.Int32Ty, apfloat.convertToFloat());
LLVM_DEBUG(llvm::dbgs() << "The const value is " << *constVal << ".\n");

auto *createTensorFn = IGM.getTFC_CreateScalarFloatTensorFn();
tensor = Builder.CreateCall(createTensorFn, {constVal});
break;
}
case SymbolicValue::Integer: {
auto apint = attr.value.getIntegerValue();
auto constVal = llvm::ConstantInt::get(
IGM.Int64Ty, apint.sextOrTrunc(64).getLimitedValue());
LLVM_DEBUG(llvm::dbgs() << "The const value is " << *constVal << ".\n");

auto *createTensorFn = IGM.getTFC_CreateScalarIntTensorFn();
tensor = Builder.CreateCall(
createTensorFn,
{constVal, llvm::ConstantInt::get(IGM.Int32Ty, dtypeAttr), status});
checkOk(status);
break;
}
default:
llvm_unreachable("TODO: support other dtypes for tensor attr.");
}
auto apfloat = attr.value.getFloatValue();
// CreateScalarFloatTensor() takes an int instead of float, as runtime
// functions that take/return float values do not yet exist.
auto constVal =
llvm::ConstantInt::get(IGM.Int32Ty, apfloat.convertToFloat());
LLVM_DEBUG(llvm::dbgs() << "The const value is " << *constVal << ".\n");

auto *createTensorFn = IGM.getTFC_CreateScalarFloatTensorFn();
auto tensor = Builder.CreateCall(createTensorFn, {constVal});

// Set up the tensor-typed value attr as in:
// TFE_OpSetAttrTensor(op, "value", tensor, status);
@@ -2167,8 +2183,7 @@ void IRGenSILFunction::visitGraphOperationInst(GraphOperationInst *i) {
<< ".\n");

// Wrap `cTensorHandle` into a TensorHandle<T> object.
llvm::Function *createHandleFn = findFunction(
"_swift_tfc_CreateFloatTensorHandleFromCTensorHandle", silModule);
auto *createHandleFn = IGM.getTFC_CreateTensorHandleFromCFn();
auto tensorHandle = Builder.CreateCall(createHandleFn, {cTensorHandle});

LLVM_DEBUG(
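
To make the lowering concrete: for a graph_op whose tensor-valued attribute is an integer scalar (say 1) with dtype Int32, the code emitted by the switch above amounts to roughly the following runtime call sequence. This is a sketch in terms of the C APIs referenced in this PR; creation of the TFE_Op `op` and the surrounding input/attribute setup are elided.

// Rough runtime-level equivalent of the IR emitted for an Int32 scalar attr (sketch).
TF_Status *status = TF_NewStatus();
void *tensor = swift_tfc_CreateScalarIntTensor(/*val=*/1, /*dtype=*/TF_INT32, status);
// The emitted checkOk(status) call traps if the status is not TF_OK.
TFE_OpSetAttrTensor(op, "value", static_cast<TF_Tensor *>(tensor), status);
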
40 changes: 40 additions & 0 deletions stdlib/public/CTensorFlow/ctensorflow_init.cpp
@@ -43,6 +43,46 @@ void *swift_tfc_CreateScalarFloatTensor(int32_t val) {
return tensor;
}

void *swift_tfc_CreateScalarIntTensor(int64_t val, int32_t dtype,

Contributor:
I'm not sure why ints and floats have to be in different code paths. Can you explain a bit more, and also add some doc comments at the declaration site?

Author:
The float version should take a float or double value as input, instead of an int64_t val, so there should be different C APIs for the two.

BTW, currently we only have integer-related data types like IGM.Int32Ty, but there is no IGM.FloatTy. That can be added, though.
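
(For illustration only, not part of this change: once a float-capable IRGen type exists, a separate float/double entry point along these lines could be added. The name below is hypothetical.)

// Hypothetical sketch of a float/double counterpart; not in this PR.
void *swift_tfc_CreateScalarFloatTensorV2(double val, int32_t dtype, TF_Status *status);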

Contributor:
It would be useful to add doc comments, because these arguments' types do not directly reflect the underlying data type, and require the caller and the function implementation to cast things back and forth.

Author:
Done.

TF_Status *status) {
auto tfDtype = (TF_DataType)dtype;
auto *tensor =
TF_AllocateTensor(tfDtype, /*shape.data()*/ nullptr, /*shape.size()*/ 0,
TF_DataTypeSize(tfDtype) * 1);
auto *ptr = reinterpret_cast<char *>(TF_TensorData(tensor));

switch (tfDtype) {
case TF_INT8:
*reinterpret_cast<int8_t *>(ptr) = static_cast<int8_t>(val);
break;
case TF_UINT8:
*reinterpret_cast<uint8_t *>(ptr) = static_cast<uint8_t>(val);
break;
case TF_INT16:
*reinterpret_cast<int16_t *>(ptr) = static_cast<int16_t>(val);
break;
case TF_UINT16:
*reinterpret_cast<uint16_t *>(ptr) = static_cast<uint16_t>(val);
break;
case TF_INT32:
*reinterpret_cast<int32_t *>(ptr) = static_cast<int32_t>(val);
break;
case TF_UINT32:
*reinterpret_cast<uint32_t *>(ptr) = static_cast<uint32_t>(val);
break;
case TF_INT64:
*reinterpret_cast<int64_t *>(ptr) = static_cast<int64_t>(val);
break;
case TF_UINT64:
*reinterpret_cast<uint64_t *>(ptr) = static_cast<uint64_t>(val);
break;
default:
TF_MakeInternalErrorStatus(status, "Unsupported data type");
return nullptr;
}
return tensor;
}

void swift_tfc_TFE_Execute(void *op, void **retvals, int32_t *num_retvals,
void *status) {
int int_num_retvals = *num_retvals;
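
A minimal usage sketch of the new entry point from the C++ side, for illustration only; it assumes the TensorFlow C API headers and the declaration from ctensorflow_init.h (shown below) are available.

#include <cstdio>
#include "tensorflow/c/c_api.h"
#include "ctensorflow_init.h"

static void demoCreateScalarIntTensor() {
  TF_Status *s = TF_NewStatus();
  // Create a 0-d TF_INT32 tensor holding 42; the value is always passed as int64_t.
  void *t = swift_tfc_CreateScalarIntTensor(42, TF_INT32, s);
  if (TF_GetCode(s) != TF_OK || t == nullptr) {
    // Unsupported dtypes (e.g. TF_FLOAT) reach the default case above and set the status.
    std::fprintf(stderr, "CreateScalarIntTensor failed: %s\n", TF_Message(s));
  } else {
    TF_DeleteTensor(static_cast<TF_Tensor *>(t));  // caller owns the returned tensor
  }
  TF_DeleteStatus(s);
}
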
4 changes: 4 additions & 0 deletions stdlib/public/CTensorFlow/ctensorflow_init.h
@@ -24,6 +24,10 @@ extern void InitTensorFlowRuntime(unsigned char enable_debug_logging,
// TODO: Generalize to create tensors from other shapes and dtypes.
void *swift_tfc_CreateScalarFloatTensor(int32_t val);

struct TF_Status;
void *swift_tfc_CreateScalarIntTensor(int64_t val, int32_t dtype,
TF_Status *status);

void swift_tfc_TFE_Execute(void *op, void **retvals, int32_t *num_retvals,
void *status);

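The doc comment requested in the review thread is not visible in this two-commit view of the diff; a sketch of what it might say at the declaration site (wording is illustrative, not the author's):

// Illustrative doc comment for the declaration above; not the PR's actual text.
//
// Creates a 0-d tensor of the given integer `dtype` (a TF_DataType value such as
// TF_INT8 through TF_UINT64). `val` is always passed as int64_t and is cast to the
// target dtype inside the implementation, so callers widen their scalar first.
// Returns an owned TF_Tensor* as a void*, or nullptr with `status` set to an
// internal error if the dtype is unsupported.
void *swift_tfc_CreateScalarIntTensor(int64_t val, int32_t dtype,
                                      TF_Status *status);
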
24 changes: 3 additions & 21 deletions stdlib/public/TensorFlow/CompilerRuntime.swift
@@ -1156,19 +1156,9 @@ func _TFCGetGlobalEagerContext() -> CTFEContext {
return _ExecutionContext.global.eagerContext
}

// TODO: replace these functions with generic ones that do not hard-code Float.

// TODO: use @_cdecl instead, once we make the input/output data types C-compatible.
// Current compiler error if we use @_cdecl: method cannot be marked @_cdecl
// because the type of the parameter cannot be represented in Objective-C
@inlinable
@_silgen_name("_swift_tfc_ExtractFloatCTensorHandle")
public func _TFCExtractCTensorHandle(
_ handle: TensorHandle<Float>
) -> CTensorHandle {
return handle.cTensorHandle
}

// Some of the functions are marked with @_silgen_name instead of @_cdecl,
// because their input/output data types are not C-compatible
// (e.g. AnyTensorHandle).
@inlinable
@_silgen_name("_swift_tfc_GetCTensorHandleFromSwift")
public func _TFCGetCTensorHandleFromSwift(
@@ -1200,14 +1190,6 @@ public func _TFCCreateTensorHandleFromC(
}
}

@inlinable
@_silgen_name("_swift_tfc_CreateFloatTensorHandleFromCTensorHandle")
public func _TFCCreateTensorHandleFromCTensorHandle(
_ ownedCHandle: CTensorHandle
) -> TensorHandle<Float> {
return TensorHandle<Float>(owning: ownedCHandle)
}

@usableFromInline
@_cdecl("_swift_tfc_CheckOk")
func _TFCCheckOk(_ s: CTFStatus) {
20 changes: 19 additions & 1 deletion test/TensorFlowRuntime/dynamic_compilation.swift
@@ -23,7 +23,7 @@ DynamicCompilationTests.testCPUOrGPU("Const") {
expectNearlyEqualWithScalarTensor(1.0, Tensor<Float>(handle: x))
}

DynamicCompilationTests.testCPUOrGPU("Add") {
DynamicCompilationTests.testCPUOrGPU("AddFloat") {
_RuntimeConfig.printsDebugLog = true
let x = Tensor<Float>(1.0)
let y = Tensor<Float>(2.0)
@@ -32,6 +32,24 @@ DynamicCompilationTests.testCPUOrGPU("Add") {
expectNearlyEqualWithScalarTensor(3.0, z)
}

DynamicCompilationTests.testCPUOrGPU("AddInt64") {
_RuntimeConfig.printsDebugLog = true
let x = Tensor<Int64>(1)
let y = Tensor<Int64>(2)
let z = x + y
_hostOp(z)
expectEqualWithScalarTensor(3, z)
}

DynamicCompilationTests.testCPUOrGPU("AddInt32") {
_RuntimeConfig.printsDebugLog = true
let x = Tensor<Int32>(1)
let y = Tensor<Int32>(2)
let z = x + y
_hostOp(z)
expectEqualWithScalarTensor(3, z)
}

#endif // !CUDA

runAllTests()
2 changes: 1 addition & 1 deletion utils/update_checkout/update-checkout-config.json
@@ -304,7 +304,7 @@
"swift-integration-tests": "swift-DEVELOPMENT-SNAPSHOT-2018-08-06-a",
"swift-xcode-playground-support": "swift-DEVELOPMENT-SNAPSHOT-2018-08-06-a",
"ninja": "253e94c1fa511704baeb61cf69995bbf09ba435e",
"tensorflow": "b5594e6121e902f8dd2d5127653a1ec5f97daccd",
"tensorflow": "bdab0b3c111bbe1c9656fa2228f1a4d28df5a7bf",
"tensorflow-swift-bindings": "e1983bdac0c64ba02f8c5c850f7c82436b5622e5"
}
}