[mlgo] Support composite AOT-ed models #96276
Changes from all commits: 35e6530, 8551dbf, b9c31b5, 377a825, d1648b6, 929072a
@@ -7,10 +7,12 @@
 //===----------------------------------------------------------------------===//

 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/Analysis/InteractiveModelRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
 #include "llvm/Analysis/ReleaseModeModelRunner.h"
 #include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/FileUtilities.h"
 #include "llvm/Support/JSON.h"
@@ -28,28 +30,31 @@ namespace llvm {
 // This is a mock of the kind of AOT-generated model evaluator. It has 2 tensors
 // of shape {1}, and 'evaluation' adds them.
 // The interface is the one expected by ReleaseModelRunner.
-class MockAOTModel final {
+class MockAOTModelBase {
+protected:
   int64_t A = 0;
   int64_t B = 0;
   int64_t R = 0;

 public:
-  MockAOTModel() = default;
-  int LookupArgIndex(const std::string &Name) {
+  MockAOTModelBase() = default;
+  virtual ~MockAOTModelBase() = default;
+
+  virtual int LookupArgIndex(const std::string &Name) {
     if (Name == "prefix_a")
       return 0;
     if (Name == "prefix_b")
       return 1;
     return -1;
   }
   int LookupResultIndex(const std::string &) { return 0; }
-  void Run() { R = A + B; }
-  void *result_data(int RIndex) {
+  virtual void Run() = 0;
+  virtual void *result_data(int RIndex) {
     if (RIndex == 0)
       return &R;
     return nullptr;
   }
-  void *arg_data(int Index) {
+  virtual void *arg_data(int Index) {
     switch (Index) {
     case 0:
       return &A;
@@ -60,6 +65,64 @@ class MockAOTModel final {
     }
   }
 };

+class AdditionAOTModel final : public MockAOTModelBase {
+public:
+  AdditionAOTModel() = default;
+  void Run() override { R = A + B; }
+};
+
+class DiffAOTModel final : public MockAOTModelBase {
+public:
+  DiffAOTModel() = default;
+  void Run() override { R = A - B; }
+};
+
+static const char *M1Selector = "the model that subtracts";
+static const char *M2Selector = "the model that adds";
+
+static MD5::MD5Result Hash1 = MD5::hash(arrayRefFromStringRef(M1Selector));
+static MD5::MD5Result Hash2 = MD5::hash(arrayRefFromStringRef(M2Selector));
+class ComposedAOTModel final {
+  DiffAOTModel M1;
+  AdditionAOTModel M2;
+  uint64_t Selector[2] = {0};
+
+  bool isHashSameAsSelector(const std::pair<uint64_t, uint64_t> &Words) const {
+    return Selector[0] == Words.first && Selector[1] == Words.second;
+  }
+  MockAOTModelBase *getModel() {
+    if (isHashSameAsSelector(Hash1.words()))
+      return &M1;
+    if (isHashSameAsSelector(Hash2.words()))
+      return &M2;
+    llvm_unreachable("Should be one of the two");
+  }
+
+public:
+  ComposedAOTModel() = default;
+  int LookupArgIndex(const std::string &Name) {
+    if (Name == "prefix__model_selector")
+      return 2;
+    return getModel()->LookupArgIndex(Name);
+  }
+  int LookupResultIndex(const std::string &Name) {
+    return getModel()->LookupResultIndex(Name);
+  }
+  void *arg_data(int Index) {
+    if (Index == 2)
+      return Selector;
+    return getModel()->arg_data(Index);
+  }
+  void *result_data(int RIndex) { return getModel()->result_data(RIndex); }
+  void Run() { getModel()->Run(); }
+};
+
+static EmbeddedModelRunnerOptions makeOptions() {
+  EmbeddedModelRunnerOptions Opts;
+  Opts.setFeedPrefix("prefix_");
+  return Opts;
+}
 } // namespace llvm

 TEST(NoInferenceModelRunner, AccessTensors) {
@@ -86,8 +149,8 @@ TEST(ReleaseModeRunner, NormalUse) {
   LLVMContext Ctx;
   std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
                                  TensorSpec::createSpec<int64_t>("b", {1})};
-  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
-      Ctx, Inputs, "", "prefix_");
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<AdditionAOTModel>>(
+      Ctx, Inputs, "", makeOptions());
   *Evaluator->getTensor<int64_t>(0) = 1;
   *Evaluator->getTensor<int64_t>(1) = 2;
   EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
@@ -100,8 +163,8 @@ TEST(ReleaseModeRunner, ExtraFeatures) {
   std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
                                  TensorSpec::createSpec<int64_t>("b", {1}),
                                  TensorSpec::createSpec<int64_t>("c", {1})};
-  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
-      Ctx, Inputs, "", "prefix_");
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<AdditionAOTModel>>(
+      Ctx, Inputs, "", makeOptions());
   *Evaluator->getTensor<int64_t>(0) = 1;
   *Evaluator->getTensor<int64_t>(1) = 2;
   *Evaluator->getTensor<int64_t>(2) = -3;
@@ -118,8 +181,8 @@ TEST(ReleaseModeRunner, ExtraFeaturesOutOfOrder) {
       TensorSpec::createSpec<int64_t>("c", {1}),
       TensorSpec::createSpec<int64_t>("b", {1}),
   };
-  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
-      Ctx, Inputs, "", "prefix_");
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<AdditionAOTModel>>(
+      Ctx, Inputs, "", makeOptions());
   *Evaluator->getTensor<int64_t>(0) = 1; // a
   *Evaluator->getTensor<int64_t>(1) = 2; // c
   *Evaluator->getTensor<int64_t>(2) = -3; // b
@@ -129,6 +192,56 @@ TEST(ReleaseModeRunner, ExtraFeaturesOutOfOrder) {
   EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
 }

+// We expect an error to be reported early if the user tried to specify a model
+// selector, but the model in fact doesn't support that.
+TEST(ReleaseModelRunner, ModelSelectorNoInputFeaturePresent) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1})};
+  EXPECT_DEATH(std::make_unique<ReleaseModeModelRunner<AdditionAOTModel>>(
+                   Ctx, Inputs, "", makeOptions().setModelSelector(M2Selector)),
+               "A model selector was specified but the underlying model does "
+               "not expose a _model_selector input");
+}
+
+TEST(ReleaseModelRunner, ModelSelectorNoSelectorGiven) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1})};
+  EXPECT_DEATH(
+      std::make_unique<ReleaseModeModelRunner<ComposedAOTModel>>(
+          Ctx, Inputs, "", makeOptions()),
+      "A model selector was not specified but the underlying model requires "
+      "selecting one because it exposes a _model_selector input");
+}

[Review thread on the EXPECT_DEATH above]
Reviewer: @mtrofin I'm getting build warnings on MSVC due to these EXPECT_DEATH with make_unique calls - any way that you can refactor to avoid them, please?
mtrofin: We can exclude these for MSVC. Is it urgent? If not, I can take care of it next week if that's OK (holiday).
Reviewer: Not urgent. If it's OK with you, I'll just add (void), which seems to silence it?
mtrofin: By all means - thanks!
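A minimal sketch of the workaround mentioned in the thread above, under the assumption that the MSVC diagnostic is the "discarding return value of function with 'nodiscard' attribute" warning raised for the std::make_unique temporary inside EXPECT_DEATH. This is not part of the patch, just an illustration reusing the test's Ctx, Inputs, and makeOptions:

    // Hypothetical follow-up: cast the otherwise-discarded std::make_unique
    // result to void so MSVC treats the discard as intentional. The death-test
    // behavior is unchanged because the constructor still runs and aborts.
    EXPECT_DEATH(
        (void)std::make_unique<ReleaseModeModelRunner<ComposedAOTModel>>(
            Ctx, Inputs, "", makeOptions()),
        "A model selector was not specified but the underlying model requires "
        "selecting one because it exposes a _model_selector input");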
+
+// Test that we correctly set up the _model_selector tensor value. We are only
+// responsible for what happens if the user doesn't specify a value (but the
+// model supports the feature), or if the user specifies one, and we correctly
+// populate the tensor, and do so upfront (in case the model implementation
+// needs that for subsequent tensor buffer lookups).
+TEST(ReleaseModelRunner, ModelSelector) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1})};
+  // This explicitly asks for M1
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<ComposedAOTModel>>(
+      Ctx, Inputs, "", makeOptions().setModelSelector(M1Selector));
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), -1);
+
+  // Ask for M2
+  Evaluator = std::make_unique<ReleaseModeModelRunner<ComposedAOTModel>>(
+      Ctx, Inputs, "", makeOptions().setModelSelector(M2Selector));
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
+
+  // Asking for a model that's not supported isn't handled by our infra and we
+  // expect the model implementation to fail at a point.
+}
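The comment above the ModelSelector test says the runner populates the _model_selector tensor upfront. As a hedged sketch of the assumed convention (an illustration inferred from the mock's comparison logic, not code from ReleaseModeModelRunner or this patch), the selector string is MD5-hashed and the digest's two 64-bit words are written into that tensor, which is what lets ComposedAOTModel::getModel compare its Selector array against Hash1 and Hash2:

    // Hypothetical helper; the function name and placement are made up.
    #include "llvm/ADT/StringExtras.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/MD5.h"
    #include <cstdint>
    #include <utility>

    static void populateModelSelector(llvm::StringRef ModelSelector,
                                      uint64_t SelectorTensor[2]) {
      // Hash the selector string the same way the mock hashes M1Selector/M2Selector.
      const llvm::MD5::MD5Result Digest =
          llvm::MD5::hash(llvm::arrayRefFromStringRef(ModelSelector));
      const std::pair<uint64_t, uint64_t> Words = Digest.words();
      SelectorTensor[0] = Words.first;  // compared against HashN.words().first
      SelectorTensor[1] = Words.second; // compared against HashN.words().second
    }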
+
 #if defined(LLVM_ON_UNIX)
 TEST(InteractiveModelRunner, Evaluation) {
   LLVMContext Ctx;