
Commit 036d7e0

Update
[ghstack-poisoned]
1 parent a971a5a commit 036d7e0

24 files changed: +195, -203 lines
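
The change is mechanical: each ET_LOG_MSG_AND_RETURN_IF_FALSE call shown in these kernel argument validators becomes an ET_CHECK_OR_RETURN_FALSE call, with the condition and the printf-style message left unchanged. A minimal before/after sketch of the call-site pattern, using a hypothetical validate_args helper (the macro definitions themselves live in the ExecuTorch runtime headers and are not part of this diff):

 // Hypothetical validator, shown only to illustrate the rename.
 bool validate_args(const Tensor& query) {
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(query.dim() == 4, "query must be a 4D tensor");
+  ET_CHECK_OR_RETURN_FALSE(query.dim() == 4, "query must be a 4D tensor");
   return true;
 }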

extension/llm/custom_ops/op_sdpa.cpp

Lines changed: 20 additions & 22 deletions

@@ -594,46 +594,46 @@ bool validate_flash_attention_args(
     const Tensor& key,
     const Tensor& value,
     const optional<Tensor>& attn_mask) {
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(query.dim() == 4, "query must be a 4D tensor");
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(key.dim() == 4, "key must be a 4D tensor");
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(value.dim() == 4, "value must be a 4D tensor");
+  ET_CHECK_OR_RETURN_FALSE(query.dim() == 4, "query must be a 4D tensor");
+  ET_CHECK_OR_RETURN_FALSE(key.dim() == 4, "key must be a 4D tensor");
+  ET_CHECK_OR_RETURN_FALSE(value.dim() == 4, "value must be a 4D tensor");

   // Sizes
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       (query.size(3) == value.size(3)) && (key.size(3) == value.size(3)),
       "scaled_dot_product_attention_flash_attention: Q/K/V should have the same head size");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       (query.scalar_type() == ScalarType::Float), "Query must be Float type");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       (query.scalar_type() == key.scalar_type()) &&
           (query.scalar_type() == value.scalar_type()),
       "Key and Value must have the same data type as Query");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       !attn_mask.has_value() || attn_mask.value().dim() == 2,
       "Attention mask must be a 2D tensor");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       !attn_mask.has_value() ||
           attn_mask.value().scalar_type() == query.scalar_type(),
       "Attention mask must be a 2D tensor");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       is_contiguous_dim_order(query.dim_order().data(), query.dim()),
       "key cache must be in contiguous dim order");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       is_contiguous_dim_order(key.dim_order().data(), key.dim()),
       "value cache must be in contiguous dim order");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       is_contiguous_dim_order(value.dim_order().data(), value.dim()),
       "value cache must be in contiguous dim order");

   if (attn_mask.has_value()) {
-    ET_LOG_MSG_AND_RETURN_IF_FALSE(
+    ET_CHECK_OR_RETURN_FALSE(
         is_contiguous_dim_order(
             attn_mask.value().dim_order().data(), attn_mask.value().dim()),
         "value cache must be in contiguous dim order");
@@ -647,21 +647,19 @@ bool validate_cache_params(
     const Tensor& v_cache,
     int64_t start_pos,
     int64_t seq_length) {
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      k_cache.dim() == 4, "kcache must be a 4D tensor");
+  ET_CHECK_OR_RETURN_FALSE(k_cache.dim() == 4, "kcache must be a 4D tensor");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      v_cache.dim() == 4, "v_cache must be a 4D tensor");
+  ET_CHECK_OR_RETURN_FALSE(v_cache.dim() == 4, "v_cache must be a 4D tensor");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       start_pos < k_cache.size(1),
       "start_pos must be less than key cache at dim 1");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       start_pos < v_cache.size(1),
       "start_pos must be less than value cache at dim 1");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       (start_pos + seq_length) <= k_cache.size(1),
       "start_post + seq_length must be less than max seq length supported by key cache."
       "start pos: %" PRId64 ", seq_length: %" PRId64
@@ -671,7 +669,7 @@ bool validate_cache_params(
       seq_length,
       k_cache.size(1));

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       (start_pos + seq_length) <= v_cache.size(1),
       "start_post + seq_length must be less than max seq length supported by key cache."
       "start pos: %" PRId64 ", seq_length: %" PRId64
@@ -682,11 +680,11 @@ bool validate_cache_params(
       v_cache.size(1));

   // Make sure they are in contiguous dim order
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       is_contiguous_dim_order(k_cache.dim_order().data(), k_cache.dim()),
       "key cache must be in contiguous dim order");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       is_contiguous_dim_order(v_cache.dim_order().data(), v_cache.dim()),
       "value cache must be in contiguous dim order");

extension/llm/custom_ops/op_update_cache.cpp

Lines changed: 6 additions & 6 deletions

@@ -25,17 +25,17 @@ bool validate_cache_params(
     const Tensor& quantized_cache,
     int64_t start_pos,
     int64_t seq_length) {
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       quantized_cache.dim() == 4, "quantized cache must be a 4D tensor");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       quantized_value.dim() == 4, "quantized_value must be a 4D tensor");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       start_pos < quantized_cache.size(1),
       "start_pos must be less than cache size at dim 1");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       (start_pos + seq_length) <= quantized_cache.size(1),
       "start_post + seq_length must be less than max seq length supported by cache."
       "start pos: %" PRId64 ", seq_length: %" PRId64
@@ -46,12 +46,12 @@ bool validate_cache_params(
       quantized_cache.size(1));

   // Make sure they are in contiguous dim order
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       is_contiguous_dim_order(
           quantized_cache.dim_order().data(), quantized_cache.dim()),
       "quantized cache must be in contiguous dim order");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       is_contiguous_dim_order(
           quantized_value.dim_order().data(), quantized_value.dim()),
       "quantized value must be in contiguous dim order");

kernels/optimized/cpu/op_bmm.cpp

Lines changed: 8 additions & 9 deletions

@@ -31,39 +31,38 @@ namespace {
 // Verifies that the parameters are valid.
 bool check_bmm_out_args(const Tensor& self, const Tensor& mat2, Tensor& out) {
   // Ensure dimensions is 3 for all input and out
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       self.dim() == mat2.dim(),
       "self.dim() %zd != mat2.dim() %zd",
       self.dim(),
       mat2.dim());
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       self.dim() == out.dim(),
       "self.dim() %zd != out.dim() %zd",
       self.dim(),
       out.dim());
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
-      self.dim() == 3, "self.dim() %zd != 3", self.dim());
+  ET_CHECK_OR_RETURN_FALSE(self.dim() == 3, "self.dim() %zd != 3", self.dim());
   // Ensure batch larger than or equals to 0
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       self.size(0) >= 0, "self.size(0) %zd < 0", self.size(0));
   // Ensure batches are the same
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       self.size(0) == mat2.size(0),
       "self.size(0) %zd != mat2.size(0) %zd",
       self.size(0),
       mat2.size(0));
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       self.size(0) == out.size(0),
       "self.size(0) %zd != out.size(0) %zd",
       self.size(0),
       out.size(0));
   // Ensure the out size is compatible with input tensors
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       mat2.size(2) == out.size(2),
       "mat2.size(2) %zd != out.size(2) %zd",
       mat2.size(2),
       out.size(2));
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       self.size(1) == out.size(1),
       "self.size(1) %zd != out.size(1) %zd",
       self.size(1),

kernels/portable/cpu/op_convolution_backward.cpp

Lines changed: 4 additions & 4 deletions

@@ -38,9 +38,9 @@ bool check_convolution_backward_args(
     Tensor& grad_input,
     Tensor& grad_weight,
     Tensor& grad_bias) {
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       transposed == false, "Transposed Convolution Backward not supported yet");
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       weight.dim() == 4, "Only 2D Convolution Backward supported for now");

   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(weight, input));
@@ -58,7 +58,7 @@ bool check_convolution_backward_args(
     ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(grad_bias, input));
   }

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       check_convolution_args(
           input,
           weight,
@@ -89,7 +89,7 @@ bool check_convolution_backward_args(
   ET_LOG_AND_RETURN_IF_FALSE(
       output_size_is_valid({output_sizes, output_ndim}, input.dim() - 2));

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       grad_output.dim() == input.dim(),
       "grad_output should have same number of dimensions as input");

kernels/portable/cpu/op_linear_scratch_example.cpp

Lines changed: 3 additions & 3 deletions

@@ -40,13 +40,13 @@ bool check_linear_scratch_example_args(
     const optional<Tensor>& bias,
     Tensor& out,
     Tensor& scratch) {
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       input.size(1) == weight.size(1), "Unexpected weight size 1");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       scratch.size(0) == input.size(0), "Unexpected scratch size 0");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       scratch.size(1) == weight.size(0), "Unexpected scratch size 1");

   return true;

kernels/portable/cpu/op_repeat.cpp

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ bool calculate_output_size(
     Tensor::SizesType* out_sizes_ptr) {
   ET_LOG_AND_RETURN_IF_FALSE(repeats.size() < kTensorDimensionLimit);

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       repeats.size() >= self_sizes.size(),
       "Repeats vector size is %zu must be >= self_sizes %zu.",
       repeats.size(),

kernels/portable/cpu/op_repeat_interleave.cpp

Lines changed: 5 additions & 5 deletions

@@ -18,26 +18,26 @@ bool check_repeat_interleave_args(
     int64_t output_size_value,
     int64_t repeats_sum,
     Tensor& out) {
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       repeats.scalar_type() == ScalarType::Int ||
           repeats.scalar_type() == ScalarType::Long,
       "repeats must be int or long");
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(repeats.dim() == 1, "repeats must be 1D");
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(repeats.dim() == 1, "repeats must be 1D");
+  ET_CHECK_OR_RETURN_FALSE(
       output_size_value == repeats_sum,
       "output_size, if provided, must be equal to repeats.sum()");
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(repeats, out));

   if (repeats.scalar_type() == ScalarType::Long) {
     const int64_t* const repeats_data = repeats.const_data_ptr<int64_t>();
     for (size_t i = 0; i < repeats.numel(); ++i) {
-      ET_LOG_MSG_AND_RETURN_IF_FALSE(
+      ET_CHECK_OR_RETURN_FALSE(
           repeats_data[i] >= 0, "repeats cannot be negative");
     }
   } else {
     const int32_t* const repeats_data = repeats.const_data_ptr<int32_t>();
     for (size_t i = 0; i < repeats.numel(); ++i) {
-      ET_LOG_MSG_AND_RETURN_IF_FALSE(
+      ET_CHECK_OR_RETURN_FALSE(
           repeats_data[i] >= 0, "repeats cannot be negative");
     }
   }

kernels/portable/cpu/op_topk.cpp

Lines changed: 1 addition & 1 deletion

@@ -28,7 +28,7 @@ bool check_topk_args(
   if (dim < 0) {
     dim += nonzero_dim(in);
   }
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       k >= 0 && k <= nonempty_size(in, dim), "selected index k out of range");
   return true;
 }

kernels/portable/cpu/util/activation_ops_util.cpp

Lines changed: 4 additions & 4 deletions

@@ -17,7 +17,7 @@ namespace executor {
 bool check_gelu_args(const Tensor& in, string_view approximate, Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(in.scalar_type() != ScalarType::Bool);
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       approximate == "tanh" || approximate == "none",
       "Invalid approximation format: %.*s for gelu",
       static_cast<int>(approximate.length()),
@@ -32,15 +32,15 @@ bool check_glu_args(const Tensor& in, int64_t dim, Tensor& out) {
   const size_t non_negative_dim = dim < 0 ? dim + in.dim() : dim;
   const size_t dim_size = in.size(non_negative_dim);

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       dim_size % 2 == 0,
       "Halving dimension must be even, but dimension %zd is size %zd",
       non_negative_dim,
       dim_size);

   ET_LOG_AND_RETURN_IF_FALSE(tensor_is_floating_type(out));
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_rank(in, out));
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       out.size(non_negative_dim) == dim_size / 2,
       "output tensor must have half the size of the input tensor along the specified dimension.");

@@ -73,7 +73,7 @@ bool check_log_softmax_args(
     int64_t dim,
     bool half_to_float,
     Tensor& out) {
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       !half_to_float, "half to float conversion is not supported on CPU");
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(tensor_has_dim(in, dim));

kernels/portable/cpu/util/advanced_index_util.cpp

Lines changed: 8 additions & 9 deletions

@@ -24,7 +24,7 @@ bool check_indices_dtypes(TensorOptList indices) {
     if (indices[i].has_value()) {
       const Tensor& index = indices[i].value();
       ScalarType ix_type = index.scalar_type();
-      ET_LOG_MSG_AND_RETURN_IF_FALSE(
+      ET_CHECK_OR_RETURN_FALSE(
          ix_type == ScalarType::Long || ix_type == ScalarType::Int ||
              ix_type == ScalarType::Byte || ix_type == ScalarType::Bool,
          "Index tensors should be Long, Int, Byte or Bool");
@@ -47,7 +47,7 @@ bool check_mask_indices(const Tensor& in, TensorOptList indices) {
     if (indices[i].has_value()) {
       const Tensor& index = indices[i].value();
       if (is_mask_index(index)) {
-        ET_LOG_MSG_AND_RETURN_IF_FALSE(
+        ET_CHECK_OR_RETURN_FALSE(
            index.dim() > 0, "Zero-dimensional mask index not allowed");
        for (auto j = 0; j < index.dim(); j++) {
          if (index.size(j) != in.size(in_i + j)) {
@@ -156,7 +156,7 @@ int64_t query_integral_index(
 bool check_index_args(const Tensor& in, TensorOptList indices, Tensor& out) {
   ET_LOG_AND_RETURN_IF_FALSE(tensors_have_same_dtype(in, out));
   ET_LOG_AND_RETURN_IF_FALSE(check_indices_dtypes(indices));
-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       indices.size() <= in.dim(), "Indexing too many dimensions");
   ET_LOG_AND_RETURN_IF_FALSE(check_mask_indices(in, indices));
   return true;
@@ -197,8 +197,7 @@ bool get_indices_broadcast_shape(
       } else if (rev_ix_sizes[0] == 1) {
         rev_ix_sizes[0] = len;
       } else if (len != 1 && rev_ix_sizes[0] != len) {
-        ET_LOG_MSG_AND_RETURN_IF_FALSE(
-            false, "Broadcast of mask index failed.");
+        ET_CHECK_OR_RETURN_FALSE(false, "Broadcast of mask index failed.");
       }
     } else {
       for (size_t j = 0; j < index.dim(); j++) {
@@ -209,7 +208,7 @@ bool get_indices_broadcast_shape(
         } else if (rev_ix_sizes[j] == 1) {
           rev_ix_sizes[j] = rev_j_size;
         } else if (rev_j_size != 1 && rev_ix_sizes[j] != rev_j_size) {
-          ET_LOG_MSG_AND_RETURN_IF_FALSE(false, "Broadcast of index failed.");
+          ET_CHECK_OR_RETURN_FALSE(false, "Broadcast of index failed.");
         }
       }
     }
@@ -290,11 +289,11 @@ bool get_index_out_target_size(
   size_t num_null_indices = get_num_null_indices(indices);
   size_t num_indexed_dims = get_num_indexed_dims(indices);

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       num_null_indices + num_indexed_dims <= in.dim(),
       "Indexing too many dimensions");

-  ET_LOG_MSG_AND_RETURN_IF_FALSE(
+  ET_CHECK_OR_RETURN_FALSE(
       in.dim() + broadcast_ndim - num_indexed_dims <= kTensorDimensionLimit,
       "Out tensor would exceed number of allowed dimensions");

@@ -441,7 +440,7 @@ bool get_in_coord(
     if (index_val < 0) {
       index_val += in.size(i);
     }
-    ET_LOG_MSG_AND_RETURN_IF_FALSE(
+    ET_CHECK_OR_RETURN_FALSE(
        index_val >= 0 && index_val < in.size(i),
        "Index %" PRId64
        " is out of bounds for input dimension %zd with size %zd.",
