Skip to content

Commit 77c3bd6

Browse files
committed
[executorch] Make operator<<() wrap long EValue lists
Pull Request resolved: #480 Wrap long lists at ten items per line. I considered adding another stream manipulator to let users modify this, but it seemed less immediately useful than the "edge_items" manipulator. Also considered wrapping at a particular column number, but it's way easier to count items than to count characters. The indenting on some of the nested elements like Tensors and Tensor lists isn't the best, but we can fix that in a future diff. ghstack-source-id: 201845600 @exported-using-ghexport Differential Revision: [D49607605](https://our.internmc.facebook.com/intern/diff/D49607605/)
1 parent ae989c1 commit 77c3bd6

File tree

2 files changed

+286
-3
lines changed

2 files changed

+286
-3
lines changed

extension/evalue_util/print_evalue.cpp

Lines changed: 46 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010

1111
#include <executorch/runtime/core/exec_aten/util/scalar_type_util.h>
1212

13+
#include <algorithm>
1314
#include <cmath>
1415
#include <iomanip>
1516
#include <ostream>
@@ -20,6 +21,9 @@ namespace executor {
2021

2122
namespace {
2223

24+
/// Number of list items on a line before wrapping.
25+
constexpr size_t kItemsPerLine = 10;
26+
2327
/// The default number of first/last list items to print before eliding.
2428
constexpr size_t kDefaultEdgeItems = 3;
2529

@@ -74,18 +78,57 @@ void print_scalar_list(
7478
if (print_length) {
7579
os << "(len=" << list.size() << ")";
7680
}
77-
// TODO(T159700776): Wrap at a specified number of columns.
81+
82+
// See if we'll be printing enough elements to cause us to wrap.
83+
bool wrapping = false;
84+
{
85+
long num_printed_items;
86+
if (elide_inner_items) {
87+
num_printed_items =
88+
std::min(static_cast<long>(list.size()), edge_items * 2);
89+
} else {
90+
num_printed_items = static_cast<long>(list.size());
91+
}
92+
wrapping = num_printed_items > kItemsPerLine;
93+
}
94+
7895
os << "[";
96+
size_t num_printed = 0;
7997
for (size_t i = 0; i < list.size(); ++i) {
98+
if (wrapping && num_printed % kItemsPerLine == 0) {
99+
// We've printed a full line, so wrap and begin a new one.
100+
os << "\n ";
101+
}
80102
os << EValue(exec_aten::Scalar(list[i]));
81-
if (i < list.size() - 1) {
103+
if (wrapping || i < list.size() - 1) {
104+
// No trailing comma when not wrapping. Always a trailing comma when
105+
// wrapping. This will leave a trailing space at the end of every wrapped
106+
// line, but it simplifies the logic here.
82107
os << ", ";
83108
}
109+
++num_printed;
84110
if (i + 1 == edge_items && i + edge_items + 1 < list.size()) {
85-
os << "..., ";
111+
if (wrapping) {
112+
os << "\n ...,";
113+
// Make the first line after the elision be the ragged line, letting us
114+
// always end on a full line.
115+
num_printed = kItemsPerLine - edge_items % kItemsPerLine;
116+
if (num_printed % kItemsPerLine != 0) {
117+
// If the line ended exactly when the elision happened, the next
118+
// iteration of the loop will add this line break.
119+
os << "\n ";
120+
}
121+
} else {
122+
// Non-wrapping elision.
123+
os << "..., ";
124+
}
86125
i = list.size() - edge_items - 1;
87126
}
88127
}
128+
if (wrapping) {
129+
// End the current line.
130+
os << "\n";
131+
}
89132
os << "]";
90133
}
91134

extension/evalue_util/test/print_evalue_test.cpp

Lines changed: 240 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -725,3 +725,243 @@ TEST(PrintEvalueTest, EdgeItemsAffectsTensorData) {
725725
// in full.
726726
"tensor(sizes=[5, 1, 1, 2], [1., ..., 10.1])\n");
727727
}
728+
729+
//
730+
// Long list wrapping.
731+
//
732+
// Use double as a proxy for testing the wrapping logic; the other scalar
733+
// types use the same underlying code, so they don't need to test this again.
734+
//
735+
736+
// Duplicates the internal value in the cpp file under test.
737+
constexpr size_t kItemsPerLine = 10;
738+
739+
TEST(PrintEvalueTest, ListWrapping) {
740+
// A large list of scalars.
741+
std::array<double, 100> list;
742+
for (int i = 0; i < list.size(); ++i) {
743+
list[i] = static_cast<double>(i);
744+
}
745+
746+
{
747+
// Should elide by default and print on a single line.
748+
EValue value(ArrayRef<double>(list.data(), list.size()));
749+
750+
std::ostringstream os;
751+
os << value;
752+
EXPECT_STREQ(os.str().c_str(), "(len=100)[0., 1., 2., ..., 97., 98., 99.]");
753+
}
754+
{
755+
// Exactly the per-line length should not wrap when increasing the number of
756+
// edge items to disable elision.
757+
EValue value(ArrayRef<double>(list.data(), kItemsPerLine));
758+
759+
std::ostringstream os;
760+
os << torch::executor::util::evalue_edge_items(1000) << value;
761+
EXPECT_STREQ(
762+
os.str().c_str(), "(len=10)[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]");
763+
}
764+
{
765+
// One more than the per-line length should wrap; no elision.
766+
EValue value(ArrayRef<double>(list.data(), kItemsPerLine + 1));
767+
768+
std::ostringstream os;
769+
os << torch::executor::util::evalue_edge_items(1000) << value;
770+
EXPECT_STREQ(
771+
os.str().c_str(),
772+
"(len=11)[\n"
773+
" 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
774+
" 10., \n"
775+
"]");
776+
}
777+
{
778+
// Exactly twice the per-line length, without elision.
779+
EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 2));
780+
781+
std::ostringstream os;
782+
os << torch::executor::util::evalue_edge_items(1000) << value;
783+
EXPECT_STREQ(
784+
os.str().c_str(),
785+
"(len=20)[\n"
786+
" 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
787+
" 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., \n"
788+
// Make sure there is no extra newline here.
789+
"]");
790+
}
791+
{
792+
// Exactly one whole line, with elision.
793+
EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 3));
794+
795+
std::ostringstream os;
796+
os << torch::executor::util::evalue_edge_items(kItemsPerLine) << value;
797+
EXPECT_STREQ(
798+
os.str().c_str(),
799+
"(len=30)[\n"
800+
" 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
801+
// Elision always on its own line when wrapping.
802+
" ...,\n"
803+
" 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
804+
"]");
805+
}
806+
{
807+
// Edge item count slightly larger than per-line length, with elision.
808+
EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 3));
809+
810+
std::ostringstream os;
811+
os << torch::executor::util::evalue_edge_items(kItemsPerLine + 1) << value;
812+
EXPECT_STREQ(
813+
os.str().c_str(),
814+
"(len=30)[\n"
815+
" 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
816+
" 10., \n"
817+
// Elision always on its own line when wrapping.
818+
" ...,\n"
819+
// The ragged line always comes just after the elision so that
820+
// we will end on a full line.
821+
" 19., \n"
822+
" 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
823+
"]");
824+
}
825+
{
826+
// Large wrapped, ragged, elided example.
827+
EValue value(ArrayRef<double>(list.data(), list.size()));
828+
829+
std::ostringstream os;
830+
os << torch::executor::util::evalue_edge_items(33) << value;
831+
EXPECT_STREQ(
832+
os.str().c_str(),
833+
"(len=100)[\n"
834+
" 0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
835+
" 10., 11., 12., 13., 14., 15., 16., 17., 18., 19., \n"
836+
" 20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
837+
" 30., 31., 32., \n"
838+
" ...,\n"
839+
" 67., 68., 69., \n"
840+
" 70., 71., 72., 73., 74., 75., 76., 77., 78., 79., \n"
841+
" 80., 81., 82., 83., 84., 85., 86., 87., 88., 89., \n"
842+
" 90., 91., 92., 93., 94., 95., 96., 97., 98., 99., \n"
843+
"]");
844+
}
845+
}
846+
847+
TEST(PrintEvalueTest, WrappedTensorData) {
848+
TensorFactory<ScalarType::Double> tf;
849+
// A tensor with a large number of elements.
850+
EValue value(tf.ones({10, 10}));
851+
852+
std::ostringstream os;
853+
os << torch::executor::util::evalue_edge_items(33) << value;
854+
EXPECT_STREQ(
855+
os.str().c_str(),
856+
"tensor(sizes=[10, 10], [\n"
857+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
858+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
859+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
860+
" 1., 1., 1., \n"
861+
" ...,\n"
862+
" 1., 1., 1., \n"
863+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
864+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
865+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
866+
"])");
867+
}
868+
869+
TEST(PrintEvalueTest, WrappedTensorSizes) {
870+
TensorFactory<ScalarType::Double> tf;
871+
872+
{
873+
// A tensor with enough dimensions that the sizes list is wrapped, but
874+
// the data is not.
875+
std::vector<int32_t> sizes(kItemsPerLine + 1, 1);
876+
sizes[0] = 5;
877+
EValue value(tf.ones(sizes));
878+
879+
std::ostringstream os;
880+
os << value;
881+
EXPECT_STREQ(
882+
os.str().c_str(),
883+
"tensor(sizes=[\n"
884+
" 5, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n"
885+
" 1, \n"
886+
"], [1., 1., 1., 1., 1.])");
887+
}
888+
{
889+
// Both sizes and data are wrapped.
890+
std::vector<int32_t> sizes(kItemsPerLine + 1, 1);
891+
sizes[0] = 100;
892+
EValue value(tf.ones(sizes));
893+
894+
std::ostringstream os;
895+
os << torch::executor::util::evalue_edge_items(15) << value;
896+
EXPECT_STREQ(
897+
os.str().c_str(),
898+
"tensor(sizes=[\n"
899+
" 100, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n"
900+
" 1, \n"
901+
// TODO(T159700776): Indent this further to look more like python.
902+
"], [\n"
903+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
904+
" 1., 1., 1., 1., 1., \n"
905+
" ...,\n"
906+
" 1., 1., 1., 1., 1., \n"
907+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
908+
"])");
909+
}
910+
}
911+
912+
TEST(PrintEvalueTest, WrappedTensorLists) {
913+
TensorFactory<ScalarType::Float> tf;
914+
915+
std::array<EValue, 2> values = {
916+
// Tensors that are large enough for their data to wrap.
917+
tf.ones({10, 10}),
918+
tf.ones({11, 11}),
919+
};
920+
std::array<EValue*, values.size()> wrapped_values = {
921+
&values[0],
922+
&values[1],
923+
};
924+
// Memory that BoxedEvalueList will use to assemble a contiguous array of
925+
// Tensor entries. It's important not to destroy these entries, because the
926+
// values list will own the underlying Tensors.
927+
auto unwrapped_values_memory = std::make_unique<uint8_t[]>(
928+
sizeof(exec_aten::Tensor) * wrapped_values.size());
929+
exec_aten::Tensor* unwrapped_values =
930+
reinterpret_cast<exec_aten::Tensor*>(unwrapped_values_memory.get());
931+
#if USE_ATEN_LIB
932+
// Must be initialized because BoxedEvalueList will use operator=() on each
933+
// entry. But we can't do this in non-ATen mode because
934+
// torch::executor::Tensor doesn't have a default constructor.
935+
for (int i = 0; i < wrapped_values.size(); ++i) {
936+
new (&unwrapped_values[i]) at::Tensor();
937+
}
938+
#endif
939+
940+
// Demonstrate the formatting when printing a list with multiple tensors.
941+
BoxedEvalueList<exec_aten::Tensor> list(
942+
wrapped_values.data(), unwrapped_values, wrapped_values.size());
943+
EValue value(list);
944+
945+
std::ostringstream os;
946+
os << torch::executor::util::evalue_edge_items(15) << value;
947+
EXPECT_STREQ(
948+
os.str().c_str(),
949+
"(len=2)[\n"
950+
" [0]: tensor(sizes=[10, 10], [\n"
951+
// TODO(T159700776): Indent these entries further to look more like
952+
// python.
953+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
954+
" 1., 1., 1., 1., 1., \n"
955+
" ...,\n"
956+
" 1., 1., 1., 1., 1., \n"
957+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
958+
"]),\n"
959+
" [1]: tensor(sizes=[11, 11], [\n"
960+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
961+
" 1., 1., 1., 1., 1., \n"
962+
" ...,\n"
963+
" 1., 1., 1., 1., 1., \n"
964+
" 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
965+
"]),\n"
966+
"]");
967+
}

0 commit comments

Comments
 (0)