@@ -725,3 +725,243 @@ TEST(PrintEvalueTest, EdgeItemsAffectsTensorData) {
   // in full.
       "tensor(sizes=[5, 1, 1, 2], [1., ..., 10.1])\n");
 }
+
+//
+// Long list wrapping.
+//
+// Use double as a proxy for testing the wrapping logic; the other scalar
+// types use the same underlying code, so they don't need to test this again.
+//
+
+// Duplicates the internal value in the cpp file under test.
+constexpr size_t kItemsPerLine = 10;
+
+TEST(PrintEvalueTest, ListWrapping) {
+  // A large list of scalars.
+  std::array<double, 100> list;
+  for (int i = 0; i < list.size(); ++i) {
+    list[i] = static_cast<double>(i);
+  }
+
+  {
+    // Should elide by default and print on a single line.
+    EValue value(ArrayRef<double>(list.data(), list.size()));
+
+    std::ostringstream os;
+    os << value;
+    EXPECT_STREQ(os.str().c_str(), "(len=100)[0., 1., 2., ..., 97., 98., 99.]");
+  }
+  {
+    // Exactly the per-line length should not wrap when increasing the number of
+    // edge items to disable elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(1000) << value;
+    EXPECT_STREQ(
+        os.str().c_str(), "(len=10)[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]");
+  }
+  {
+    // One more than the per-line length should wrap; no elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine + 1));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(1000) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=11)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., \n"
+        "]");
+  }
+  {
+    // Exactly twice the per-line length, without elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 2));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(1000) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=20)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., 11., 12., 13., 14., 15., 16., 17., 18., 19., \n"
+        // Make sure there is no extra newline here.
+        "]");
+  }
+  {
+    // Exactly one whole line, with elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 3));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(kItemsPerLine) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=30)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        // Elision always on its own line when wrapping.
+        "  ...,\n"
+        "  20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
+        "]");
+  }
+  {
+    // Edge item count slightly larger than per-line length, with elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 3));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(kItemsPerLine + 1) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=30)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., \n"
+        // Elision always on its own line when wrapping.
+        "  ...,\n"
+        // The ragged line always comes just after the elision so that
+        // we will end on a full line.
+        "  19., \n"
+        "  20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
+        "]");
+  }
+  {
+    // Large wrapped, ragged, elided example.
+    EValue value(ArrayRef<double>(list.data(), list.size()));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(33) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=100)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., 11., 12., 13., 14., 15., 16., 17., 18., 19., \n"
+        "  20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
+        "  30., 31., 32., \n"
+        "  ...,\n"
+        "  67., 68., 69., \n"
+        "  70., 71., 72., 73., 74., 75., 76., 77., 78., 79., \n"
+        "  80., 81., 82., 83., 84., 85., 86., 87., 88., 89., \n"
+        "  90., 91., 92., 93., 94., 95., 96., 97., 98., 99., \n"
+        "]");
+  }
+}
+
+TEST(PrintEvalueTest, WrappedTensorData) {
+  TensorFactory<ScalarType::Double> tf;
+  // A tensor with a large number of elements.
+  EValue value(tf.ones({10, 10}));
+
+  std::ostringstream os;
+  os << torch::executor::util::evalue_edge_items(33) << value;
+  EXPECT_STREQ(
+      os.str().c_str(),
+      "tensor(sizes=[10, 10], [\n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., \n"
+      "  ...,\n"
+      "  1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "])");
+}
+
+TEST(PrintEvalueTest, WrappedTensorSizes) {
+  TensorFactory<ScalarType::Double> tf;
+
+  {
+    // A tensor with enough dimensions that the sizes list is wrapped, but
+    // the data is not.
+    std::vector<int32_t> sizes(kItemsPerLine + 1, 1);
+    sizes[0] = 5;
+    EValue value(tf.ones(sizes));
+
+    std::ostringstream os;
+    os << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "tensor(sizes=[\n"
+        "  5, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n"
+        "  1, \n"
+        "], [1., 1., 1., 1., 1.])");
+  }
+  {
+    // Both sizes and data are wrapped.
+    std::vector<int32_t> sizes(kItemsPerLine + 1, 1);
+    sizes[0] = 100;
+    EValue value(tf.ones(sizes));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(15) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "tensor(sizes=[\n"
+        "  100, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n"
+        "  1, \n"
+        // TODO(T159700776): Indent this further to look more like python.
+        "], [\n"
+        "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+        "  1., 1., 1., 1., 1., \n"
+        "  ...,\n"
+        "  1., 1., 1., 1., 1., \n"
+        "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+        "])");
+  }
+}
+
+TEST(PrintEvalueTest, WrappedTensorLists) {
+  TensorFactory<ScalarType::Float> tf;
+
+  std::array<EValue, 2> values = {
+      // Tensors that are large enough for their data to wrap.
+      tf.ones({10, 10}),
+      tf.ones({11, 11}),
+  };
+  std::array<EValue*, values.size()> wrapped_values = {
+      &values[0],
+      &values[1],
+  };
+  // Memory that BoxedEvalueList will use to assemble a contiguous array of
+  // Tensor entries. It's important not to destroy these entries, because the
+  // values list will own the underlying Tensors.
+  auto unwrapped_values_memory = std::make_unique<uint8_t[]>(
+      sizeof(exec_aten::Tensor) * wrapped_values.size());
+  exec_aten::Tensor* unwrapped_values =
+      reinterpret_cast<exec_aten::Tensor*>(unwrapped_values_memory.get());
+#if USE_ATEN_LIB
+  // Must be initialized because BoxedEvalueList will use operator=() on each
+  // entry. But we can't do this in non-ATen mode because
+  // torch::executor::Tensor doesn't have a default constructor.
+  for (int i = 0; i < wrapped_values.size(); ++i) {
+    new (&unwrapped_values[i]) at::Tensor();
+  }
+#endif
+
+  // Demonstrate the formatting when printing a list with multiple tensors.
+  BoxedEvalueList<exec_aten::Tensor> list(
+      wrapped_values.data(), unwrapped_values, wrapped_values.size());
+  EValue value(list);
+
+  std::ostringstream os;
+  os << torch::executor::util::evalue_edge_items(15) << value;
+  EXPECT_STREQ(
+      os.str().c_str(),
+      "(len=2)[\n"
+      "  [0]: tensor(sizes=[10, 10], [\n"
+      // TODO(T159700776): Indent these entries further to look more like
+      // python.
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., \n"
+      "  ...,\n"
+      "  1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "]),\n"
+      "  [1]: tensor(sizes=[11, 11], [\n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., \n"
+      "  ...,\n"
+      "  1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "]),\n"
+      "]");
+}