@@ -729,3 +729,243 @@ TEST(PrintEvalueTest, EdgeItemsAffectsTensorData) {
   // in full.
       "  tensor(sizes=[5, 1, 1, 2], [1., ..., 10.1])\n");
 }
+
+//
+// Long list wrapping.
+//
+// Use double as a proxy for testing the wrapping logic; the other scalar
+// types use the same underlying code, so they don't need to test this again.
+//
+
+// Duplicates the internal value in the cpp file under test.
+constexpr size_t kItemsPerLine = 10;
+
+TEST(PrintEvalueTest, ListWrapping) {
+  // A large list of scalars.
+  std::array<double, 100> list;
+  for (int i = 0; i < list.size(); ++i) {
+    list[i] = static_cast<double>(i);
+  }
+
+  {
+    // Should elide by default and print on a single line.
+    EValue value(ArrayRef<double>(list.data(), list.size()));
+
+    std::ostringstream os;
+    os << value;
+    EXPECT_STREQ(os.str().c_str(), "(len=100)[0., 1., 2., ..., 97., 98., 99.]");
+  }
+  {
+    // Exactly the per-line length should not wrap when increasing the number of
+    // edge items to disable elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(1000) << value;
+    EXPECT_STREQ(
+        os.str().c_str(), "(len=10)[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]");
+  }
+  {
+    // One more than the per-line length should wrap; no elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine + 1));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(1000) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=11)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., \n"
+        "]");
+  }
+  {
+    // Exactly twice the per-line length, without elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 2));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(1000) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=20)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., 11., 12., 13., 14., 15., 16., 17., 18., 19., \n"
+        // Make sure there is no extra newline here.
+        "]");
+  }
+  {
+    // Exactly one whole line, with elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 3));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(kItemsPerLine) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=30)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        // Elision always on its own line when wrapping.
+        "  ...,\n"
+        "  20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
+        "]");
+  }
+  {
+    // Edge item count slightly larger than per-line length, with elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 3));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(kItemsPerLine + 1) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=30)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., \n"
+        // Elision always on its own line when wrapping.
+        "  ...,\n"
+        // The ragged line always comes just after the elision so that
+        // we will end on a full line.
+        "  19., \n"
+        "  20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
+        "]");
+  }
+  {
+    // Large wrapped, ragged, elided example.
+    EValue value(ArrayRef<double>(list.data(), list.size()));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(33) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=100)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., 11., 12., 13., 14., 15., 16., 17., 18., 19., \n"
+        "  20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
+        "  30., 31., 32., \n"
+        "  ...,\n"
+        "  67., 68., 69., \n"
+        "  70., 71., 72., 73., 74., 75., 76., 77., 78., 79., \n"
+        "  80., 81., 82., 83., 84., 85., 86., 87., 88., 89., \n"
+        "  90., 91., 92., 93., 94., 95., 96., 97., 98., 99., \n"
+        "]");
+  }
+}
+
+TEST(PrintEvalueTest, WrappedTensorData) {
+  TensorFactory<ScalarType::Double> tf;
+  // A tensor with a large number of elements.
+  EValue value(tf.ones({10, 10}));
+
+  std::ostringstream os;
+  os << torch::executor::util::evalue_edge_items(33) << value;
+  EXPECT_STREQ(
+      os.str().c_str(),
+      "tensor(sizes=[10, 10], [\n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., \n"
+      "  ...,\n"
+      "  1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "])");
+}
+
+TEST(PrintEvalueTest, WrappedTensorSizes) {
+  TensorFactory<ScalarType::Double> tf;
+
+  {
+    // A tensor with enough dimensions that the sizes list is wrapped, but
+    // the data is not.
+    std::vector<int32_t> sizes(kItemsPerLine + 1, 1);
+    sizes[0] = 5;
+    EValue value(tf.ones(sizes));
+
+    std::ostringstream os;
+    os << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "tensor(sizes=[\n"
+        "  5, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n"
+        "  1, \n"
+        "], [1., 1., 1., 1., 1.])");
+  }
+  {
+    // Both sizes and data are wrapped.
+    std::vector<int32_t> sizes(kItemsPerLine + 1, 1);
+    sizes[0] = 100;
+    EValue value(tf.ones(sizes));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(15) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "tensor(sizes=[\n"
+        "  100, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n"
+        "  1, \n"
+        // TODO(T159700776): Indent this further to look more like python.
+        "], [\n"
+        "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+        "  1., 1., 1., 1., 1., \n"
+        "  ...,\n"
+        "  1., 1., 1., 1., 1., \n"
+        "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+        "])");
+  }
+}
+
+TEST(PrintEvalueTest, WrappedTensorLists) {
+  TensorFactory<ScalarType::Float> tf;
+
+  std::array<EValue, 2> values = {
+      // Tensors that are large enough for their data to wrap.
+      tf.ones({10, 10}),
+      tf.ones({11, 11}),
+  };
+  std::array<EValue*, values.size()> wrapped_values = {
+      &values[0],
+      &values[1],
+  };
+  // Memory that BoxedEvalueList will use to assemble a contiguous array of
+  // Tensor entries. It's important not to destroy these entries, because the
+  // values list will own the underlying Tensors.
+  auto unwrapped_values_memory = std::make_unique<uint8_t[]>(
+      sizeof(exec_aten::Tensor) * wrapped_values.size());
+  exec_aten::Tensor* unwrapped_values =
+      reinterpret_cast<exec_aten::Tensor*>(unwrapped_values_memory.get());
+#if USE_ATEN_LIB
+  // Must be initialized because BoxedEvalueList will use operator=() on each
+  // entry. But we can't do this in non-ATen mode because
+  // torch::executor::Tensor doesn't have a default constructor.
+  for (int i = 0; i < wrapped_values.size(); ++i) {
+    new (&unwrapped_values[i]) at::Tensor();
+  }
+#endif
+
+  // Demonstrate the formatting when printing a list with multiple tensors.
+  BoxedEvalueList<exec_aten::Tensor> list(
+      wrapped_values.data(), unwrapped_values, wrapped_values.size());
+  EValue value(list);
+
+  std::ostringstream os;
+  os << torch::executor::util::evalue_edge_items(15) << value;
+  EXPECT_STREQ(
+      os.str().c_str(),
+      "(len=2)[\n"
+      "  [0]: tensor(sizes=[10, 10], [\n"
+      // TODO(T159700776): Indent these entries further to look more like
+      // python.
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., \n"
+      "  ...,\n"
+      "  1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  ]),\n"
+      "  [1]: tensor(sizes=[11, 11], [\n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., \n"
+      "  ...,\n"
+      "  1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  ]),\n"
+      "]");
+}
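
Note on the API exercised above: the tests apply the stream manipulator torch::executor::util::evalue_edge_items(N) before streaming an EValue, which controls how many items are kept at each end of a long list or tensor before the middle is elided with "...", and output longer than kItemsPerLine (10) items wraps onto indented lines. A minimal usage sketch, assuming the printer's header path (not confirmed by this diff) and a hypothetical helper name:

#include <iostream>
#include <executorch/extension/evalue_util/print_evalue.h> // assumed header path

// Hypothetical helper: print an EValue to stdout, keeping up to 33 items at
// each edge before eliding, as the tests above do with an ostringstream.
void dump_evalue(const torch::executor::EValue& value) {
  std::cout << torch::executor::util::evalue_edge_items(33) << value << "\n";
}

As in the tests, the manipulator is inserted into the stream ahead of the value in the same statement; std::cout works here because the tests' std::ostringstream and std::cout share the same std::ostream interface.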