@@ -711,3 +711,243 @@ TEST(PrintEvalueTest, EdgeItemsAffectsTensorData) {
       // in full.
       "tensor(sizes=[5, 1, 1, 2], [1., ..., 10.1])\n");
 }
+
+//
+// Long list wrapping.
+//
+// Use double as a proxy for testing the wrapping logic; the other scalar
+// types use the same underlying code, so they don't need to test this again.
+//
+
+// Duplicates the internal value in the cpp file under test.
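+// If the file under test changes its per-line item count, this constant must
+// be updated to match.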
+constexpr size_t kItemsPerLine = 10;
+
+TEST(PrintEvalueTest, ListWrapping) {
+  // A large list of scalars.
+  std::array<double, 100> list;
+  for (int i = 0; i < list.size(); ++i) {
+    list[i] = static_cast<double>(i);
+  }
+
+  {
+    // Should elide by default and print on a single line.
+    EValue value(ArrayRef<double>(list.data(), list.size()));
+
+    std::ostringstream os;
+    os << value;
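+    // With the default edge-item count, only the first and last three values
+    // are printed, separated by an ellipsis.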
+    EXPECT_STREQ(
+        os.str().c_str(), "(len=100)[0., 1., 2., ..., 97., 98., 99.]");
+  }
+  {
+    // Exactly the per-line length should not wrap when increasing the number
+    // of edge items to disable elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(1000) << value;
+    EXPECT_STREQ(
+        os.str().c_str(), "(len=10)[0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]");
+  }
+  {
+    // One more than the per-line length should wrap; no elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine + 1));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(1000) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=11)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., \n"
+        "]");
+  }
+  {
+    // Exactly twice the per-line length, without elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 2));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(1000) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=20)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., 11., 12., 13., 14., 15., 16., 17., 18., 19., \n"
+        // Make sure there is no extra newline here.
+        "]");
+  }
+  {
+    // Exactly one whole line, with elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 3));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(kItemsPerLine) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=30)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        // Elision always goes on its own line when wrapping.
+        "  ...,\n"
+        "  20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
+        "]");
+  }
+  {
+    // Edge item count slightly larger than per-line length, with elision.
+    EValue value(ArrayRef<double>(list.data(), kItemsPerLine * 3));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(kItemsPerLine + 1) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=30)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., \n"
+        // Elision always goes on its own line when wrapping.
+        "  ...,\n"
+        // The ragged line always comes just after the elision so that we will
+        // end on a full line.
+        "  19., \n"
+        "  20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
+        "]");
+  }
+  {
+    // Large wrapped, ragged, elided example.
+    EValue value(ArrayRef<double>(list.data(), list.size()));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(33) << value;
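+    // 33 edge items is three full lines of 10 plus a ragged line of 3 on each
+    // side of the elision.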
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "(len=100)[\n"
+        "  0., 1., 2., 3., 4., 5., 6., 7., 8., 9., \n"
+        "  10., 11., 12., 13., 14., 15., 16., 17., 18., 19., \n"
+        "  20., 21., 22., 23., 24., 25., 26., 27., 28., 29., \n"
+        "  30., 31., 32., \n"
+        "  ...,\n"
+        "  67., 68., 69., \n"
+        "  70., 71., 72., 73., 74., 75., 76., 77., 78., 79., \n"
+        "  80., 81., 82., 83., 84., 85., 86., 87., 88., 89., \n"
+        "  90., 91., 92., 93., 94., 95., 96., 97., 98., 99., \n"
+        "]");
+  }
+}
+
+TEST(PrintEvalueTest, WrappedTensorData) {
+  TensorFactory<ScalarType::Double> tf;
+  // A tensor with a large number of elements.
+  EValue value(tf.ones({10, 10}));
+
+  std::ostringstream os;
+  os << torch::executor::util::evalue_edge_items(33) << value;
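+  // The tensor's 100 elements are printed as a single flat list, so the
+  // wrapping and elision behave the same as in ListWrapping above.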
+  EXPECT_STREQ(
+      os.str().c_str(),
+      "tensor(sizes=[10, 10], [\n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., \n"
+      "  ...,\n"
+      "  1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "])");
+}
+
+TEST(PrintEvalueTest, WrappedTensorSizes) {
+  TensorFactory<ScalarType::Double> tf;
+
+  {
+    // A tensor with enough dimensions that the sizes list is wrapped, but
+    // the data is not.
+    std::vector<int32_t> sizes(kItemsPerLine + 1, 1);
+    sizes[0] = 5;
+    EValue value(tf.ones(sizes));
+
+    std::ostringstream os;
+    os << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "tensor(sizes=[\n"
+        "  5, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n"
+        "  1, \n"
+        "], [1., 1., 1., 1., 1.])");
+  }
+  {
+    // Both sizes and data are wrapped.
+    std::vector<int32_t> sizes(kItemsPerLine + 1, 1);
+    sizes[0] = 100;
+    EValue value(tf.ones(sizes));
+
+    std::ostringstream os;
+    os << torch::executor::util::evalue_edge_items(15) << value;
+    EXPECT_STREQ(
+        os.str().c_str(),
+        "tensor(sizes=[\n"
+        "  100, 1, 1, 1, 1, 1, 1, 1, 1, 1, \n"
+        "  1, \n"
+        // TODO(T159700776): Indent this further to look more like python.
+        "], [\n"
+        "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+        "  1., 1., 1., 1., 1., \n"
+        "  ...,\n"
+        "  1., 1., 1., 1., 1., \n"
+        "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+        "])");
+  }
+}
+
+TEST(PrintEvalueTest, WrappedTensorLists) {
+  TensorFactory<ScalarType::Float> tf;
+
+  std::array<EValue, 2> values = {
+      // Tensors that are large enough for their data to wrap.
+      tf.ones({10, 10}),
+      tf.ones({11, 11}),
+  };
+  std::array<EValue*, values.size()> wrapped_values = {
+      &values[0],
+      &values[1],
+  };
+  // Memory that BoxedEvalueList will use to assemble a contiguous array of
+  // Tensor entries. It's important not to destroy these entries, because the
+  // values list will own the underlying Tensors.
+  auto unwrapped_values_memory = std::make_unique<uint8_t[]>(
+      sizeof(exec_aten::Tensor) * wrapped_values.size());
+  exec_aten::Tensor* unwrapped_values =
+      reinterpret_cast<exec_aten::Tensor*>(unwrapped_values_memory.get());
+#if USE_ATEN_LIB
+  // Must be initialized because BoxedEvalueList will use operator=() on each
+  // entry. But we can't do this in non-ATen mode because
+  // torch::executor::Tensor doesn't have a default constructor.
+  for (int i = 0; i < wrapped_values.size(); ++i) {
+    new (&unwrapped_values[i]) at::Tensor();
+  }
+#endif
+
+  // Demonstrate the formatting when printing a list with multiple tensors.
+  BoxedEvalueList<exec_aten::Tensor> list(
+      wrapped_values.data(), unwrapped_values, wrapped_values.size());
+  EValue value(list);
+
+  std::ostringstream os;
+  os << torch::executor::util::evalue_edge_items(15) << value;
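+  // Each tensor in the list is printed with an "[index]:" prefix; with 15
+  // edge items, the data of both tensors wraps and elides.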
+  EXPECT_STREQ(
+      os.str().c_str(),
+      "(len=2)[\n"
+      "  [0]: tensor(sizes=[10, 10], [\n"
+      // TODO(T159700776): Indent these entries further to look more like
+      // python.
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., \n"
+      "  ...,\n"
+      "  1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "]),\n"
+      "  [1]: tensor(sizes=[11, 11], [\n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., \n"
+      "  ...,\n"
+      "  1., 1., 1., 1., 1., \n"
+      "  1., 1., 1., 1., 1., 1., 1., 1., 1., 1., \n"
+      "]),\n"
+      "]");
+}