@@ -14,13 +14,14 @@ import (
 	"testing"
 	"time"
 
+	"go.mongodb.org/mongo-driver/bson"
+	"go.mongodb.org/mongo-driver/event"
 	"go.mongodb.org/mongo-driver/internal/testutil/assert"
 	"go.mongodb.org/mongo-driver/internal/testutil/israce"
 	"go.mongodb.org/mongo-driver/mongo"
 	"go.mongodb.org/mongo-driver/mongo/gridfs"
 	"go.mongodb.org/mongo-driver/mongo/integration/mtest"
 	"go.mongodb.org/mongo-driver/mongo/options"
-	"go.mongodb.org/mongo-driver/x/bsonx"
 )
 
 func TestGridFS(x *testing.T) {
@@ -45,6 +46,141 @@ func TestGridFS(x *testing.T) {
 		findIndex(findCtx, mt, mt.DB.Collection("fs.files"), false, "key", "filename")
 		findIndex(findCtx, mt, mt.DB.Collection("fs.chunks"), true, "key", "files_id")
 	})
+	// should not create a new index if index is numerically the same
+	mt.Run("equivalent indexes", func(mt *mtest.T) {
+		tests := []struct {
+			name        string
+			filesIndex  bson.D
+			chunksIndex bson.D
+			newIndexes  bool
+		}{
+			{
+				"numerically equal",
+				bson.D{
+					{"key", bson.D{{"filename", float64(1.0)}, {"uploadDate", float64(1.0)}}},
+					{"name", "filename_1_uploadDate_1"},
+				},
+				bson.D{
+					{"key", bson.D{{"files_id", float64(1.0)}, {"n", float64(1.0)}}},
+					{"name", "files_id_1_n_1"},
+					{"unique", true},
+				},
+				false,
+			},
+			{
+				"numerically inequal",
+				bson.D{
+					{"key", bson.D{{"filename", float64(-1.0)}, {"uploadDate", float64(1.0)}}},
+					{"name", "filename_-1_uploadDate_1"},
+				},
+				bson.D{
+					{"key", bson.D{{"files_id", float64(1.0)}, {"n", float64(-1.0)}}},
+					{"name", "files_id_1_n_-1"},
+					{"unique", true},
+				},
+				true,
+			},
+		}
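+		// Each case is run through both OpenUploadStream and UploadFromStream, since both paths can trigger index creation.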
+		for _, test := range tests {
+			mt.Run(test.name, func(mt *mtest.T) {
+				mt.Run("OpenUploadStream", func(mt *mtest.T) {
+					// add indexes with floats to collections manually
+					res := mt.DB.RunCommand(context.Background(),
+						bson.D{
+							{"createIndexes", "fs.files"},
+							{"indexes", bson.A{
+								test.filesIndex,
+							}},
+						},
+					)
+					assert.Nil(mt, res.Err(), "createIndexes error: %v", res.Err())
+
+					res = mt.DB.RunCommand(context.Background(),
+						bson.D{
+							{"createIndexes", "fs.chunks"},
+							{"indexes", bson.A{
+								test.chunksIndex,
+							}},
+						},
+					)
+					assert.Nil(mt, res.Err(), "createIndexes error: %v", res.Err())
+
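+					// Discard the events from the manual createIndexes calls above so only commands sent by the bucket are inspected.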
+					mt.ClearEvents()
+
+					bucket, err := gridfs.NewBucket(mt.DB)
+					assert.Nil(mt, err, "NewBucket error: %v", err)
+					defer func() {
+						_ = bucket.Drop()
+					}()
+
+					_, err = bucket.OpenUploadStream("filename")
+					assert.Nil(mt, err, "OpenUploadStream error: %v", err)
+
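+					// The bucket should only have sent createIndexes if the existing indexes were not equivalent.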
+					mt.FilterStartedEvents(func(evt *event.CommandStartedEvent) bool {
+						return evt.CommandName == "createIndexes"
+					})
+					evt := mt.GetStartedEvent()
+					if test.newIndexes {
+						if evt == nil {
+							mt.Fatalf("expected createIndexes events but got none")
+						}
+					} else {
+						if evt != nil {
+							mt.Fatalf("expected no createIndexes events but got %v", evt.Command)
+						}
+					}
+				})
+				mt.Run("UploadFromStream", func(mt *mtest.T) {
+					// add indexes with floats to collections manually
+					res := mt.DB.RunCommand(context.Background(),
+						bson.D{
+							{"createIndexes", "fs.files"},
+							{"indexes", bson.A{
+								test.filesIndex,
+							}},
+						},
+					)
+					assert.Nil(mt, res.Err(), "createIndexes error: %v", res.Err())
+
+					res = mt.DB.RunCommand(context.Background(),
+						bson.D{
+							{"createIndexes", "fs.chunks"},
+							{"indexes", bson.A{
+								test.chunksIndex,
+							}},
+						},
+					)
+					assert.Nil(mt, res.Err(), "createIndexes error: %v", res.Err())
+
+					mt.ClearEvents()
+					var fileContent []byte
+					bucket, err := gridfs.NewBucket(mt.DB)
+					assert.Nil(mt, err, "NewBucket error: %v", err)
+					defer func() {
+						_ = bucket.Drop()
+					}()
+
+					_, err = bucket.UploadFromStream("filename", bytes.NewBuffer(fileContent))
+					assert.Nil(mt, err, "UploadFromStream error: %v", err)
+
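+					// As above, a createIndexes command is expected only when the manual indexes are not numerically equivalent.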
+					mt.FilterStartedEvents(func(evt *event.CommandStartedEvent) bool {
+						return evt.CommandName == "createIndexes"
+					})
+					evt := mt.GetStartedEvent()
+					if test.newIndexes {
+						if evt == nil {
+							mt.Fatalf("expected createIndexes events but got none")
+						}
+					} else {
+						if evt != nil {
+							mt.Fatalf("expected no createIndexes events but got %v", evt.Command)
+						}
+					}
+				})
+			})
+		}
+	})
+
 	mt.RunOpts("round trip", mtest.NewOptions().MaxServerVersion("3.6"), func(mt *mtest.T) {
 		skipRoundTripTest(mt)
 		oneK := 1024
@@ -135,10 +271,10 @@ func skipRoundTripTest(mt *mtest.T) {
 		return
 	}
 
-	var serverStatus bsonx.Doc
+	var serverStatus bson.Raw
 	err := mt.DB.RunCommand(
 		context.Background(),
-		bsonx.Doc{{"serverStatus", bsonx.Int32(1)}},
+		bson.D{{"serverStatus", 1}},
 	).Decode(&serverStatus)
 	assert.Nil(mt, err, "serverStatus error %v", err)
 