package mongoimport

import (
	"fmt"
	"io"
	"sort"
	"strconv"
	"strings"

	"github.com/mongodb/mongo-tools/common/bsonutil"
	"github.com/mongodb/mongo-tools/common/db"
	"github.com/mongodb/mongo-tools/common/log"
	"github.com/mongodb/mongo-tools/common/util"
	"gopkg.in/mgo.v2/bson"
	"gopkg.in/tomb.v2"
)

// ConvertibleDoc is an interface implemented by special types that wrap data
// read from various input readers - e.g. CSV, JSON, TSV. It exposes one
// function - Convert() - which converts the wrapped data to a bson.D document
type ConvertibleDoc interface {
	Convert() (bson.D, error)
}
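
// As an illustration only - this type is not part of the package - a minimal
// ConvertibleDoc implementation could wrap a single hypothetical key/value
// pair:
//
//	type keyValueDoc struct {
//		key   string
//		value interface{}
//	}
//
//	func (d keyValueDoc) Convert() (bson.D, error) {
//		return bson.D{{d.key, d.value}}, nil
//	}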

// ImportWorker is used to process documents concurrently
type ImportWorker struct {
	// unprocessedDataChan is used to stream the input data for a worker to process
	unprocessedDataChan chan ConvertibleDoc

	// processedDocumentChan is used to stream the processed documents back to the caller
	processedDocumentChan chan bson.D

	// tomb is used to synchronize all worker goroutines
	tomb *tomb.Tomb
}

// constructUpsertDocument constructs a BSON document to use for upserts
func constructUpsertDocument(upsertFields []string, document bson.M) bson.M {
	upsertDocument := bson.M{}
	var hasDocumentKey bool
	for _, key := range upsertFields {
		upsertDocument[key] = getUpsertValue(key, document)
		if upsertDocument[key] != nil {
			hasDocumentKey = true
		}
	}
	if !hasDocumentKey {
		return nil
	}
	return upsertDocument
}
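
// For example (illustrative values):
//
//	constructUpsertDocument([]string{"a.b"}, bson.M{"a": bson.M{"b": 1}})
//	// returns bson.M{"a.b": 1}
//
//	constructUpsertDocument([]string{"c"}, bson.M{"a": 1})
//	// returns nil, since none of the upsert fields exist in the document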

// doSequentialStreaming takes a slice of workers, a readDocChan (input)
// channel, and an outputChan (output) channel. It distributes unprocessed
// data read from the input channel across the workers in round-robin order,
// then reads the processed data back from each worker in that same order
// before passing it on to the output channel
func doSequentialStreaming(workers []*ImportWorker, readDocChan chan ConvertibleDoc, outputChan chan bson.D) {
	numWorkers := len(workers)

	// feed in the data to be processed and do round-robin
	// reads from each worker once processing is completed
	go func() {
		i := 0
		for doc := range readDocChan {
			workers[i].unprocessedDataChan <- doc
			i = (i + 1) % numWorkers
		}

		// close the read channels of all the workers
		for i := 0; i < numWorkers; i++ {
			close(workers[i].unprocessedDataChan)
		}
	}()

	// coordinate the order in which the documents are sent over to the
	// main output channel
	numDoneWorkers := 0
	i := 0
	for {
		processedDocument, open := <-workers[i].processedDocumentChan
		if open {
			outputChan <- processedDocument
		} else {
			numDoneWorkers++
		}
		if numDoneWorkers == numWorkers {
			break
		}
		i = (i + 1) % numWorkers
	}
}

// getParsedValue returns the appropriate concrete type for the given token.
// It first attempts to convert the token to an int; if that doesn't succeed,
// it attempts conversion to a float; if that doesn't succeed either, it
// returns the token as is
func getParsedValue(token string) interface{} {
	parsedInt, err := strconv.Atoi(token)
	if err == nil {
		return parsedInt
	}
	parsedFloat, err := strconv.ParseFloat(token, 64)
	if err == nil {
		return parsedFloat
	}
	return token
}
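
// Illustrative outcomes:
//
//	getParsedValue("21")  // int 21
//	getParsedValue("3.4") // float64 3.4
//	getParsedValue("abc") // string "abc"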

// getUpsertValue takes a given BSON document and a given field, and returns
// the field's associated value in the document. The field is specified using
// dot notation for nested fields. e.g. "person.age" would return 34 in the
// document bson.M{"person": bson.M{"age": 34}}, whereas "person.name" would
// return nil
func getUpsertValue(field string, document bson.M) interface{} {
	index := strings.Index(field, ".")
	if index == -1 {
		return document[field]
	}
	left := field[0:index]
	if document[left] == nil {
		return nil
	}
	subDoc, ok := document[left].(bson.M)
	if !ok {
		return nil
	}
	return getUpsertValue(field[index+1:], subDoc)
}
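
// Note that lookups only descend through bson.M subdocuments; for example
// (illustrative values):
//
//	getUpsertValue("a.b", bson.M{"a": 5})
//	// returns nil, since the value at "a" is not a bson.M subdocument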

// filterIngestError accepts a boolean indicating whether a non-nil error
// should be returned as an actual error.
//
// If the error indicates an unreachable server, it is returned immediately.
//
// If the error is an io.EOF error - indicating a lost connection to the
// server - it is remapped to db.ErrLostConnection.
//
// Any other non-nil error is logged, and is returned only if stopOnError is
// set or the connection to the server was lost
func filterIngestError(stopOnError bool, err error) error {
	if err == nil {
		return nil
	}
	if err.Error() == db.ErrNoReachableServers.Error() {
		return err
	}
	if err.Error() == io.EOF.Error() {
		err = db.ErrLostConnection
	}
	log.Logf(log.Always, "error inserting documents: %v", err)
	if stopOnError || err == db.ErrLostConnection {
		return err
	}
	return nil
}

// removeBlankFields removes fields whose values are empty strings from
// documents read from CSV and TSV sources
func removeBlankFields(document bson.D) bson.D {
	// build a new document rather than deleting in place, since removing
	// elements from a slice while ranging over it skips adjacent blanks
	filtered := bson.D{}
	for _, pair := range document {
		if value, ok := pair.Value.(string); ok && value == "" {
			continue
		}
		filtered = append(filtered, pair)
	}
	return filtered
}
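
// For example (illustrative values):
//
//	removeBlankFields(bson.D{{"a", ""}, {"b", 1}, {"c", ""}})
//	// returns bson.D{{"b", 1}}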

// setNestedValue takes a nested field - in the form "a.b.c" -
// its associated value, and a document. It then assigns that
// value to the appropriate nested field within the document
func setNestedValue(key string, value interface{}, document *bson.D) {
	index := strings.Index(key, ".")
	if index == -1 {
		*document = append(*document, bson.DocElem{key, value})
		return
	}
	keyName := key[0:index]
	subDocument := &bson.D{}
	elem, err := bsonutil.FindValueByKey(keyName, document)
	if err != nil { // no such key in the document
		elem = nil
	}
	var existingKey bool
	if elem != nil {
		subDocument = elem.(*bson.D)
		existingKey = true
	}
	setNestedValue(key[index+1:], value, subDocument)
	if !existingKey {
		*document = append(*document, bson.DocElem{keyName, subDocument})
	}
}
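
// For example (illustrative values):
//
//	document := bson.D{}
//	setNestedValue("a.b", 1, &document)
//	// document is now bson.D{{"a", &bson.D{{"b", 1}}}} - note that nested
//	// subdocuments are stored as *bson.D values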

// streamDocuments concurrently processes data read from the readDocChan
// channel and sends the processed documents to the outputChan channel -
// either in the order in which the data was received (if ordered is true)
// or as soon as each document has been processed
func streamDocuments(ordered bool, numDecoders int, readDocChan chan ConvertibleDoc, outputChan chan bson.D) error {
	if numDecoders == 0 {
		numDecoders = 1
	}
	var importWorkers []*ImportWorker
	importTomb := &tomb.Tomb{}
	inChan := readDocChan
	outChan := outputChan
	for i := 0; i < numDecoders; i++ {
		if ordered {
			inChan = make(chan ConvertibleDoc, workerBufferSize)
			outChan = make(chan bson.D, workerBufferSize)
		}
		importWorker := &ImportWorker{
			unprocessedDataChan:   inChan,
			processedDocumentChan: outChan,
			tomb:                  importTomb,
		}
		importWorkers = append(importWorkers, importWorker)
		importTomb.Go(func() error {
			return importWorker.processDocuments(ordered)
		})
	}

	// if ordered, we have to coordinate the sequence in which processed
	// documents are passed to the main read channel
	if ordered {
		doSequentialStreaming(importWorkers, readDocChan, outputChan)
	}
	err := importTomb.Wait()
	close(outputChan)
	return err
}
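
// A minimal usage sketch - the feeding and draining goroutines shown here
// are assumptions about the caller, not part of this package:
//
//	readDocChan := make(chan ConvertibleDoc, workerBufferSize)
//	outputChan := make(chan bson.D, workerBufferSize)
//	go func() {
//		// send ConvertibleDocs into readDocChan here, then signal completion
//		close(readDocChan)
//	}()
//	go func() {
//		for document := range outputChan {
//			// consume each processed bson.D, e.g. batch it for insertion
//			_ = document
//		}
//	}()
//	err := streamDocuments(true, 4, readDocChan, outputChan)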

// tokensToBSON reads in a slice of tokens for a record - along with the
// ordered field names - and returns a BSON document for the record.
func tokensToBSON(fields, tokens []string, numProcessed uint64) (bson.D, error) {
	log.Logf(log.DebugHigh, "got line: %v", tokens)
	var parsedValue interface{}
	document := bson.D{}
	for index, token := range tokens {
		parsedValue = getParsedValue(token)
		if index < len(fields) {
			if strings.Index(fields[index], ".") != -1 {
				setNestedValue(fields[index], parsedValue, &document)
			} else {
				document = append(document, bson.DocElem{fields[index], parsedValue})
			}
		} else {
			key := "field" + strconv.Itoa(index)
			if util.StringSliceContains(fields, key) {
				return nil, fmt.Errorf("Duplicate header name - on %v - for token #%v ('%v') in document #%v",
					key, index+1, parsedValue, numProcessed)
			}
			document = append(document, bson.DocElem{key, parsedValue})
		}
	}
	return document, nil
}
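
// For example (illustrative values), extra tokens beyond the named fields are
// keyed as "fieldN" by position:
//
//	tokensToBSON([]string{"a", "b"}, []string{"1", "x", "2.5"}, 1)
//	// yields bson.D{{"a", 1}, {"b", "x"}, {"field2", 2.5}}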

// validateHeaders takes an InputReader and does some validation on its header
// fields. It returns the validated field names, or an error if an issue is
// found in the header list
func validateHeaders(inputReader InputReader, hasHeaderLine bool) (validatedFields []string, err error) {
	unsortedHeaders := []string{}
	if hasHeaderLine {
		unsortedHeaders, err = inputReader.ReadHeadersFromSource()
		if err != nil {
			return nil, err
		}
	} else {
		unsortedHeaders = inputReader.GetHeaders()
	}

	headers := make([]string, len(unsortedHeaders))
	copy(headers, unsortedHeaders)
	sort.Sort(sort.StringSlice(headers))

	for index, header := range headers {
		if strings.HasSuffix(header, ".") || strings.HasPrefix(header, ".") {
			return nil, fmt.Errorf("field name '%v' can not start or end in '.'", header)
		}
		if strings.Contains(header, "..") {
			return nil, fmt.Errorf("field name '%v' can not contain consecutive '.' characters", header)
		}
		// NOTE: since headers is sorted, this check ensures that no header
		// is incompatible with another one that occurs further down the list.
		// meant to prevent cases where we have headers like "a" and "a.c"
		for _, latterHeader := range headers[index+1:] {
			// NOTE: this means we will not support imports that have fields that
			// include e.g. a, a.b
			if strings.HasPrefix(latterHeader, header+".") {
				return nil, fmt.Errorf("incompatible field names found: '%v' and '%v'",
					header, latterHeader)
			}
			// NOTE: this means we will not support imports that have fields like
			// a, a - since this is invalid in MongoDB
			if header == latterHeader {
				return nil, fmt.Errorf("field names can not be identical: '%v' and '%v'",
					header, latterHeader)
			}
		}
		validatedFields = append(validatedFields, unsortedHeaders[index])
	}
	if len(headers) == 1 {
		log.Logf(log.Info, "using field: %v", validatedFields[0])
	} else {
		log.Logf(log.Info, "using fields: %v", strings.Join(validatedFields, ","))
	}
	return validatedFields, nil
}
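
// For example, these header lists would be rejected:
//
//	[]string{"a", "a.b"} // "a" collides with the nested field "a.b"
//	[]string{"a", "a"}   // duplicate field names
//	[]string{".a"}       // field name starts with '.'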

// processDocuments reads from the ConvertibleDoc channel and, for each record,
// converts it to a bson.D document before sending it on the
// processedDocumentChan channel. Once the input channel is closed, the
// processed channel is also closed if the worker streams its reads in order
func (importWorker *ImportWorker) processDocuments(ordered bool) error {
	if ordered {
		defer close(importWorker.processedDocumentChan)
	}
	for {
		select {
		case convertibleDoc, alive := <-importWorker.unprocessedDataChan:
			if !alive {
				return nil
			}
			document, err := convertibleDoc.Convert()
			if err != nil {
				return err
			}
			importWorker.processedDocumentChan <- document
		case <-importWorker.tomb.Dying():
			return nil
		}
	}
}