// @file chunk.h

/**
*    Copyright (C) 2008 10gen Inc.
*
*    This program is free software: you can redistribute it and/or  modify
*    it under the terms of the GNU Affero General Public License, version 3,
*    as published by the Free Software Foundation.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU Affero General Public License for more details.
*
*    You should have received a copy of the GNU Affero General Public License
*    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*
*    As a special exception, the copyright holders give permission to link the
*    code of portions of this program with the OpenSSL library under certain
*    conditions as described in each individual source file and distribute
*    linked combinations including the program with the OpenSSL library. You
*    must comply with the GNU Affero General Public License in all respects
*    for all of the code used other than as permitted herein. If you modify
*    file(s) with this exception, you may extend this exception to your
*    version of the file(s), but you are not obligated to do so. If you do not
*    wish to do so, delete this exception statement from your version. If you
*    delete this exception statement from all source files in the program,
*    then also delete it in the license file.
*/

#pragma once

#include "mongo/base/string_data.h"
#include "mongo/bson/util/atomic_int.h"
#include "mongo/s/chunk_version.h"
#include "mongo/s/distlock.h"
#include "mongo/s/shard.h"
#include "mongo/s/shardkey.h"
#include "mongo/util/concurrency/ticketholder.h"
#include "mongo/db/query/query_solution.h"

namespace mongo {

    class DBConfig;
    class Chunk;
    class ChunkRange;
    class ChunkManager;
    class ChunkObjUnitTest;

    typedef shared_ptr<const Chunk> ChunkPtr;

    // key is max for each Chunk or ChunkRange
    typedef map<BSONObj,ChunkPtr,BSONObjCmp> ChunkMap;
    typedef map<BSONObj,shared_ptr<ChunkRange>,BSONObjCmp> ChunkRangeMap;

    typedef shared_ptr<const ChunkManager> ChunkManagerPtr;

    /**
       config.chunks
       { ns : "alleyinsider.fs.chunks" , min : {} , max : {} , server : "localhost:30001" }

       x is in a chunk iff
       min <= x < max
     */
    class Chunk : boost::noncopyable {
    public:
        Chunk( const ChunkManager * info , BSONObj from);
        Chunk( const ChunkManager * info ,
               const BSONObj& min,
               const BSONObj& max,
               const Shard& shard,
               ChunkVersion lastmod = ChunkVersion() );

        //
        // serialization support
        //

        void serialize(BSONObjBuilder& to, ChunkVersion myLastMod=ChunkVersion(0,OID()));

        //
        // chunk boundary support
        //

        const BSONObj& getMin() const { return _min; }
        const BSONObj& getMax() const { return _max; }

        // whether the min key is negative infinity / the max key is positive infinity
        bool minIsInf() const;
        bool maxIsInf() const;

        // Returns true if this chunk contains the given point, and false otherwise
        //
        // Note: this function takes an extracted *key*, not an original document
        // (the point may be computed by, say, hashing a given field or projecting
        //  to a subset of fields).
        bool containsPoint( const BSONObj& point ) const;
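
        // Example (an illustrative sketch; the values are hypothetical): the
        // chunk interval is half-open, min <= x < max, so for a chunk with
        // min {x: 0} and max {x: 100}:
        //
        //   chunk.containsPoint( BSON( "x" << 0 ) );   // true  (min is inclusive)
        //   chunk.containsPoint( BSON( "x" << 99 ) );  // true
        //   chunk.containsPoint( BSON( "x" << 100 ) ); // false (max is exclusive)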

        string genID() const;
        static string genID( const string& ns , const BSONObj& min );

        //
        // chunk version support
        //

        void appendShortVersion( const char * name , BSONObjBuilder& b ) const;

        ChunkVersion getLastmod() const { return _lastmod; }
        void setLastmod( ChunkVersion v ) { _lastmod = v; }

        //
        // split support
        //

        long getBytesWritten() const { return _dataWritten; }
        // Const since _dataWritten is mutable and a heuristic
        // TODO: Split data tracking and chunk information
        void setBytesWritten( long bytesWritten ) const { _dataWritten = bytesWritten; }

        /**
         * If the amount of data written nears the maximum chunk size,
         * check the real size; if it is too big, split the chunk.
         * @return whether a split was performed
         */
        bool splitIfShould( long dataWritten ) const;

        /**
         * Splits this chunk at a non-specified split key to be chosen by the
         * mongod holding this chunk.
         *
         * @param atMedian if set to true, will split the chunk at the middle regardless of
         *      whether the split is really necessary size-wise. If set to false, will only
         *      split if the chunk has reached the currently desired maximum size. Setting to
         *      false also has the effect of splitting the chunk such that the resulting
         *      chunks will never be greater than the current chunk size setting.
         * @param resultingSplits the number of resulting split points. Set to NULL to ignore.
         *
         * @throws UserException
         */
        Status split( bool atMedian, size_t* resultingSplits ) const;
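
        // Example (an illustrative usage sketch; "c" is an assumed ChunkPtr):
        // ask the mongod holding the chunk to split at its median key,
        // regardless of size:
        //
        //   size_t numSplits = 0;
        //   Status status = c->split( true /* atMedian */, &numSplits );
        //   if ( !status.isOK() ) {
        //       // handle the failed split
        //   }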

        /**
         * Splits this chunk at the given key (or keys).
         *
         * @param splitPoints the vector of keys that should be used to divide this chunk
         *
         * @throws UserException
         */
        Status multiSplit( const vector<BSONObj>& splitPoints ) const;

        /**
         * Asks the mongod holding this chunk to find a key that approximately divides this chunk in two
         *
         * @param medianKey the key that divides this chunk, if there is one, or empty
         */
        void pickMedianKey( BSONObj& medianKey ) const;

        /**
         * @param splitPoints vector to be filled in
         * @param chunkSize chunk size to target in bytes
         * @param maxPoints limits the number of split points returned; zero means no limit (optional)
         * @param maxObjs limits the number of objects in each chunk; zero means no limit (optional)
         */
        void pickSplitVector( vector<BSONObj>& splitPoints , int chunkSize , int maxPoints = 0, int maxObjs = 0) const;
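
        // Example (an illustrative sketch; "c" is an assumed ChunkPtr): fetch
        // split points targeting roughly 32MB chunks, with no cap on the
        // number of points or objects per chunk, then split at them:
        //
        //   vector<BSONObj> splitPoints;
        //   c->pickSplitVector( splitPoints, 32 * 1024 * 1024 );
        //   if ( !splitPoints.empty() )
        //       c->multiSplit( splitPoints );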

        //
        // migration support
        //

        /**
         * Issues a migrate request for this chunk
         *
         * @param to shard to move this chunk to
         * @param chunkSize maximum number of bytes beyond which the migrate should not go through
         * @param secondaryThrottle whether all writes during the migrate should block for replication
         * @param waitForDelete whether the chunk move should wait for cleanup or return immediately
         * @param maxTimeMS max time for the migrate request
         * @param res the object containing details about the migrate execution
         * @return true if move was successful
         */
        bool moveAndCommit(const Shard& to,
                           long long chunkSize,
                           bool secondaryThrottle,
                           bool waitForDelete,
                           int maxTimeMS,
                           BSONObj& res) const;
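
        // Example (a hedged usage sketch; "c" and "destShard" are assumed to
        // exist, and the parameter values are illustrative, not recommended
        // defaults):
        //
        //   BSONObj res;
        //   bool ok = c->moveAndCommit( destShard,
        //                               64 * 1024 * 1024, // chunkSize cap in bytes
        //                               true,             // secondaryThrottle
        //                               false,            // don't wait for cleanup
        //                               30 * 1000,        // maxTimeMS
        //                               res );
        //   if ( !ok ) {
        //       // res describes why the migrate failed
        //   }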

        /**
         * @return size of shard in bytes
         *  talks to mongod to do this
         */
        long getPhysicalSize() const;

        /**
         * Marks this chunk as a jumbo chunk,
         * which makes the chunk ineligible for migrates.
         */

        bool isJumbo() const { return _jumbo; }

        /**
         * Attempt to refresh maximum chunk size from config.
         */
        static void refreshChunkSize();

        /**
         * Sets MaxChunkSize, in megabytes.
         * Valid range: 1 <= newMaxChunkSize <= 1024.
         * @return true if newMaxChunkSize is valid and was set
         */
        static bool setMaxChunkSizeSizeMB( int newMaxChunkSize );
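
        // Example (illustrative): values outside [1, 1024] are rejected and
        // leave the setting unchanged:
        //
        //   Chunk::setMaxChunkSizeSizeMB( 128 );  // returns true, sets 128MB
        //   Chunk::setMaxChunkSizeSizeMB( 2048 ); // returns false, not set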

        //
        // public constants
        //

        static int MaxChunkSize;
        static int MaxObjectPerChunk;
        static bool ShouldAutoSplit;

        //
        // accessors and helpers
        //

        string toString() const;

        friend ostream& operator << (ostream& out, const Chunk& c) { return (out << c.toString()); }

        // chunk equality is determined by comparing the min and max bounds of the chunk
        bool operator==(const Chunk& s) const;
        bool operator!=(const Chunk& s) const { return ! ( *this == s ); }

        string getns() const;
        Shard getShard() const { return _shard; }
        const ChunkManager* getManager() const { return _manager; }
        

    private:

        // main shard info
        
        const ChunkManager * _manager;

        BSONObj _min;
        BSONObj _max;
        Shard _shard;
        ChunkVersion _lastmod;
        mutable bool _jumbo;

        // transient stuff

        mutable long _dataWritten;

        // methods, etc..

        /** Returns the highest or lowest existing value in the shard-key space.
         *  Warning: this assumes that the shard key is not "special"; that is, that the
         *           shardKeyPattern is simply an ordered list of ascending/descending
         *           field names. For example, {a : 1, b : -1} is not special, but
         *           {a : "hashed"} is.
         *
         *  If sort is 1, returns the lowest key; if sort is -1, returns the highest key.
         *  Returns an empty object if the shard has none.
         */
        BSONObj _getExtremeKey( int sort ) const;

        /**
         * Determines the appropriate split points for this chunk.
         *
         * @param atMedian perform a single split at the middle of this chunk.
         * @param splitPoints out parameter containing the chosen split points. Can be empty.
         */
        void determineSplitPoints(bool atMedian, std::vector<BSONObj>* splitPoints) const;

        /** Initializes _dataWritten with a random value so that a mongos restart doesn't delay splitting. */
        static int mkDataWritten();

        ShardKeyPattern skey() const;
    };

    class ChunkRange {
    public:
        const ChunkManager* getManager() const { return _manager; }
        Shard getShard() const { return _shard; }

        const BSONObj& getMin() const { return _min; }
        const BSONObj& getMax() const { return _max; }

        // clones of Chunk methods
        // Returns true if this ChunkRange contains the given point, and false otherwise
        //
        // Note: this function takes an extracted *key*, not an original document
        // (the point may be computed by, say, hashing a given field or projecting
        //  to a subset of fields).
        bool containsPoint( const BSONObj& point ) const;

        ChunkRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end)
            : _manager(begin->second->getManager())
            , _shard(begin->second->getShard())
            , _min(begin->second->getMin())
            , _max(boost::prior(end)->second->getMax()) {
            verify( begin != end );

            DEV while (begin != end) {
                verify(begin->second->getManager() == _manager);
                verify(begin->second->getShard() == _shard);
                ++begin;
            }
        }

        // Merge min and max (must be adjacent ranges)
        ChunkRange(const ChunkRange& min, const ChunkRange& max)
            : _manager(min.getManager())
            , _shard(min.getShard())
            , _min(min.getMin())
            , _max(max.getMax()) {
            verify(min.getShard() == max.getShard());
            verify(min.getManager() == max.getManager());
            verify(min.getMax() == max.getMin());
        }

        friend ostream& operator<<(ostream& out, const ChunkRange& cr) {
            return (out << "ChunkRange(min=" << cr._min << ", max=" << cr._max << ", shard=" << cr._shard <<")");
        }

    private:
        const ChunkManager* _manager;
        const Shard _shard;
        const BSONObj _min;
        const BSONObj _max;
    };


    class ChunkRangeManager {
    public:
        const ChunkRangeMap& ranges() const { return _ranges; }

        void clear() { _ranges.clear(); }

        void reloadAll(const ChunkMap& chunks);

        // Slow operation -- wrap with DEV
        void assertValid() const;

        ChunkRangeMap::const_iterator upper_bound(const BSONObj& o) const { return _ranges.upper_bound(o); }
        ChunkRangeMap::const_iterator lower_bound(const BSONObj& o) const { return _ranges.lower_bound(o); }

    private:
        // assumes nothing in this range exists in _ranges
        void _insertRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end);

        ChunkRangeMap _ranges;
    };

    /* config.sharding
         { ns: 'alleyinsider.fs.chunks' ,
           key: { ts : 1 } ,
           shards: [ { min: 1, max: 100, server: a } , { min: 101, max: 200 , server : b } ]
         }
    */
    class ChunkManager {
    public:
        typedef map<Shard,ChunkVersion> ShardVersionMap;

        // Loads a new chunk manager from a collection document
        ChunkManager( const BSONObj& collDoc );

        // Creates an empty chunk manager for the namespace
        ChunkManager( const string& ns, const ShardKeyPattern& pattern, bool unique );

        // Updates a chunk manager based on an older manager
        ChunkManager( ChunkManagerPtr oldManager );

        string getns() const { return _ns; }

        const ShardKeyPattern& getShardKey() const {  return _key; }

        bool hasShardKey( const BSONObj& obj ) const;

        bool isUnique() const { return _unique; }

        /**
         * A monotonically increasing number assigned to each new ChunkManager, so callers
         * can tell whether the manager they hold has been superseded by an update.
         */
        unsigned long long getSequenceNumber() const { return _sequenceNumber; }

        //
        // After constructor is invoked, we need to call loadExistingRanges.  If this is a new
        // sharded collection, we can call createFirstChunks first.
        //

        // Creates new chunks based on info in chunk manager
        void createFirstChunks( const string& config,
                                const Shard& primary,
                                const vector<BSONObj>* initPoints,
                                const vector<Shard>* initShards );

        // Loads existing ranges based on info in chunk manager
        void loadExistingRanges( const string& config );


        // Helpers for load
        void calcInitSplitsAndShards( const Shard& primary,
                                      const vector<BSONObj>* initPoints,
                                      const vector<Shard>* initShards,
                                      vector<BSONObj>* splitPoints,
                                      vector<Shard>* shards ) const;

        //
        // Methods to use once loaded / created
        //

        int numChunks() const { return _chunkMap.size(); }

        /** Given a document, returns the chunk which contains that document.
         *  This works by extracting the shard key part of the given document, then
         *  calling findIntersectingChunk() on the extracted key.
         *
         *  See also the description for findIntersectingChunk().
         */
        ChunkPtr findChunkForDoc( const BSONObj& doc ) const;

        /** Given a key that has been extracted from a document, returns the
         *  chunk that contains that key.
         *
         *  For instance, to locate the chunk for document {a : "foo" , b : "bar"}
         *  when the shard key is {a : "hashed"}, you can call
         *      findChunkForDoc() on {a : "foo" , b : "bar"}, or
         *      findIntersectingChunk() on {a : hash("foo") }
         */
        ChunkPtr findIntersectingChunk( const BSONObj& point ) const;
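
        // Example (a hedged sketch; "manager" is an assumed ChunkManagerPtr and
        // "hashOfFoo" stands in for the hashed key value): with shard key
        // {a : "hashed"}, both calls locate the same chunk:
        //
        //   ChunkPtr c1 = manager->findChunkForDoc( BSON( "a" << "foo" << "b" << "bar" ) );
        //   ChunkPtr c2 = manager->findIntersectingChunk( BSON( "a" << hashOfFoo ) );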


        ChunkPtr findChunkOnServer( const Shard& shard ) const;

        void getShardsForQuery( set<Shard>& shards , const BSONObj& query ) const;
        void getAllShards( set<Shard>& all ) const;
        /** @param shards set to the shards covered by the interval [min, max], see SERVER-4791 */
        void getShardsForRange( set<Shard>& shards, const BSONObj& min, const BSONObj& max ) const;
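
        // Example (an illustrative sketch; "manager" is an assumed
        // ChunkManagerPtr): collect every shard owning data in [{x: 0}, {x: 100}):
        //
        //   set<Shard> shards;
        //   manager->getShardsForRange( shards, BSON( "x" << 0 ), BSON( "x" << 100 ) );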

        // Transforms the query into bounds for each field in the shard key.
        // For example:
        //   Key { a: 1, b: 1 },
        //   Query { a : { $gte : 1, $lt : 2 },
        //           b : { $gte : 3, $lt : 4 } }
        //   => Bounds { a : [1, 2), b : [3, 4) }
        static IndexBounds getIndexBoundsForQuery(const BSONObj& key, const CanonicalQuery* canonicalQuery);

        // Collapses the query solution tree.
        //
        // If the tree contains an OR node, the result can be a superset of the bounds
        // generated for any single branch. Since a single IndexBounds must be returned,
        // this takes the union of the bounds on each field. For example:
        //   OR: { a: (0, 1), b: (0, 1) },
        //       { a: (2, 3), b: (2, 3) }
        //   =>  { a: (0, 1), (2, 3), b: (0, 1), (2, 3) }
        static IndexBounds collapseQuerySolution( const QuerySolutionNode* node );

        ChunkMap getChunkMap() const { return _chunkMap; }

        /**
         * Returns true if, for this shard, the chunks are identical in both chunk managers
         */
        bool compatibleWith( const ChunkManager& other, const Shard& shard ) const;
        bool compatibleWith( ChunkManagerPtr other, const Shard& shard ) const { if( ! other ) return false; return compatibleWith( *other, shard ); }

        bool compatibleWith( const Chunk& other ) const;
        bool compatibleWith( ChunkPtr other ) const { if( ! other ) return false; return compatibleWith( *other ); }

        string toString() const;

        ChunkVersion getVersion( const StringData& shardName ) const;
        ChunkVersion getVersion( const Shard& shard ) const;
        ChunkVersion getVersion() const;

        void getInfo( BSONObjBuilder& b ) const;

        /**
         * @param me passed so this manager isn't deleted before the drop completes
         */
        void drop( ChunkManagerPtr me ) const;

        void _printChunks() const;

        int getCurrentDesiredChunkSize() const;

        ChunkManagerPtr reload(bool force=true) const; // doesn't modify self!

        void markMinorForReload( ChunkVersion majorVersion ) const;
        void getMarkedMinorVersions( set<ChunkVersion>& minorVersions ) const;

    private:

        // helpers for loading

        // returns true if load was consistent
        bool _load( const string& config, ChunkMap& chunks, set<Shard>& shards,
                                    ShardVersionMap& shardVersions, ChunkManagerPtr oldManager);
        static bool _isValid(const ChunkMap& chunks);

        // end helpers

        // All members should be const for thread-safety
        const string _ns;
        const ShardKeyPattern _key;
        const bool _unique;

        const ChunkMap _chunkMap;
        const ChunkRangeManager _chunkRanges;

        const set<Shard> _shards;

        const ShardVersionMap _shardVersions; // max version per shard

        // max version of any chunk
        ChunkVersion _version;

        // the previous manager this was based on
        // cleared after loading chunks
        ChunkManagerPtr _oldManager;

        mutable mutex _mutex; // only used with _nsLock

        const unsigned long long _sequenceNumber;

        //
        // Split Heuristic info
        //


        class SplitHeuristics {
        public:

            SplitHeuristics() :
                _splitTickets( maxParallelSplits ),
                _staleMinorSetMutex( "SplitHeuristics::staleMinorSet" ),
                _staleMinorCount( 0 ) {}

            void markMinorForReload( const string& ns, ChunkVersion majorVersion );
            void getMarkedMinorVersions( set<ChunkVersion>& minorVersions );

            TicketHolder _splitTickets;

            mutex _staleMinorSetMutex;

            // mutex protects below
            int _staleMinorCount;
            set<ChunkVersion> _staleMinorSet;

            // Test whether we should split once data * splitTestFactor > chunkSize (approximately)
            static const int splitTestFactor = 5;
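            // (e.g., with a typical 64MB max chunk size, a chunk gets a split
            //  check after roughly 64 / 5, i.e. about 13MB, of writes)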
            // Maximum number of parallel threads requesting a split
            static const int maxParallelSplits = 5;

            // The idea here is that we're over-aggressive on split testing by a factor of
            // splitTestFactor, so we can safely wait until we get to splitTestFactor invalid splits
            // before changing.  Unfortunately, we also potentially over-request the splits by a
            // factor of maxParallelSplits, but since the factors are identical it works out
            // (for now) for parallel or sequential oversplitting.
            // TODO: Make splitting a separate thread with notifications?
            static const int staleMinorReloadThreshold = maxParallelSplits;

        };

        mutable SplitHeuristics _splitHeuristics;

        //
        // End split heuristics
        //

        friend class Chunk;
        friend class ChunkRangeManager; // only needed for CRM::assertValid()
        static AtomicUInt NextSequenceNumber;
        
        /** Just for testing */
        friend class TestableChunkManager;
        ChunkManager();
    };

    // Like BSONObjCmp, for use as an STL comparison functor.
    // The key order in the "order" argument must match the key order in the shard key.
    class ChunkCmp {
    public:
        ChunkCmp( const BSONObj &order = BSONObj() ) : _cmp( order ) {}
        bool operator()( const Chunk &l, const Chunk &r ) const {
            return _cmp(l.getMin(), r.getMin());
        }
        bool operator()( const ptr<Chunk> l, const ptr<Chunk> r ) const {
            return operator()(*l, *r);
        }

        // Also support ChunkRanges
        bool operator()( const ChunkRange &l, const ChunkRange &r ) const {
            return _cmp(l.getMin(), r.getMin());
        }
        bool operator()( const shared_ptr<ChunkRange> l, const shared_ptr<ChunkRange> r ) const {
            return operator()(*l, *r);
        }
    private:
        BSONObjCmp _cmp;
    };
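
    // Example (an illustrative sketch): ChunkCmp lets STL containers keep
    // chunks or ranges ordered by their min bound:
    //
    //   set< shared_ptr<ChunkRange>, ChunkCmp > orderedRanges;
    //   // insertions stay sorted ascending by getMin()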

    inline string Chunk::genID() const { return genID(_manager->getns(), _min); }

    bool setShardVersion( DBClientBase & conn,
                          const string& ns,
                          ChunkVersion version,
                          ChunkManagerPtr manager,
                          bool authoritative,
                          BSONObj& result );

} // namespace mongo