'use strict';

/**
 * Perform continuous renames on 3 collections per database, with the objective of verifying that:
 * - Upon successful renames, no data is lost
 * - Upon unsuccessful renames, no unexpected exception is thrown. Accepted errors:
 * ---- NamespaceNotFound (tried to rename a random non-existing collection)
 * ---- ConflictingOperationInProgress (tried to perform concurrent renames on the same source
 *      collection with different target collections)
 * - The aforementioned acceptable exceptions are thrown at least once, which is expected given the
 *   high level of concurrency
 *
 * @tags: [
 *   requires_sharding,
 *   # TODO (SERVER-56879): Support add/remove shards in new DDL paths
 *   does_not_support_add_remove_shards,
 *   # This test just performs rename operations that can't be executed in transactions
 *   does_not_support_transactions,
 *   # Can be removed once PM-1965-Milestone-1 is completed.
 *
 *   # TODO SERVER-73385 reenable when fixed.
 *   assumes_balancer_off,
 *  ]
 */
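
// For context, a minimal sketch (assumed invocation, not part of this workload file) of how the
// concurrency FSM runner is typically pointed at this workload; the exact runner entry point and
// suite wiring may differ between branches:
//
//   load('jstests/concurrency/fsm_libs/runner.js');
//   runWorkloadsSerially(['jstests/concurrency/fsm_workloads/rename_sharded_collection.js']);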

load('jstests/concurrency/fsm_workload_helpers/balancer.js');

const numChunks = 20;
const documentsPerChunk = 5;
const dbNames = ['db0', 'db1'];
const collNames =
    ['rename_sharded_collectionA', 'rename_sharded_collectionB', 'rename_sharded_collectionC'];

/*
 * Initialize a collection with the expected number of chunks/documents and randomly distribute its
 * chunks across the shards.
 */
function initAndFillShardedCollection(db, collName, shardNames) {
    const coll = db[collName];
    const ns = coll.getFullName();
    assert.commandWorked(db.adminCommand({shardCollection: ns, key: {x: 1}}));

    // Disallow balancing 'ns' during $setup so it does not interfere with the splits.
    BalancerHelper.disableBalancerForCollection(db, ns);
    BalancerHelper.joinBalancerRound(db);

    var nextShardKeyValue = 0;
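    // Each loop iteration inserts 'documentsPerChunk' documents, splits them into their own chunk
    // at the next shard key value, and then attempts to move that chunk to a random shard.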
    for (var i = 0; i < numChunks; i++) {
        for (var j = 0; j < documentsPerChunk; j++) {
            assert.commandWorked(coll.insert({x: nextShardKeyValue++}));
        }

        assert.commandWorked(db.adminCommand({split: ns, middle: {x: nextShardKeyValue}}));

        const lastInsertedShardKeyValue = nextShardKeyValue - 1;

        // When the balancer is enabled, these moveChunk operations may overlap with balancer
        // migrations and fail with ConflictingOperationInProgress.
        const res = db.adminCommand({
            moveChunk: ns,
            find: {x: lastInsertedShardKeyValue},
            to: shardNames[Random.randInt(shardNames.length)],
        });
        assert.commandWorkedOrFailedWithCode(res, ErrorCodes.ConflictingOperationInProgress);
    }

    // Allow balancing 'ns' again.
    BalancerHelper.enableBalancerForCollection(db, ns);
}

/*
 * Get a random db/coll name from the test lists.
 *
 * Using the thread id to introduce more randomness: it has been observed that concurrent calls to
 * Random.randInt(array.length) too often return the same number to different threads.
 */
function getRandomDbName(tid) {
    return dbNames[Random.randInt(tid * tid) % dbNames.length];
}
function getRandomCollName(tid) {
    return collNames[Random.randInt(tid * tid) % collNames.length];
}

/*
 * Keep track of raised exceptions in a collection to be checked during teardown.
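 *
 * The log is kept in a dedicated database ('exceptions'), outside the databases touched by the
 * workload, so the renames performed by the FSM threads cannot affect it.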
 */
const expectedExceptions =
    [ErrorCodes.NamespaceNotFound, ErrorCodes.ConflictingOperationInProgress];
const logExceptionsDBName = 'exceptions';
const logExceptionsCollName = 'log';

function logException(db, exceptionCode) {
    db = db.getSiblingDB(logExceptionsDBName);
    const coll = db[logExceptionsCollName];
    assert.commandWorked(coll.insert({code: exceptionCode}));
}

function checkExceptionHasBeenThrown(db, exceptionCode) {
    db = db.getSiblingDB(logExceptionsDBName);
    const coll = db[logExceptionsCollName];
    const count = coll.countDocuments({code: exceptionCode});
    assert.gte(count, 1, 'No exception with error code ' + exceptionCode + ' has been thrown');
}

var $config = (function() {
    let states = {
        rename: function(db, collName, connCache) {
            const dbName = getRandomDbName(this.tid);
            db = db.getSiblingDB(dbName);
            collName = getRandomCollName(this.tid);
            const srcColl = db[collName];
            const destCollName = getRandomCollName(this.tid);
            try {
                assertAlways.commandWorked(srcColl.renameCollection(destCollName));
            } catch (e) {
                const exceptionCode = e.code;
                if (exceptionCode == ErrorCodes.IllegalOperation) {
                    assert.eq(
                        collName,
                        destCollName,
                        "The FSM thread can fail with IllegalOperation just if a rename collection is happening on the same collection.");
                    return;
                }
                if (exceptionCode) {
                    logException(db, exceptionCode);
                    if (expectedExceptions.includes(exceptionCode)) {
                        return;
                    }
                }
                throw e;
            }
        }
    };

    let setup = function(db, collName, cluster) {
        const shardNames = Object.keys(cluster.getSerializedCluster().shards);
        const numShards = shardNames.length;

        // Initialize databases
        for (var i = 0; i < dbNames.length; i++) {
            const dbName = dbNames[i];
            const newDb = db.getSiblingDB(dbName);
            assert.commandWorked(newDb.adminCommand(
                {enablesharding: dbName, primaryShard: shardNames[i % numShards]}));
            // Initialize one sharded collection per db
            initAndFillShardedCollection(
                newDb, collNames[Random.randInt(collNames.length)], shardNames);
        }
    };

    let teardown = function(db, collName, cluster) {
        // Ensure that NamespaceNotFound and ConflictingOperationInProgress have been raised at
        // least once: with a high level of concurrency, it's too improbable for such exceptions to
        // never be thrown (in that case, it's very likely that a bug has been introduced).
        expectedExceptions.forEach(errCode => checkExceptionHasBeenThrown(db, errCode));

        // Check that exactly one collection per test DB is present and that no data has been lost
        // upon multiple renames.
        for (var i = 0; i < dbNames.length; i++) {
            const dbName = dbNames[i];
            db = db.getSiblingDB(dbName);
            const listColl = db.getCollectionNames();
            assert.eq(1, listColl.length, 'Unexpected number of collections in database ' + dbName);
            collName = listColl[0];
            const numDocs = db[collName].countDocuments({});
            assert.eq(numChunks * documentsPerChunk, numDocs, 'Unexpected number of documents');
        }
    };

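    // Single-state machine: each thread keeps executing the 'rename' state for all its iterations.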
    let transitions = {rename: {rename: 1.0}};

    return {
        threadCount: 12,
        iterations: 64,
        startState: 'rename',
        states: states,
        transitions: transitions,
        data: {},
        setup: setup,
        teardown: teardown,
        passConnectionCache: true
    };
})();