Diffstat (limited to 'src/mongo/db/pipeline')
8 files changed, 46 insertions, 20 deletions
diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp
index bf576960b20..ef90138a584 100644
--- a/src/mongo/db/pipeline/document_source_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_cursor.cpp
@@ -40,6 +40,7 @@
 #include "mongo/db/query/explain.h"
 #include "mongo/db/query/find_common.h"
 #include "mongo/db/storage/storage_options.h"
+#include "mongo/logv2/log.h"
 #include "mongo/util/fail_point.h"
 #include "mongo/util/log.h"
 #include "mongo/util/scopeguard.h"
@@ -80,7 +81,8 @@ void DocumentSourceCursor::loadBatch() {
     }
 
     while (MONGO_unlikely(hangBeforeDocumentSourceCursorLoadBatch.shouldFail())) {
-        log() << "Hanging aggregation due to 'hangBeforeDocumentSourceCursorLoadBatch' failpoint";
+        LOGV2(20895,
+              "Hanging aggregation due to 'hangBeforeDocumentSourceCursorLoadBatch' failpoint");
         sleepmillis(10);
     }
 
diff --git a/src/mongo/db/pipeline/document_source_exchange.cpp b/src/mongo/db/pipeline/document_source_exchange.cpp
index 9e8e37e97cd..8c7b990ba78 100644
--- a/src/mongo/db/pipeline/document_source_exchange.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange.cpp
@@ -39,6 +39,7 @@
 #include "mongo/db/hasher.h"
 #include "mongo/db/pipeline/document_source_exchange.h"
 #include "mongo/db/storage/key_string.h"
+#include "mongo/logv2/log.h"
 #include "mongo/util/log.h"
 
 namespace mongo {
@@ -302,7 +303,8 @@ DocumentSource::GetNextResult Exchange::getNext(OperationContext* opCtx,
 
         // There is not any document so try to load more from the source.
         if (_loadingThreadId == kInvalidThreadId) {
-            LOG(3) << "A consumer " << consumerId << " begins loading";
+            LOGV2_DEBUG(
+                20896, 3, "A consumer {consumerId} begins loading", "consumerId"_attr = consumerId);
 
             try {
                 // This consumer won the race and will fill the buffers.
@@ -316,7 +318,7 @@
                 size_t fullConsumerId = loadNextBatch();
 
                 if (MONGO_unlikely(exchangeFailLoadNextBatch.shouldFail())) {
-                    log() << "exchangeFailLoadNextBatch fail point enabled.";
+                    LOGV2(20897, "exchangeFailLoadNextBatch fail point enabled.");
                     uasserted(ErrorCodes::FailPointEnabled,
                               "Asserting on loading the next batch due to failpoint.");
                 }
diff --git a/src/mongo/db/pipeline/document_source_exchange_test.cpp b/src/mongo/db/pipeline/document_source_exchange_test.cpp
index 65ab3f8b4a4..539cbc5cef9 100644
--- a/src/mongo/db/pipeline/document_source_exchange_test.cpp
+++ b/src/mongo/db/pipeline/document_source_exchange_test.cpp
@@ -27,6 +27,8 @@
  * it in the license file.
  */
 
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
+
 #include "mongo/platform/basic.h"
 
 #include "mongo/db/hasher.h"
@@ -36,11 +38,13 @@
 #include "mongo/db/storage/key_string.h"
 #include "mongo/executor/network_interface_factory.h"
 #include "mongo/executor/thread_pool_task_executor.h"
+#include "mongo/logv2/log.h"
 #include "mongo/platform/random.h"
 #include "mongo/unittest/temp_dir.h"
 #include "mongo/unittest/unittest.h"
 #include "mongo/util/clock_source_mock.h"
 #include "mongo/util/concurrency/thread_pool.h"
+#include "mongo/util/log.h"
 #include "mongo/util/system_clock_source.h"
 #include "mongo/util/time_support.h"
 
@@ -117,7 +121,7 @@ protected:
 
     static auto getNewSeed() {
         auto seed = Date_t::now().asInt64();
-        unittest::log() << "Generated new seed is " << seed;
+        LOGV2(20898, "Generated new seed is {seed}", "seed"_attr = seed);
         return seed;
     }
 
diff --git a/src/mongo/db/pipeline/document_source_match_test.cpp b/src/mongo/db/pipeline/document_source_match_test.cpp
index cb4821161fd..9873ba1c55b 100644
--- a/src/mongo/db/pipeline/document_source_match_test.cpp
+++ b/src/mongo/db/pipeline/document_source_match_test.cpp
@@ -27,6 +27,8 @@
  * it in the license file.
  */
 
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault
+
 #include "mongo/platform/basic.h"
 
 #include <string>
@@ -40,8 +42,10 @@
 #include "mongo/db/pipeline/document_source_match.h"
 #include "mongo/db/pipeline/document_source_mock.h"
 #include "mongo/db/pipeline/pipeline.h"
+#include "mongo/logv2/log.h"
 #include "mongo/unittest/death_test.h"
 #include "mongo/unittest/unittest.h"
+#include "mongo/util/log.h"
 
 namespace mongo {
 namespace {
@@ -59,7 +63,7 @@ TEST_F(DocumentSourceMatchTest, RedactSafePortion) {
             auto match = DocumentSourceMatch::create(fromjson(input), expCtx);
             ASSERT_BSONOBJ_EQ(match->redactSafePortion(), fromjson(safePortion));
         } catch (...) {
-            unittest::log() << "Problem with redactSafePortion() of: " << input;
+            LOGV2(20899, "Problem with redactSafePortion() of: {input}", "input"_attr = input);
             throw;
         }
     };
diff --git a/src/mongo/db/pipeline/document_source_merge.cpp b/src/mongo/db/pipeline/document_source_merge.cpp
index bc3239eb726..ad68c53598c 100644
--- a/src/mongo/db/pipeline/document_source_merge.cpp
+++ b/src/mongo/db/pipeline/document_source_merge.cpp
@@ -40,6 +40,7 @@
 #include "mongo/db/curop_failpoint_helpers.h"
 #include "mongo/db/ops/write_ops.h"
 #include "mongo/db/pipeline/document_path_support.h"
+#include "mongo/logv2/log.h"
 #include "mongo/util/log.h"
 
 namespace mongo {
@@ -487,8 +488,9 @@ void DocumentSourceMerge::waitWhileFailPointEnabled() {
         pExpCtx->opCtx,
         "hangWhileBuildingDocumentSourceMergeBatch",
         []() {
-            log() << "Hanging aggregation due to 'hangWhileBuildingDocumentSourceMergeBatch' "
-                  << "failpoint";
+            LOGV2(
+                20900,
+                "Hanging aggregation due to 'hangWhileBuildingDocumentSourceMergeBatch' failpoint");
         });
 }
 
diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp
index 67df77f760c..93122dac504 100644
--- a/src/mongo/db/pipeline/document_source_out.cpp
+++ b/src/mongo/db/pipeline/document_source_out.cpp
@@ -39,6 +39,7 @@
 #include "mongo/db/curop_failpoint_helpers.h"
 #include "mongo/db/ops/write_ops.h"
 #include "mongo/db/pipeline/document_path_support.h"
+#include "mongo/logv2/log.h"
 #include "mongo/rpc/get_status_from_command_result.h"
 #include "mongo/util/destructor_guard.h"
 #include "mongo/util/fail_point.h"
@@ -152,8 +153,8 @@ void DocumentSourceOut::initialize() {
         pExpCtx->opCtx,
         "outWaitAfterTempCollectionCreation",
         []() {
-            log() << "Hanging aggregation due to 'outWaitAfterTempCollectionCreation' "
-                  << "failpoint";
+            LOGV2(20901,
+                  "Hanging aggregation due to 'outWaitAfterTempCollectionCreation' failpoint");
         });
     if (_originalIndexes.empty()) {
         return;
@@ -245,8 +246,8 @@ void DocumentSourceOut::waitWhileFailPointEnabled() {
         pExpCtx->opCtx,
         "hangWhileBuildingDocumentSourceOutBatch",
         []() {
-            log() << "Hanging aggregation due to 'hangWhileBuildingDocumentSourceOutBatch' "
-                  << "failpoint";
+            LOGV2(20902,
+                  "Hanging aggregation due to 'hangWhileBuildingDocumentSourceOutBatch' failpoint");
         });
 }
 
diff --git a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
index 09e4b1fc148..64ca6cdf86b 100644
--- a/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
+++ b/src/mongo/db/pipeline/document_source_sample_from_random_cursor.cpp
@@ -40,6 +40,7 @@
 #include "mongo/db/exec/document_value/value.h"
 #include "mongo/db/pipeline/expression.h"
 #include "mongo/db/pipeline/expression_context.h"
+#include "mongo/logv2/log.h"
 #include "mongo/util/log.h"
 
 namespace mongo {
@@ -121,8 +122,10 @@ DocumentSource::GetNextResult DocumentSourceSampleFromRandomCursor::getNextNonDu
             if (_seenDocs.insert(std::move(idField)).second) {
                 return nextInput;
             }
-            LOG(1) << "$sample encountered duplicate document: "
-                   << nextInput.getDocument().toString();
+            LOGV2_DEBUG(20903,
+                        1,
+                        "$sample encountered duplicate document: {nextInput_getDocument}",
+                        "nextInput_getDocument"_attr = nextInput.getDocument().toString());
             break;  // Try again with the next document.
         }
         case GetNextResult::ReturnStatus::kPauseExecution: {
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index d6d3569e5a8..e288214f60d 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -49,6 +49,7 @@
 #include "mongo/db/pipeline/lite_parsed_pipeline.h"
 #include "mongo/db/pipeline/semantic_analysis.h"
 #include "mongo/executor/task_executor_pool.h"
+#include "mongo/logv2/log.h"
 #include "mongo/s/catalog/type_shard.h"
 #include "mongo/s/cluster_commands_helpers.h"
 #include "mongo/s/query/cluster_query_knobs_gen.h"
@@ -171,7 +172,10 @@ std::vector<RemoteCursor> establishShardCursors(
     const std::set<ShardId>& shardIds,
     const BSONObj& cmdObj,
     const ReadPreferenceSetting& readPref) {
-    LOG(1) << "Dispatching command " << redact(cmdObj) << " to establish cursors on shards";
+    LOGV2_DEBUG(20904,
+                1,
+                "Dispatching command {cmdObj} to establish cursors on shards",
+                "cmdObj"_attr = redact(cmdObj));
 
     const bool mustRunOnAll = mustRunOnAllShards(nss, hasChangeStream);
     std::vector<std::pair<ShardId, BSONObj>> requests;
@@ -204,8 +208,9 @@
     }
 
     if (MONGO_unlikely(shardedAggregateHangBeforeEstablishingShardCursors.shouldFail())) {
-        log() << "shardedAggregateHangBeforeEstablishingShardCursors fail point enabled. Blocking "
-                 "until fail point is disabled.";
+        LOGV2(20905,
+              "shardedAggregateHangBeforeEstablishingShardCursors fail point enabled. Blocking "
+              "until fail point is disabled.");
         while (MONGO_unlikely(shardedAggregateHangBeforeEstablishingShardCursors.shouldFail())) {
             sleepsecs(1);
         }
@@ -754,10 +759,13 @@
     boost::optional<SplitPipeline> splitPipelines;
 
     if (needsSplit) {
-        LOG(5) << "Splitting pipeline: "
-               << "targeting = " << shardIds.size()
-               << " shards, needsMongosMerge = " << needsMongosMerge
-               << ", needsPrimaryShardMerge = " << needsPrimaryShardMerge;
+        LOGV2_DEBUG(20906,
+                    5,
+                    "Splitting pipeline: targeting = {shardIds_size} shards, needsMongosMerge = "
+                    "{needsMongosMerge}, needsPrimaryShardMerge = {needsPrimaryShardMerge}",
+                    "shardIds_size"_attr = shardIds.size(),
+                    "needsMongosMerge"_attr = needsMongosMerge,
+                    "needsPrimaryShardMerge"_attr = needsPrimaryShardMerge);
         splitPipelines = splitPipeline(std::move(pipeline));
 
         exchangeSpec = checkIfEligibleForExchange(opCtx, splitPipelines->mergePipeline.get());
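For reference, a minimal sketch of the conversion pattern this change applies across src/mongo/db/pipeline: iostream-style log(), LOG(n), and unittest::log() statements become structured LOGV2 / LOGV2_DEBUG calls that take a unique numeric log ID, a message template with named placeholders, and "_attr bindings for each value. The snippet only mirrors calls already visible in the hunks above (IDs 20896 and 20898 and their messages); the enclosing function is hypothetical, and it assumes compilation inside the MongoDB source tree with the transitional headers shown in the diff.

// Illustrative sketch only -- not part of this commit. Assumes the MongoDB source tree
// at this revision, using the transitional logging headers added in the hunks above.
#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kDefault

#include "mongo/platform/basic.h"

#include "mongo/logv2/log.h"
#include "mongo/util/log.h"

namespace mongo {

// Hypothetical helper, present only to show the before/after shape of the calls.
void logExamples(long long seed, size_t consumerId) {
    // Old style (removed by this commit): stream the values into the message.
    //     log() << "Generated new seed is " << seed;
    //     LOG(3) << "A consumer " << consumerId << " begins loading";

    // New style: numeric log ID, message template with a named placeholder,
    // and the value bound by name via the "_attr user-defined literal.
    LOGV2(20898, "Generated new seed is {seed}", "seed"_attr = seed);

    // Debug variant: the severity level (here 3) follows the log ID.
    LOGV2_DEBUG(
        20896, 3, "A consumer {consumerId} begins loading", "consumerId"_attr = consumerId);
}

}  // namespace mongo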