summaryrefslogtreecommitdiff
path: root/spec/workers/bulk_imports/entity_worker_spec.rb
blob: ce45299c7f7eae985b7d9ac8055e9d541bd48796 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
# frozen_string_literal: true

require 'spec_helper'

# Specs for BulkImports::EntityWorker, which drives a bulk import entity
# through its pipeline stages by enqueueing BulkImports::PipelineWorker jobs
# for each tracker in the current stage.
RSpec.describe BulkImports::EntityWorker do
  let_it_be(:entity) { create(:bulk_import_entity) }

  # A stage-0 tracker for the entity — the first pipeline the worker is
  # expected to pick up and enqueue.
  let_it_be(:pipeline_tracker) do
    create(
      :bulk_import_tracker,
      entity: entity,
      pipeline_name: 'Stage0::Pipeline',
      stage: 0
    )
  end

  # Default perform args for the 'an idempotent worker' shared examples:
  # entity id only, no current stage (worker starts from the first stage).
  let(:job_args) { entity.id }

  it 'updates pipeline trackers to enqueued state when selected' do
    worker = BulkImports::EntityWorker.new

    # next_pipeline_trackers_for is private, hence `send`. Selecting a
    # tracker is expected to transition it to the enqueued state as a
    # side effect.
    next_tracker = worker.send(:next_pipeline_trackers_for, entity.id).first

    next_tracker.reload

    expect(next_tracker.enqueued?).to be_truthy

    # Once enqueued, the tracker must not be returned again — this is what
    # prevents the same pipeline from being scheduled twice.
    expect(worker.send(:next_pipeline_trackers_for, entity.id))
      .not_to include(next_tracker)
  end

  # Shared examples perform the job twice with the same args to assert
  # idempotency; expectations below account for that double execution.
  include_examples 'an idempotent worker' do
    it 'enqueues the first stage pipelines work' do
      expect_next_instance_of(Gitlab::Import::Logger) do |logger|
        # the worker runs twice but only executes once
        expect(logger)
          .to receive(:info).twice
          .with(
            worker: described_class.name,
            entity_id: entity.id,
            current_stage: nil
          )
      end

      # PipelineWorker is enqueued only once: the second run finds the
      # tracker already enqueued (see the tracker-state spec above).
      expect(BulkImports::PipelineWorker)
        .to receive(:perform_async)
        .with(
          pipeline_tracker.id,
          pipeline_tracker.stage,
          entity.id
        )

      subject
    end

    it 'logs and tracks the raised exceptions' do
      exception = StandardError.new('Error!')

      expect(BulkImports::PipelineWorker)
        .to receive(:perform_async)
              .and_raise(exception)

      expect_next_instance_of(Gitlab::Import::Logger) do |logger|
        # Info is still logged on both runs; the error is logged when
        # perform_async raises.
        expect(logger)
          .to receive(:info).twice
          .with(
            worker: described_class.name,
            entity_id: entity.id,
            current_stage: nil
          )

        expect(logger)
          .to receive(:error)
          .with(
            worker: described_class.name,
            entity_id: entity.id,
            current_stage: nil,
            error_message: 'Error!'
          )
      end

      # The exception must also reach the error tracker, not just the log.
      expect(Gitlab::ErrorTracking)
        .to receive(:track_exception)
              .with(exception, entity_id: entity.id)

      subject
    end

    context 'in first stage' do
      # Override job args to include an explicit current stage (0).
      let(:job_args) { [entity.id, 0] }

      it 'do not enqueue a new pipeline job if the current stage still running' do
        # pipeline_tracker is enqueued but not finished/failed, so stage 0
        # is considered in progress and nothing new may be scheduled.
        expect(BulkImports::PipelineWorker)
          .not_to receive(:perform_async)

        subject
      end

      it 'enqueues the next stage pipelines when the current stage is finished' do
        next_stage_pipeline_tracker = create(
          :bulk_import_tracker,
          entity: entity,
          pipeline_name: 'Stage1::Pipeline',
          stage: 1
        )

        # Failing the stage-0 tracker marks the current stage as done
        # (failed counts as finished), unblocking stage 1.
        pipeline_tracker.fail_op!

        expect_next_instance_of(Gitlab::Import::Logger) do |logger|
          expect(logger)
            .to receive(:info).twice
            .with(
              worker: described_class.name,
              entity_id: entity.id,
              current_stage: 0
            )
        end

        expect(BulkImports::PipelineWorker)
          .to receive(:perform_async)
            .with(
              next_stage_pipeline_tracker.id,
              next_stage_pipeline_tracker.stage,
              entity.id
            )

        subject
      end
    end
  end
end