path: root/spec/support/helpers
Diffstat (limited to 'spec/support/helpers')
-rw-r--r--  spec/support/helpers/database/database_helpers.rb      |  15
-rw-r--r--  spec/support/helpers/database/partitioning_helpers.rb  |  96
-rw-r--r--  spec/support/helpers/database/table_schema_helpers.rb  | 149
-rw-r--r--  spec/support/helpers/database/trigger_helpers.rb       |  68
-rw-r--r--  spec/support/helpers/database_helpers.rb               |  13
-rw-r--r--  spec/support/helpers/graphql_helpers.rb                |  42
-rw-r--r--  spec/support/helpers/multipart_helpers.rb              |  20
-rw-r--r--  spec/support/helpers/partitioning_helpers.rb           |  94
-rw-r--r--  spec/support/helpers/rack_attack_spec_helpers.rb       |  21
-rw-r--r--  spec/support/helpers/stub_experiments.rb               |   8
-rw-r--r--  spec/support/helpers/stub_feature_flags.rb             |   4
-rw-r--r--  spec/support/helpers/table_schema_helpers.rb           | 112
-rw-r--r--  spec/support/helpers/test_env.rb                       |   9
-rw-r--r--  spec/support/helpers/trigger_helpers.rb                |  66
14 files changed, 409 insertions, 308 deletions
diff --git a/spec/support/helpers/database/database_helpers.rb b/spec/support/helpers/database/database_helpers.rb
new file mode 100644
index 00000000000..b8d7ea3662f
--- /dev/null
+++ b/spec/support/helpers/database/database_helpers.rb
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+module Database
+ module DatabaseHelpers
+ # In order to directly work with views using factories,
+ # we can swap out the view for a table of identical structure.
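+ #
+ # Hypothetical usage from a spec (:some_view and its factory are placeholder names):
+ #
+ #   swapout_view_for_table(:some_view)
+ #   create(:some_view_record) # rows can now be inserted through the factory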
+ def swapout_view_for_table(view)
+ ActiveRecord::Base.connection.execute(<<~SQL)
+ CREATE TABLE #{view}_copy (LIKE #{view});
+ DROP VIEW #{view};
+ ALTER TABLE #{view}_copy RENAME TO #{view};
+ SQL
+ end
+ end
+end
diff --git a/spec/support/helpers/database/partitioning_helpers.rb b/spec/support/helpers/database/partitioning_helpers.rb
new file mode 100644
index 00000000000..80b31fe0603
--- /dev/null
+++ b/spec/support/helpers/database/partitioning_helpers.rb
@@ -0,0 +1,96 @@
+# frozen_string_literal: true
+
+module Database
+ module PartitioningHelpers
+ def expect_table_partitioned_by(table, columns, part_type: :range)
+ columns_with_part_type = columns.map { |c| [part_type.to_s, c] }
+ actual_columns = find_partitioned_columns(table)
+
+ expect(columns_with_part_type).to match_array(actual_columns)
+ end
+
+ def expect_range_partition_of(partition_name, table_name, min_value, max_value)
+ definition = find_partition_definition(partition_name, schema: Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA)
+
+ expect(definition).not_to be_nil
+ expect(definition['base_table']).to eq(table_name.to_s)
+ expect(definition['condition']).to eq("FOR VALUES FROM (#{min_value}) TO (#{max_value})")
+ end
+
+ def expect_total_partitions(table_name, count, schema: Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA)
+ partitions = find_partitions(table_name, schema: schema)
+
+ expect(partitions.size).to eq(count)
+ end
+
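+ # `partitions` is expected to map each partition suffix to its [min_value, max_value]
+ # bounds, e.g. (illustrative values): { '000000' => ['MINVALUE', 1], '000001' => [1, 100] }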
+ def expect_range_partitions_for(table_name, partitions)
+ partitions.each do |suffix, (min_value, max_value)|
+ partition_name = "#{table_name}_#{suffix}"
+ expect_range_partition_of(partition_name, table_name, min_value, max_value)
+ end
+
+ expect_total_partitions(table_name, partitions.size, schema: Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA)
+ end
+
+ def expect_hash_partition_of(partition_name, table_name, modulus, remainder)
+ definition = find_partition_definition(partition_name, schema: Gitlab::Database::STATIC_PARTITIONS_SCHEMA)
+
+ expect(definition).not_to be_nil
+ expect(definition['base_table']).to eq(table_name.to_s)
+ expect(definition['condition']).to eq("FOR VALUES WITH (modulus #{modulus}, remainder #{remainder})")
+ end
+
+ private
+
+ def find_partitioned_columns(table)
+ connection.select_rows(<<~SQL)
+ select
+ case partstrat
+ when 'l' then 'list'
+ when 'r' then 'range'
+ when 'h' then 'hash'
+ end as partstrat,
+ cols.column_name
+ from (
+ select partrelid, partstrat, unnest(partattrs) as col_pos
+ from pg_partitioned_table
+ ) pg_part
+ inner join pg_class
+ on pg_part.partrelid = pg_class.oid
+ inner join information_schema.columns cols
+ on cols.table_name = pg_class.relname
+ and cols.ordinal_position = pg_part.col_pos
+ where pg_class.relname = '#{table}';
+ SQL
+ end
+
+ def find_partition_definition(partition, schema: )
+ connection.select_one(<<~SQL)
+ select
+ parent_class.relname as base_table,
+ pg_get_expr(pg_class.relpartbound, inhrelid) as condition
+ from pg_class
+ inner join pg_inherits i on pg_class.oid = inhrelid
+ inner join pg_class parent_class on parent_class.oid = inhparent
+ inner join pg_namespace ON pg_namespace.oid = pg_class.relnamespace
+ where pg_namespace.nspname = '#{schema}'
+ and pg_class.relname = '#{partition}'
+ and pg_class.relispartition
+ SQL
+ end
+
+ def find_partitions(partition, schema: Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA)
+ connection.select_rows(<<~SQL)
+ select
+ pg_class.relname
+ from pg_class
+ inner join pg_inherits i on pg_class.oid = inhrelid
+ inner join pg_class parent_class on parent_class.oid = inhparent
+ inner join pg_namespace ON pg_namespace.oid = pg_class.relnamespace
+ where pg_namespace.nspname = '#{schema}'
+ and parent_class.relname = '#{partition}'
+ and pg_class.relispartition
+ SQL
+ end
+ end
+end
diff --git a/spec/support/helpers/database/table_schema_helpers.rb b/spec/support/helpers/database/table_schema_helpers.rb
new file mode 100644
index 00000000000..48d33442110
--- /dev/null
+++ b/spec/support/helpers/database/table_schema_helpers.rb
@@ -0,0 +1,149 @@
+# frozen_string_literal: true
+
+module Database
+ module TableSchemaHelpers
+ def connection
+ ActiveRecord::Base.connection
+ end
+
+ def expect_table_to_be_replaced(original_table:, replacement_table:, archived_table:)
+ original_oid = table_oid(original_table)
+ replacement_oid = table_oid(replacement_table)
+
+ yield
+
+ expect(table_oid(original_table)).to eq(replacement_oid)
+ expect(table_oid(archived_table)).to eq(original_oid)
+ expect(table_oid(replacement_table)).to be_nil
+ end
+
+ def expect_table_columns_to_match(expected_column_attributes, table_name)
+ expect(connection.table_exists?(table_name)).to eq(true)
+
+ actual_columns = connection.columns(table_name)
+ expect(actual_columns.size).to eq(expected_column_attributes.size)
+
+ expected_column_attributes.each_with_index do |attributes, i|
+ actual_column = actual_columns[i]
+
+ attributes.each do |name, value|
+ actual_value = actual_column.public_send(name)
+ message = "expected #{actual_column.name}.#{name} to be #{value}, but got #{actual_value}"
+
+ expect(actual_value).to eq(value), message
+ end
+ end
+ end
+
+ def expect_index_to_exist(name, schema: nil)
+ expect(index_exists_by_name(name, schema: schema)).to eq(true)
+ end
+
+ def expect_index_not_to_exist(name, schema: nil)
+ expect(index_exists_by_name(name, schema: schema)).to be_nil
+ end
+
+ def expect_check_constraint(table_name, name, definition, schema: nil)
+ expect(check_constraint_definition(table_name, name, schema: schema)).to eq("CHECK ((#{definition}))")
+ end
+
+ def expect_primary_keys_after_tables(tables, schema: nil)
+ tables.each do |table|
+ primary_key = primary_key_constraint_name(table, schema: schema)
+
+ expect(primary_key).to eq("#{table}_pkey")
+ end
+ end
+
+ def table_oid(name)
+ connection.select_value(<<~SQL)
+ SELECT oid
+ FROM pg_catalog.pg_class
+ WHERE relname = '#{name}'
+ SQL
+ end
+
+ def table_type(name)
+ connection.select_value(<<~SQL)
+ SELECT
+ CASE class.relkind
+ WHEN 'r' THEN 'normal'
+ WHEN 'p' THEN 'partitioned'
+ ELSE 'other'
+ END as table_type
+ FROM pg_catalog.pg_class class
+ WHERE class.relname = '#{name}'
+ SQL
+ end
+
+ def sequence_owned_by(table_name, column_name)
+ connection.select_value(<<~SQL)
+ SELECT
+ sequence.relname as name
+ FROM pg_catalog.pg_class as sequence
+ INNER JOIN pg_catalog.pg_depend depend
+ ON depend.objid = sequence.oid
+ INNER JOIN pg_catalog.pg_class class
+ ON class.oid = depend.refobjid
+ INNER JOIN pg_catalog.pg_attribute attribute
+ ON attribute.attnum = depend.refobjsubid
+ AND attribute.attrelid = depend.refobjid
+ WHERE class.relname = '#{table_name}'
+ AND attribute.attname = '#{column_name}'
+ SQL
+ end
+
+ def default_expression_for(table_name, column_name)
+ connection.select_value(<<~SQL)
+ SELECT
+ pg_get_expr(attrdef.adbin, attrdef.adrelid) AS default_value
+ FROM pg_catalog.pg_attribute attribute
+ INNER JOIN pg_catalog.pg_attrdef attrdef
+ ON attribute.attrelid = attrdef.adrelid
+ AND attribute.attnum = attrdef.adnum
+ WHERE attribute.attrelid = '#{table_name}'::regclass
+ AND attribute.attname = '#{column_name}'
+ SQL
+ end
+
+ def primary_key_constraint_name(table_name, schema: nil)
+ table_name = schema ? "#{schema}.#{table_name}" : table_name
+
+ connection.select_value(<<~SQL)
+ SELECT
+ conname AS constraint_name
+ FROM pg_catalog.pg_constraint
+ WHERE pg_constraint.conrelid = '#{table_name}'::regclass
+ AND pg_constraint.contype = 'p'
+ SQL
+ end
+
+ def index_exists_by_name(index, schema: nil)
+ schema = schema ? "'#{schema}'" : 'current_schema'
+
+ connection.select_value(<<~SQL)
+ SELECT true
+ FROM pg_catalog.pg_index i
+ INNER JOIN pg_catalog.pg_class c
+ ON c.oid = i.indexrelid
+ INNER JOIN pg_catalog.pg_namespace n
+ ON c.relnamespace = n.oid
+ WHERE c.relname = '#{index}'
+ AND n.nspname = #{schema}
+ SQL
+ end
+
+ def check_constraint_definition(table_name, constraint_name, schema: nil)
+ table_name = schema ? "#{schema}.#{table_name}" : table_name
+
+ connection.select_value(<<~SQL)
+ SELECT
+ pg_get_constraintdef(oid) AS constraint_definition
+ FROM pg_catalog.pg_constraint
+ WHERE pg_constraint.conrelid = '#{table_name}'::regclass
+ AND pg_constraint.contype = 'c'
+ AND pg_constraint.conname = '#{constraint_name}'
+ SQL
+ end
+ end
+end
diff --git a/spec/support/helpers/database/trigger_helpers.rb b/spec/support/helpers/database/trigger_helpers.rb
new file mode 100644
index 00000000000..9ec03e68413
--- /dev/null
+++ b/spec/support/helpers/database/trigger_helpers.rb
@@ -0,0 +1,68 @@
+# frozen_string_literal: true
+
+module Database
+ module TriggerHelpers
+ def expect_function_to_exist(name)
+ expect(find_function_def(name)).not_to be_nil
+ end
+
+ def expect_function_not_to_exist(name)
+ expect(find_function_def(name)).to be_nil
+ end
+
+ def expect_function_to_contain(name, *statements)
+ return_stmt, *body_stmts = parsed_function_statements(name).reverse
+
+ expect(return_stmt).to eq('return old')
+ expect(body_stmts).to contain_exactly(*statements)
+ end
+
+ def expect_trigger_not_to_exist(table_name, name)
+ expect(find_trigger_def(table_name, name)).to be_nil
+ end
+
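+ # `fires_on` is expected to be a single-entry hash of timing => events,
+ # e.g. (illustrative): { before: %w[insert update] }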
+ def expect_valid_function_trigger(table_name, name, fn_name, fires_on)
+ events, timing, definition = cleaned_trigger_def(table_name, name)
+
+ events = events&.split(',')
+ expected_timing, expected_events = fires_on.first
+ expect(timing).to eq(expected_timing.to_s)
+ expect(events).to match_array(Array.wrap(expected_events))
+
+ expect(definition).to match(%r{execute (?:procedure|function) #{fn_name}\(\)})
+ end
+
+ private
+
+ def parsed_function_statements(name)
+ cleaned_definition = find_function_def(name)['body'].downcase.gsub(/\s+/, ' ')
+ statements = cleaned_definition.sub(/\A\s*begin\s*(.*)\s*end\s*\Z/, "\\1")
+ statements.split(';').map! { |stmt| stmt.strip.presence }.compact!
+ end
+
+ def find_function_def(name)
+ connection.select_one(<<~SQL)
+ SELECT prosrc AS body
+ FROM pg_proc
+ WHERE proname = '#{name}'
+ SQL
+ end
+
+ def cleaned_trigger_def(table_name, name)
+ find_trigger_def(table_name, name).values_at('event', 'action_timing', 'action_statement').map!(&:downcase)
+ end
+
+ def find_trigger_def(table_name, name)
+ connection.select_one(<<~SQL)
+ SELECT
+ string_agg(event_manipulation, ',') AS event,
+ action_timing,
+ action_statement
+ FROM information_schema.triggers
+ WHERE event_object_table = '#{table_name}'
+ AND trigger_name = '#{name}'
+ GROUP BY 2, 3
+ SQL
+ end
+ end
+end
diff --git a/spec/support/helpers/database_helpers.rb b/spec/support/helpers/database_helpers.rb
deleted file mode 100644
index e9f0a74a8d1..00000000000
--- a/spec/support/helpers/database_helpers.rb
+++ /dev/null
@@ -1,13 +0,0 @@
-# frozen_string_literal: true
-
-module DatabaseHelpers
- # In order to directly work with views using factories,
- # we can swapout the view for a table of identical structure.
- def swapout_view_for_table(view)
- ActiveRecord::Base.connection.execute(<<~SQL)
- CREATE TABLE #{view}_copy (LIKE #{view});
- DROP VIEW #{view};
- ALTER TABLE #{view}_copy RENAME TO #{view};
- SQL
- end
-end
diff --git a/spec/support/helpers/graphql_helpers.rb b/spec/support/helpers/graphql_helpers.rb
index b20801bd3c4..35c298a4d48 100644
--- a/spec/support/helpers/graphql_helpers.rb
+++ b/spec/support/helpers/graphql_helpers.rb
@@ -67,14 +67,16 @@ module GraphqlHelpers
end
end
+ def with_clean_batchloader_executor(&block)
+ BatchLoader::Executor.ensure_current
+ yield
+ ensure
+ BatchLoader::Executor.clear_current
+ end
+
# Runs a block inside a BatchLoader::Executor wrapper
def batch(max_queries: nil, &blk)
- wrapper = proc do
- BatchLoader::Executor.ensure_current
- yield
- ensure
- BatchLoader::Executor.clear_current
- end
+ wrapper = -> { with_clean_batchloader_executor(&blk) }
if max_queries
result = nil
@@ -85,6 +87,32 @@ module GraphqlHelpers
end
end
+ # Use this when writing N+1 tests.
+ #
+ # It does not use the controller, so it avoids confounding factors due to
+ # authentication (token set-up, license checks)
+ # It clears the request store, rails cache, and BatchLoader Executor between runs.
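+ #
+ # Hypothetical N+1 check (QueryRecorder, the query, and the user are illustrative):
+ #
+ #   control = ActiveRecord::QueryRecorder.new do
+ #     run_with_clean_state(query, context: { current_user: user })
+ #   end
+ #
+ #   expect { run_with_clean_state(query, context: { current_user: user }) }
+ #     .not_to exceed_query_limit(control)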
+ def run_with_clean_state(query, **args)
+ ::Gitlab::WithRequestStore.with_request_store do
+ with_clean_rails_cache do
+ with_clean_batchloader_executor do
+ ::GitlabSchema.execute(query, **args)
+ end
+ end
+ end
+ end
+
+ # Basically a combination of use_sql_query_cache and use_clean_rails_memory_store_caching,
+ # but more fine-grained, suitable for comparing two runs in the same example.
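+ #
+ # Hypothetical usage (run_query is a placeholder for the code under test); both
+ # runs start from a fresh cache, so their query counts can be compared directly:
+ #
+ #   first_run  = with_clean_rails_cache { run_query.call }
+ #   second_run = with_clean_rails_cache { run_query.call }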
+ def with_clean_rails_cache(&blk)
+ caching_store = Rails.cache
+ Rails.cache = ActiveSupport::Cache::MemoryStore.new
+
+ ActiveRecord::Base.cache(&blk)
+ ensure
+ Rails.cache = caching_store
+ end
+
# BatchLoader::GraphQL returns a wrapper, so we need to :sync in order
# to get the actual values
def batch_sync(max_queries: nil, &blk)
@@ -245,7 +273,7 @@ module GraphqlHelpers
return if max_depth <= 0
allow_unlimited_graphql_complexity
- allow_unlimited_graphql_depth
+ allow_unlimited_graphql_depth if max_depth > 1
allow_high_graphql_recursion
allow_high_graphql_transaction_threshold
diff --git a/spec/support/helpers/multipart_helpers.rb b/spec/support/helpers/multipart_helpers.rb
index bcb184f84c5..8438a83aa8a 100644
--- a/spec/support/helpers/multipart_helpers.rb
+++ b/spec/support/helpers/multipart_helpers.rb
@@ -13,29 +13,23 @@ module MultipartHelpers
)
end
- # This function assumes a `mode` variable to be set
- def upload_parameters_for(filepath: nil, key: nil, filename: 'filename', remote_id: 'remote_id')
+ def upload_parameters_for(filepath: nil, key: nil, mode: nil, filename: 'filename', remote_id: 'remote_id')
result = {
- "#{key}.name" => filename,
- "#{key}.type" => "application/octet-stream",
- "#{key}.sha256" => "1234567890"
+ "name" => filename,
+ "type" => "application/octet-stream",
+ "sha256" => "1234567890"
}
case mode
when :local
- result["#{key}.path"] = filepath
+ result["path"] = filepath
when :remote
- result["#{key}.remote_id"] = remote_id
- result["#{key}.size"] = 3.megabytes
+ result["remote_id"] = remote_id
+ result["size"] = 3.megabytes
else
raise ArgumentError, "can't handle #{mode} mode"
end
- return result if ::Feature.disabled?(:upload_middleware_jwt_params_handler, default_enabled: true)
-
- # the HandlerForJWTParams expects a jwt token with the upload parameters
- # *without* the "#{key}." prefix
- result.deep_transform_keys! { |k| k.remove("#{key}.") }
{
"#{key}.gitlab-workhorse-upload" => jwt_token(data: { 'upload' => result })
}
diff --git a/spec/support/helpers/partitioning_helpers.rb b/spec/support/helpers/partitioning_helpers.rb
deleted file mode 100644
index 8981fea04d5..00000000000
--- a/spec/support/helpers/partitioning_helpers.rb
+++ /dev/null
@@ -1,94 +0,0 @@
-# frozen_string_literal: true
-
-module PartitioningHelpers
- def expect_table_partitioned_by(table, columns, part_type: :range)
- columns_with_part_type = columns.map { |c| [part_type.to_s, c] }
- actual_columns = find_partitioned_columns(table)
-
- expect(columns_with_part_type).to match_array(actual_columns)
- end
-
- def expect_range_partition_of(partition_name, table_name, min_value, max_value)
- definition = find_partition_definition(partition_name, schema: Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA)
-
- expect(definition).not_to be_nil
- expect(definition['base_table']).to eq(table_name.to_s)
- expect(definition['condition']).to eq("FOR VALUES FROM (#{min_value}) TO (#{max_value})")
- end
-
- def expect_total_partitions(table_name, count, schema: Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA)
- partitions = find_partitions(table_name, schema: schema)
-
- expect(partitions.size).to eq(count)
- end
-
- def expect_range_partitions_for(table_name, partitions)
- partitions.each do |suffix, (min_value, max_value)|
- partition_name = "#{table_name}_#{suffix}"
- expect_range_partition_of(partition_name, table_name, min_value, max_value)
- end
-
- expect_total_partitions(table_name, partitions.size, schema: Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA)
- end
-
- def expect_hash_partition_of(partition_name, table_name, modulus, remainder)
- definition = find_partition_definition(partition_name, schema: Gitlab::Database::STATIC_PARTITIONS_SCHEMA)
-
- expect(definition).not_to be_nil
- expect(definition['base_table']).to eq(table_name.to_s)
- expect(definition['condition']).to eq("FOR VALUES WITH (modulus #{modulus}, remainder #{remainder})")
- end
-
- private
-
- def find_partitioned_columns(table)
- connection.select_rows(<<~SQL)
- select
- case partstrat
- when 'l' then 'list'
- when 'r' then 'range'
- when 'h' then 'hash'
- end as partstrat,
- cols.column_name
- from (
- select partrelid, partstrat, unnest(partattrs) as col_pos
- from pg_partitioned_table
- ) pg_part
- inner join pg_class
- on pg_part.partrelid = pg_class.oid
- inner join information_schema.columns cols
- on cols.table_name = pg_class.relname
- and cols.ordinal_position = pg_part.col_pos
- where pg_class.relname = '#{table}';
- SQL
- end
-
- def find_partition_definition(partition, schema: )
- connection.select_one(<<~SQL)
- select
- parent_class.relname as base_table,
- pg_get_expr(pg_class.relpartbound, inhrelid) as condition
- from pg_class
- inner join pg_inherits i on pg_class.oid = inhrelid
- inner join pg_class parent_class on parent_class.oid = inhparent
- inner join pg_namespace ON pg_namespace.oid = pg_class.relnamespace
- where pg_namespace.nspname = '#{schema}'
- and pg_class.relname = '#{partition}'
- and pg_class.relispartition
- SQL
- end
-
- def find_partitions(partition, schema: Gitlab::Database::DYNAMIC_PARTITIONS_SCHEMA)
- connection.select_rows(<<~SQL)
- select
- pg_class.relname
- from pg_class
- inner join pg_inherits i on pg_class.oid = inhrelid
- inner join pg_class parent_class on parent_class.oid = inhparent
- inner join pg_namespace ON pg_namespace.oid = pg_class.relnamespace
- where pg_namespace.nspname = '#{schema}'
- and parent_class.relname = '#{partition}'
- and pg_class.relispartition
- SQL
- end
-end
diff --git a/spec/support/helpers/rack_attack_spec_helpers.rb b/spec/support/helpers/rack_attack_spec_helpers.rb
index a8ae69885d8..d50a6382a40 100644
--- a/spec/support/helpers/rack_attack_spec_helpers.rb
+++ b/spec/support/helpers/rack_attack_spec_helpers.rb
@@ -21,10 +21,31 @@ module RackAttackSpecHelpers
{ 'AUTHORIZATION' => "Bearer #{oauth_access_token.token}" }
end
+ def basic_auth_headers(user, personal_access_token)
+ encoded_login = ["#{user.username}:#{personal_access_token.token}"].pack('m0')
+ { 'AUTHORIZATION' => "Basic #{encoded_login}" }
+ end
+
def expect_rejection(&block)
yield
expect(response).to have_gitlab_http_status(:too_many_requests)
+
+ expect(response.headers.to_h).to include(
+ 'RateLimit-Limit' => a_string_matching(/^\d+$/),
+ 'RateLimit-Name' => a_string_matching(/^throttle_.*$/),
+ 'RateLimit-Observed' => a_string_matching(/^\d+$/),
+ 'RateLimit-Remaining' => a_string_matching(/^\d+$/),
+ 'Retry-After' => a_string_matching(/^\d+$/)
+ )
+ expect(response).to have_header('RateLimit-Reset')
+ expect do
+ DateTime.strptime(response.headers['RateLimit-Reset'], '%s')
+ end.not_to raise_error
+ expect(response).to have_header('RateLimit-ResetTime')
+ expect do
+ Time.httpdate(response.headers['RateLimit-ResetTime'])
+ end.not_to raise_error
end
def expect_ok(&block)
diff --git a/spec/support/helpers/stub_experiments.rb b/spec/support/helpers/stub_experiments.rb
index 247692d83ee..408d16a7c08 100644
--- a/spec/support/helpers/stub_experiments.rb
+++ b/spec/support/helpers/stub_experiments.rb
@@ -11,6 +11,7 @@ module StubExperiments
allow(Gitlab::Experimentation).to receive(:active?).and_call_original
experiments.each do |experiment_key, enabled|
+ Feature.persist_used!("#{experiment_key}#{feature_flag_suffix}")
allow(Gitlab::Experimentation).to receive(:active?).with(experiment_key) { enabled }
end
end
@@ -25,7 +26,14 @@ module StubExperiments
allow(Gitlab::Experimentation).to receive(:in_experiment_group?).and_call_original
experiments.each do |experiment_key, enabled|
+ Feature.persist_used!("#{experiment_key}#{feature_flag_suffix}")
allow(Gitlab::Experimentation).to receive(:in_experiment_group?).with(experiment_key, anything) { enabled }
end
end
+
+ private
+
+ def feature_flag_suffix
+ Gitlab::Experimentation::Experiment::FEATURE_FLAG_SUFFIX
+ end
end
diff --git a/spec/support/helpers/stub_feature_flags.rb b/spec/support/helpers/stub_feature_flags.rb
index 7f30a2a70cd..77f31169ecb 100644
--- a/spec/support/helpers/stub_feature_flags.rb
+++ b/spec/support/helpers/stub_feature_flags.rb
@@ -66,4 +66,8 @@ module StubFeatureFlags
def skip_feature_flags_yaml_validation
allow(Feature::Definition).to receive(:valid_usage!)
end
+
+ def skip_default_enabled_yaml_check
+ allow(Feature::Definition).to receive(:default_enabled?).and_return(false)
+ end
end
diff --git a/spec/support/helpers/table_schema_helpers.rb b/spec/support/helpers/table_schema_helpers.rb
deleted file mode 100644
index 28794211190..00000000000
--- a/spec/support/helpers/table_schema_helpers.rb
+++ /dev/null
@@ -1,112 +0,0 @@
-# frozen_string_literal: true
-
-module TableSchemaHelpers
- def connection
- ActiveRecord::Base.connection
- end
-
- def expect_table_to_be_replaced(original_table:, replacement_table:, archived_table:)
- original_oid = table_oid(original_table)
- replacement_oid = table_oid(replacement_table)
-
- yield
-
- expect(table_oid(original_table)).to eq(replacement_oid)
- expect(table_oid(archived_table)).to eq(original_oid)
- expect(table_oid(replacement_table)).to be_nil
- end
-
- def expect_index_to_exist(name, schema: nil)
- expect(index_exists_by_name(name, schema: schema)).to eq(true)
- end
-
- def expect_index_not_to_exist(name, schema: nil)
- expect(index_exists_by_name(name, schema: schema)).to be_nil
- end
-
- def expect_primary_keys_after_tables(tables, schema: nil)
- tables.each do |table|
- primary_key = primary_key_constraint_name(table, schema: schema)
-
- expect(primary_key).to eq("#{table}_pkey")
- end
- end
-
- def table_oid(name)
- connection.select_value(<<~SQL)
- SELECT oid
- FROM pg_catalog.pg_class
- WHERE relname = '#{name}'
- SQL
- end
-
- def table_type(name)
- connection.select_value(<<~SQL)
- SELECT
- CASE class.relkind
- WHEN 'r' THEN 'normal'
- WHEN 'p' THEN 'partitioned'
- ELSE 'other'
- END as table_type
- FROM pg_catalog.pg_class class
- WHERE class.relname = '#{name}'
- SQL
- end
-
- def sequence_owned_by(table_name, column_name)
- connection.select_value(<<~SQL)
- SELECT
- sequence.relname as name
- FROM pg_catalog.pg_class as sequence
- INNER JOIN pg_catalog.pg_depend depend
- ON depend.objid = sequence.oid
- INNER JOIN pg_catalog.pg_class class
- ON class.oid = depend.refobjid
- INNER JOIN pg_catalog.pg_attribute attribute
- ON attribute.attnum = depend.refobjsubid
- AND attribute.attrelid = depend.refobjid
- WHERE class.relname = '#{table_name}'
- AND attribute.attname = '#{column_name}'
- SQL
- end
-
- def default_expression_for(table_name, column_name)
- connection.select_value(<<~SQL)
- SELECT
- pg_get_expr(attrdef.adbin, attrdef.adrelid) AS default_value
- FROM pg_catalog.pg_attribute attribute
- INNER JOIN pg_catalog.pg_attrdef attrdef
- ON attribute.attrelid = attrdef.adrelid
- AND attribute.attnum = attrdef.adnum
- WHERE attribute.attrelid = '#{table_name}'::regclass
- AND attribute.attname = '#{column_name}'
- SQL
- end
-
- def primary_key_constraint_name(table_name, schema: nil)
- table_name = schema ? "#{schema}.#{table_name}" : table_name
-
- connection.select_value(<<~SQL)
- SELECT
- conname AS constraint_name
- FROM pg_catalog.pg_constraint
- WHERE pg_constraint.conrelid = '#{table_name}'::regclass
- AND pg_constraint.contype = 'p'
- SQL
- end
-
- def index_exists_by_name(index, schema: nil)
- schema = schema ? "'#{schema}'" : 'current_schema'
-
- connection.select_value(<<~SQL)
- SELECT true
- FROM pg_catalog.pg_index i
- INNER JOIN pg_catalog.pg_class c
- ON c.oid = i.indexrelid
- INNER JOIN pg_catalog.pg_namespace n
- ON c.relnamespace = n.oid
- WHERE c.relname = '#{index}'
- AND n.nspname = #{schema}
- SQL
- end
-end
diff --git a/spec/support/helpers/test_env.rb b/spec/support/helpers/test_env.rb
index 01571277a1d..cb25f5f9429 100644
--- a/spec/support/helpers/test_env.rb
+++ b/spec/support/helpers/test_env.rb
@@ -203,10 +203,13 @@ module TestEnv
end
gitaly_pid = Integer(File.read(TMP_TEST_PATH.join('gitaly.pid')))
+ gitaly2_pid = Integer(File.read(TMP_TEST_PATH.join('gitaly2.pid')))
praefect_pid = Integer(File.read(TMP_TEST_PATH.join('praefect.pid')))
- Kernel.at_exit { stop(gitaly_pid) }
- Kernel.at_exit { stop(praefect_pid) }
+ Kernel.at_exit do
+ pids = [gitaly_pid, gitaly2_pid, praefect_pid]
+ pids.each { |pid| stop(pid) }
+ end
wait('gitaly')
wait('praefect')
@@ -284,7 +287,7 @@ module TestEnv
@workhorse_path ||= File.join('tmp', 'tests', 'gitlab-workhorse')
end
- def with_workhorse(workhorse_dir, host, port, upstream, &blk)
+ def with_workhorse(host, port, upstream, &blk)
host = "[#{host}]" if host.include?(':')
listen_addr = [host, port].join(':')
diff --git a/spec/support/helpers/trigger_helpers.rb b/spec/support/helpers/trigger_helpers.rb
deleted file mode 100644
index dd6d8ff5bb5..00000000000
--- a/spec/support/helpers/trigger_helpers.rb
+++ /dev/null
@@ -1,66 +0,0 @@
-# frozen_string_literal: true
-
-module TriggerHelpers
- def expect_function_to_exist(name)
- expect(find_function_def(name)).not_to be_nil
- end
-
- def expect_function_not_to_exist(name)
- expect(find_function_def(name)).to be_nil
- end
-
- def expect_function_to_contain(name, *statements)
- return_stmt, *body_stmts = parsed_function_statements(name).reverse
-
- expect(return_stmt).to eq('return old')
- expect(body_stmts).to contain_exactly(*statements)
- end
-
- def expect_trigger_not_to_exist(table_name, name)
- expect(find_trigger_def(table_name, name)).to be_nil
- end
-
- def expect_valid_function_trigger(table_name, name, fn_name, fires_on)
- events, timing, definition = cleaned_trigger_def(table_name, name)
-
- events = events&.split(',')
- expected_timing, expected_events = fires_on.first
- expect(timing).to eq(expected_timing.to_s)
- expect(events).to match_array(Array.wrap(expected_events))
-
- expect(definition).to match(%r{execute (?:procedure|function) #{fn_name}()})
- end
-
- private
-
- def parsed_function_statements(name)
- cleaned_definition = find_function_def(name)['body'].downcase.gsub(/\s+/, ' ')
- statements = cleaned_definition.sub(/\A\s*begin\s*(.*)\s*end\s*\Z/, "\\1")
- statements.split(';').map! { |stmt| stmt.strip.presence }.compact!
- end
-
- def find_function_def(name)
- connection.select_one(<<~SQL)
- SELECT prosrc AS body
- FROM pg_proc
- WHERE proname = '#{name}'
- SQL
- end
-
- def cleaned_trigger_def(table_name, name)
- find_trigger_def(table_name, name).values_at('event', 'action_timing', 'action_statement').map!(&:downcase)
- end
-
- def find_trigger_def(table_name, name)
- connection.select_one(<<~SQL)
- SELECT
- string_agg(event_manipulation, ',') AS event,
- action_timing,
- action_statement
- FROM information_schema.triggers
- WHERE event_object_table = '#{table_name}'
- AND trigger_name = '#{name}'
- GROUP BY 2, 3
- SQL
- end
-end