diff options
author | Tom Lane <tgl@sss.pgh.pa.us> | 2022-05-12 15:17:30 -0400 |
---|---|---|
committer | Tom Lane <tgl@sss.pgh.pa.us> | 2022-05-12 15:17:30 -0400 |
commit | 23e7b38bfe396f919fdb66057174d29e17086418 (patch) | |
tree | 335c3962ef8afe0f6193d0413dbc51642276b147 /src/backend/optimizer/path | |
parent | 93909599cdba64c8759d646983c0a4ef93de1e50 (diff) | |
download | postgresql-23e7b38bfe396f919fdb66057174d29e17086418.tar.gz |
Pre-beta mechanical code beautification.
Run pgindent, pgperltidy, and reformat-dat-files.
I manually fixed a couple of comments that pgindent uglified.
Diffstat (limited to 'src/backend/optimizer/path')
-rw-r--r-- | src/backend/optimizer/path/allpaths.c | 15
-rw-r--r-- | src/backend/optimizer/path/costsize.c | 111
-rw-r--r-- | src/backend/optimizer/path/equivclass.c | 6
-rw-r--r-- | src/backend/optimizer/path/joinpath.c | 2
-rw-r--r-- | src/backend/optimizer/path/pathkeys.c | 143
5 files changed, 145 insertions, 132 deletions
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index d84f66a81b..7ac116a791 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -1777,17 +1777,18 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, } /* - * When building a fractional path, determine a cheapest fractional - * path for each child relation too. Looking at startup and total - * costs is not enough, because the cheapest fractional path may be - * dominated by two separate paths (one for startup, one for total). + * When building a fractional path, determine a cheapest + * fractional path for each child relation too. Looking at startup + * and total costs is not enough, because the cheapest fractional + * path may be dominated by two separate paths (one for startup, + * one for total). * * When needed (building fractional path), determine the cheapest * fractional path too. */ if (root->tuple_fraction > 0) { - double path_fraction = (1.0 / root->tuple_fraction); + double path_fraction = (1.0 / root->tuple_fraction); cheapest_fractional = get_cheapest_fractional_path_for_pathkeys(childrel->pathlist, @@ -1796,8 +1797,8 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel, path_fraction); /* - * If we found no path with matching pathkeys, use the cheapest - * total path instead. + * If we found no path with matching pathkeys, use the + * cheapest total path instead. * * XXX We might consider partially sorted paths too (with an * incremental sort on top). 
But we'd have to build all the diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 6673d271c2..ed98ba7dbd 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -1794,7 +1794,7 @@ is_fake_var(Expr *expr) static double get_width_cost_multiplier(PlannerInfo *root, Expr *expr) { - double width = -1.0; /* fake value */ + double width = -1.0; /* fake value */ if (IsA(expr, RelabelType)) expr = (Expr *) ((RelabelType *) expr)->arg; @@ -1802,17 +1802,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr) /* Try to find actual stat in corresponding relation */ if (IsA(expr, Var)) { - Var *var = (Var *) expr; + Var *var = (Var *) expr; if (var->varno > 0 && var->varno < root->simple_rel_array_size) { - RelOptInfo *rel = root->simple_rel_array[var->varno]; + RelOptInfo *rel = root->simple_rel_array[var->varno]; if (rel != NULL && var->varattno >= rel->min_attr && var->varattno <= rel->max_attr) { - int ndx = var->varattno - rel->min_attr; + int ndx = var->varattno - rel->min_attr; if (rel->attr_widths[ndx] > 0) width = rel->attr_widths[ndx]; @@ -1823,7 +1823,7 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr) /* Didn't find any actual stats, try using type width instead. */ if (width < 0.0) { - Node *node = (Node*) expr; + Node *node = (Node *) expr; width = get_typavgwidth(exprType(node), exprTypmod(node)); } @@ -1832,17 +1832,17 @@ get_width_cost_multiplier(PlannerInfo *root, Expr *expr) * Values are passed as Datum type, so comparisons can't be cheaper than * comparing a Datum value. * - * FIXME I find this reasoning questionable. We may pass int2, and comparing - * it is probably a bit cheaper than comparing a bigint. + * FIXME I find this reasoning questionable. We may pass int2, and + * comparing it is probably a bit cheaper than comparing a bigint. 
*/ if (width <= sizeof(Datum)) return 1.0; /* * We consider the cost of a comparison not to be directly proportional to - * width of the argument, because widths of the arguments could be slightly - * different (we only know the average width for the whole column). So we - * use log16(width) as an estimate. + * width of the argument, because widths of the arguments could be + * slightly different (we only know the average width for the whole + * column). So we use log16(width) as an estimate. */ return 1.0 + 0.125 * LOG2(width / sizeof(Datum)); } @@ -1902,23 +1902,23 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, bool heapSort) { Cost per_tuple_cost = 0.0; - ListCell *lc; - List *pathkeyExprs = NIL; + ListCell *lc; + List *pathkeyExprs = NIL; double tuplesPerPrevGroup = tuples; double totalFuncCost = 1.0; bool has_fake_var = false; int i = 0; Oid prev_datatype = InvalidOid; - List *cache_varinfos = NIL; + List *cache_varinfos = NIL; /* fallback if pathkeys is unknown */ if (list_length(pathkeys) == 0) { /* - * If we'll use a bounded heap-sort keeping just K tuples in memory, for - * a total number of tuple comparisons of N log2 K; but the constant - * factor is a bit higher than for quicksort. Tweak it so that the cost - * curve is continuous at the crossover point. + * If we'll use a bounded heap-sort keeping just K tuples in memory, + * for a total number of tuple comparisons of N log2 K; but the + * constant factor is a bit higher than for quicksort. Tweak it so + * that the cost curve is continuous at the crossover point. */ output_tuples = (heapSort) ? 
2.0 * output_tuples : tuples; per_tuple_cost += 2.0 * cpu_operator_cost * LOG2(output_tuples); @@ -1930,17 +1930,17 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, } /* - * Computing total cost of sorting takes into account: - * - per column comparison function cost - * - we try to compute needed number of comparison per column + * Computing total cost of sorting takes into account the per-column + * comparison function cost. We try to compute the needed number of + * comparisons per column. */ foreach(lc, pathkeys) { - PathKey *pathkey = (PathKey*) lfirst(lc); - EquivalenceMember *em; - double nGroups, - correctedNGroups; - Cost funcCost = 1.0; + PathKey *pathkey = (PathKey *) lfirst(lc); + EquivalenceMember *em; + double nGroups, + correctedNGroups; + Cost funcCost = 1.0; /* * We believe that equivalence members aren't very different, so, to @@ -1985,10 +1985,10 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, pathkeyExprs = lappend(pathkeyExprs, em->em_expr); /* - * We need to calculate the number of comparisons for this column, which - * requires knowing the group size. So we estimate the number of groups - * by calling estimate_num_groups_incremental(), which estimates the - * group size for "new" pathkeys. + * We need to calculate the number of comparisons for this column, + * which requires knowing the group size. So we estimate the number of + * groups by calling estimate_num_groups_incremental(), which + * estimates the group size for "new" pathkeys. * * Note: estimate_num_groups_incremental does not handle fake Vars, so * use a default estimate otherwise. @@ -1999,26 +1999,30 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, &cache_varinfos, list_length(pathkeyExprs) - 1); else if (tuples > 4.0) + /* * Use geometric mean as estimation if there are no stats. 
* - * We don't use DEFAULT_NUM_DISTINCT here, because that’s used for - * a single column, but here we’re dealing with multiple columns. + * We don't use DEFAULT_NUM_DISTINCT here, because that's used for + * a single column, but here we're dealing with multiple columns. */ nGroups = ceil(2.0 + sqrt(tuples) * (i + 1) / list_length(pathkeys)); else nGroups = tuples; /* - * Presorted keys are not considered in the cost above, but we still do - * have to compare them in the qsort comparator. So make sure to factor - * in the cost in that case. + * Presorted keys are not considered in the cost above, but we still + * do have to compare them in the qsort comparator. So make sure to + * factor in the cost in that case. */ if (i >= nPresortedKeys) { if (heapSort) { - /* have to keep at least one group, and a multiple of group size */ + /* + * have to keep at least one group, and a multiple of group + * size + */ correctedNGroups = ceil(output_tuples / tuplesPerPrevGroup); } else @@ -2033,19 +2037,20 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, i++; /* - * Uniform distributions with all groups being of the same size are the - * best case, with nice smooth behavior. Real-world distributions tend - * not to be uniform, though, and we don’t have any reliable easy-to-use - * information. As a basic defense against skewed distributions, we use - * a 1.5 factor to make the expected group a bit larger, but we need to - * be careful not to make the group larger than in the preceding step. + * Uniform distributions with all groups being of the same size are + * the best case, with nice smooth behavior. Real-world distributions + * tend not to be uniform, though, and we don't have any reliable + * easy-to-use information. As a basic defense against skewed + * distributions, we use a 1.5 factor to make the expected group a bit + * larger, but we need to be careful not to make the group larger than + * in the preceding step. 
*/ tuplesPerPrevGroup = Min(tuplesPerPrevGroup, ceil(1.5 * tuplesPerPrevGroup / nGroups)); /* - * Once we get single-row group, it means tuples in the group are unique - * and we can skip all remaining columns. + * Once we get single-row group, it means tuples in the group are + * unique and we can skip all remaining columns. */ if (tuplesPerPrevGroup <= 1.0) break; @@ -2057,15 +2062,15 @@ compute_cpu_sort_cost(PlannerInfo *root, List *pathkeys, int nPresortedKeys, per_tuple_cost *= cpu_operator_cost; /* - * Accordingly to "Introduction to algorithms", Thomas H. Cormen, Charles E. - * Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0, quicksort estimation - * formula has additional term proportional to number of tuples (See Chapter - * 8.2 and Theorem 4.1). That affects cases with a low number of tuples, - * approximately less than 1e4. We could implement it as an additional - * multiplier under the logarithm, but we use a bit more complex formula - * which takes into account the number of unique tuples and it’s not clear - * how to combine the multiplier with the number of groups. Estimate it as - * 10 in cpu_operator_cost unit. + * Accordingly to "Introduction to algorithms", Thomas H. Cormen, Charles + * E. Leiserson, Ronald L. Rivest, ISBN 0-07-013143-0, quicksort + * estimation formula has additional term proportional to number of tuples + * (see Chapter 8.2 and Theorem 4.1). That affects cases with a low number + * of tuples, approximately less than 1e4. We could implement it as an + * additional multiplier under the logarithm, but we use a bit more + * complex formula which takes into account the number of unique tuples + * and it's not clear how to combine the multiplier with the number of + * groups. Estimate it as 10 cpu_operator_cost units. 
*/ per_tuple_cost += 10 * cpu_operator_cost; @@ -2082,7 +2087,7 @@ cost_sort_estimate(PlannerInfo *root, List *pathkeys, int nPresortedKeys, double tuples) { return compute_cpu_sort_cost(root, pathkeys, nPresortedKeys, - 0, tuples, tuples, false); + 0, tuples, tuples, false); } /* diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c index 34c5ab1cb6..60c0e3f108 100644 --- a/src/backend/optimizer/path/equivclass.c +++ b/src/backend/optimizer/path/equivclass.c @@ -685,9 +685,9 @@ get_eclass_for_sort_expr(PlannerInfo *root, /* * Match! * - * Copy the sortref if it wasn't set yet. That may happen if the - * ec was constructed from WHERE clause, i.e. it doesn't have a - * target reference at all. + * Copy the sortref if it wasn't set yet. That may happen if + * the ec was constructed from WHERE clause, i.e. it doesn't + * have a target reference at all. */ if (cur_ec->ec_sortref == 0 && sortref > 0) cur_ec->ec_sortref = sortref; diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index 9a8c5165b0..55206ec54d 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -1258,7 +1258,7 @@ sort_inner_and_outer(PlannerInfo *root, foreach(l, all_pathkeys) { - PathKey *front_pathkey = (PathKey *) lfirst(l); + PathKey *front_pathkey = (PathKey *) lfirst(l); List *cur_mergeclauses; List *outerkeys; List *innerkeys; diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c index 91556910ae..9775c4a722 100644 --- a/src/backend/optimizer/path/pathkeys.c +++ b/src/backend/optimizer/path/pathkeys.c @@ -32,7 +32,7 @@ #include "utils/selfuncs.h" /* Consider reordering of GROUP BY keys? 
*/ -bool enable_group_by_reordering = true; +bool enable_group_by_reordering = true; static bool pathkey_is_redundant(PathKey *new_pathkey, List *pathkeys); static bool matches_boolean_partition_clause(RestrictInfo *rinfo, @@ -352,7 +352,7 @@ int group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys, List **group_clauses) { - List *new_group_pathkeys= NIL, + List *new_group_pathkeys = NIL, *new_group_clauses = NIL; ListCell *lc; int n; @@ -365,16 +365,16 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys, * there's a matching GROUP BY key. If we find one, we append it to the * list, and do the same for the clauses. * - * Once we find the first pathkey without a matching GROUP BY key, the rest - * of the pathkeys are useless and can't be used to evaluate the grouping, - * so we abort the loop and ignore the remaining pathkeys. + * Once we find the first pathkey without a matching GROUP BY key, the + * rest of the pathkeys are useless and can't be used to evaluate the + * grouping, so we abort the loop and ignore the remaining pathkeys. * * XXX Pathkeys are built in a way to allow simply comparing pointers. */ foreach(lc, pathkeys) { - PathKey *pathkey = (PathKey *) lfirst(lc); - SortGroupClause *sgc; + PathKey *pathkey = (PathKey *) lfirst(lc); + SortGroupClause *sgc; /* abort on first mismatch */ if (!list_member_ptr(*group_pathkeys, pathkey)) @@ -403,13 +403,14 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys, /* * Used to generate all permutations of a pathkey list. 
*/ -typedef struct PathkeyMutatorState { +typedef struct PathkeyMutatorState +{ List *elemsList; ListCell **elemCells; void **elems; int *positions; - int mutatorNColumns; - int count; + int mutatorNColumns; + int count; } PathkeyMutatorState; @@ -428,9 +429,9 @@ typedef struct PathkeyMutatorState { static void PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end) { - int i; + int i; int n = end - start; - ListCell *lc; + ListCell *lc; memset(state, 0, sizeof(*state)); @@ -438,8 +439,8 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end) state->elemsList = list_copy(elems); - state->elems = palloc(sizeof(void*) * n); - state->elemCells = palloc(sizeof(ListCell*) * n); + state->elems = palloc(sizeof(void *) * n); + state->elemCells = palloc(sizeof(ListCell *) * n); state->positions = palloc(sizeof(int) * n); i = 0; @@ -459,10 +460,10 @@ PathkeyMutatorInit(PathkeyMutatorState *state, List *elems, int start, int end) static void PathkeyMutatorSwap(int *a, int i, int j) { - int s = a[i]; + int s = a[i]; - a[i] = a[j]; - a[j] = s; + a[i] = a[j]; + a[j] = s; } /* @@ -471,7 +472,10 @@ PathkeyMutatorSwap(int *a, int i, int j) static bool PathkeyMutatorNextSet(int *a, int n) { - int j, k, l, r; + int j, + k, + l, + r; j = n - 2; @@ -507,7 +511,7 @@ PathkeyMutatorNextSet(int *a, int n) static List * PathkeyMutatorNext(PathkeyMutatorState *state) { - int i; + int i; state->count++; @@ -528,9 +532,9 @@ PathkeyMutatorNext(PathkeyMutatorState *state) } /* update the list cells to point to the right elements */ - for(i = 0; i < state->mutatorNColumns; i++) + for (i = 0; i < state->mutatorNColumns; i++) lfirst(state->elemCells[i]) = - (void *) state->elems[ state->positions[i] - 1 ]; + (void *) state->elems[state->positions[i] - 1]; return state->elemsList; } @@ -541,7 +545,7 @@ PathkeyMutatorNext(PathkeyMutatorState *state) typedef struct PathkeySortCost { Cost cost; - PathKey *pathkey; + PathKey *pathkey; } PathkeySortCost; 
static int @@ -581,41 +585,42 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows, List **group_pathkeys, List **group_clauses, int n_preordered) { - List *new_group_pathkeys = NIL, - *new_group_clauses = NIL, - *var_group_pathkeys; + List *new_group_pathkeys = NIL, + *new_group_clauses = NIL, + *var_group_pathkeys; - ListCell *cell; - PathkeyMutatorState mstate; - double cheapest_sort_cost = -1.0; + ListCell *cell; + PathkeyMutatorState mstate; + double cheapest_sort_cost = -1.0; - int nFreeKeys; - int nToPermute; + int nFreeKeys; + int nToPermute; /* If there are less than 2 unsorted pathkeys, we're done. */ if (list_length(*group_pathkeys) - n_preordered < 2) return false; /* - * We could exhaustively cost all possible orderings of the pathkeys, but for - * a large number of pathkeys it might be prohibitively expensive. So we try - * to apply simple cheap heuristics first - we sort the pathkeys by sort cost - * (as if the pathkey was sorted independently) and then check only the four - * cheapest pathkeys. The remaining pathkeys are kept ordered by cost. + * We could exhaustively cost all possible orderings of the pathkeys, but + * for a large number of pathkeys it might be prohibitively expensive. So + * we try to apply simple cheap heuristics first - we sort the pathkeys by + * sort cost (as if the pathkey was sorted independently) and then check + * only the four cheapest pathkeys. The remaining pathkeys are kept + * ordered by cost. * * XXX This is a very simple heuristics, but likely to work fine for most - * cases (because the number of GROUP BY clauses tends to be lower than 4). - * But it ignores how the number of distinct values in each pathkey affects - * the following steps. It might be better to use "more expensive" pathkey - * first if it has many distinct values, because it then limits the number - * of comparisons for the remaining pathkeys. But evaluating that is likely - * quite the expensive. 
+ * cases (because the number of GROUP BY clauses tends to be lower than + * 4). But it ignores how the number of distinct values in each pathkey + * affects the following steps. It might be better to use "more expensive" + * pathkey first if it has many distinct values, because it then limits + * the number of comparisons for the remaining pathkeys. But evaluating + * that is likely quite the expensive. */ nFreeKeys = list_length(*group_pathkeys) - n_preordered; nToPermute = 4; if (nFreeKeys > nToPermute) { - int i; + int i; PathkeySortCost *costs = palloc(sizeof(PathkeySortCost) * nFreeKeys); /* skip the pre-ordered pathkeys */ @@ -624,7 +629,7 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows, /* estimate cost for sorting individual pathkeys */ for (i = 0; cell != NULL; i++, (cell = lnext(*group_pathkeys, cell))) { - List *to_cost = list_make1(lfirst(cell)); + List *to_cost = list_make1(lfirst(cell)); Assert(i < nFreeKeys); @@ -658,28 +663,29 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows, Assert(list_length(new_group_pathkeys) == list_length(*group_pathkeys)); /* - * Generate pathkey lists with permutations of the first nToPermute pathkeys. + * Generate pathkey lists with permutations of the first nToPermute + * pathkeys. * * XXX We simply calculate sort cost for each individual pathkey list, but - * there's room for two dynamic programming optimizations here. Firstly, we - * may pass the current "best" cost to cost_sort_estimate so that it can - * "abort" if the estimated pathkeys list exceeds it. Secondly, it could pass - * the return information about the position when it exceeded the cost, and - * we could skip all permutations with the same prefix. + * there's room for two dynamic programming optimizations here. Firstly, + * we may pass the current "best" cost to cost_sort_estimate so that it + * can "abort" if the estimated pathkeys list exceeds it. 
Secondly, it + * could pass the return information about the position when it exceeded + * the cost, and we could skip all permutations with the same prefix. * * Imagine we've already found ordering with cost C1, and we're evaluating * another ordering - cost_sort_estimate() calculates cost by adding the * pathkeys one by one (more or less), and the cost only grows. If at any - * point it exceeds C1, it can't possibly be "better" so we can discard it. - * But we also know that we can discard all ordering with the same prefix, - * because if we're estimating (a,b,c,d) and we exceed C1 at (a,b) then the - * same thing will happen for any ordering with this prefix. + * point it exceeds C1, it can't possibly be "better" so we can discard + * it. But we also know that we can discard all ordering with the same + * prefix, because if we're estimating (a,b,c,d) and we exceed C1 at (a,b) + * then the same thing will happen for any ordering with this prefix. */ PathkeyMutatorInit(&mstate, new_group_pathkeys, n_preordered, n_preordered + nToPermute); - while((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL) + while ((var_group_pathkeys = PathkeyMutatorNext(&mstate)) != NIL) { - Cost cost; + Cost cost; cost = cost_sort_estimate(root, var_group_pathkeys, n_preordered, nrows); @@ -694,11 +700,11 @@ get_cheapest_group_keys_order(PlannerInfo *root, double nrows, /* Reorder the group clauses according to the reordered pathkeys. 
*/ foreach(cell, new_group_pathkeys) { - PathKey *pathkey = (PathKey *) lfirst(cell); + PathKey *pathkey = (PathKey *) lfirst(cell); new_group_clauses = lappend(new_group_clauses, - get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref, - *group_clauses)); + get_sortgroupref_clause(pathkey->pk_eclass->ec_sortref, + *group_clauses)); } /* Just append the rest GROUP BY clauses */ @@ -745,8 +751,8 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows, PathKeyInfo *info; int n_preordered = 0; - List *pathkeys = group_pathkeys; - List *clauses = group_clauses; + List *pathkeys = group_pathkeys; + List *clauses = group_clauses; /* always return at least the original pathkeys/clauses */ info = makeNode(PathKeyInfo); @@ -756,9 +762,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows, infos = lappend(infos, info); /* - * Should we try generating alternative orderings of the group keys? If not, - * we produce only the order specified in the query, i.e. the optimization - * is effectively disabled. + * Should we try generating alternative orderings of the group keys? If + * not, we produce only the order specified in the query, i.e. the + * optimization is effectively disabled. */ if (!enable_group_by_reordering) return infos; @@ -782,8 +788,9 @@ get_useful_group_keys_orderings(PlannerInfo *root, double nrows, } /* - * If the path is sorted in some way, try reordering the group keys to match - * as much of the ordering as possible - we get this sort for free (mostly). + * If the path is sorted in some way, try reordering the group keys to + * match as much of the ordering as possible - we get this sort for free + * (mostly). * * We must not do this when there are no grouping sets, because those use * more complex logic to decide the ordering. 
@@ -2400,8 +2407,8 @@ pathkeys_useful_for_ordering(PlannerInfo *root, List *pathkeys) static int pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys) { - ListCell *key; - int n = 0; + ListCell *key; + int n = 0; /* no special ordering requested for grouping */ if (root->group_pathkeys == NIL) @@ -2414,7 +2421,7 @@ pathkeys_useful_for_grouping(PlannerInfo *root, List *pathkeys) /* walk the pathkeys and search for matching group key */ foreach(key, pathkeys) { - PathKey *pathkey = (PathKey *) lfirst(key); + PathKey *pathkey = (PathKey *) lfirst(key); /* no matching group key, we're done */ if (!list_member_ptr(root->group_pathkeys, pathkey)) |