-rw-r--r--  contrib/auto_explain/auto_explain.c | 2
-rw-r--r--  contrib/dblink/dblink.c | 2
-rw-r--r--  contrib/file_fdw/file_fdw.c | 67
-rw-r--r--  contrib/pg_archivecleanup/pg_archivecleanup.c | 16
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.c | 77
-rw-r--r--  contrib/pg_test_fsync/pg_test_fsync.c | 7
-rw-r--r--  contrib/pg_test_timing/pg_test_timing.c | 34
-rw-r--r--  contrib/pg_trgm/trgm_gist.c | 9
-rw-r--r--  contrib/pg_upgrade/check.c | 90
-rw-r--r--  contrib/pg_upgrade/controldata.c | 7
-rw-r--r--  contrib/pg_upgrade/exec.c | 15
-rw-r--r--  contrib/pg_upgrade/file.c | 11
-rw-r--r--  contrib/pg_upgrade/function.c | 102
-rw-r--r--  contrib/pg_upgrade/info.c | 30
-rw-r--r--  contrib/pg_upgrade/option.c | 47
-rw-r--r--  contrib/pg_upgrade/pg_upgrade.c | 29
-rw-r--r--  contrib/pg_upgrade/pg_upgrade.h | 49
-rw-r--r--  contrib/pg_upgrade/relfilenode.c | 26
-rw-r--r--  contrib/pg_upgrade/server.c | 16
-rw-r--r--  contrib/pg_upgrade/tablespace.c | 4
-rw-r--r--  contrib/pg_upgrade/version_old_8_3.c | 36
-rw-r--r--  contrib/pgbench/pgbench.c | 53
-rw-r--r--  contrib/pgcrypto/crypt-md5.c | 4
-rw-r--r--  contrib/pgcrypto/px.h | 5
-rw-r--r--  contrib/pgstattuple/pgstatindex.c | 4
-rw-r--r--  contrib/pgstattuple/pgstattuple.c | 2
-rw-r--r--  contrib/sepgsql/database.c | 33
-rw-r--r--  contrib/sepgsql/dml.c | 2
-rw-r--r--  contrib/sepgsql/hooks.c | 38
-rw-r--r--  contrib/sepgsql/label.c | 60
-rw-r--r--  contrib/sepgsql/proc.c | 38
-rw-r--r--  contrib/sepgsql/relation.c | 38
-rw-r--r--  contrib/sepgsql/schema.c | 26
-rw-r--r--  contrib/sepgsql/sepgsql.h | 21
-rw-r--r--  contrib/sepgsql/uavc.c | 162
-rw-r--r--  contrib/spi/refint.c | 3
-rw-r--r--  contrib/vacuumlo/vacuumlo.c | 10
-rw-r--r--  contrib/xml2/xpath.c | 214
-rw-r--r--  contrib/xml2/xslt_proc.c | 54
-rw-r--r--  src/backend/access/gist/gist.c | 12
-rw-r--r--  src/backend/access/gist/gistbuild.c | 69
-rw-r--r--  src/backend/access/gist/gistbuildbuffers.c | 2
-rw-r--r--  src/backend/access/gist/gistproc.c | 6
-rw-r--r--  src/backend/access/gist/gistscan.c | 2
-rw-r--r--  src/backend/access/gist/gistsplit.c | 3
-rw-r--r--  src/backend/access/hash/hashovfl.c | 2
-rw-r--r--  src/backend/access/heap/heapam.c | 132
-rw-r--r--  src/backend/access/heap/hio.c | 39
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 6
-rw-r--r--  src/backend/access/heap/visibilitymap.c | 40
-rw-r--r--  src/backend/access/index/genam.c | 2
-rw-r--r--  src/backend/access/index/indexam.c | 10
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c | 10
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 2
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 4
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 8
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 72
-rw-r--r--  src/backend/access/spgist/spgdoinsert.c | 155
-rw-r--r--  src/backend/access/spgist/spginsert.c | 2
-rw-r--r--  src/backend/access/spgist/spgkdtreeproc.c | 12
-rw-r--r--  src/backend/access/spgist/spgquadtreeproc.c | 4
-rw-r--r--  src/backend/access/spgist/spgscan.c | 15
-rw-r--r--  src/backend/access/spgist/spgtextproc.c | 26
-rw-r--r--  src/backend/access/spgist/spgutils.c | 12
-rw-r--r--  src/backend/access/spgist/spgvacuum.c | 56
-rw-r--r--  src/backend/access/spgist/spgxlog.c | 46
-rw-r--r--  src/backend/access/transam/clog.c | 2
-rw-r--r--  src/backend/access/transam/slru.c | 44
-rw-r--r--  src/backend/access/transam/twophase.c | 29
-rw-r--r--  src/backend/access/transam/varsup.c | 4
-rw-r--r--  src/backend/access/transam/xact.c | 55
-rw-r--r--  src/backend/access/transam/xlog.c | 318
-rw-r--r--  src/backend/access/transam/xlogutils.c | 8
-rw-r--r--  src/backend/catalog/aclchk.c | 10
-rw-r--r--  src/backend/catalog/dependency.c | 38
-rw-r--r--  src/backend/catalog/heap.c | 8
-rw-r--r--  src/backend/catalog/index.c | 52
-rw-r--r--  src/backend/catalog/namespace.c | 61
-rw-r--r--  src/backend/catalog/objectaddress.c | 57
-rw-r--r--  src/backend/catalog/pg_constraint.c | 4
-rw-r--r--  src/backend/catalog/pg_depend.c | 2
-rw-r--r--  src/backend/catalog/pg_proc.c | 2
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 2
-rw-r--r--  src/backend/catalog/storage.c | 4
-rw-r--r--  src/backend/commands/analyze.c | 22
-rw-r--r--  src/backend/commands/cluster.c | 8
-rw-r--r--  src/backend/commands/copy.c | 22
-rw-r--r--  src/backend/commands/createas.c | 46
-rw-r--r--  src/backend/commands/dbcommands.c | 26
-rw-r--r--  src/backend/commands/dropcmds.c | 16
-rw-r--r--  src/backend/commands/explain.c | 20
-rw-r--r--  src/backend/commands/extension.c | 10
-rw-r--r--  src/backend/commands/foreigncmds.c | 8
-rw-r--r--  src/backend/commands/functioncmds.c | 8
-rw-r--r--  src/backend/commands/indexcmds.c | 32
-rw-r--r--  src/backend/commands/lockcmds.c | 27
-rw-r--r--  src/backend/commands/opclasscmds.c | 6
-rw-r--r--  src/backend/commands/prepare.c | 12
-rw-r--r--  src/backend/commands/proclang.c | 4
-rw-r--r--  src/backend/commands/seclabel.c | 10
-rw-r--r--  src/backend/commands/sequence.c | 20
-rw-r--r--  src/backend/commands/tablecmds.c | 271
-rw-r--r--  src/backend/commands/tablespace.c | 29
-rw-r--r--  src/backend/commands/trigger.c | 34
-rw-r--r--  src/backend/commands/typecmds.c | 28
-rw-r--r--  src/backend/commands/user.c | 3
-rw-r--r--  src/backend/commands/vacuum.c | 14
-rw-r--r--  src/backend/commands/vacuumlazy.c | 60
-rw-r--r--  src/backend/commands/view.c | 6
-rw-r--r--  src/backend/executor/execCurrent.c | 2
-rw-r--r--  src/backend/executor/execMain.c | 4
-rw-r--r--  src/backend/executor/execQual.c | 24
-rw-r--r--  src/backend/executor/execUtils.c | 4
-rw-r--r--  src/backend/executor/functions.c | 12
-rw-r--r--  src/backend/executor/nodeBitmapHeapscan.c | 3
-rw-r--r--  src/backend/executor/nodeIndexonlyscan.c | 20
-rw-r--r--  src/backend/executor/nodeMaterial.c | 2
-rw-r--r--  src/backend/executor/nodeMergeAppend.c | 4
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 8
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 4
-rw-r--r--  src/backend/executor/nodeSetOp.c | 2
-rw-r--r--  src/backend/executor/spi.c | 18
-rw-r--r--  src/backend/libpq/auth.c | 20
-rw-r--r--  src/backend/libpq/be-secure.c | 14
-rw-r--r--  src/backend/libpq/hba.c | 105
-rw-r--r--  src/backend/libpq/pqcomm.c | 8
-rw-r--r--  src/backend/nodes/bitmapset.c | 4
-rw-r--r--  src/backend/nodes/copyfuncs.c | 4
-rw-r--r--  src/backend/nodes/equalfuncs.c | 4
-rw-r--r--  src/backend/nodes/list.c | 26
-rw-r--r--  src/backend/nodes/nodeFuncs.c | 36
-rw-r--r--  src/backend/nodes/outfuncs.c | 2
-rw-r--r--  src/backend/nodes/print.c | 14
-rw-r--r--  src/backend/nodes/readfuncs.c | 4
-rw-r--r--  src/backend/nodes/tidbitmap.c | 16
-rw-r--r--  src/backend/optimizer/geqo/geqo_selection.c | 4
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 40
-rw-r--r--  src/backend/optimizer/path/costsize.c | 33
-rw-r--r--  src/backend/optimizer/path/equivclass.c | 22
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 177
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 58
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 4
-rw-r--r--  src/backend/optimizer/path/orindxpath.c | 6
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 2
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 61
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 6
-rw-r--r--  src/backend/optimizer/plan/planagg.c | 4
-rw-r--r--  src/backend/optimizer/plan/planmain.c | 4
-rw-r--r--  src/backend/optimizer/plan/planner.c | 44
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 12
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 4
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c | 22
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 6
-rw-r--r--  src/backend/optimizer/util/clauses.c | 56
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 139
-rw-r--r--  src/backend/optimizer/util/placeholder.c | 4
-rw-r--r--  src/backend/optimizer/util/plancat.c | 8
-rw-r--r--  src/backend/optimizer/util/predtest.c | 2
-rw-r--r--  src/backend/optimizer/util/relnode.c | 10
-rw-r--r--  src/backend/optimizer/util/var.c | 2
-rw-r--r--  src/backend/parser/analyze.c | 12
-rw-r--r--  src/backend/parser/parse_coerce.c | 74
-rw-r--r--  src/backend/parser/parse_expr.c | 10
-rw-r--r--  src/backend/parser/parse_func.c | 8
-rw-r--r--  src/backend/parser/parse_relation.c | 7
-rw-r--r--  src/backend/parser/parse_target.c | 4
-rw-r--r--  src/backend/parser/parse_type.c | 4
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 14
-rw-r--r--  src/backend/port/darwin/system.c | 2
-rw-r--r--  src/backend/port/dynloader/aix.h | 2
-rw-r--r--  src/backend/port/dynloader/cygwin.h | 2
-rw-r--r--  src/backend/port/dynloader/freebsd.h | 2
-rw-r--r--  src/backend/port/dynloader/irix.h | 2
-rw-r--r--  src/backend/port/dynloader/linux.h | 2
-rw-r--r--  src/backend/port/dynloader/netbsd.h | 2
-rw-r--r--  src/backend/port/dynloader/openbsd.h | 2
-rw-r--r--  src/backend/port/dynloader/osf.h | 2
-rw-r--r--  src/backend/port/dynloader/sco.h | 2
-rw-r--r--  src/backend/port/dynloader/solaris.h | 2
-rw-r--r--  src/backend/port/dynloader/unixware.h | 2
-rw-r--r--  src/backend/port/dynloader/win32.h | 2
-rw-r--r--  src/backend/port/unix_latch.c | 36
-rw-r--r--  src/backend/port/win32/mingwcompat.c | 4
-rw-r--r--  src/backend/port/win32/socket.c | 7
-rw-r--r--  src/backend/port/win32/timer.c | 8
-rw-r--r--  src/backend/port/win32_latch.c | 5
-rw-r--r--  src/backend/port/win32_sema.c | 4
-rw-r--r--  src/backend/postmaster/autovacuum.c | 12
-rw-r--r--  src/backend/postmaster/bgwriter.c | 10
-rw-r--r--  src/backend/postmaster/checkpointer.c | 73
-rw-r--r--  src/backend/postmaster/pgarch.c | 16
-rw-r--r--  src/backend/postmaster/pgstat.c | 21
-rw-r--r--  src/backend/postmaster/postmaster.c | 101
-rw-r--r--  src/backend/postmaster/syslogger.c | 5
-rw-r--r--  src/backend/postmaster/walwriter.c | 8
-rw-r--r--  src/backend/regex/regc_locale.c | 13
-rw-r--r--  src/backend/regex/regc_pg_locale.c | 10
-rw-r--r--  src/backend/regex/regcomp.c | 10
-rw-r--r--  src/backend/regex/rege_dfa.c | 2
-rw-r--r--  src/backend/regex/regerror.c | 2
-rw-r--r--  src/backend/regex/regexec.c | 34
-rw-r--r--  src/backend/replication/basebackup.c | 12
-rw-r--r--  src/backend/replication/syncrep.c | 24
-rw-r--r--  src/backend/replication/walreceiver.c | 8
-rw-r--r--  src/backend/replication/walreceiverfuncs.c | 10
-rw-r--r--  src/backend/replication/walsender.c | 61
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 2
-rw-r--r--  src/backend/rewrite/rewriteSupport.c | 4
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 24
-rw-r--r--  src/backend/storage/buffer/freelist.c | 2
-rw-r--r--  src/backend/storage/file/fd.c | 24
-rw-r--r--  src/backend/storage/ipc/pmsignal.c | 5
-rw-r--r--  src/backend/storage/ipc/procarray.c | 161
-rw-r--r--  src/backend/storage/ipc/sinval.c | 2
-rw-r--r--  src/backend/storage/ipc/sinvaladt.c | 23
-rw-r--r--  src/backend/storage/ipc/standby.c | 12
-rw-r--r--  src/backend/storage/lmgr/lock.c | 201
-rw-r--r--  src/backend/storage/lmgr/lwlock.c | 5
-rw-r--r--  src/backend/storage/lmgr/predicate.c | 58
-rw-r--r--  src/backend/storage/lmgr/proc.c | 44
-rw-r--r--  src/backend/storage/lmgr/s_lock.c | 2
-rw-r--r--  src/backend/storage/smgr/md.c | 26
-rw-r--r--  src/backend/storage/smgr/smgr.c | 4
-rw-r--r--  src/backend/tcop/postgres.c | 59
-rw-r--r--  src/backend/tcop/utility.c | 12
-rw-r--r--  src/backend/tsearch/dict_thesaurus.c | 4
-rw-r--r--  src/backend/tsearch/spell.c | 4
-rw-r--r--  src/backend/tsearch/to_tsany.c | 4
-rw-r--r--  src/backend/tsearch/ts_utils.c | 2
-rw-r--r--  src/backend/utils/adt/acl.c | 6
-rw-r--r--  src/backend/utils/adt/array_selfuncs.c | 42
-rw-r--r--  src/backend/utils/adt/array_typanalyze.c | 42
-rw-r--r--  src/backend/utils/adt/cash.c | 4
-rw-r--r--  src/backend/utils/adt/date.c | 2
-rw-r--r--  src/backend/utils/adt/datetime.c | 2
-rw-r--r--  src/backend/utils/adt/dbsize.c | 2
-rw-r--r--  src/backend/utils/adt/float.c | 26
-rw-r--r--  src/backend/utils/adt/formatting.c | 6
-rw-r--r--  src/backend/utils/adt/inet_net_pton.c | 3
-rw-r--r--  src/backend/utils/adt/json.c | 229
-rw-r--r--  src/backend/utils/adt/lockfuncs.c | 6
-rw-r--r--  src/backend/utils/adt/mac.c | 16
-rw-r--r--  src/backend/utils/adt/misc.c | 19
-rw-r--r--  src/backend/utils/adt/numeric.c | 8
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 6
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c | 8
-rw-r--r--  src/backend/utils/adt/rangetypes.c | 54
-rw-r--r--  src/backend/utils/adt/rangetypes_gist.c | 231
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 92
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 91
-rw-r--r--  src/backend/utils/adt/timestamp.c | 18
-rw-r--r--  src/backend/utils/adt/tsgistidx.c | 4
-rw-r--r--  src/backend/utils/adt/tsquery_util.c | 2
-rw-r--r--  src/backend/utils/adt/tsrank.c | 4
-rw-r--r--  src/backend/utils/adt/tsvector_op.c | 6
-rw-r--r--  src/backend/utils/adt/varbit.c | 2
-rw-r--r--  src/backend/utils/adt/varchar.c | 2
-rw-r--r--  src/backend/utils/adt/varlena.c | 11
-rw-r--r--  src/backend/utils/adt/xml.c | 36
-rw-r--r--  src/backend/utils/cache/catcache.c | 4
-rw-r--r--  src/backend/utils/cache/inval.c | 2
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 8
-rw-r--r--  src/backend/utils/cache/plancache.c | 82
-rw-r--r--  src/backend/utils/cache/relcache.c | 12
-rw-r--r--  src/backend/utils/cache/ts_cache.c | 8
-rw-r--r--  src/backend/utils/error/elog.c | 12
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 4
-rw-r--r--  src/backend/utils/fmgr/funcapi.c | 16
-rw-r--r--  src/backend/utils/init/miscinit.c | 2
-rw-r--r--  src/backend/utils/mb/wchar.c | 23
-rw-r--r--  src/backend/utils/misc/guc.c | 24
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 2
-rw-r--r--  src/backend/utils/sort/sortsupport.c | 5
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 31
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 2
-rw-r--r--  src/backend/utils/time/snapmgr.c | 82
-rw-r--r--  src/backend/utils/time/tqual.c | 10
-rw-r--r--  src/bin/initdb/findtimezone.c | 3
-rw-r--r--  src/bin/initdb/initdb.c | 41
-rw-r--r--  src/bin/pg_basebackup/pg_basebackup.c | 9
-rw-r--r--  src/bin/pg_basebackup/pg_receivexlog.c | 13
-rw-r--r--  src/bin/pg_basebackup/receivelog.c | 25
-rw-r--r--  src/bin/pg_basebackup/receivelog.h | 16
-rw-r--r--  src/bin/pg_basebackup/streamutil.c | 4
-rw-r--r--  src/bin/pg_ctl/pg_ctl.c | 17
-rw-r--r--  src/bin/pg_dump/common.c | 4
-rw-r--r--  src/bin/pg_dump/dumputils.c | 28
-rw-r--r--  src/bin/pg_dump/dumputils.h | 15
-rw-r--r--  src/bin/pg_dump/pg_backup.h | 2
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 47
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.h | 2
-rw-r--r--  src/bin/pg_dump/pg_backup_custom.c | 8
-rw-r--r--  src/bin/pg_dump/pg_backup_db.c | 14
-rw-r--r--  src/bin/pg_dump/pg_backup_directory.c | 14
-rw-r--r--  src/bin/pg_dump/pg_backup_tar.c | 12
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 96
-rw-r--r--  src/bin/pg_dump/pg_dump_sort.c | 28
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 6
-rw-r--r--  src/bin/pgevent/pgevent.c | 2
-rw-r--r--  src/bin/psql/command.c | 27
-rw-r--r--  src/bin/psql/common.c | 9
-rw-r--r--  src/bin/psql/copy.c | 5
-rw-r--r--  src/bin/psql/describe.c | 48
-rw-r--r--  src/bin/psql/help.c | 6
-rw-r--r--  src/bin/psql/input.c | 3
-rw-r--r--  src/bin/psql/print.c | 7
-rw-r--r--  src/bin/psql/print.h | 6
-rw-r--r--  src/bin/psql/startup.c | 7
-rw-r--r--  src/bin/psql/stringutils.c | 4
-rw-r--r--  src/bin/psql/tab-complete.c | 50
-rw-r--r--  src/bin/psql/variables.c | 2
-rw-r--r--  src/bin/scripts/clusterdb.c | 6
-rw-r--r--  src/bin/scripts/common.c | 2
-rw-r--r--  src/bin/scripts/common.h | 8
-rw-r--r--  src/bin/scripts/createlang.c | 7
-rw-r--r--  src/bin/scripts/dropdb.c | 4
-rw-r--r--  src/bin/scripts/droplang.c | 7
-rw-r--r--  src/bin/scripts/reindexdb.c | 6
-rw-r--r--  src/bin/scripts/vacuumdb.c | 10
-rw-r--r--  src/include/access/gist_private.h | 2
-rw-r--r--  src/include/access/heapam.h | 2
-rw-r--r--  src/include/access/htup.h | 6
-rw-r--r--  src/include/access/nbtree.h | 10
-rw-r--r--  src/include/access/slru.h | 2
-rw-r--r--  src/include/access/spgist.h | 22
-rw-r--r--  src/include/access/spgist_private.h | 60
-rw-r--r--  src/include/access/xact.h | 3
-rw-r--r--  src/include/access/xlog_internal.h | 4
-rw-r--r--  src/include/catalog/catalog.h | 2
-rw-r--r--  src/include/catalog/genbki.h | 2
-rw-r--r--  src/include/catalog/index.h | 2
-rw-r--r--  src/include/catalog/namespace.h | 8
-rw-r--r--  src/include/catalog/objectaccess.h | 6
-rw-r--r--  src/include/catalog/objectaddress.h | 4
-rw-r--r--  src/include/catalog/pg_aggregate.h | 1
-rw-r--r--  src/include/catalog/pg_attrdef.h | 1
-rw-r--r--  src/include/catalog/pg_attribute.h | 2
-rw-r--r--  src/include/catalog/pg_constraint.h | 1
-rw-r--r--  src/include/catalog/pg_control.h | 12
-rw-r--r--  src/include/catalog/pg_database.h | 1
-rw-r--r--  src/include/catalog/pg_db_role_setting.h | 1
-rw-r--r--  src/include/catalog/pg_default_acl.h | 1
-rw-r--r--  src/include/catalog/pg_description.h | 1
-rw-r--r--  src/include/catalog/pg_extension.h | 3
-rw-r--r--  src/include/catalog/pg_foreign_data_wrapper.h | 1
-rw-r--r--  src/include/catalog/pg_foreign_server.h | 1
-rw-r--r--  src/include/catalog/pg_foreign_table.h | 1
-rw-r--r--  src/include/catalog/pg_index.h | 1
-rw-r--r--  src/include/catalog/pg_language.h | 1
-rw-r--r--  src/include/catalog/pg_largeobject.h | 1
-rw-r--r--  src/include/catalog/pg_largeobject_metadata.h | 1
-rw-r--r--  src/include/catalog/pg_namespace.h | 1
-rw-r--r--  src/include/catalog/pg_opclass.h | 4
-rw-r--r--  src/include/catalog/pg_operator.h | 2
-rw-r--r--  src/include/catalog/pg_pltemplate.h | 1
-rw-r--r--  src/include/catalog/pg_proc.h | 57
-rw-r--r--  src/include/catalog/pg_range.h | 6
-rw-r--r--  src/include/catalog/pg_rewrite.h | 1
-rw-r--r--  src/include/catalog/pg_seclabel.h | 1
-rw-r--r--  src/include/catalog/pg_shdescription.h | 1
-rw-r--r--  src/include/catalog/pg_shseclabel.h | 15
-rw-r--r--  src/include/catalog/pg_statistic.h | 6
-rw-r--r--  src/include/catalog/pg_tablespace.h | 1
-rw-r--r--  src/include/catalog/pg_trigger.h | 7
-rw-r--r--  src/include/catalog/pg_ts_dict.h | 1
-rw-r--r--  src/include/catalog/pg_type.h | 6
-rw-r--r--  src/include/commands/createas.h | 2
-rw-r--r--  src/include/commands/defrem.h | 2
-rw-r--r--  src/include/commands/explain.h | 2
-rw-r--r--  src/include/commands/tablecmds.h | 2
-rw-r--r--  src/include/commands/typecmds.h | 2
-rw-r--r--  src/include/commands/vacuum.h | 6
-rw-r--r--  src/include/datatype/timestamp.h | 8
-rw-r--r--  src/include/executor/executor.h | 2
-rw-r--r--  src/include/executor/instrument.h | 14
-rw-r--r--  src/include/executor/spi_priv.h | 2
-rw-r--r--  src/include/foreign/fdwapi.h | 32
-rw-r--r--  src/include/lib/stringinfo.h | 3
-rw-r--r--  src/include/libpq/hba.h | 2
-rw-r--r--  src/include/libpq/ip.h | 4
-rw-r--r--  src/include/nodes/execnodes.h | 2
-rw-r--r--  src/include/nodes/parsenodes.h | 18
-rw-r--r--  src/include/nodes/primnodes.h | 10
-rw-r--r--  src/include/nodes/relation.h | 14
-rw-r--r--  src/include/optimizer/cost.h | 20
-rw-r--r--  src/include/optimizer/pathnode.h | 26
-rw-r--r--  src/include/optimizer/paths.h | 4
-rw-r--r--  src/include/optimizer/prep.h | 2
-rw-r--r--  src/include/optimizer/subselect.h | 2
-rw-r--r--  src/include/parser/analyze.h | 2
-rw-r--r--  src/include/pg_config_manual.h | 4
-rw-r--r--  src/include/pg_trace.h | 2
-rw-r--r--  src/include/pgstat.h | 19
-rw-r--r--  src/include/port.h | 3
-rw-r--r--  src/include/port/win32.h | 2
-rw-r--r--  src/include/postgres.h | 2
-rw-r--r--  src/include/postmaster/postmaster.h | 8
-rw-r--r--  src/include/regex/regguts.h | 4
-rw-r--r--  src/include/replication/walprotocol.h | 2
-rw-r--r--  src/include/replication/walreceiver.h | 4
-rw-r--r--  src/include/replication/walsender_private.h | 3
-rw-r--r--  src/include/rewrite/rewriteSupport.h | 4
-rw-r--r--  src/include/snowball/header.h | 2
-rw-r--r--  src/include/storage/barrier.h | 19
-rw-r--r--  src/include/storage/latch.h | 10
-rw-r--r--  src/include/storage/lock.h | 4
-rw-r--r--  src/include/storage/lwlock.h | 6
-rw-r--r--  src/include/storage/predicate.h | 2
-rw-r--r--  src/include/storage/proc.h | 9
-rw-r--r--  src/include/storage/procarray.h | 2
-rw-r--r--  src/include/storage/sinval.h | 4
-rw-r--r--  src/include/storage/smgr.h | 2
-rw-r--r--  src/include/tsearch/ts_public.h | 10
-rw-r--r--  src/include/utils/acl.h | 2
-rw-r--r--  src/include/utils/builtins.h | 2
-rw-r--r--  src/include/utils/guc.h | 2
-rw-r--r--  src/include/utils/guc_tables.h | 4
-rw-r--r--  src/include/utils/json.h | 2
-rw-r--r--  src/include/utils/lsyscache.h | 10
-rw-r--r--  src/include/utils/memutils.h | 2
-rw-r--r--  src/include/utils/pg_crc_tables.h | 1
-rw-r--r--  src/include/utils/plancache.h | 34
-rw-r--r--  src/include/utils/rangetypes.h | 18
-rw-r--r--  src/include/utils/rel.h | 2
-rw-r--r--  src/include/utils/selfuncs.h | 2
-rw-r--r--  src/include/utils/sortsupport.h | 26
-rw-r--r--  src/include/utils/timestamp.h | 2
-rw-r--r--  src/include/utils/tqual.h | 2
-rw-r--r--  src/include/utils/typcache.h | 8
-rw-r--r--  src/include/utils/xml.h | 12
-rw-r--r--  src/interfaces/ecpg/ecpglib/connect.c | 27
-rw-r--r--  src/interfaces/ecpg/ecpglib/execute.c | 4
-rw-r--r--  src/interfaces/ecpg/ecpglib/extern.h | 6
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt.h | 6
-rw-r--r--  src/interfaces/ecpg/preproc/type.c | 5
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 86
-rw-r--r--  src/interfaces/libpq/fe-exec.c | 6
-rw-r--r--  src/interfaces/libpq/fe-protocol2.c | 15
-rw-r--r--  src/interfaces/libpq/fe-protocol3.c | 4
-rw-r--r--  src/interfaces/libpq/fe-secure.c | 53
-rw-r--r--  src/interfaces/libpq/libpq-fe.h | 2
-rw-r--r--  src/interfaces/libpq/libpq-int.h | 4
-rw-r--r--  src/interfaces/libpq/test/uri-regress.c | 6
-rw-r--r--  src/pl/plperl/plperl.c | 10
-rw-r--r--  src/pl/plperl/plperl_helpers.h | 28
-rw-r--r--  src/pl/plpgsql/src/pl_comp.c | 2
-rw-r--r--  src/pl/plpgsql/src/pl_exec.c | 46
-rw-r--r--  src/pl/plpython/plpy_cursorobject.c | 31
-rw-r--r--  src/pl/plpython/plpy_cursorobject.h | 4
-rw-r--r--  src/pl/plpython/plpy_elog.c | 16
-rw-r--r--  src/pl/plpython/plpy_elog.h | 13
-rw-r--r--  src/pl/plpython/plpy_exec.c | 6
-rw-r--r--  src/pl/plpython/plpy_exec.h | 2
-rw-r--r--  src/pl/plpython/plpy_main.c | 13
-rw-r--r--  src/pl/plpython/plpy_main.h | 6
-rw-r--r--  src/pl/plpython/plpy_planobject.h | 2
-rw-r--r--  src/pl/plpython/plpy_plpymodule.c | 5
-rw-r--r--  src/pl/plpython/plpy_plpymodule.h | 2
-rw-r--r--  src/pl/plpython/plpy_procedure.h | 2
-rw-r--r--  src/pl/plpython/plpy_resultobject.c | 6
-rw-r--r--  src/pl/plpython/plpy_resultobject.h | 5
-rw-r--r--  src/pl/plpython/plpy_spi.c | 40
-rw-r--r--  src/pl/plpython/plpy_spi.h | 2
-rw-r--r--  src/pl/plpython/plpy_subxactobject.c | 2
-rw-r--r--  src/pl/plpython/plpy_subxactobject.h | 2
-rw-r--r--  src/pl/plpython/plpy_typeio.c | 6
-rw-r--r--  src/pl/plpython/plpy_typeio.h | 2
-rw-r--r--  src/pl/plpython/plpy_util.c | 1
-rw-r--r--  src/pl/plpython/plpy_util.h | 2
-rw-r--r--  src/pl/plpython/plpython.h | 8
-rw-r--r--  src/port/erand48.c | 2
-rw-r--r--  src/port/fls.c | 18
-rw-r--r--  src/port/getaddrinfo.c | 2
-rw-r--r--  src/port/path.c | 2
-rw-r--r--  src/port/win32setlocale.c | 38
-rw-r--r--  src/test/isolation/isolationtester.c | 87
-rw-r--r--  src/test/regress/pg_regress.c | 2
-rw-r--r--  src/test/thread/thread_test.c | 17
-rw-r--r--  src/timezone/pgtz.c | 8
-rw-r--r--  src/tools/msvc/Install.pm | 1056
-rw-r--r--  src/tools/msvc/MSBuildProject.pm | 325
-rw-r--r--  src/tools/msvc/Mkvcbuild.pm | 1278
-rw-r--r--  src/tools/msvc/Project.pm | 607
-rw-r--r--  src/tools/msvc/Solution.pm | 980
-rw-r--r--  src/tools/msvc/VCBuildProject.pm | 292
-rw-r--r--  src/tools/msvc/VSObjectFactory.pm | 152
-rw-r--r--  src/tools/msvc/build.pl | 20
-rw-r--r--  src/tools/msvc/builddoc.pl | 32
-rw-r--r--  src/tools/msvc/config_default.pl | 38
-rw-r--r--  src/tools/msvc/gendef.pl | 66
-rwxr-xr-x  src/tools/msvc/install.pl | 4
-rw-r--r--  src/tools/msvc/pgbison.pl | 14
-rw-r--r--  src/tools/msvc/pgflex.pl | 60
-rw-r--r--  src/tools/msvc/vcregress.pl | 400
494 files changed, 7504 insertions, 7207 deletions
diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c
index e48ea489dc..ad333b6644 100644
--- a/contrib/auto_explain/auto_explain.c
+++ b/contrib/auto_explain/auto_explain.c
@@ -23,7 +23,7 @@ static int auto_explain_log_min_duration = -1; /* msec or -1 */
static bool auto_explain_log_analyze = false;
static bool auto_explain_log_verbose = false;
static bool auto_explain_log_buffers = false;
-static bool auto_explain_log_timing = false;
+static bool auto_explain_log_timing = false;
static int auto_explain_log_format = EXPLAIN_FORMAT_TEXT;
static bool auto_explain_log_nested_statements = false;
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 71acb35af6..1e62d8091a 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -1140,7 +1140,7 @@ storeHandler(PGresult *res, const PGdataValue *columns,
* strings and add null termination. As a micro-optimization, allocate
* all the strings with one palloc.
*/
- pbuflen = nfields; /* count the null terminators themselves */
+ pbuflen = nfields; /* count the null terminators themselves */
for (i = 0; i < nfields; i++)
{
int len = columns[i].len;
diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c
index 66fd0e62cc..e3b9223b3e 100644
--- a/contrib/file_fdw/file_fdw.c
+++ b/contrib/file_fdw/file_fdw.c
@@ -109,17 +109,17 @@ PG_FUNCTION_INFO_V1(file_fdw_validator);
* FDW callback routines
*/
static void fileGetForeignRelSize(PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid);
+ RelOptInfo *baserel,
+ Oid foreigntableid);
static void fileGetForeignPaths(PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid);
+ RelOptInfo *baserel,
+ Oid foreigntableid);
static ForeignScan *fileGetForeignPlan(PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid,
- ForeignPath *best_path,
- List *tlist,
- List *scan_clauses);
+ RelOptInfo *baserel,
+ Oid foreigntableid,
+ ForeignPath *best_path,
+ List *tlist,
+ List *scan_clauses);
static void fileExplainForeignScan(ForeignScanState *node, ExplainState *es);
static void fileBeginForeignScan(ForeignScanState *node, int eflags);
static TupleTableSlot *fileIterateForeignScan(ForeignScanState *node);
@@ -141,7 +141,7 @@ static void estimate_size(PlannerInfo *root, RelOptInfo *baserel,
static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
FileFdwPlanState *fdw_private,
Cost *startup_cost, Cost *total_cost);
-static int file_acquire_sample_rows(Relation onerel, int elevel,
+static int file_acquire_sample_rows(Relation onerel, int elevel,
HeapTuple *rows, int targrows,
double *totalrows, double *totaldeadrows);
@@ -180,7 +180,7 @@ file_fdw_validator(PG_FUNCTION_ARGS)
List *options_list = untransformRelOptions(PG_GETARG_DATUM(0));
Oid catalog = PG_GETARG_OID(1);
char *filename = NULL;
- DefElem *force_not_null = NULL;
+ DefElem *force_not_null = NULL;
List *other_options = NIL;
ListCell *cell;
@@ -233,7 +233,7 @@ file_fdw_validator(PG_FUNCTION_ARGS)
buf.len > 0
? errhint("Valid options in this context are: %s",
buf.data)
- : errhint("There are no valid options in this context.")));
+ : errhint("There are no valid options in this context.")));
}
/*
@@ -393,13 +393,13 @@ get_file_fdw_attribute_options(Oid relid)
options = GetForeignColumnOptions(relid, attnum);
foreach(lc, options)
{
- DefElem *def = (DefElem *) lfirst(lc);
+ DefElem *def = (DefElem *) lfirst(lc);
if (strcmp(def->defname, "force_not_null") == 0)
{
if (defGetBoolean(def))
{
- char *attname = pstrdup(NameStr(attr->attname));
+ char *attname = pstrdup(NameStr(attr->attname));
fnncolumns = lappend(fnncolumns, makeString(attname));
}
@@ -429,8 +429,8 @@ fileGetForeignRelSize(PlannerInfo *root,
FileFdwPlanState *fdw_private;
/*
- * Fetch options. We only need filename at this point, but we might
- * as well get everything and not need to re-fetch it later in planning.
+ * Fetch options. We only need filename at this point, but we might as
+ * well get everything and not need to re-fetch it later in planning.
*/
fdw_private = (FileFdwPlanState *) palloc(sizeof(FileFdwPlanState));
fileGetOptions(foreigntableid,
@@ -468,13 +468,14 @@ fileGetForeignPaths(PlannerInfo *root,
baserel->rows,
startup_cost,
total_cost,
- NIL, /* no pathkeys */
- NULL, /* no outer rel either */
- NIL)); /* no fdw_private data */
+ NIL, /* no pathkeys */
+ NULL, /* no outer rel either */
+ NIL)); /* no fdw_private data */
/*
* If data file was sorted, and we knew it somehow, we could insert
- * appropriate pathkeys into the ForeignPath node to tell the planner that.
+ * appropriate pathkeys into the ForeignPath node to tell the planner
+ * that.
*/
}
@@ -505,8 +506,8 @@ fileGetForeignPlan(PlannerInfo *root,
return make_foreignscan(tlist,
scan_clauses,
scan_relid,
- NIL, /* no expressions to evaluate */
- NIL); /* no private state either */
+ NIL, /* no expressions to evaluate */
+ NIL); /* no private state either */
}
/*
@@ -665,14 +666,14 @@ fileAnalyzeForeignTable(Relation relation,
{
char *filename;
List *options;
- struct stat stat_buf;
+ struct stat stat_buf;
/* Fetch options of foreign table */
fileGetOptions(RelationGetRelid(relation), &filename, &options);
/*
- * Get size of the file. (XXX if we fail here, would it be better to
- * just return false to skip analyzing the table?)
+ * Get size of the file. (XXX if we fail here, would it be better to just
+ * return false to skip analyzing the table?)
*/
if (stat(filename, &stat_buf) < 0)
ereport(ERROR,
@@ -746,7 +747,7 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel,
* planner's idea of the relation width; which is bogus if not all
* columns are being read, not to mention that the text representation
* of a row probably isn't the same size as its internal
- * representation. Possibly we could do something better, but the
+ * representation. Possibly we could do something better, but the
* real answer to anyone who complains is "ANALYZE" ...
*/
int tuple_width;
@@ -811,7 +812,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
* which must have at least targrows entries.
* The actual number of rows selected is returned as the function result.
* We also count the total number of rows in the file and return it into
- * *totalrows. Note that *totaldeadrows is always set to 0.
+ * *totalrows. Note that *totaldeadrows is always set to 0.
*
* Note that the returned list of rows is not always in order by physical
* position in the file. Therefore, correlation estimates derived later
@@ -824,7 +825,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
double *totalrows, double *totaldeadrows)
{
int numrows = 0;
- double rowstoskip = -1; /* -1 means not set yet */
+ double rowstoskip = -1; /* -1 means not set yet */
double rstate;
TupleDesc tupDesc;
Datum *values;
@@ -853,8 +854,8 @@ file_acquire_sample_rows(Relation onerel, int elevel,
cstate = BeginCopyFrom(onerel, filename, NIL, options);
/*
- * Use per-tuple memory context to prevent leak of memory used to read rows
- * from the file with Copy routines.
+ * Use per-tuple memory context to prevent leak of memory used to read
+ * rows from the file with Copy routines.
*/
tupcontext = AllocSetContextCreate(CurrentMemoryContext,
"file_fdw temporary context",
@@ -912,10 +913,10 @@ file_acquire_sample_rows(Relation onerel, int elevel,
if (rowstoskip <= 0)
{
/*
- * Found a suitable tuple, so save it, replacing one
- * old tuple at random
+ * Found a suitable tuple, so save it, replacing one old tuple
+ * at random
*/
- int k = (int) (targrows * anl_random_fract());
+ int k = (int) (targrows * anl_random_fract());
Assert(k >= 0 && k < targrows);
heap_freetuple(rows[k]);
diff --git a/contrib/pg_archivecleanup/pg_archivecleanup.c b/contrib/pg_archivecleanup/pg_archivecleanup.c
index 20977805c8..a226101bbc 100644
--- a/contrib/pg_archivecleanup/pg_archivecleanup.c
+++ b/contrib/pg_archivecleanup/pg_archivecleanup.c
@@ -37,7 +37,7 @@ const char *progname;
/* Options and defaults */
bool debug = false; /* are we debugging? */
bool dryrun = false; /* are we performing a dry-run operation? */
-char *additional_ext = NULL; /* Extension to remove from filenames */
+char *additional_ext = NULL; /* Extension to remove from filenames */
char *archiveLocation; /* where to find the archive? */
char *restartWALFileName; /* the file from which we can restart restore */
@@ -136,12 +136,13 @@ CleanupPriorWALFiles(void)
* they were originally written, in case this worries you.
*/
if (strlen(walfile) == XLOG_DATA_FNAME_LEN &&
- strspn(walfile, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
+ strspn(walfile, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0)
{
- /*
- * Use the original file name again now, including any extension
- * that might have been chopped off before testing the sequence.
+ /*
+ * Use the original file name again now, including any
+ * extension that might have been chopped off before testing
+ * the sequence.
*/
snprintf(WALFilePath, MAXPGPATH, "%s/%s",
archiveLocation, xlde->d_name);
@@ -150,7 +151,7 @@ CleanupPriorWALFiles(void)
{
/*
* Prints the name of the file to be removed and skips the
- * actual removal. The regular printout is so that the
+ * actual removal. The regular printout is so that the
* user can pipe the output into some other program.
*/
printf("%s\n", WALFilePath);
@@ -298,7 +299,8 @@ main(int argc, char **argv)
dryrun = true;
break;
case 'x':
- additional_ext = optarg; /* Extension to remove from xlogfile names */
+ additional_ext = optarg; /* Extension to remove from
+ * xlogfile names */
break;
default:
fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 06869fa344..aa11c144d6 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -103,19 +103,19 @@ typedef struct Counters
int64 calls; /* # of times executed */
double total_time; /* total execution time, in msec */
int64 rows; /* total # of retrieved or affected rows */
- int64 shared_blks_hit; /* # of shared buffer hits */
+ int64 shared_blks_hit; /* # of shared buffer hits */
int64 shared_blks_read; /* # of shared disk blocks read */
int64 shared_blks_dirtied; /* # of shared disk blocks dirtied */
int64 shared_blks_written; /* # of shared disk blocks written */
- int64 local_blks_hit; /* # of local buffer hits */
- int64 local_blks_read; /* # of local disk blocks read */
+ int64 local_blks_hit; /* # of local buffer hits */
+ int64 local_blks_read; /* # of local disk blocks read */
int64 local_blks_dirtied; /* # of local disk blocks dirtied */
int64 local_blks_written; /* # of local disk blocks written */
- int64 temp_blks_read; /* # of temp blocks read */
+ int64 temp_blks_read; /* # of temp blocks read */
int64 temp_blks_written; /* # of temp blocks written */
- double blk_read_time; /* time spent reading, in msec */
- double blk_write_time; /* time spent writing, in msec */
- double usage; /* usage factor */
+ double blk_read_time; /* time spent reading, in msec */
+ double blk_write_time; /* time spent writing, in msec */
+ double usage; /* usage factor */
} Counters;
/*
@@ -140,7 +140,7 @@ typedef struct pgssSharedState
{
LWLockId lock; /* protects hashtable search/modification */
int query_size; /* max query length in bytes */
- double cur_median_usage; /* current median usage in hashtable */
+ double cur_median_usage; /* current median usage in hashtable */
} pgssSharedState;
/*
@@ -150,7 +150,7 @@ typedef struct pgssLocationLen
{
int location; /* start offset in query text */
int length; /* length in bytes, or -1 to ignore */
-} pgssLocationLen;
+} pgssLocationLen;
/*
* Working state for computing a query jumble and producing a normalized
@@ -172,7 +172,7 @@ typedef struct pgssJumbleState
/* Current number of valid entries in clocations array */
int clocations_count;
-} pgssJumbleState;
+} pgssJumbleState;
/*---- Local variables ----*/
@@ -248,21 +248,21 @@ static uint32 pgss_hash_string(const char *str);
static void pgss_store(const char *query, uint32 queryId,
double total_time, uint64 rows,
const BufferUsage *bufusage,
- pgssJumbleState * jstate);
+ pgssJumbleState *jstate);
static Size pgss_memsize(void);
static pgssEntry *entry_alloc(pgssHashKey *key, const char *query,
- int query_len, bool sticky);
+ int query_len, bool sticky);
static void entry_dealloc(void);
static void entry_reset(void);
-static void AppendJumble(pgssJumbleState * jstate,
+static void AppendJumble(pgssJumbleState *jstate,
const unsigned char *item, Size size);
-static void JumbleQuery(pgssJumbleState * jstate, Query *query);
-static void JumbleRangeTable(pgssJumbleState * jstate, List *rtable);
-static void JumbleExpr(pgssJumbleState * jstate, Node *node);
-static void RecordConstLocation(pgssJumbleState * jstate, int location);
-static char *generate_normalized_query(pgssJumbleState * jstate, const char *query,
+static void JumbleQuery(pgssJumbleState *jstate, Query *query);
+static void JumbleRangeTable(pgssJumbleState *jstate, List *rtable);
+static void JumbleExpr(pgssJumbleState *jstate, Node *node);
+static void RecordConstLocation(pgssJumbleState *jstate, int location);
+static char *generate_normalized_query(pgssJumbleState *jstate, const char *query,
int *query_len_p, int encoding);
-static void fill_in_constant_lengths(pgssJumbleState * jstate, const char *query);
+static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query);
static int comp_location(const void *a, const void *b);
@@ -513,8 +513,8 @@ pgss_shmem_startup(void)
FreeFile(file);
/*
- * Remove the file so it's not included in backups/replication
- * slaves, etc. A new file will be written on next shutdown.
+ * Remove the file so it's not included in backups/replication slaves,
+ * etc. A new file will be written on next shutdown.
*/
unlink(PGSS_DUMP_FILE);
@@ -600,7 +600,7 @@ error:
ereport(LOG,
(errcode_for_file_access(),
errmsg("could not write pg_stat_statement file \"%s\": %m",
- PGSS_DUMP_FILE ".tmp")));
+ PGSS_DUMP_FILE ".tmp")));
if (file)
FreeFile(file);
unlink(PGSS_DUMP_FILE ".tmp");
@@ -626,8 +626,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query)
* the statement contains an optimizable statement for which a queryId
* could be derived (such as EXPLAIN or DECLARE CURSOR). For such cases,
* runtime control will first go through ProcessUtility and then the
- * executor, and we don't want the executor hooks to do anything, since
- * we are already measuring the statement's costs at the utility level.
+ * executor, and we don't want the executor hooks to do anything, since we
+ * are already measuring the statement's costs at the utility level.
*/
if (query->utilityStmt)
{
@@ -768,7 +768,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc)
pgss_store(queryDesc->sourceText,
queryId,
- queryDesc->totaltime->total * 1000.0, /* convert to msec */
+ queryDesc->totaltime->total * 1000.0, /* convert to msec */
queryDesc->estate->es_processed,
&queryDesc->totaltime->bufusage,
NULL);
@@ -789,10 +789,9 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
DestReceiver *dest, char *completionTag)
{
/*
- * If it's an EXECUTE statement, we don't track it and don't increment
- * the nesting level. This allows the cycles to be charged to the
- * underlying PREPARE instead (by the Executor hooks), which is much more
- * useful.
+ * If it's an EXECUTE statement, we don't track it and don't increment the
+ * nesting level. This allows the cycles to be charged to the underlying
+ * PREPARE instead (by the Executor hooks), which is much more useful.
*
* We also don't track execution of PREPARE. If we did, we would get one
* hash table entry for the PREPARE (with hash calculated from the query
@@ -942,7 +941,7 @@ static void
pgss_store(const char *query, uint32 queryId,
double total_time, uint64 rows,
const BufferUsage *bufusage,
- pgssJumbleState * jstate)
+ pgssJumbleState *jstate)
{
pgssHashKey key;
pgssEntry *entry;
@@ -1355,7 +1354,7 @@ entry_reset(void)
* the current jumble.
*/
static void
-AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size)
+AppendJumble(pgssJumbleState *jstate, const unsigned char *item, Size size)
{
unsigned char *jumble = jstate->jumble;
Size jumble_len = jstate->jumble_len;
@@ -1404,7 +1403,7 @@ AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size)
* of information).
*/
static void
-JumbleQuery(pgssJumbleState * jstate, Query *query)
+JumbleQuery(pgssJumbleState *jstate, Query *query)
{
Assert(IsA(query, Query));
Assert(query->utilityStmt == NULL);
@@ -1431,7 +1430,7 @@ JumbleQuery(pgssJumbleState * jstate, Query *query)
* Jumble a range table
*/
static void
-JumbleRangeTable(pgssJumbleState * jstate, List *rtable)
+JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
{
ListCell *lc;
@@ -1485,11 +1484,11 @@ JumbleRangeTable(pgssJumbleState * jstate, List *rtable)
*
* Note: the reason we don't simply use expression_tree_walker() is that the
* point of that function is to support tree walkers that don't care about
- * most tree node types, but here we care about all types. We should complain
+ * most tree node types, but here we care about all types. We should complain
* about any unrecognized node type.
*/
static void
-JumbleExpr(pgssJumbleState * jstate, Node *node)
+JumbleExpr(pgssJumbleState *jstate, Node *node)
{
ListCell *temp;
@@ -1874,7 +1873,7 @@ JumbleExpr(pgssJumbleState * jstate, Node *node)
* that is currently being walked.
*/
static void
-RecordConstLocation(pgssJumbleState * jstate, int location)
+RecordConstLocation(pgssJumbleState *jstate, int location)
{
/* -1 indicates unknown or undefined location */
if (location >= 0)
@@ -1909,7 +1908,7 @@ RecordConstLocation(pgssJumbleState * jstate, int location)
* Returns a palloc'd string, which is not necessarily null-terminated.
*/
static char *
-generate_normalized_query(pgssJumbleState * jstate, const char *query,
+generate_normalized_query(pgssJumbleState *jstate, const char *query,
int *query_len_p, int encoding)
{
char *norm_query;
@@ -2007,7 +2006,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query,
* a problem.
*
* Duplicate constant pointers are possible, and will have their lengths
- * marked as '-1', so that they are later ignored. (Actually, we assume the
+ * marked as '-1', so that they are later ignored. (Actually, we assume the
* lengths were initialized as -1 to start with, and don't change them here.)
*
* N.B. There is an assumption that a '-' character at a Const location begins
@@ -2015,7 +2014,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query,
* reason for a constant to start with a '-'.
*/
static void
-fill_in_constant_lengths(pgssJumbleState * jstate, const char *query)
+fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
{
pgssLocationLen *locs;
core_yyscan_t yyscanner;
diff --git a/contrib/pg_test_fsync/pg_test_fsync.c b/contrib/pg_test_fsync/pg_test_fsync.c
index 7f92bc8818..9fe2301e41 100644
--- a/contrib/pg_test_fsync/pg_test_fsync.c
+++ b/contrib/pg_test_fsync/pg_test_fsync.c
@@ -29,7 +29,7 @@
/* These are macros to avoid timing the function call overhead. */
#ifndef WIN32
-#define START_TIMER \
+#define START_TIMER \
do { \
alarm_triggered = false; \
alarm(secs_per_test); \
@@ -37,7 +37,7 @@ do { \
} while (0)
#else
/* WIN32 doesn't support alarm, so we create a thread and sleep there */
-#define START_TIMER \
+#define START_TIMER \
do { \
alarm_triggered = false; \
if (CreateThread(NULL, 0, process_alarm, NULL, 0, NULL) == \
@@ -55,7 +55,7 @@ do { \
gettimeofday(&stop_t, NULL); \
print_elapse(start_t, stop_t, ops); \
} while (0)
-
+
static const char *progname;
@@ -77,6 +77,7 @@ static void test_sync(int writes_per_op);
static void test_open_syncs(void);
static void test_open_sync(const char *msg, int writes_size);
static void test_file_descriptor_sync(void);
+
#ifndef WIN32
static void process_alarm(int sig);
#else
diff --git a/contrib/pg_test_timing/pg_test_timing.c b/contrib/pg_test_timing/pg_test_timing.c
index 4e43694338..b3f98abe5c 100644
--- a/contrib/pg_test_timing/pg_test_timing.c
+++ b/contrib/pg_test_timing/pg_test_timing.c
@@ -1,7 +1,7 @@
/*
* pg_test_timing.c
- * tests overhead of timing calls and their monotonicity: that
- * they always move forward
+ * tests overhead of timing calls and their monotonicity: that
+ * they always move forward
*/
#include "postgres_fe.h"
@@ -35,8 +35,8 @@ handle_args(int argc, char *argv[])
{"duration", required_argument, NULL, 'd'},
{NULL, 0, NULL, 0}
};
- int option; /* Command line option */
- int optindex = 0; /* used by getopt_long */
+ int option; /* Command line option */
+ int optindex = 0; /* used by getopt_long */
if (argc > 1)
{
@@ -87,7 +87,7 @@ handle_args(int argc, char *argv[])
else
{
fprintf(stderr,
- "%s: duration must be a positive integer (duration is \"%d\")\n",
+ "%s: duration must be a positive integer (duration is \"%d\")\n",
progname, test_duration);
fprintf(stderr, "Try \"%s --help\" for more information.\n",
progname);
@@ -98,16 +98,22 @@ handle_args(int argc, char *argv[])
static void
test_timing(int32 duration)
{
- uint64 total_time;
- int64 time_elapsed = 0;
- uint64 loop_count = 0;
- uint64 prev, cur;
- int32 diff, i, bits, found;
-
- instr_time start_time, end_time, temp;
+ uint64 total_time;
+ int64 time_elapsed = 0;
+ uint64 loop_count = 0;
+ uint64 prev,
+ cur;
+ int32 diff,
+ i,
+ bits,
+ found;
+
+ instr_time start_time,
+ end_time,
+ temp;
static int64 histogram[32];
- char buf[100];
+ char buf[100];
total_time = duration > 0 ? duration * 1000000 : 0;
@@ -146,7 +152,7 @@ test_timing(int32 duration)
INSTR_TIME_SUBTRACT(end_time, start_time);
printf("Per loop time including overhead: %0.2f nsec\n",
- INSTR_TIME_GET_DOUBLE(end_time) * 1e9 / loop_count);
+ INSTR_TIME_GET_DOUBLE(end_time) * 1e9 / loop_count);
printf("Histogram of timing durations:\n");
printf("%9s: %10s %9s\n", "< usec", "count", "percent");
diff --git a/contrib/pg_trgm/trgm_gist.c b/contrib/pg_trgm/trgm_gist.c
index 57bce01207..d59c8eb670 100644
--- a/contrib/pg_trgm/trgm_gist.c
+++ b/contrib/pg_trgm/trgm_gist.c
@@ -199,9 +199,9 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
* trigram extraction is relatively CPU-expensive. We must include
* strategy number because trigram extraction depends on strategy.
*
- * The cached structure contains the strategy number, then the input
- * query (starting at a MAXALIGN boundary), then the TRGM value (also
- * starting at a MAXALIGN boundary).
+ * The cached structure contains the strategy number, then the input query
+ * (starting at a MAXALIGN boundary), then the TRGM value (also starting
+ * at a MAXALIGN boundary).
*/
if (cache == NULL ||
strategy != *((StrategyNumber *) cache) ||
@@ -341,8 +341,7 @@ gtrgm_distance(PG_FUNCTION_ARGS)
char *cache = (char *) fcinfo->flinfo->fn_extra;
/*
- * Cache the generated trigrams across multiple calls with the same
- * query.
+ * Cache the generated trigrams across multiple calls with the same query.
*/
if (cache == NULL ||
VARSIZE(cache) != querysize ||
diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c
index 2669c09658..eed4a1eba7 100644
--- a/contrib/pg_upgrade/check.c
+++ b/contrib/pg_upgrade/check.c
@@ -168,7 +168,7 @@ issue_warnings(char *sequence_script_file_name)
SYSTEMQUOTE "\"%s/psql\" --echo-queries "
"--set ON_ERROR_STOP=on "
"--no-psqlrc --port %d --username \"%s\" "
- "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
+ "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
new_cluster.bindir, new_cluster.port, os_info.user,
sequence_script_file_name, UTILITY_LOG_FILE);
unlink(sequence_script_file_name);
@@ -204,7 +204,7 @@ output_completion_banner(char *analyze_script_file_name,
else
pg_log(PG_REPORT,
"Optimizer statistics and free space information are not transferred\n"
- "by pg_upgrade so, once you start the new server, consider running:\n"
+ "by pg_upgrade so, once you start the new server, consider running:\n"
" %s\n\n", analyze_script_file_name);
pg_log(PG_REPORT,
@@ -238,7 +238,8 @@ check_cluster_versions(void)
/*
* We can't allow downgrading because we use the target pg_dumpall, and
- * pg_dumpall cannot operate on new database versions, only older versions.
+ * pg_dumpall cannot operate on new database versions, only older
+ * versions.
*/
if (old_cluster.major_version > new_cluster.major_version)
pg_log(PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
@@ -402,31 +403,31 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
#endif
fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %swith increasing accuracy. When it is done, your system will%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %shave the default level of optimizer statistics.%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo\n\n");
fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo\n\n");
fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sthis script and run:%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s vacuumdb --all %s%s\n", ECHO_QUOTE,
- /* Did we copy the free space files? */
- (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
- "--analyze-only" : "--analyze", ECHO_QUOTE);
+ /* Did we copy the free space files? */
+ (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
+ "--analyze-only" : "--analyze", ECHO_QUOTE);
fprintf(script, "echo\n\n");
#ifndef WIN32
@@ -441,15 +442,15 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
#endif
fprintf(script, "echo %sGenerating minimal optimizer statistics (1 target)%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s--------------------------------------------------%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "vacuumdb --all --analyze-only\n");
fprintf(script, "echo\n");
fprintf(script, "echo %sThe server is now available with minimal optimizer statistics.%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %sQuery performance will be optimal once this script completes.%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo\n\n");
#ifndef WIN32
@@ -462,9 +463,9 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
#endif
fprintf(script, "echo %sGenerating medium optimizer statistics (10 targets)%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s---------------------------------------------------%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "vacuumdb --all --analyze-only\n");
fprintf(script, "echo\n\n");
@@ -475,17 +476,17 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
#endif
fprintf(script, "echo %sGenerating default (full) optimizer statistics (100 targets?)%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "echo %s-------------------------------------------------------------%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fprintf(script, "vacuumdb --all %s\n",
- /* Did we copy the free space files? */
- (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
- "--analyze-only" : "--analyze");
+ /* Did we copy the free space files? */
+ (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
+ "--analyze-only" : "--analyze");
fprintf(script, "echo\n\n");
fprintf(script, "echo %sDone%s\n",
- ECHO_QUOTE, ECHO_QUOTE);
+ ECHO_QUOTE, ECHO_QUOTE);
fclose(script);
@@ -716,8 +717,8 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"Your installation contains \"contrib/isn\" functions which rely on the\n"
- "bigint data type. Your old and new clusters pass bigint values\n"
- "differently so this cluster cannot currently be upgraded. You can\n"
+ "bigint data type. Your old and new clusters pass bigint values\n"
+ "differently so this cluster cannot currently be upgraded. You can\n"
"manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
"\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n"
"the problem functions is in the file:\n"
@@ -764,9 +765,9 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
PGconn *conn = connectToServer(cluster, active_db->db_name);
/*
- * While several relkinds don't store any data, e.g. views, they
- * can be used to define data types of other columns, so we
- * check all relkinds.
+ * While several relkinds don't store any data, e.g. views, they can
+ * be used to define data types of other columns, so we check all
+ * relkinds.
*/
res = executeQueryOrDie(conn,
"SELECT n.nspname, c.relname, a.attname "
@@ -777,16 +778,16 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
" NOT a.attisdropped AND "
" a.atttypid IN ( "
" 'pg_catalog.regproc'::pg_catalog.regtype, "
- " 'pg_catalog.regprocedure'::pg_catalog.regtype, "
+ " 'pg_catalog.regprocedure'::pg_catalog.regtype, "
" 'pg_catalog.regoper'::pg_catalog.regtype, "
- " 'pg_catalog.regoperator'::pg_catalog.regtype, "
+ " 'pg_catalog.regoperator'::pg_catalog.regtype, "
/* regclass.oid is preserved, so 'regclass' is OK */
/* regtype.oid is preserved, so 'regtype' is OK */
- " 'pg_catalog.regconfig'::pg_catalog.regtype, "
- " 'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
- " c.relnamespace = n.oid AND "
- " n.nspname != 'pg_catalog' AND "
- " n.nspname != 'information_schema'");
+ " 'pg_catalog.regconfig'::pg_catalog.regtype, "
+ " 'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
+ " c.relnamespace = n.oid AND "
+ " n.nspname != 'pg_catalog' AND "
+ " n.nspname != 'information_schema'");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
@@ -822,8 +823,8 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"Your installation contains one of the reg* data types in user tables.\n"
- "These data types reference system OIDs that are not preserved by\n"
- "pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
+ "These data types reference system OIDs that are not preserved by\n"
+ "pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
"remove the problem tables and restart the upgrade. A list of the problem\n"
"columns is in the file:\n"
" %s\n\n", output_path);
@@ -836,9 +837,11 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
static void
get_bin_version(ClusterInfo *cluster)
{
- char cmd[MAXPGPATH], cmd_output[MAX_STRING];
+ char cmd[MAXPGPATH],
+ cmd_output[MAX_STRING];
FILE *output;
- int pre_dot, post_dot;
+ int pre_dot,
+ post_dot;
snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir);
@@ -858,4 +861,3 @@ get_bin_version(ClusterInfo *cluster)
cluster->bin_version = (pre_dot * 100 + post_dot) * 100;
}
-
diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c
index e01280db9e..6bffe549e5 100644
--- a/contrib/pg_upgrade/controldata.c
+++ b/contrib/pg_upgrade/controldata.c
@@ -129,6 +129,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
pg_log(PG_VERBOSE, "%s", bufin);
#ifdef WIN32
+
/*
* Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does
* work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a
@@ -506,7 +507,7 @@ check_control_data(ControlData *oldctrl,
* This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
*/
pg_log(PG_FATAL,
- "You will need to rebuild the new server with configure option\n"
+ "You will need to rebuild the new server with configure option\n"
"--disable-integer-datetimes or get server binaries built with those\n"
"options.\n");
}
@@ -531,6 +532,6 @@ disable_old_cluster(void)
pg_log(PG_REPORT, "\n"
"If you want to start the old cluster, you will need to remove\n"
"the \".old\" suffix from %s/global/pg_control.old.\n"
- "Because \"link\" mode was used, the old cluster cannot be safely\n"
- "started once the new cluster has been started.\n\n", old_cluster.pgdata);
+ "Because \"link\" mode was used, the old cluster cannot be safely\n"
+ "started once the new cluster has been started.\n\n", old_cluster.pgdata);
}
diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c
index 68cf0795aa..9e63bd5856 100644
--- a/contrib/pg_upgrade/exec.c
+++ b/contrib/pg_upgrade/exec.c
@@ -18,8 +18,9 @@
static void check_data_dir(const char *pg_data);
static void check_bin_dir(ClusterInfo *cluster);
static void validate_exec(const char *dir, const char *cmdName);
+
#ifdef WIN32
-static int win32_check_directory_write_permissions(void);
+static int win32_check_directory_write_permissions(void);
#endif
@@ -64,7 +65,7 @@ exec_prog(bool throw_error, bool is_priv,
pg_log(throw_error ? PG_FATAL : PG_REPORT,
"Consult the last few lines of \"%s\" for\n"
"the probable cause of the failure.\n",
- log_file);
+ log_file);
return 1;
}
@@ -142,12 +143,12 @@ verify_directories(void)
static int
win32_check_directory_write_permissions(void)
{
- int fd;
+ int fd;
/*
- * We open a file we would normally create anyway. We do this even in
- * 'check' mode, which isn't ideal, but this is the best we can do.
- */
+ * We open a file we would normally create anyway. We do this even in
+ * 'check' mode, which isn't ideal, but this is the best we can do.
+ */
if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0)
return -1;
close(fd);
@@ -184,7 +185,7 @@ check_data_dir(const char *pg_data)
struct stat statBuf;
snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data,
- /* Win32 can't stat() a directory with a trailing slash. */
+ /* Win32 can't stat() a directory with a trailing slash. */
*requiredSubdirs[subdirnum] ? "/" : "",
requiredSubdirs[subdirnum]);
diff --git a/contrib/pg_upgrade/file.c b/contrib/pg_upgrade/file.c
index 0276636e03..1dd3722142 100644
--- a/contrib/pg_upgrade/file.c
+++ b/contrib/pg_upgrade/file.c
@@ -233,7 +233,7 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
* large number of times.
*/
int
-load_directory(const char *dirname, struct dirent ***namelist)
+load_directory(const char *dirname, struct dirent *** namelist)
{
DIR *dirdesc;
struct dirent *direntry;
@@ -251,7 +251,7 @@ load_directory(const char *dirname, struct dirent ***namelist)
count++;
*namelist = (struct dirent **) realloc((void *) (*namelist),
- (size_t) ((name_num + 1) * sizeof(struct dirent *)));
+ (size_t) ((name_num + 1) * sizeof(struct dirent *)));
if (*namelist == NULL)
{
@@ -314,7 +314,6 @@ win32_pghardlink(const char *src, const char *dst)
else
return 0;
}
-
#endif
@@ -322,13 +321,11 @@ win32_pghardlink(const char *src, const char *dst)
FILE *
fopen_priv(const char *path, const char *mode)
{
- mode_t old_umask = umask(S_IRWXG | S_IRWXO);
- FILE *fp;
+ mode_t old_umask = umask(S_IRWXG | S_IRWXO);
+ FILE *fp;
fp = fopen(path, mode);
umask(old_umask);
return fp;
}
-
-
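
fopen_priv() above guarantees 0600 permissions on any file it creates, regardless of the caller's umask. A standalone sketch of the same idiom, assuming POSIX umask() and fopen():

    #include <stdio.h>
    #include <sys/stat.h>

    FILE *
    open_private(const char *path, const char *mode)
    {
        mode_t  old_umask = umask(S_IRWXG | S_IRWXO); /* mask group/other bits */
        FILE   *fp = fopen(path, mode);               /* created as 0600 */

        umask(old_umask);                             /* restore caller's umask */
        return fp;
    }
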
diff --git a/contrib/pg_upgrade/function.c b/contrib/pg_upgrade/function.c
index b4b17badb2..77bd3a0359 100644
--- a/contrib/pg_upgrade/function.c
+++ b/contrib/pg_upgrade/function.c
@@ -133,7 +133,7 @@ get_loadable_libraries(void)
int totaltups;
int dbnum;
bool found_public_plpython_handler = false;
-
+
ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *));
totaltups = 0;
@@ -144,10 +144,10 @@ get_loadable_libraries(void)
PGconn *conn = connectToServer(&old_cluster, active_db->db_name);
/*
- * Fetch all libraries referenced in this DB. We can't exclude
- * the "pg_catalog" schema because, while such functions are not
- * explicitly dumped by pg_dump, they do reference implicit objects
- * that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
+ * Fetch all libraries referenced in this DB. We can't exclude the
+ * "pg_catalog" schema because, while such functions are not
+ * explicitly dumped by pg_dump, they do reference implicit objects
+ * that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
*/
ress[dbnum] = executeQueryOrDie(conn,
"SELECT DISTINCT probin "
@@ -158,26 +158,26 @@ get_loadable_libraries(void)
FirstNormalObjectId);
totaltups += PQntuples(ress[dbnum]);
- /*
- * Systems that install plpython before 8.1 have
- * plpython_call_handler() defined in the "public" schema, causing
- * pg_dumpall to dump it. However that function still references
- * "plpython" (no "2"), so it throws an error on restore. This code
- * checks for the problem function, reports affected databases to the
- * user and explains how to remove them.
- * 8.1 git commit: e0dedd0559f005d60c69c9772163e69c204bac69
- * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php
- * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php
- */
+ /*
+ * Systems that install plpython before 8.1 have
+ * plpython_call_handler() defined in the "public" schema, causing
+ * pg_dumpall to dump it. However that function still references
+ * "plpython" (no "2"), so it throws an error on restore. This code
+ * checks for the problem function, reports affected databases to the
+ * user and explains how to remove them. 8.1 git commit:
+ * e0dedd0559f005d60c69c9772163e69c204bac69
+ * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php
+ * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php
+ */
if (GET_MAJOR_VERSION(old_cluster.major_version) < 901)
{
- PGresult *res;
+ PGresult *res;
res = executeQueryOrDie(conn,
"SELECT 1 "
- "FROM pg_catalog.pg_proc JOIN pg_namespace "
- " ON pronamespace = pg_namespace.oid "
- "WHERE proname = 'plpython_call_handler' AND "
+ "FROM pg_catalog.pg_proc JOIN pg_namespace "
+ " ON pronamespace = pg_namespace.oid "
+ "WHERE proname = 'plpython_call_handler' AND "
"nspname = 'public' AND "
"prolang = 13 /* C */ AND "
"probin = '$libdir/plpython' AND "
@@ -188,23 +188,23 @@ get_loadable_libraries(void)
if (!found_public_plpython_handler)
{
pg_log(PG_WARNING,
- "\nThe old cluster has a \"plpython_call_handler\" function defined\n"
- "in the \"public\" schema which is a duplicate of the one defined\n"
- "in the \"pg_catalog\" schema. You can confirm this by executing\n"
- "in psql:\n"
- "\n"
- " \\df *.plpython_call_handler\n"
- "\n"
- "The \"public\" schema version of this function was created by a\n"
- "pre-8.1 install of plpython, and must be removed for pg_upgrade\n"
- "to complete because it references a now-obsolete \"plpython\"\n"
- "shared object file. You can remove the \"public\" schema version\n"
- "of this function by running the following command:\n"
- "\n"
- " DROP FUNCTION public.plpython_call_handler()\n"
- "\n"
- "in each affected database:\n"
- "\n");
+ "\nThe old cluster has a \"plpython_call_handler\" function defined\n"
+ "in the \"public\" schema which is a duplicate of the one defined\n"
+ "in the \"pg_catalog\" schema. You can confirm this by executing\n"
+ "in psql:\n"
+ "\n"
+ " \\df *.plpython_call_handler\n"
+ "\n"
+ "The \"public\" schema version of this function was created by a\n"
+ "pre-8.1 install of plpython, and must be removed for pg_upgrade\n"
+ "to complete because it references a now-obsolete \"plpython\"\n"
+ "shared object file. You can remove the \"public\" schema version\n"
+ "of this function by running the following command:\n"
+ "\n"
+ " DROP FUNCTION public.plpython_call_handler()\n"
+ "\n"
+ "in each affected database:\n"
+ "\n");
}
pg_log(PG_WARNING, " %s\n", active_db->db_name);
found_public_plpython_handler = true;
@@ -217,9 +217,9 @@ get_loadable_libraries(void)
if (found_public_plpython_handler)
pg_log(PG_FATAL,
- "Remove the problem functions from the old cluster to continue.\n");
-
- totaltups++; /* reserve for pg_upgrade_support */
+ "Remove the problem functions from the old cluster to continue.\n");
+
+ totaltups++; /* reserve for pg_upgrade_support */
/* Allocate what's certainly enough space */
os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *));
@@ -293,17 +293,17 @@ check_loadable_libraries(void)
PGresult *res;
/*
- * In Postgres 9.0, Python 3 support was added, and to do that, a
- * plpython2u language was created with library name plpython2.so
- * as a symbolic link to plpython.so. In Postgres 9.1, only the
- * plpython2.so library was created, and both plpythonu and
- * plpython2u pointing to it. For this reason, any reference to
- * library name "plpython" in an old PG <= 9.1 cluster must look
- * for "plpython2" in the new cluster.
+ * In Postgres 9.0, Python 3 support was added, and to do that, a
+ * plpython2u language was created with library name plpython2.so as a
+ * symbolic link to plpython.so. In Postgres 9.1, only the
+ * plpython2.so library was created, with both plpythonu and plpython2u
+ * pointing to it. For this reason, any reference to library name
+ * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
+ * the new cluster.
*
- * For this case, we could check pg_pltemplate, but that only works
- * for languages, and does not help with function shared objects,
- * so we just do a general fix.
+ * For this case, we could check pg_pltemplate, but that only works
+ * for languages, and does not help with function shared objects, so
+ * we just do a general fix.
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
strcmp(lib, "$libdir/plpython") == 0)
@@ -325,7 +325,7 @@ check_loadable_libraries(void)
/* exit and report missing support library with special message */
if (strcmp(lib, PG_UPGRADE_SUPPORT) == 0)
pg_log(PG_FATAL,
- "The pg_upgrade_support module must be created and installed in the new cluster.\n");
+ "The pg_upgrade_support module must be created and installed in the new cluster.\n");
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
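
The plpython-to-plpython2 rename described above reduces to a string swap before probing the new cluster. A hedged sketch, where old_major_version stands in for GET_MAJOR_VERSION(old_cluster.major_version):

    #include <string.h>

    /* Sketch: old clusters (< 9.1) referencing $libdir/plpython must
     * look for $libdir/plpython2 in the new cluster instead. */
    static const char *
    map_library_name(const char *lib, int old_major_version)
    {
        if (old_major_version < 901 && strcmp(lib, "$libdir/plpython") == 0)
            return "$libdir/plpython2";
        return lib;
    }
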
diff --git a/contrib/pg_upgrade/info.c b/contrib/pg_upgrade/info.c
index 5b2b9eb28c..74b13e782d 100644
--- a/contrib/pg_upgrade/info.c
+++ b/contrib/pg_upgrade/info.c
@@ -57,12 +57,12 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
old_db->db_name, old_rel->reloid, new_rel->reloid);
/*
- * TOAST table names initially match the heap pg_class oid.
- * In pre-8.4, TOAST table names change during CLUSTER; in pre-9.0,
- * TOAST table names change during ALTER TABLE ALTER COLUMN SET TYPE.
- * In >= 9.0, TOAST relation names always use heap table oids, hence
- * we cannot check relation names when upgrading from pre-9.0.
- * Clusters upgraded to 9.0 will get matching TOAST names.
+ * TOAST table names initially match the heap pg_class oid. In
+ * pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, TOAST
+ * table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >=
+ * 9.0, TOAST relation names always use heap table oids, hence we
+ * cannot check relation names when upgrading from pre-9.0. Clusters
+ * upgraded to 9.0 will get matching TOAST names.
*/
if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
@@ -194,16 +194,16 @@ get_db_infos(ClusterInfo *cluster)
char query[QUERY_ALLOC];
snprintf(query, sizeof(query),
- "SELECT d.oid, d.datname, %s "
- "FROM pg_catalog.pg_database d "
- " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
- " ON d.dattablespace = t.oid "
- "WHERE d.datallowconn = true "
+ "SELECT d.oid, d.datname, %s "
+ "FROM pg_catalog.pg_database d "
+ " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
+ " ON d.dattablespace = t.oid "
+ "WHERE d.datallowconn = true "
/* we don't preserve pg_database.oid so we sort by name */
- "ORDER BY 2",
+ "ORDER BY 2",
/* 9.2 removed the spclocation column */
- (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
- "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
+ (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+ "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
res = executeQueryOrDie(conn, "%s", query);
@@ -276,7 +276,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
" ON c.reltablespace = t.oid "
"WHERE relkind IN ('r','t', 'i'%s) AND "
- /* exclude possible orphaned temp tables */
+ /* exclude possible orphaned temp tables */
" ((n.nspname !~ '^pg_temp_' AND "
" n.nspname !~ '^pg_toast_temp_' AND "
" n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND "
diff --git a/contrib/pg_upgrade/option.c b/contrib/pg_upgrade/option.c
index 66a70cac8a..ccf00434d3 100644
--- a/contrib/pg_upgrade/option.c
+++ b/contrib/pg_upgrade/option.c
@@ -56,10 +56,10 @@ parseCommandLine(int argc, char *argv[])
int option; /* Command line option */
int optindex = 0; /* used by getopt_long */
int os_user_effective_id;
- FILE *fp;
- char **filename;
+ FILE *fp;
+ char **filename;
time_t run_time = time(NULL);
-
+
user_opts.transfer_mode = TRANSFER_MODE_COPY;
os_info.progname = get_progname(argv[0]);
@@ -138,11 +138,11 @@ parseCommandLine(int argc, char *argv[])
new_cluster.pgopts = pg_strdup(optarg);
break;
- /*
- * Someday, the port number option could be removed and
- * passed using -o/-O, but that requires postmaster -C
- * to be supported on all old/new versions.
- */
+ /*
+ * Someday, the port number option could be removed and passed
+ * using -o/-O, but that requires postmaster -C to be
+ * supported on all old/new versions.
+ */
case 'p':
if ((old_cluster.port = atoi(optarg)) <= 0)
{
@@ -196,21 +196,21 @@ parseCommandLine(int argc, char *argv[])
/* Start with newline because we might be appending to a file. */
fprintf(fp, "\n"
"-----------------------------------------------------------------\n"
- " pg_upgrade run on %s"
- "-----------------------------------------------------------------\n\n",
- ctime(&run_time));
+ " pg_upgrade run on %s"
+ "-----------------------------------------------------------------\n\n",
+ ctime(&run_time));
fclose(fp);
}
/* Get values from env if not already set */
check_required_directory(&old_cluster.bindir, "PGBINOLD", "-b",
- "old cluster binaries reside");
+ "old cluster binaries reside");
check_required_directory(&new_cluster.bindir, "PGBINNEW", "-B",
- "new cluster binaries reside");
+ "new cluster binaries reside");
check_required_directory(&old_cluster.pgdata, "PGDATAOLD", "-d",
- "old cluster data resides");
+ "old cluster data resides");
check_required_directory(&new_cluster.pgdata, "PGDATANEW", "-D",
- "new cluster data resides");
+ "new cluster data resides");
}
@@ -285,7 +285,7 @@ or\n"), old_cluster.port, new_cluster.port, os_info.user);
*/
static void
check_required_directory(char **dirpath, char *envVarName,
- char *cmdLineOption, char *description)
+ char *cmdLineOption, char *description)
{
if (*dirpath == NULL || strlen(*dirpath) == 0)
{
@@ -322,8 +322,10 @@ void
adjust_data_dir(ClusterInfo *cluster)
{
char filename[MAXPGPATH];
- char cmd[MAXPGPATH], cmd_output[MAX_STRING];
- FILE *fp, *output;
+ char cmd[MAXPGPATH],
+ cmd_output[MAX_STRING];
+ FILE *fp,
+ *output;
/* If there is no postgresql.conf, it can't be a config-only dir */
snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig);
@@ -345,10 +347,9 @@ adjust_data_dir(ClusterInfo *cluster)
CLUSTER_NAME(cluster));
/*
- * We don't have a data directory yet, so we can't check the PG
- * version, so this might fail --- only works for PG 9.2+. If this
- * fails, pg_upgrade will fail anyway because the data files will not
- * be found.
+ * We don't have a data directory yet, so we can't check the PG version;
+ * this might fail because it only works for PG 9.2+. If this fails,
+ * pg_upgrade will fail anyway because the data files will not be found.
*/
snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
cluster->bindir, cluster->pgconfig);
@@ -356,7 +357,7 @@ adjust_data_dir(ClusterInfo *cluster)
if ((output = popen(cmd, "r")) == NULL ||
fgets(cmd_output, sizeof(cmd_output), output) == NULL)
pg_log(PG_FATAL, "Could not get data directory using %s: %s\n",
- cmd, getErrorText(errno));
+ cmd, getErrorText(errno));
pclose(output);
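
adjust_data_dir() above asks the server binary itself for the real data directory, which is just a popen() round trip. A standalone sketch, assuming a PG 9.2+ postmaster that understands -C:

    #include <stdio.h>

    /* Sketch: run "postmaster -C data_directory" and capture the first
     * output line, which is the resolved data directory. */
    static int
    query_data_directory(const char *bindir, const char *pgconfig,
                         char *result, int len)
    {
        char    cmd[1024];
        FILE   *output;

        snprintf(cmd, sizeof(cmd),
                 "\"%s/postmaster\" -D \"%s\" -C data_directory",
                 bindir, pgconfig);
        if ((output = popen(cmd, "r")) == NULL)
            return -1;
        if (fgets(result, len, output) == NULL)
        {
            pclose(output);
            return -1;
        }
        pclose(output);
        return 0;               /* result may still end with a newline */
    }
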
diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c
index 465ecdd6b3..3537fc2bd0 100644
--- a/contrib/pg_upgrade/pg_upgrade.c
+++ b/contrib/pg_upgrade/pg_upgrade.c
@@ -55,7 +55,7 @@ ClusterInfo old_cluster,
new_cluster;
OSInfo os_info;
-char *output_files[] = {
+char *output_files[] = {
SERVER_LOG_FILE,
#ifdef WIN32
/* unique file for pg_ctl start */
@@ -122,11 +122,10 @@ main(int argc, char **argv)
stop_postmaster(false);
/*
- * Most failures happen in create_new_objects(), which has
- * completed at this point. We do this here because it is just
- * before linking, which will link the old and new cluster data
- * files, preventing the old cluster from being safely started
- * once the new cluster is started.
+ * Most failures happen in create_new_objects(), which has completed at
+ * this point. We do this here because it is just before linking, which
+ * will link the old and new cluster data files, preventing the old
+ * cluster from being safely started once the new cluster is started.
*/
if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
disable_old_cluster();
@@ -215,8 +214,8 @@ prepare_new_cluster(void)
exec_prog(true, true, UTILITY_LOG_FILE,
SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
"--all --analyze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
- new_cluster.bindir, new_cluster.port, os_info.user,
- log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
+ new_cluster.bindir, new_cluster.port, os_info.user,
+ log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
check_ok();
/*
@@ -229,8 +228,8 @@ prepare_new_cluster(void)
exec_prog(true, true, UTILITY_LOG_FILE,
SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
"--all --freeze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
- new_cluster.bindir, new_cluster.port, os_info.user,
- log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
+ new_cluster.bindir, new_cluster.port, os_info.user,
+ log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
check_ok();
get_pg_database_relfilenode(&new_cluster);
@@ -252,8 +251,8 @@ prepare_new_databases(void)
/*
* Install support functions in the global-object restore database to
- * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template
- * database so objects we add into 'template1' are not propogated. They
+ * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template
+ * database so objects we add into 'template1' are not propagated. They
* are removed on pg_upgrade exit.
*/
install_support_functions_in_new_db("template1");
@@ -267,7 +266,7 @@ prepare_new_databases(void)
exec_prog(true, true, RESTORE_LOG_FILE,
SYSTEMQUOTE "\"%s/psql\" --echo-queries "
"--set ON_ERROR_STOP=on "
- /* --no-psqlrc prevents AUTOCOMMIT=off */
+ /* --no-psqlrc prevents AUTOCOMMIT=off */
"--no-psqlrc --port %d --username \"%s\" "
"-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
new_cluster.bindir, new_cluster.port, os_info.user,
@@ -453,13 +452,13 @@ set_frozenxids(void)
static void
cleanup(void)
{
-
+
fclose(log_opts.internal);
/* Remove dump and log files? */
if (!log_opts.retain)
{
- char **filename;
+ char **filename;
for (filename = output_files; *filename != NULL; filename++)
unlink(*filename);
diff --git a/contrib/pg_upgrade/pg_upgrade.h b/contrib/pg_upgrade/pg_upgrade.h
index 26aa7bb1d2..d12590ac6b 100644
--- a/contrib/pg_upgrade/pg_upgrade.h
+++ b/contrib/pg_upgrade/pg_upgrade.h
@@ -75,7 +75,7 @@ extern char *output_files[];
#define RM_CMD "rm -f"
#define RMDIR_CMD "rm -rf"
#define SCRIPT_EXT "sh"
-#define ECHO_QUOTE "'"
+#define ECHO_QUOTE "'"
#else
#define pg_copy_file CopyFile
#define pg_mv_file pgrename
@@ -85,7 +85,7 @@ extern char *output_files[];
#define RMDIR_CMD "RMDIR /s/q"
#define SCRIPT_EXT "bat"
#define EXE_EXT ".exe"
-#define ECHO_QUOTE ""
+#define ECHO_QUOTE ""
#endif
#define CLUSTER_NAME(cluster) ((cluster) == &old_cluster ? "old" : \
@@ -98,7 +98,7 @@ extern char *output_files[];
/* postmaster/postgres -b (binary_upgrade) flag added during PG 9.1 development */
#define BINARY_UPGRADE_SERVER_FLAG_CAT_VER 201104251
/*
- * Visibility map changed with this 9.2 commit,
+ * Visibility map changed with this 9.2 commit,
* 8f9fe6edce358f7904e0db119416b4d1080a83aa; pick later catalog version.
*/
#define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031
@@ -114,7 +114,7 @@ typedef struct
Oid reloid; /* relation oid */
Oid relfilenode; /* relation relfile node */
/* relation tablespace path, or "" for the cluster default */
- char tablespace[MAXPGPATH];
+ char tablespace[MAXPGPATH];
} RelInfo;
typedef struct
@@ -222,9 +222,11 @@ typedef struct
ControlData controldata; /* pg_control information */
DbInfoArr dbarr; /* dbinfos array */
char *pgdata; /* pathname for cluster's $PGDATA directory */
- char *pgconfig; /* pathname for cluster's config file directory */
+ char *pgconfig; /* pathname for cluster's config file
+ * directory */
char *bindir; /* pathname for cluster's executable directory */
- char *pgopts; /* options to pass to the server, like pg_ctl -o */
+ char *pgopts; /* options to pass to the server, like pg_ctl
+ * -o */
unsigned short port; /* port number where postmaster is waiting */
uint32 major_version; /* PG_VERSION of cluster */
char major_version_str[64]; /* string PG_VERSION of cluster */
@@ -291,8 +293,8 @@ void check_old_cluster(bool live_check,
void check_new_cluster(void);
void report_clusters_compatible(void);
void issue_warnings(char *sequence_script_file_name);
-void output_completion_banner(char *analyze_script_file_name,
- char *deletion_script_file_name);
+void output_completion_banner(char *analyze_script_file_name,
+ char *deletion_script_file_name);
void check_cluster_versions(void);
void check_cluster_compatibility(bool live_check);
void create_script_for_old_cluster_deletion(char **deletion_script_file_name);
@@ -314,9 +316,10 @@ void split_old_dump(void);
/* exec.c */
-int exec_prog(bool throw_error, bool is_priv,
- const char *log_file, const char *cmd, ...)
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
+int
+exec_prog(bool throw_error, bool is_priv,
+ const char *log_file, const char *cmd,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
void verify_directories(void);
bool is_server_running(const char *datadir);
@@ -353,14 +356,14 @@ const char *setupPageConverter(pageCnvCtx **result);
typedef void *pageCnvCtx;
#endif
-int load_directory(const char *dirname, struct dirent ***namelist);
+int load_directory(const char *dirname, struct dirent *** namelist);
const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
const char *dst, bool force);
const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
const char *dst);
void check_hard_link(void);
-FILE *fopen_priv(const char *path, const char *mode);
+FILE *fopen_priv(const char *path, const char *mode);
/* function.c */
@@ -399,8 +402,9 @@ void init_tablespaces(void);
/* server.c */
PGconn *connectToServer(ClusterInfo *cluster, const char *db_name);
-PGresult *executeQueryOrDie(PGconn *conn, const char *fmt, ...)
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+PGresult *
+executeQueryOrDie(PGconn *conn, const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void start_postmaster(ClusterInfo *cluster);
void stop_postmaster(bool fast);
@@ -413,12 +417,15 @@ void check_pghost_envvar(void);
char *quote_identifier(const char *s);
int get_user_info(char **user_name);
void check_ok(void);
-void report_status(eLogType type, const char *fmt, ...)
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
-void pg_log(eLogType type, char *fmt, ...)
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
-void prep_status(const char *fmt, ...)
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+void
+report_status(eLogType type, const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+void
+pg_log(eLogType type, char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+void
+prep_status(const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
void check_ok(void);
char *pg_strdup(const char *s);
void *pg_malloc(int size);
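
Most of the churn in this header is pgindent reflowing the __attribute__ declarations; the declarations themselves are unchanged. For reference, the general shape of a printf-format-checked prototype (plain GCC attribute shown; PG_PRINTF_ATTRIBUTE is PostgreSQL's portability macro for the archetype name):

    /* The compiler type-checks the varargs against the format string:
     * argument 1 is the format, checking starts at argument 2. */
    extern void my_log(const char *fmt, ...)
                __attribute__((format(printf, 1, 2)));
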
diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c
index 45d6c5415b..3509585de7 100644
--- a/contrib/pg_upgrade/relfilenode.c
+++ b/contrib/pg_upgrade/relfilenode.c
@@ -34,26 +34,28 @@ const char *
transfer_all_new_dbs(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
{
- int old_dbnum, new_dbnum;
+ int old_dbnum,
+ new_dbnum;
const char *msg = NULL;
prep_status("%s user relation files\n",
- user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
+ user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
/* Scan the old cluster databases and transfer their files */
for (old_dbnum = new_dbnum = 0;
old_dbnum < old_db_arr->ndbs;
old_dbnum++, new_dbnum++)
{
- DbInfo *old_db = &old_db_arr->dbs[old_dbnum], *new_db = NULL;
+ DbInfo *old_db = &old_db_arr->dbs[old_dbnum],
+ *new_db = NULL;
FileNameMap *mappings;
int n_maps;
pageCnvCtx *pageConverter = NULL;
/*
- * Advance past any databases that exist in the new cluster
- * but not in the old, e.g. "postgres". (The user might
- * have removed the 'postgres' database from the old cluster.)
+ * Advance past any databases that exist in the new cluster but not in
+ * the old, e.g. "postgres". (The user might have removed the
+ * 'postgres' database from the old cluster.)
*/
for (; new_dbnum < new_db_arr->ndbs; new_dbnum++)
{
@@ -83,8 +85,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
}
}
- prep_status(" "); /* in case nothing printed; pass a space so gcc
- * doesn't complain about empty format
+ prep_status(" "); /* in case nothing printed; pass a space so
+ * gcc doesn't complain about empty format
* string */
check_ok();
@@ -137,14 +139,14 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
int mapnum;
int fileno;
bool vm_crashsafe_change = false;
-
+
old_dir[0] = '\0';
/* Do not copy non-crashsafe vm files for binaries that assume crashsafety */
if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
vm_crashsafe_change = true;
-
+
for (mapnum = 0; mapnum < size; mapnum++)
{
char old_file[MAXPGPATH];
@@ -190,8 +192,8 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
for (fileno = 0; fileno < numFiles; fileno++)
{
- char *vm_offset = strstr(namelist[fileno]->d_name, "_vm");
- bool is_vm_file = false;
+ char *vm_offset = strstr(namelist[fileno]->d_name, "_vm");
+ bool is_vm_file = false;
/* Is a visibility map file? (name ends with _vm) */
if (vm_offset && strlen(vm_offset) == strlen("_vm"))
diff --git a/contrib/pg_upgrade/server.c b/contrib/pg_upgrade/server.c
index f557453df2..f83d6fa866 100644
--- a/contrib/pg_upgrade/server.c
+++ b/contrib/pg_upgrade/server.c
@@ -161,7 +161,7 @@ start_postmaster(ClusterInfo *cluster)
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" "
"-o \"-p %d %s %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE,
- cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
+ cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
(cluster->controldata.cat_ver >=
BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
"-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
@@ -172,11 +172,11 @@ start_postmaster(ClusterInfo *cluster)
* it might supply a reason for the failure.
*/
pg_ctl_return = exec_prog(false, true,
- /* pass both file names if the differ */
- (strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ?
- SERVER_LOG_FILE :
- SERVER_LOG_FILE " or " SERVER_START_LOG_FILE,
- "%s", cmd);
+ /* pass both file names if they differ */
+ (strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ?
+ SERVER_LOG_FILE :
+ SERVER_LOG_FILE " or " SERVER_START_LOG_FILE,
+ "%s", cmd);
/* Check to see if we can connect to the server; if not, report it. */
if ((conn = get_db_conn(cluster, "template1")) == NULL ||
@@ -211,14 +211,14 @@ stop_postmaster(bool fast)
else if (os_info.running_cluster == &new_cluster)
cluster = &new_cluster;
else
- return; /* no cluster running */
+ return; /* no cluster running */
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" "
"%s stop >> \"%s\" 2>&1" SYSTEMQUOTE,
cluster->bindir, cluster->pgconfig,
cluster->pgopts ? cluster->pgopts : "",
- fast ? "-m fast" : "", SERVER_STOP_LOG_FILE);
+ fast ? "-m fast" : "", SERVER_STOP_LOG_FILE);
exec_prog(fast ? false : true, true, SERVER_STOP_LOG_FILE, "%s", cmd);
diff --git a/contrib/pg_upgrade/tablespace.c b/contrib/pg_upgrade/tablespace.c
index 6b61f4bac1..b783b6251e 100644
--- a/contrib/pg_upgrade/tablespace.c
+++ b/contrib/pg_upgrade/tablespace.c
@@ -52,8 +52,8 @@ get_tablespace_paths(void)
"WHERE spcname != 'pg_default' AND "
" spcname != 'pg_global'",
/* 9.2 removed the spclocation column */
- (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
- "spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");
+ (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
+ "spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");
res = executeQueryOrDie(conn, "%s", query);
diff --git a/contrib/pg_upgrade/version_old_8_3.c b/contrib/pg_upgrade/version_old_8_3.c
index 542425c7c9..b681c0984e 100644
--- a/contrib/pg_upgrade/version_old_8_3.c
+++ b/contrib/pg_upgrade/version_old_8_3.c
@@ -60,10 +60,10 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
" NOT a.attisdropped AND "
" a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND "
" c.relnamespace = n.oid AND "
- /* exclude possible orphaned temp tables */
+ /* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
- " n.nspname !~ '^pg_toast_temp_' AND "
- " n.nspname NOT IN ('pg_catalog', 'information_schema')");
+ " n.nspname !~ '^pg_toast_temp_' AND "
+ " n.nspname NOT IN ('pg_catalog', 'information_schema')");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
@@ -98,9 +98,9 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
pg_log(PG_REPORT, "fatal\n");
pg_log(PG_FATAL,
"Your installation contains the \"name\" data type in user tables. This\n"
- "data type changed its internal alignment between your old and new\n"
+ "data type changed its internal alignment between your old and new\n"
"clusters so this cluster cannot currently be upgraded. You can remove\n"
- "the problem tables and restart the upgrade. A list of the problem\n"
+ "the problem tables and restart the upgrade. A list of the problem\n"
"columns is in the file:\n"
" %s\n\n", output_path);
}
@@ -150,10 +150,10 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
" NOT a.attisdropped AND "
" a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND "
" c.relnamespace = n.oid AND "
- /* exclude possible orphaned temp tables */
+ /* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
- " n.nspname !~ '^pg_toast_temp_' AND "
- " n.nspname NOT IN ('pg_catalog', 'information_schema')");
+ " n.nspname !~ '^pg_toast_temp_' AND "
+ " n.nspname NOT IN ('pg_catalog', 'information_schema')");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
@@ -189,7 +189,7 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
pg_log(PG_FATAL,
"Your installation contains the \"tsquery\" data type. This data type\n"
"added a new internal field between your old and new clusters so this\n"
- "cluster cannot currently be upgraded. You can remove the problem\n"
+ "cluster cannot currently be upgraded. You can remove the problem\n"
"columns and restart the upgrade. A list of the problem columns is in the\n"
"file:\n"
" %s\n\n", output_path);
@@ -328,10 +328,10 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
" NOT a.attisdropped AND "
" a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND "
" c.relnamespace = n.oid AND "
- /* exclude possible orphaned temp tables */
+ /* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
- " n.nspname !~ '^pg_toast_temp_' AND "
- " n.nspname NOT IN ('pg_catalog', 'information_schema')");
+ " n.nspname !~ '^pg_toast_temp_' AND "
+ " n.nspname NOT IN ('pg_catalog', 'information_schema')");
/*
* This macro is used below to avoid reindexing indexes already rebuilt
@@ -527,7 +527,7 @@ old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode)
"must be reindexed with the REINDEX command. The file:\n"
" %s\n"
"when executed by psql by the database superuser will recreate all invalid\n"
- "indexes; until then, none of these indexes will be used.\n\n",
+ "indexes; until then, none of these indexes will be used.\n\n",
output_path);
}
else
@@ -648,10 +648,10 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
pg_log(PG_WARNING, "\n"
"Your installation contains indexes using \"bpchar_pattern_ops\". These\n"
"indexes have different internal formats between your old and new clusters\n"
- "so they must be reindexed with the REINDEX command. The file:\n"
+ "so they must be reindexed with the REINDEX command. The file:\n"
" %s\n"
"when executed by psql by the database superuser will recreate all invalid\n"
- "indexes; until then, none of these indexes will be used.\n\n",
+ "indexes; until then, none of these indexes will be used.\n\n",
output_path);
}
else
@@ -699,10 +699,10 @@ old_8_3_create_sequence_script(ClusterInfo *cluster)
" pg_catalog.pg_namespace n "
"WHERE c.relkind = 'S' AND "
" c.relnamespace = n.oid AND "
- /* exclude possible orphaned temp tables */
+ /* exclude possible orphaned temp tables */
" n.nspname !~ '^pg_temp_' AND "
- " n.nspname !~ '^pg_toast_temp_' AND "
- " n.nspname NOT IN ('pg_catalog', 'information_schema')");
+ " n.nspname !~ '^pg_toast_temp_' AND "
+ " n.nspname NOT IN ('pg_catalog', 'information_schema')");
ntups = PQntuples(res);
i_nspname = PQfnumber(res, "nspname");
diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c
index b0e699187b..25fb15a847 100644
--- a/contrib/pgbench/pgbench.c
+++ b/contrib/pgbench/pgbench.c
@@ -66,7 +66,7 @@
typedef struct win32_pthread *pthread_t;
typedef int pthread_attr_t;
-static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
+static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return);
#elif defined(ENABLE_THREAD_SAFETY)
/* Use platform-dependent pthread capability */
@@ -84,7 +84,7 @@ static int pthread_join(pthread_t th, void **thread_return);
typedef struct fork_pthread *pthread_t;
typedef int pthread_attr_t;
-static int pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
+static int pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
static int pthread_join(pthread_t th, void **thread_return);
#endif
@@ -198,7 +198,7 @@ typedef struct
instr_time start_time; /* thread start time */
instr_time *exec_elapsed; /* time spent executing cmds (per Command) */
int *exec_count; /* number of cmd executions (per Command) */
- unsigned short random_state[3]; /* separate randomness for each thread */
+ unsigned short random_state[3]; /* separate randomness for each thread */
} TState;
#define INVALID_THREAD ((pthread_t) 0)
@@ -1075,7 +1075,7 @@ top:
/*
* getrand() needs to be able to subtract max from min and add
- * one the result without overflowing. Since we know max > min,
+ * one to the result without overflowing.  Since we know max > min,
* we can detect overflow just by checking for a negative result.
* But we must check both that the subtraction doesn't overflow,
* and that adding one to the result doesn't overflow either.
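
The overflow rule in that comment can be made concrete. A sketch of the check it describes, not pgbench's exact code:

    #include <stdint.h>

    /* Sketch: with max > min, both (max - min) and (max - min) + 1 must
     * stay non-negative; a negative result signals 64-bit overflow. */
    static int
    random_range_ok(int64_t min, int64_t max)
    {
        return !(max - min < 0 || (max - min) + 1 < 0);
    }
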
@@ -1267,10 +1267,11 @@ init(void)
* versions. Since pgbench has never pretended to be fully TPC-B
* compliant anyway, we stick with the historical behavior.
*/
- struct ddlinfo {
- char *table;
- char *cols;
- int declare_fillfactor;
+ struct ddlinfo
+ {
+ char *table;
+ char *cols;
+ int declare_fillfactor;
};
struct ddlinfo DDLs[] = {
{
@@ -1321,15 +1322,16 @@ init(void)
/* Construct new create table statement. */
opts[0] = '\0';
if (ddl->declare_fillfactor)
- snprintf(opts+strlen(opts), 256-strlen(opts),
- " with (fillfactor=%d)", fillfactor);
+ snprintf(opts + strlen(opts), 256 - strlen(opts),
+ " with (fillfactor=%d)", fillfactor);
if (tablespace != NULL)
{
- char *escape_tablespace;
+ char *escape_tablespace;
+
escape_tablespace = PQescapeIdentifier(con, tablespace,
strlen(tablespace));
- snprintf(opts+strlen(opts), 256-strlen(opts),
- " tablespace %s", escape_tablespace);
+ snprintf(opts + strlen(opts), 256 - strlen(opts),
+ " tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace);
}
snprintf(buffer, 256, "create%s table %s(%s)%s",
@@ -1404,17 +1406,18 @@ init(void)
fprintf(stderr, "set primary key...\n");
for (i = 0; i < lengthof(DDLAFTERs); i++)
{
- char buffer[256];
+ char buffer[256];
strncpy(buffer, DDLAFTERs[i], 256);
if (index_tablespace != NULL)
{
- char *escape_tablespace;
+ char *escape_tablespace;
+
escape_tablespace = PQescapeIdentifier(con, index_tablespace,
strlen(index_tablespace));
- snprintf(buffer+strlen(buffer), 256-strlen(buffer),
- " using index tablespace %s", escape_tablespace);
+ snprintf(buffer + strlen(buffer), 256 - strlen(buffer),
+ " using index tablespace %s", escape_tablespace);
PQfreemem(escape_tablespace);
}
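
The PQescapeIdentifier() calls introduced in these hunks follow libpq's usual contract: the function returns a newly allocated, safely quoted identifier (or NULL on error), and the caller releases it with PQfreemem(). A minimal usage sketch:

    char   *escaped = PQescapeIdentifier(con, tablespace, strlen(tablespace));

    if (escaped == NULL)
        fprintf(stderr, "%s", PQerrorMessage(con));
    else
    {
        snprintf(buffer + strlen(buffer), 256 - strlen(buffer),
                 " using index tablespace %s", escaped);
        PQfreemem(escaped);
    }
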
@@ -1861,10 +1864,10 @@ main(int argc, char **argv)
int i;
static struct option long_options[] = {
- {"index-tablespace", required_argument, NULL, 3},
- {"tablespace", required_argument, NULL, 2},
- {"unlogged-tables", no_argument, &unlogged_tables, 1},
- {NULL, 0, NULL, 0}
+ {"index-tablespace", required_argument, NULL, 3},
+ {"tablespace", required_argument, NULL, 2},
+ {"unlogged-tables", no_argument, &unlogged_tables, 1},
+ {NULL, 0, NULL, 0}
};
#ifdef HAVE_GETRLIMIT
@@ -2065,10 +2068,10 @@ main(int argc, char **argv)
case 0:
/* This covers long options which take no argument. */
break;
- case 2: /* tablespace */
+ case 2: /* tablespace */
tablespace = optarg;
break;
- case 3: /* index-tablespace */
+ case 3: /* index-tablespace */
index_tablespace = optarg;
break;
default:
@@ -2571,7 +2574,7 @@ typedef struct fork_pthread
static int
pthread_create(pthread_t *thread,
- pthread_attr_t * attr,
+ pthread_attr_t *attr,
void *(*start_routine) (void *),
void *arg)
{
@@ -2687,7 +2690,7 @@ win32_pthread_run(void *arg)
static int
pthread_create(pthread_t *thread,
- pthread_attr_t * attr,
+ pthread_attr_t *attr,
void *(*start_routine) (void *),
void *arg)
{
diff --git a/contrib/pgcrypto/crypt-md5.c b/contrib/pgcrypto/crypt-md5.c
index 6c7a2b329e..2a5cd70208 100644
--- a/contrib/pgcrypto/crypt-md5.c
+++ b/contrib/pgcrypto/crypt-md5.c
@@ -34,8 +34,8 @@ char *
px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen)
{
static char *magic = "$1$"; /* This string is magic for this algorithm.
- * Having it this way, we can get better
- * later on */
+ * Having it this way, we can get better later
+ * on */
static char *p;
static const char *sp,
*ep;
diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h
index 610b7fad78..80e8624460 100644
--- a/contrib/pgcrypto/px.h
+++ b/contrib/pgcrypto/px.h
@@ -204,8 +204,9 @@ const char *px_resolve_alias(const PX_Alias *aliases, const char *name);
void px_set_debug_handler(void (*handler) (const char *));
#ifdef PX_DEBUG
-void px_debug(const char *fmt, ...)
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+void
+px_debug(const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
#else
#define px_debug(...)
#endif
diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c
index 9f2ec1f210..d4fc8a0fd6 100644
--- a/contrib/pgstattuple/pgstatindex.c
+++ b/contrib/pgstattuple/pgstatindex.c
@@ -95,7 +95,7 @@ pgstatindex(PG_FUNCTION_ARGS)
BlockNumber nblocks;
BlockNumber blkno;
BTIndexStat indexStat;
- BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
+ BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
if (!superuser())
ereport(ERROR,
@@ -160,7 +160,7 @@ pgstatindex(PG_FUNCTION_ARGS)
CHECK_FOR_INTERRUPTS();
/* Read and lock buffer */
- buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
+ buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
LockBuffer(buffer, BUFFER_LOCK_SHARE);
page = BufferGetPage(buffer);
diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c
index c9be8c92e4..2b62b78506 100644
--- a/contrib/pgstattuple/pgstattuple.c
+++ b/contrib/pgstattuple/pgstattuple.c
@@ -62,7 +62,7 @@ typedef struct pgstattuple_type
} pgstattuple_type;
typedef void (*pgstat_page) (pgstattuple_type *, Relation, BlockNumber,
- BufferAccessStrategy);
+ BufferAccessStrategy);
static Datum build_pgstattuple_type(pgstattuple_type *stat,
FunctionCallInfo fcinfo);
diff --git a/contrib/sepgsql/database.c b/contrib/sepgsql/database.c
index 0c395c42a3..5a4246752a 100644
--- a/contrib/sepgsql/database.c
+++ b/contrib/sepgsql/database.c
@@ -32,19 +32,19 @@ void
sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
{
Relation rel;
- ScanKeyData skey;
- SysScanDesc sscan;
+ ScanKeyData skey;
+ SysScanDesc sscan;
HeapTuple tuple;
char *tcontext;
char *ncontext;
char audit_name[NAMEDATALEN + 20];
- ObjectAddress object;
- Form_pg_database datForm;
+ ObjectAddress object;
+ Form_pg_database datForm;
/*
- * Oid of the source database is not saved in pg_database catalog,
- * so we collect its identifier using contextual information.
- * If NULL, its default is "template1" according to createdb().
+ * The OID of the source database is not saved in the pg_database catalog,
+ * so we collect its identifier using contextual information. If NULL, the
+ * default is "template1", according to createdb().
*/
if (!dtemplate)
dtemplate = "template1";
@@ -56,6 +56,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
tcontext = sepgsql_get_label(object.classId,
object.objectId,
object.objectSubId);
+
/*
* check db_database:{getattr} permission
*/
@@ -67,11 +68,11 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
true);
/*
- * Compute a default security label of the newly created database
- * based on a pair of security label of client and source database.
+ * Compute a default security label for the newly created database based
+ * on the security labels of the client and the source database.
*
- * XXX - uncoming version of libselinux supports to take object
- * name to handle special treatment on default security label.
+ * XXX - an upcoming version of libselinux will support taking the object
+ * name to handle special treatment of the default security label.
*/
rel = heap_open(DatabaseRelationId, AccessShareLock);
@@ -91,6 +92,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext,
SEPG_CLASS_DB_DATABASE);
+
/*
* check db_database:{create} permission
*/
@@ -126,8 +128,8 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
void
sepgsql_database_drop(Oid databaseId)
{
- ObjectAddress object;
- char *audit_name;
+ ObjectAddress object;
+ char *audit_name;
/*
* check db_database:{drop} permission
@@ -153,8 +155,8 @@ sepgsql_database_drop(Oid databaseId)
void
sepgsql_database_relabel(Oid databaseId, const char *seclabel)
{
- ObjectAddress object;
- char *audit_name;
+ ObjectAddress object;
+ char *audit_name;
object.classId = DatabaseRelationId;
object.objectId = databaseId;
@@ -170,6 +172,7 @@ sepgsql_database_relabel(Oid databaseId, const char *seclabel)
SEPG_DB_DATABASE__RELABELFROM,
audit_name,
true);
+
/*
* check db_database:{relabelto} permission
*/
diff --git a/contrib/sepgsql/dml.c b/contrib/sepgsql/dml.c
index 17aa41cf4e..47a1087417 100644
--- a/contrib/sepgsql/dml.c
+++ b/contrib/sepgsql/dml.c
@@ -150,7 +150,7 @@ check_relation_privileges(Oid relOid,
uint32 required,
bool abort)
{
- ObjectAddress object;
+ ObjectAddress object;
char *audit_name;
Bitmapset *columns;
int index;
diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c
index ffa078677c..914519109c 100644
--- a/contrib/sepgsql/hooks.c
+++ b/contrib/sepgsql/hooks.c
@@ -52,9 +52,9 @@ typedef struct
* command. Elsewhere (including the case of default) NULL.
*/
const char *createdb_dtemplate;
-} sepgsql_context_info_t;
+} sepgsql_context_info_t;
-static sepgsql_context_info_t sepgsql_context_info;
+static sepgsql_context_info_t sepgsql_context_info;
/*
* GUC: sepgsql.permissive = (on|off)
@@ -101,7 +101,7 @@ sepgsql_object_access(ObjectAccessType access,
{
case DatabaseRelationId:
sepgsql_database_post_create(objectId,
- sepgsql_context_info.createdb_dtemplate);
+ sepgsql_context_info.createdb_dtemplate);
break;
case NamespaceRelationId:
@@ -115,9 +115,8 @@ sepgsql_object_access(ObjectAccessType access,
* All cases we want to apply permission checks on
* creation of a new relation are invocation of the
* heap_create_with_catalog via DefineRelation or
- * OpenIntoRel.
- * Elsewhere, we need neither assignment of security
- * label nor permission checks.
+ * OpenIntoRel. Elsewhere, we need neither assignment
+ * of security label nor permission checks.
*/
switch (sepgsql_context_info.cmdtype)
{
@@ -150,12 +149,12 @@ sepgsql_object_access(ObjectAccessType access,
case OAT_DROP:
{
- ObjectAccessDrop *drop_arg = (ObjectAccessDrop *)arg;
+ ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg;
/*
- * No need to apply permission checks on object deletion
- * due to internal cleanups; such as removal of temporary
- * database object on session closed.
+ * No need to apply permission checks on object deletion due
+ * to internal cleanups, such as removal of temporary database
+ * objects at session close.
*/
if ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL) != 0)
break;
@@ -219,13 +218,13 @@ sepgsql_exec_check_perms(List *rangeTabls, bool abort)
/*
* sepgsql_executor_start
*
- * It saves contextual information during ExecutorStart to distinguish
+ * It saves contextual information during ExecutorStart to distinguish
* cases with and without permission checks later.
*/
static void
sepgsql_executor_start(QueryDesc *queryDesc, int eflags)
{
- sepgsql_context_info_t saved_context_info = sepgsql_context_info;
+ sepgsql_context_info_t saved_context_info = sepgsql_context_info;
PG_TRY();
{
@@ -270,28 +269,29 @@ sepgsql_utility_command(Node *parsetree,
DestReceiver *dest,
char *completionTag)
{
- sepgsql_context_info_t saved_context_info = sepgsql_context_info;
- ListCell *cell;
+ sepgsql_context_info_t saved_context_info = sepgsql_context_info;
+ ListCell *cell;
PG_TRY();
{
/*
* Check command tag to avoid nefarious operations, and save the
- * current contextual information to determine whether we should
- * apply permission checks here, or not.
+ * current contextual information to determine whether we should apply
+ * permission checks here, or not.
*/
sepgsql_context_info.cmdtype = nodeTag(parsetree);
switch (nodeTag(parsetree))
{
case T_CreatedbStmt:
+
/*
* We hope to reference the name of the source database, but it
* does not appear in the system catalogs, so we save it here.
*/
- foreach (cell, ((CreatedbStmt *) parsetree)->options)
+ foreach(cell, ((CreatedbStmt *) parsetree)->options)
{
- DefElem *defel = (DefElem *) lfirst(cell);
+ DefElem *defel = (DefElem *) lfirst(cell);
if (strcmp(defel->defname, "template") == 0)
{
@@ -303,6 +303,7 @@ sepgsql_utility_command(Node *parsetree,
break;
case T_LoadStmt:
+
/*
* We reject the LOAD command across the board in enforcing mode,
* because a binary module can arbitrarily override hooks.
@@ -315,6 +316,7 @@ sepgsql_utility_command(Node *parsetree,
}
break;
default:
+
/*
* Right now we don't check any other utility commands,
* because it needs more detailed information to make access
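
Both sepgsql_executor_start() and sepgsql_utility_command() in this file guard sepgsql_context_info with the same save/restore discipline. A sketch of the idiom, using PostgreSQL's PG_TRY machinery as the hunks above do:

    sepgsql_context_info_t saved_context_info = sepgsql_context_info;

    PG_TRY();
    {
        /* ... set sepgsql_context_info fields, then invoke the next
         * hook or the standard executor/utility routine ... */
    }
    PG_CATCH();
    {
        sepgsql_context_info = saved_context_info;  /* restore on error */
        PG_RE_THROW();
    }
    PG_END_TRY();
    sepgsql_context_info = saved_context_info;      /* restore on success */
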
diff --git a/contrib/sepgsql/label.c b/contrib/sepgsql/label.c
index 85f4efe072..23577b5844 100644
--- a/contrib/sepgsql/label.c
+++ b/contrib/sepgsql/label.c
@@ -58,17 +58,18 @@ static fmgr_hook_type next_fmgr_hook = NULL;
* we use the list client_label_pending of pending_label to keep track of which
* labels were set during the (sub-)transactions.
*/
-static char *client_label_peer = NULL; /* set by getpeercon(3) */
-static List *client_label_pending = NIL; /* pending list being set by
- * sepgsql_setcon() */
-static char *client_label_committed = NULL; /* set by sepgsql_setcon(),
- * and already committed */
-static char *client_label_func = NULL; /* set by trusted procedure */
-
-typedef struct {
- SubTransactionId subid;
- char *label;
-} pending_label;
+static char *client_label_peer = NULL; /* set by getpeercon(3) */
+static List *client_label_pending = NIL; /* pending list being set by
+ * sepgsql_setcon() */
+static char *client_label_committed = NULL; /* set by sepgsql_setcon(),
+ * and already committed */
+static char *client_label_func = NULL; /* set by trusted procedure */
+
+typedef struct
+{
+ SubTransactionId subid;
+ char *label;
+} pending_label;
/*
* sepgsql_get_client_label
@@ -87,7 +88,7 @@ sepgsql_get_client_label(void)
/* uncommitted sepgsql_setcon() value */
if (client_label_pending)
{
- pending_label *plabel = llast(client_label_pending);
+ pending_label *plabel = llast(client_label_pending);
if (plabel->label)
return plabel->label;
@@ -104,16 +105,16 @@ sepgsql_get_client_label(void)
* sepgsql_set_client_label
*
* This routine tries to switch the current security label of the client, and
- * checks related permissions. The supplied new label shall be added to the
+ * checks related permissions. The supplied new label shall be added to the
* client_label_pending list, then saved at transaction-commit time to ensure
* transaction-awareness.
*/
static void
sepgsql_set_client_label(const char *new_label)
{
- const char *tcontext;
- MemoryContext oldcxt;
- pending_label *plabel;
+ const char *tcontext;
+ MemoryContext oldcxt;
+ pending_label *plabel;
/* Reset to the initial client label, if NULL */
if (!new_label)
@@ -140,9 +141,10 @@ sepgsql_set_client_label(const char *new_label)
SEPG_PROCESS__DYNTRANSITION,
NULL,
true);
+
/*
- * Append the supplied new_label on the pending list until
- * the current transaction is committed.
+ * Append the supplied new_label to the pending list until the current
+ * transaction is committed.
*/
oldcxt = MemoryContextSwitchTo(CurTransactionContext);
@@ -158,7 +160,7 @@ sepgsql_set_client_label(const char *new_label)
/*
* sepgsql_xact_callback
*
- * A callback routine of transaction commit/abort/prepare. Commmit or abort
+ * A callback routine of transaction commit/abort/prepare.  Commit or abort
* changes in the client_label_pending list.
*/
static void
@@ -168,8 +170,8 @@ sepgsql_xact_callback(XactEvent event, void *arg)
{
if (client_label_pending != NIL)
{
- pending_label *plabel = llast(client_label_pending);
- char *new_label;
+ pending_label *plabel = llast(client_label_pending);
+ char *new_label;
if (plabel->label)
new_label = MemoryContextStrdup(TopMemoryContext,
@@ -181,10 +183,11 @@ sepgsql_xact_callback(XactEvent event, void *arg)
pfree(client_label_committed);
client_label_committed = new_label;
+
/*
- * XXX - Note that items of client_label_pending are allocated
- * on CurTransactionContext, thus, all acquired memory region
- * shall be released implicitly.
+ * XXX - Note that items of client_label_pending are allocated on
+ * CurTransactionContext, thus, all acquired memory region shall
+ * be released implicitly.
*/
client_label_pending = NIL;
}
@@ -212,7 +215,8 @@ sepgsql_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
prev = NULL;
for (cell = list_head(client_label_pending); cell; cell = next)
{
- pending_label *plabel = lfirst(cell);
+ pending_label *plabel = lfirst(cell);
+
next = lnext(cell);
if (plabel->subid == mySubid)
@@ -272,7 +276,7 @@ sepgsql_client_auth(Port *port, int status)
static bool
sepgsql_needs_fmgr_hook(Oid functionId)
{
- ObjectAddress object;
+ ObjectAddress object;
if (next_needs_fmgr_hook &&
(*next_needs_fmgr_hook) (functionId))
@@ -340,8 +344,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
/*
* process:transition permission between old and new label,
- * when user tries to switch security label of the client
- * on execution of trusted procedure.
+ * when user tries to switch security label of the client on
+ * execution of trusted procedure.
*/
if (stack->new_label)
sepgsql_avc_check_perms_label(stack->new_label,
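
The transaction-awareness machinery above amounts to: queue a label on set, promote it on commit, discard it on abort. A condensed sketch of the commit path, mirroring sepgsql_xact_callback() in the hunks above rather than replacing it:

    if (event == XACT_EVENT_COMMIT && client_label_pending != NIL)
    {
        pending_label *plabel = llast(client_label_pending);
        char          *new_label = NULL;

        if (plabel->label)
            new_label = MemoryContextStrdup(TopMemoryContext,
                                            plabel->label);
        if (client_label_committed)
            pfree(client_label_committed);
        client_label_committed = new_label;

        /* Entries live in CurTransactionContext, so their memory is
         * released implicitly at transaction end. */
        client_label_pending = NIL;
    }
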
diff --git a/contrib/sepgsql/proc.c b/contrib/sepgsql/proc.c
index 1efbc906c6..b68314d878 100644
--- a/contrib/sepgsql/proc.c
+++ b/contrib/sepgsql/proc.c
@@ -42,9 +42,9 @@ sepgsql_proc_post_create(Oid functionId)
char *tcontext;
char *ncontext;
int i;
- StringInfoData audit_name;
- ObjectAddress object;
- Form_pg_proc proForm;
+ StringInfoData audit_name;
+ ObjectAddress object;
+ Form_pg_proc proForm;
/*
* Fetch namespace of the new procedure. Because pg_proc entry is not
@@ -77,6 +77,7 @@ sepgsql_proc_post_create(Oid functionId)
SEPG_DB_SCHEMA__ADD_NAME,
getObjectDescription(&object),
true);
+
/*
* XXX - db_language:{implement} also should be checked here
*/
@@ -97,9 +98,10 @@ sepgsql_proc_post_create(Oid functionId)
*/
initStringInfo(&audit_name);
appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname));
- for (i=0; i < proForm->pronargs; i++)
+ for (i = 0; i < proForm->pronargs; i++)
{
- Oid typeoid = proForm->proargtypes.values[i];
+ Oid typeoid = proForm->proargtypes.values[i];
+
if (i > 0)
appendStringInfoChar(&audit_name, ',');
appendStringInfoString(&audit_name, format_type_be(typeoid));
@@ -111,6 +113,7 @@ sepgsql_proc_post_create(Oid functionId)
SEPG_DB_PROCEDURE__CREATE,
audit_name.data,
true);
+
/*
* Assign the default security label on a new procedure
*/
@@ -138,8 +141,8 @@ sepgsql_proc_post_create(Oid functionId)
void
sepgsql_proc_drop(Oid functionId)
{
- ObjectAddress object;
- char *audit_name;
+ ObjectAddress object;
+ char *audit_name;
/*
* check db_schema:{remove_name} permission
@@ -156,19 +159,19 @@ sepgsql_proc_drop(Oid functionId)
true);
pfree(audit_name);
- /*
- * check db_procedure:{drop} permission
- */
+ /*
+ * check db_procedure:{drop} permission
+ */
object.classId = ProcedureRelationId;
object.objectId = functionId;
object.objectSubId = 0;
audit_name = getObjectDescription(&object);
- sepgsql_avc_check_perms(&object,
- SEPG_CLASS_DB_PROCEDURE,
- SEPG_DB_PROCEDURE__DROP,
- audit_name,
- true);
+ sepgsql_avc_check_perms(&object,
+ SEPG_CLASS_DB_PROCEDURE,
+ SEPG_DB_PROCEDURE__DROP,
+ audit_name,
+ true);
pfree(audit_name);
}
@@ -181,8 +184,8 @@ sepgsql_proc_drop(Oid functionId)
void
sepgsql_proc_relabel(Oid functionId, const char *seclabel)
{
- ObjectAddress object;
- char *audit_name;
+ ObjectAddress object;
+ char *audit_name;
object.classId = ProcedureRelationId;
object.objectId = functionId;
@@ -198,6 +201,7 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
SEPG_DB_PROCEDURE__RELABELFROM,
audit_name,
true);
+
/*
* check db_procedure:{relabelto} permission
*/
diff --git a/contrib/sepgsql/relation.c b/contrib/sepgsql/relation.c
index 259be49268..e759a7d98e 100644
--- a/contrib/sepgsql/relation.c
+++ b/contrib/sepgsql/relation.c
@@ -44,9 +44,9 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
char *scontext;
char *tcontext;
char *ncontext;
- char audit_name[2*NAMEDATALEN + 20];
+ char audit_name[2 * NAMEDATALEN + 20];
ObjectAddress object;
- Form_pg_attribute attForm;
+ Form_pg_attribute attForm;
/*
* Only attributes within regular relation have individual security
@@ -84,6 +84,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
ncontext = sepgsql_compute_create(scontext, tcontext,
SEPG_CLASS_DB_COLUMN);
+
/*
* check db_column:{create} permission
*/
@@ -118,8 +119,8 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
void
sepgsql_attribute_drop(Oid relOid, AttrNumber attnum)
{
- ObjectAddress object;
- char *audit_name;
+ ObjectAddress object;
+ char *audit_name;
if (get_rel_relkind(relOid) != RELKIND_RELATION)
return;
@@ -151,7 +152,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
const char *seclabel)
{
ObjectAddress object;
- char *audit_name;
+ char *audit_name;
if (get_rel_relkind(relOid) != RELKIND_RELATION)
ereport(ERROR,
@@ -172,6 +173,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
SEPG_DB_COLUMN__RELABELFROM,
audit_name,
true);
+
/*
* check db_column:{relabelto} permission
*/
@@ -203,7 +205,7 @@ sepgsql_relation_post_create(Oid relOid)
char *tcontext; /* schema */
char *rcontext; /* relation */
char *ccontext; /* column */
- char audit_name[2*NAMEDATALEN + 20];
+ char audit_name[2 * NAMEDATALEN + 20];
/*
* Fetch catalog record of the new relation. Because pg_class entry is not
@@ -254,6 +256,7 @@ sepgsql_relation_post_create(Oid relOid)
SEPG_DB_SCHEMA__ADD_NAME,
getObjectDescription(&object),
true);
+
/*
* Compute a default security label when we create a new relation object
* under the specified namespace.
@@ -273,6 +276,7 @@ sepgsql_relation_post_create(Oid relOid)
SEPG_DB_DATABASE__CREATE,
audit_name,
true);
+
/*
* Assign the default security label on the new relation
*/
@@ -288,10 +292,10 @@ sepgsql_relation_post_create(Oid relOid)
if (classForm->relkind == RELKIND_RELATION)
{
Relation arel;
- ScanKeyData akey;
- SysScanDesc ascan;
+ ScanKeyData akey;
+ SysScanDesc ascan;
HeapTuple atup;
- Form_pg_attribute attForm;
+ Form_pg_attribute attForm;
arel = heap_open(AttributeRelationId, AccessShareLock);
@@ -315,6 +319,7 @@ sepgsql_relation_post_create(Oid relOid)
ccontext = sepgsql_compute_create(scontext,
rcontext,
SEPG_CLASS_DB_COLUMN);
+
/*
* check db_column:{create} permission
*/
@@ -348,10 +353,10 @@ out:
void
sepgsql_relation_drop(Oid relOid)
{
- ObjectAddress object;
- char *audit_name;
- uint16_t tclass = 0;
- char relkind;
+ ObjectAddress object;
+ char *audit_name;
+ uint16_t tclass = 0;
+ char relkind;
relkind = get_rel_relkind(relOid);
if (relkind == RELKIND_RELATION)
@@ -398,13 +403,13 @@ sepgsql_relation_drop(Oid relOid)
*/
if (relkind == RELKIND_RELATION)
{
- Form_pg_attribute attForm;
+ Form_pg_attribute attForm;
CatCList *attrList;
HeapTuple atttup;
int i;
attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid));
- for (i=0; i < attrList->n_members; i++)
+ for (i = 0; i < attrList->n_members; i++)
{
atttup = &attrList->members[i]->tuple;
attForm = (Form_pg_attribute) GETSTRUCT(atttup);
@@ -436,7 +441,7 @@ sepgsql_relation_drop(Oid relOid)
void
sepgsql_relation_relabel(Oid relOid, const char *seclabel)
{
- ObjectAddress object;
+ ObjectAddress object;
char *audit_name;
char relkind;
uint16_t tclass = 0;
@@ -468,6 +473,7 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel)
SEPG_DB_TABLE__RELABELFROM,
audit_name,
true);
+
/*
* check db_xxx:{relabelto} permission
*/
diff --git a/contrib/sepgsql/schema.c b/contrib/sepgsql/schema.c
index 31d60efe18..230449dc4b 100644
--- a/contrib/sepgsql/schema.c
+++ b/contrib/sepgsql/schema.c
@@ -35,22 +35,22 @@ void
sepgsql_schema_post_create(Oid namespaceId)
{
Relation rel;
- ScanKeyData skey;
- SysScanDesc sscan;
+ ScanKeyData skey;
+ SysScanDesc sscan;
HeapTuple tuple;
char *tcontext;
char *ncontext;
char audit_name[NAMEDATALEN + 20];
- ObjectAddress object;
- Form_pg_namespace nspForm;
+ ObjectAddress object;
+ Form_pg_namespace nspForm;
/*
* Compute a default security label when we create a new schema object
* under the working database.
*
- * XXX - uncoming version of libselinux supports to take object
- * name to handle special treatment on default security label;
- * such as special label on "pg_temp" schema.
+	 * XXX - an upcoming version of libselinux will support taking the object
+	 * name to handle special treatment of default security labels, such as a
+	 * special label on the "pg_temp" schema.
*/
rel = heap_open(NamespaceRelationId, AccessShareLock);
@@ -71,6 +71,7 @@ sepgsql_schema_post_create(Oid namespaceId)
ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
tcontext,
SEPG_CLASS_DB_SCHEMA);
+
/*
* check db_schema:{create}
*/
@@ -104,8 +105,8 @@ sepgsql_schema_post_create(Oid namespaceId)
void
sepgsql_schema_drop(Oid namespaceId)
{
- ObjectAddress object;
- char *audit_name;
+ ObjectAddress object;
+ char *audit_name;
/*
* check db_schema:{drop} permission
@@ -116,7 +117,7 @@ sepgsql_schema_drop(Oid namespaceId)
audit_name = getObjectDescription(&object);
sepgsql_avc_check_perms(&object,
- SEPG_CLASS_DB_SCHEMA,
+ SEPG_CLASS_DB_SCHEMA,
SEPG_DB_SCHEMA__DROP,
audit_name,
true);
@@ -132,8 +133,8 @@ sepgsql_schema_drop(Oid namespaceId)
void
sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
{
- ObjectAddress object;
- char *audit_name;
+ ObjectAddress object;
+ char *audit_name;
object.classId = NamespaceRelationId;
object.objectId = namespaceId;
@@ -149,6 +150,7 @@ sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
SEPG_DB_SCHEMA__RELABELFROM,
audit_name,
true);
+
/*
* check db_schema:{relabelto} permission
*/
diff --git a/contrib/sepgsql/sepgsql.h b/contrib/sepgsql/sepgsql.h
index 708d4ee656..479b136909 100644
--- a/contrib/sepgsql/sepgsql.h
+++ b/contrib/sepgsql/sepgsql.h
@@ -248,20 +248,21 @@ extern bool sepgsql_check_perms(const char *scontext,
uint32 required,
const char *audit_name,
bool abort);
+
/*
* uavc.c
*/
#define SEPGSQL_AVC_NOAUDIT ((void *)(-1))
extern bool sepgsql_avc_check_perms_label(const char *tcontext,
- uint16 tclass,
- uint32 required,
- const char *audit_name,
- bool abort);
+ uint16 tclass,
+ uint32 required,
+ const char *audit_name,
+ bool abort);
extern bool sepgsql_avc_check_perms(const ObjectAddress *tobject,
- uint16 tclass,
- uint32 required,
- const char *audit_name,
- bool abort);
+ uint16 tclass,
+ uint32 required,
+ const char *audit_name,
+ bool abort);
extern char *sepgsql_avc_trusted_proc(Oid functionId);
extern void sepgsql_avc_init(void);
@@ -269,7 +270,7 @@ extern void sepgsql_avc_init(void);
* label.c
*/
extern char *sepgsql_get_client_label(void);
-extern void sepgsql_init_client_label(void);
+extern void sepgsql_init_client_label(void);
extern char *sepgsql_get_label(Oid relOid, Oid objOid, int32 subId);
extern void sepgsql_object_relabel(const ObjectAddress *object,
@@ -290,7 +291,7 @@ extern bool sepgsql_dml_privileges(List *rangeTabls, bool abort);
* database.c
*/
extern void sepgsql_database_post_create(Oid databaseId,
- const char *dtemplate);
+ const char *dtemplate);
extern void sepgsql_database_drop(Oid databaseId);
extern void sepgsql_database_relabel(Oid databaseId, const char *seclabel);
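
One detail worth noting in this header is the SEPGSQL_AVC_NOAUDIT sentinel: passing it as audit_name suppresses audit-log generation, per the audit_name != SEPGSQL_AVC_NOAUDIT test in uavc.c below. A hedged sketch of such a silent probe (SEPG_DB_PROCEDURE__EXECUTE is assumed by analogy with the permission constants elsewhere in this patch):

	/* Sketch only: probe a permission without emitting an audit record. */
	bool		allowed;

	allowed = sepgsql_avc_check_perms_label(tcontext,
											SEPG_CLASS_DB_PROCEDURE,
											SEPG_DB_PROCEDURE__EXECUTE,
											SEPGSQL_AVC_NOAUDIT,
											false);		/* report, don't abort */
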
diff --git a/contrib/sepgsql/uavc.c b/contrib/sepgsql/uavc.c
index 905f87dfc8..9641a17d79 100644
--- a/contrib/sepgsql/uavc.c
+++ b/contrib/sepgsql/uavc.c
@@ -30,22 +30,22 @@
*/
typedef struct
{
- uint32 hash; /* hash value of this cache entry */
- char *scontext; /* security context of the subject */
- char *tcontext; /* security context of the target */
- uint16 tclass; /* object class of the target */
+ uint32 hash; /* hash value of this cache entry */
+ char *scontext; /* security context of the subject */
+ char *tcontext; /* security context of the target */
+ uint16 tclass; /* object class of the target */
- uint32 allowed; /* permissions to be allowed */
- uint32 auditallow; /* permissions to be audited on allowed */
- uint32 auditdeny; /* permissions to be audited on denied */
+ uint32 allowed; /* permissions to be allowed */
+ uint32 auditallow; /* permissions to be audited on allowed */
+ uint32 auditdeny; /* permissions to be audited on denied */
- bool permissive; /* true, if permissive rule */
- bool hot_cache; /* true, if recently referenced */
+ bool permissive; /* true, if permissive rule */
+ bool hot_cache; /* true, if recently referenced */
bool tcontext_is_valid;
- /* true, if tcontext is valid */
- char *ncontext; /* temporary scontext on execution of trusted
- * procedure, or NULL elsewhere */
-} avc_cache;
+ /* true, if tcontext is valid */
+ char *ncontext; /* temporary scontext on execution of trusted
+ * procedure, or NULL elsewhere */
+} avc_cache;
/*
* Declaration of static variables
@@ -54,12 +54,12 @@ typedef struct
#define AVC_NUM_RECLAIM 16
#define AVC_DEF_THRESHOLD 384
-static MemoryContext avc_mem_cxt;
-static List *avc_slots[AVC_NUM_SLOTS]; /* avc's hash buckets */
-static int avc_num_caches; /* number of caches currently used */
-static int avc_lru_hint; /* index of the buckets to be reclaimed next */
-static int avc_threshold; /* threshold to launch cache-reclaiming */
-static char *avc_unlabeled; /* system 'unlabeled' label */
+static MemoryContext avc_mem_cxt;
+static List *avc_slots[AVC_NUM_SLOTS]; /* avc's hash buckets */
+static int avc_num_caches; /* number of caches currently used */
+static int avc_lru_hint; /* index of the buckets to be reclaimed next */
+static int avc_threshold; /* threshold to launch cache-reclaiming */
+static char *avc_unlabeled; /* system 'unlabeled' label */
/*
* Hash function
@@ -67,8 +67,8 @@ static char *avc_unlabeled; /* system 'unlabeled' label */
static uint32
sepgsql_avc_hash(const char *scontext, const char *tcontext, uint16 tclass)
{
- return hash_any((const unsigned char *)scontext, strlen(scontext))
- ^ hash_any((const unsigned char *)tcontext, strlen(tcontext))
+ return hash_any((const unsigned char *) scontext, strlen(scontext))
+ ^ hash_any((const unsigned char *) tcontext, strlen(tcontext))
^ tclass;
}
@@ -88,7 +88,7 @@ sepgsql_avc_reset(void)
/*
* Reclaim caches recently unreferenced
- */
+ */
static void
sepgsql_avc_reclaim(void)
{
@@ -142,15 +142,15 @@ sepgsql_avc_reclaim(void)
* Access control decisions must be atomic, but multiple system calls may
* be required to make a decision; thus, when referencing the access vector
* cache, we must loop until we complete without an intervening cache flush
- * event. In practice, looping even once should be very rare. Callers should
+ * event. In practice, looping even once should be very rare. Callers should
* do something like this:
*
- * sepgsql_avc_check_valid();
- * do {
- * :
- * <reference to uavc>
- * :
- * } while (!sepgsql_avc_check_valid())
+ * sepgsql_avc_check_valid();
+ * do {
+ * :
+ * <reference to uavc>
+ * :
+ * } while (!sepgsql_avc_check_valid())
*
* -------------------------------------------------------------------------
*/
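
Fleshing out the skeleton above into a hedged sketch (assuming scontext, tcontext, tclass, and required are in scope, as they are in sepgsql_avc_check_perms_label() later in this patch):

	/* Sketch only: a cache reference wrapped in the required validity loop. */
	avc_cache  *cache;
	bool		result;

	sepgsql_avc_check_valid();
	do
	{
		cache = sepgsql_avc_lookup(scontext, tcontext, tclass);
		result = ((required & cache->allowed) == required);
	} while (!sepgsql_avc_check_valid());
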
@@ -169,7 +169,7 @@ sepgsql_avc_check_valid(void)
/*
* sepgsql_avc_unlabeled
*
- * Returns an alternative label to be applied when no label or an invalid
+ * Returns an alternative label to be applied when no label or an invalid
* label would otherwise be assigned.
*/
static char *
@@ -177,12 +177,12 @@ sepgsql_avc_unlabeled(void)
{
if (!avc_unlabeled)
{
- security_context_t unlabeled;
+ security_context_t unlabeled;
if (security_get_initial_context_raw("unlabeled", &unlabeled) < 0)
ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("SELinux: failed to get initial security label: %m")));
+ (errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("SELinux: failed to get initial security label: %m")));
PG_TRY();
{
avc_unlabeled = MemoryContextStrdup(avc_mem_cxt, unlabeled);
@@ -200,7 +200,7 @@ sepgsql_avc_unlabeled(void)
}
/*
- * sepgsql_avc_compute
+ * sepgsql_avc_compute
*
* A fallback path, taken on a cache miss. It asks SELinux for its access
* control decision on the supplied pair of security context and object class.
@@ -208,24 +208,24 @@ sepgsql_avc_unlabeled(void)
static avc_cache *
sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
{
- char *ucontext = NULL;
- char *ncontext = NULL;
- MemoryContext oldctx;
- avc_cache *cache;
- uint32 hash;
- int index;
- struct av_decision avd;
+ char *ucontext = NULL;
+ char *ncontext = NULL;
+ MemoryContext oldctx;
+ avc_cache *cache;
+ uint32 hash;
+ int index;
+ struct av_decision avd;
hash = sepgsql_avc_hash(scontext, tcontext, tclass);
index = hash % AVC_NUM_SLOTS;
/*
- * Validation check of the supplied security context.
- * Because it always invoke system-call, frequent check should be avoided.
- * Unless security policy is reloaded, validation status shall be kept, so
- * we also cache whether the supplied security context was valid, or not.
+	 * Validation check of the supplied security context. Because it always
+	 * invokes a system call, frequent checks should be avoided. Unless the
+	 * security policy is reloaded, the validation status is kept, so we also
+	 * cache whether the supplied security context was valid or not.
*/
- if (security_check_context_raw((security_context_t)tcontext) != 0)
+ if (security_check_context_raw((security_context_t) tcontext) != 0)
ucontext = sepgsql_avc_unlabeled();
/*
@@ -237,15 +237,14 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
sepgsql_compute_avd(scontext, ucontext, tclass, &avd);
/*
- * It also caches a security label to be switched when a client
- * labeled as 'scontext' executes a procedure labeled as 'tcontext',
- * not only access control decision on the procedure.
- * The security label to be switched shall be computed uniquely on
- * a pair of 'scontext' and 'tcontext', thus, it is reasonable to
- * cache the new label on avc, and enables to reduce unnecessary
- * system calls.
- * It shall be referenced at sepgsql_needs_fmgr_hook to check whether
- * the supplied function is a trusted procedure, or not.
+	 * Besides the access control decision on the procedure, this also caches
+	 * the security label to be switched to when a client labeled as
+	 * 'scontext' executes a procedure labeled as 'tcontext'. The label to be
+	 * switched to is computed uniquely from the pair of 'scontext' and
+	 * 'tcontext', so it is reasonable to cache the new label in the avc and
+	 * thereby avoid unnecessary system calls. It is referenced by
+	 * sepgsql_needs_fmgr_hook to check whether the supplied function is a
+	 * trusted procedure or not.
*/
if (tclass == SEPG_CLASS_DB_PROCEDURE)
{
@@ -269,7 +268,7 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
cache = palloc0(sizeof(avc_cache));
- cache->hash = hash;
+ cache->hash = hash;
cache->scontext = pstrdup(scontext);
cache->tcontext = pstrdup(tcontext);
cache->tclass = tclass;
@@ -314,7 +313,7 @@ sepgsql_avc_lookup(const char *scontext, const char *tcontext, uint16 tclass)
hash = sepgsql_avc_hash(scontext, tcontext, tclass);
index = hash % AVC_NUM_SLOTS;
- foreach (cell, avc_slots[index])
+ foreach(cell, avc_slots[index])
{
cache = lfirst(cell);
@@ -348,14 +347,15 @@ sepgsql_avc_check_perms_label(const char *tcontext,
uint16 tclass, uint32 required,
const char *audit_name, bool abort)
{
- char *scontext = sepgsql_get_client_label();
+ char *scontext = sepgsql_get_client_label();
avc_cache *cache;
uint32 denied;
uint32 audited;
bool result;
sepgsql_avc_check_valid();
- do {
+ do
+ {
result = true;
/*
@@ -377,16 +377,16 @@ sepgsql_avc_check_perms_label(const char *tcontext,
audited = (denied ? (denied & ~0) : (required & ~0));
else
audited = denied ? (denied & cache->auditdeny)
- : (required & cache->auditallow);
+ : (required & cache->auditallow);
if (denied)
{
/*
* In permissive mode or permissive domain, violated permissions
* shall be audited to the log files at once, and then implicitly
- * allowed to avoid a flood of access denied logs, because
- * the purpose of permissive mode/domain is to collect a violation
- * log that will make it possible to fix up the security policy.
+ * allowed to avoid a flood of access denied logs, because the
+ * purpose of permissive mode/domain is to collect a violation log
+ * that will make it possible to fix up the security policy.
*/
if (!sepgsql_getenforce() || cache->permissive)
cache->allowed |= required;
@@ -397,10 +397,10 @@ sepgsql_avc_check_perms_label(const char *tcontext,
/*
* In the case where we have some auditable actions here,
- * sepgsql_audit_log shall be called with text representation of
- * security labels for both of subject and object.
- * It records this access violation, so DBA will be able to find
- * out unexpected security problems later.
+	 * sepgsql_audit_log shall be called with the text representation of the
+	 * security labels for both subject and object. It records this access
+	 * violation, so the DBA will be able to find out unexpected security
+	 * problems later.
*/
if (audited != 0 &&
audit_name != SEPGSQL_AVC_NOAUDIT &&
@@ -428,8 +428,8 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject,
uint16 tclass, uint32 required,
const char *audit_name, bool abort)
{
- char *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG);
- bool rc;
+ char *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG);
+ bool rc;
rc = sepgsql_avc_check_perms_label(tcontext,
tclass, required,
@@ -450,10 +450,10 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject,
char *
sepgsql_avc_trusted_proc(Oid functionId)
{
- char *scontext = sepgsql_get_client_label();
- char *tcontext;
- ObjectAddress tobject;
- avc_cache *cache;
+ char *scontext = sepgsql_get_client_label();
+ char *tcontext;
+ ObjectAddress tobject;
+ avc_cache *cache;
tobject.classId = ProcedureRelationId;
tobject.objectId = functionId;
@@ -461,7 +461,8 @@ sepgsql_avc_trusted_proc(Oid functionId)
tcontext = GetSecurityLabel(&tobject, SEPGSQL_LABEL_TAG);
sepgsql_avc_check_valid();
- do {
+ do
+ {
if (tcontext)
cache = sepgsql_avc_lookup(scontext, tcontext,
SEPG_CLASS_DB_PROCEDURE);
@@ -492,7 +493,7 @@ sepgsql_avc_exit(int code, Datum arg)
void
sepgsql_avc_init(void)
{
- int rc;
+ int rc;
/*
* All the avc stuff shall be allocated on avc_mem_cxt
@@ -508,12 +509,11 @@ sepgsql_avc_init(void)
avc_threshold = AVC_DEF_THRESHOLD;
/*
- * SELinux allows to mmap(2) its kernel status page in read-only mode
- * to inform userspace applications its status updating (such as
- * policy reloading) without system-call invocations.
- * This feature is only supported in Linux-2.6.38 or later, however,
- * libselinux provides a fallback mode to know its status using
- * netlink sockets.
+	 * SELinux allows its kernel status page to be mmap(2)'ed read-only, so
+	 * userspace applications can be informed of status updates (such as
+	 * policy reloads) without system-call invocations. This feature is only
+	 * supported on Linux 2.6.38 or later; for older kernels, libselinux
+	 * provides a fallback mode that tracks the status via netlink sockets.
*/
rc = selinux_status_open(1);
if (rc < 0)
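
For context, a hedged sketch of how the status page is consumed once opened: selinux_status_updated() is the libselinux call that reports an intervening policy reload, and the wrapper name here is hypothetical.

	/* Sketch only: flush the cache when the kernel reports a policy reload. */
	static void
	avc_revalidate_sketch(void)
	{
		if (selinux_status_updated() > 0)
			sepgsql_avc_reset();	/* policy changed: drop cached entries */
	}
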
diff --git a/contrib/spi/refint.c b/contrib/spi/refint.c
index 39a0160587..8dc565a190 100644
--- a/contrib/spi/refint.c
+++ b/contrib/spi/refint.c
@@ -536,8 +536,7 @@ check_foreign_key(PG_FUNCTION_ARGS)
/*
* Remember that SPI_prepare places plan in current memory context
- * - so, we have to save plan in Top memory context for later
- * use.
+ * - so, we have to save plan in Top memory context for later use.
*/
if (SPI_keepplan(pplan))
/* internal error */
diff --git a/contrib/vacuumlo/vacuumlo.c b/contrib/vacuumlo/vacuumlo.c
index 641a8c3425..958a496b24 100644
--- a/contrib/vacuumlo/vacuumlo.c
+++ b/contrib/vacuumlo/vacuumlo.c
@@ -69,7 +69,7 @@ vacuumlo(const char *database, const struct _param * param)
int i;
static char *password = NULL;
bool new_pass;
- bool success = true;
+ bool success = true;
/* Note: password can be carried over from a previous call */
if (param->pg_prompt == TRI_YES && password == NULL)
@@ -261,8 +261,8 @@ vacuumlo(const char *database, const struct _param * param)
* We don't want to run each delete as an individual transaction, because
* the commit overhead would be high. However, since 9.0 the backend will
* acquire a lock per deleted LO, so deleting too many LOs per transaction
- * risks running out of room in the shared-memory lock table.
- * Accordingly, we delete up to transaction_limit LOs per transaction.
+ * risks running out of room in the shared-memory lock table. Accordingly,
+ * we delete up to transaction_limit LOs per transaction.
*/
res = PQexec(conn, "begin");
if (PQresultStatus(res) != PGRES_COMMAND_OK)
@@ -459,8 +459,8 @@ main(int argc, char **argv)
if (param.transaction_limit < 0)
{
fprintf(stderr,
- "%s: transaction limit must not be negative (0 disables)\n",
- progname);
+ "%s: transaction limit must not be negative (0 disables)\n",
+ progname);
exit(1);
}
break;
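
Tying this option back to the lock-table comment above, a hedged sketch of the batching it enables (the deleted counter and the exact commit points are illustrative, not taken from this hunk):

	/* Sketch only: cap the number of large-object deletions per transaction. */
	if (param->transaction_limit > 0 &&
		++deleted % param->transaction_limit == 0)
	{
		PQclear(PQexec(conn, "commit"));
		PQclear(PQexec(conn, "begin"));
	}
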
diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c
index 2ddee59fcb..660d25c349 100644
--- a/contrib/xml2/xpath.c
+++ b/contrib/xml2/xpath.c
@@ -702,126 +702,126 @@ xpath_table(PG_FUNCTION_ARGS)
PG_TRY();
{
- /* For each row i.e. document returned from SPI */
- for (i = 0; i < proc; i++)
- {
- char *pkey;
- char *xmldoc;
- xmlXPathContextPtr ctxt;
- xmlXPathObjectPtr res;
- xmlChar *resstr;
- xmlXPathCompExprPtr comppath;
-
- /* Extract the row data as C Strings */
- spi_tuple = tuptable->vals[i];
- pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
- xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);
-
- /*
- * Clear the values array, so that not-well-formed documents return
- * NULL in all columns. Note that this also means that spare columns
- * will be NULL.
- */
- for (j = 0; j < ret_tupdesc->natts; j++)
- values[j] = NULL;
-
- /* Insert primary key */
- values[0] = pkey;
-
- /* Parse the document */
- if (xmldoc)
- doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
- else /* treat NULL as not well-formed */
- doctree = NULL;
-
- if (doctree == NULL)
+ /* For each row i.e. document returned from SPI */
+ for (i = 0; i < proc; i++)
{
- /* not well-formed, so output all-NULL tuple */
- ret_tuple = BuildTupleFromCStrings(attinmeta, values);
- tuplestore_puttuple(tupstore, ret_tuple);
- heap_freetuple(ret_tuple);
- }
- else
- {
- /* New loop here - we have to deal with nodeset results */
- rownr = 0;
-
- do
+ char *pkey;
+ char *xmldoc;
+ xmlXPathContextPtr ctxt;
+ xmlXPathObjectPtr res;
+ xmlChar *resstr;
+ xmlXPathCompExprPtr comppath;
+
+ /* Extract the row data as C Strings */
+ spi_tuple = tuptable->vals[i];
+ pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
+ xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);
+
+ /*
+ * Clear the values array, so that not-well-formed documents
+ * return NULL in all columns. Note that this also means that
+ * spare columns will be NULL.
+ */
+ for (j = 0; j < ret_tupdesc->natts; j++)
+ values[j] = NULL;
+
+ /* Insert primary key */
+ values[0] = pkey;
+
+ /* Parse the document */
+ if (xmldoc)
+ doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
+ else /* treat NULL as not well-formed */
+ doctree = NULL;
+
+ if (doctree == NULL)
{
- /* Now evaluate the set of xpaths. */
- had_values = false;
- for (j = 0; j < numpaths; j++)
+ /* not well-formed, so output all-NULL tuple */
+ ret_tuple = BuildTupleFromCStrings(attinmeta, values);
+ tuplestore_puttuple(tupstore, ret_tuple);
+ heap_freetuple(ret_tuple);
+ }
+ else
+ {
+ /* New loop here - we have to deal with nodeset results */
+ rownr = 0;
+
+ do
{
- ctxt = xmlXPathNewContext(doctree);
- ctxt->node = xmlDocGetRootElement(doctree);
+ /* Now evaluate the set of xpaths. */
+ had_values = false;
+ for (j = 0; j < numpaths; j++)
+ {
+ ctxt = xmlXPathNewContext(doctree);
+ ctxt->node = xmlDocGetRootElement(doctree);
- /* compile the path */
- comppath = xmlXPathCompile(xpaths[j]);
- if (comppath == NULL)
- xml_ereport(xmlerrcxt, ERROR,
- ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
- "XPath Syntax Error");
+ /* compile the path */
+ comppath = xmlXPathCompile(xpaths[j]);
+ if (comppath == NULL)
+ xml_ereport(xmlerrcxt, ERROR,
+ ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
+ "XPath Syntax Error");
- /* Now evaluate the path expression. */
- res = xmlXPathCompiledEval(comppath, ctxt);
- xmlXPathFreeCompExpr(comppath);
+ /* Now evaluate the path expression. */
+ res = xmlXPathCompiledEval(comppath, ctxt);
+ xmlXPathFreeCompExpr(comppath);
- if (res != NULL)
- {
- switch (res->type)
+ if (res != NULL)
{
- case XPATH_NODESET:
- /* We see if this nodeset has enough nodes */
- if (res->nodesetval != NULL &&
- rownr < res->nodesetval->nodeNr)
- {
- resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
- had_values = true;
- }
- else
- resstr = NULL;
-
- break;
-
- case XPATH_STRING:
- resstr = xmlStrdup(res->stringval);
- break;
-
- default:
- elog(NOTICE, "unsupported XQuery result: %d", res->type);
- resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
+ switch (res->type)
+ {
+ case XPATH_NODESET:
+ /* We see if this nodeset has enough nodes */
+ if (res->nodesetval != NULL &&
+ rownr < res->nodesetval->nodeNr)
+ {
+ resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
+ had_values = true;
+ }
+ else
+ resstr = NULL;
+
+ break;
+
+ case XPATH_STRING:
+ resstr = xmlStrdup(res->stringval);
+ break;
+
+ default:
+ elog(NOTICE, "unsupported XQuery result: %d", res->type);
+ resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
+ }
+
+ /*
+ * Insert this into the appropriate column in the
+ * result tuple.
+ */
+ values[j + 1] = (char *) resstr;
}
-
- /*
- * Insert this into the appropriate column in the
- * result tuple.
- */
- values[j + 1] = (char *) resstr;
+ xmlXPathFreeContext(ctxt);
}
- xmlXPathFreeContext(ctxt);
- }
- /* Now add the tuple to the output, if there is one. */
- if (had_values)
- {
- ret_tuple = BuildTupleFromCStrings(attinmeta, values);
- tuplestore_puttuple(tupstore, ret_tuple);
- heap_freetuple(ret_tuple);
- }
+ /* Now add the tuple to the output, if there is one. */
+ if (had_values)
+ {
+ ret_tuple = BuildTupleFromCStrings(attinmeta, values);
+ tuplestore_puttuple(tupstore, ret_tuple);
+ heap_freetuple(ret_tuple);
+ }
- rownr++;
- } while (had_values);
- }
+ rownr++;
+ } while (had_values);
+ }
- if (doctree != NULL)
- xmlFreeDoc(doctree);
- doctree = NULL;
+ if (doctree != NULL)
+ xmlFreeDoc(doctree);
+ doctree = NULL;
- if (pkey)
- pfree(pkey);
- if (xmldoc)
- pfree(xmldoc);
- }
+ if (pkey)
+ pfree(pkey);
+ if (xmldoc)
+ pfree(xmldoc);
+ }
}
PG_CATCH();
{
diff --git a/contrib/xml2/xslt_proc.c b/contrib/xml2/xslt_proc.c
index ba1171a041..a93931d261 100644
--- a/contrib/xml2/xslt_proc.c
+++ b/contrib/xml2/xslt_proc.c
@@ -85,40 +85,40 @@ xslt_process(PG_FUNCTION_ARGS)
{
/* Check to see if document is a file or a literal */
- if (VARDATA(doct)[0] == '<')
- doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ);
- else
- doctree = xmlParseFile(text_to_cstring(doct));
-
- if (doctree == NULL)
- xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
- "error parsing XML document");
+ if (VARDATA(doct)[0] == '<')
+ doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ);
+ else
+ doctree = xmlParseFile(text_to_cstring(doct));
- /* Same for stylesheet */
- if (VARDATA(ssheet)[0] == '<')
- {
- ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
- VARSIZE(ssheet) - VARHDRSZ);
- if (ssdoc == NULL)
+ if (doctree == NULL)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
- "error parsing stylesheet as XML document");
+ "error parsing XML document");
- stylesheet = xsltParseStylesheetDoc(ssdoc);
- }
- else
- stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet));
+ /* Same for stylesheet */
+ if (VARDATA(ssheet)[0] == '<')
+ {
+ ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
+ VARSIZE(ssheet) - VARHDRSZ);
+ if (ssdoc == NULL)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
+ "error parsing stylesheet as XML document");
+
+ stylesheet = xsltParseStylesheetDoc(ssdoc);
+ }
+ else
+ stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet));
- if (stylesheet == NULL)
- xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
- "failed to parse stylesheet");
+ if (stylesheet == NULL)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
+ "failed to parse stylesheet");
- restree = xsltApplyStylesheet(stylesheet, doctree, params);
+ restree = xsltApplyStylesheet(stylesheet, doctree, params);
- if (restree == NULL)
- xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
- "failed to apply stylesheet");
+ if (restree == NULL)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
+ "failed to apply stylesheet");
- resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet);
+ resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet);
}
PG_CATCH();
{
diff --git a/src/backend/access/gist/gist.c b/src/backend/access/gist/gist.c
index 1efaaee1a8..783590ea55 100644
--- a/src/backend/access/gist/gist.c
+++ b/src/backend/access/gist/gist.c
@@ -27,7 +27,7 @@
/* non-export function prototypes */
static void gistfixsplit(GISTInsertState *state, GISTSTATE *giststate);
static bool gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
- GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum);
+ GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum);
static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
GISTSTATE *giststate,
IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
@@ -781,8 +781,8 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
{
/*
* Page was split while we looked elsewhere. We didn't see the
- * downlink to the right page when we scanned the parent, so
- * add it to the queue now.
+ * downlink to the right page when we scanned the parent, so add
+ * it to the queue now.
*
* Put the right page ahead of the queue, so that we visit it
* next. That's important, because if this is the lowest internal
@@ -829,7 +829,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
elog(ERROR, "failed to re-find parent of a page in index \"%s\", block %u",
RelationGetRelationName(r), child);
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
}
/*
@@ -1046,7 +1046,7 @@ gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
*/
static bool
gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
- GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum)
+ GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum)
{
return gistinserttuples(state, stack, giststate, &tuple, 1, oldoffnum,
InvalidBuffer, InvalidBuffer, false, false);
@@ -1308,7 +1308,7 @@ initGISTstate(Relation index)
giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE));
giststate->scanCxt = scanCxt;
- giststate->tempCxt = scanCxt; /* caller must change this if needed */
+ giststate->tempCxt = scanCxt; /* caller must change this if needed */
giststate->tupdesc = index->rd_att;
for (i = 0; i < index->rd_att->natts; i++)
diff --git a/src/backend/access/gist/gistbuild.c b/src/backend/access/gist/gistbuild.c
index 712e59ac90..8caf485676 100644
--- a/src/backend/access/gist/gistbuild.c
+++ b/src/backend/access/gist/gistbuild.c
@@ -48,7 +48,7 @@ typedef enum
* before switching to the buffering build
* mode */
GIST_BUFFERING_ACTIVE /* in buffering build mode */
-} GistBufferingMode;
+} GistBufferingMode;
/* Working state for gistbuild and its callback */
typedef struct
@@ -263,7 +263,7 @@ gistValidateBufferingOption(char *value)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid value for \"buffering\" option"),
- errdetail("Valid values are \"on\", \"off\", and \"auto\".")));
+ errdetail("Valid values are \"on\", \"off\", and \"auto\".")));
}
}
@@ -567,7 +567,7 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
BlockNumber childblkno;
Buffer buffer;
bool result = false;
- BlockNumber blkno;
+ BlockNumber blkno;
int level;
OffsetNumber downlinkoffnum = InvalidOffsetNumber;
BlockNumber parentblkno = InvalidBlockNumber;
@@ -623,7 +623,7 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
{
gistbufferinginserttuples(buildstate, buffer, level,
&newtup, 1, childoffnum,
- InvalidBlockNumber, InvalidOffsetNumber);
+ InvalidBlockNumber, InvalidOffsetNumber);
/* gistbufferinginserttuples() released the buffer */
}
else
@@ -716,26 +716,26 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
/*
* All the downlinks on the old root page are now on one of the child
- * pages. Visit all the new child pages to memorize the parents of
- * the grandchildren.
+ * pages. Visit all the new child pages to memorize the parents of the
+ * grandchildren.
*/
if (gfbb->rootlevel > 1)
{
maxoff = PageGetMaxOffsetNumber(page);
for (off = FirstOffsetNumber; off <= maxoff; off++)
{
- ItemId iid = PageGetItemId(page, off);
- IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+ ItemId iid = PageGetItemId(page, off);
+ IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
- Buffer childbuf = ReadBuffer(buildstate->indexrel, childblkno);
+ Buffer childbuf = ReadBuffer(buildstate->indexrel, childblkno);
LockBuffer(childbuf, GIST_SHARE);
gistMemorizeAllDownlinks(buildstate, childbuf);
UnlockReleaseBuffer(childbuf);
/*
- * Also remember that the parent of the new child page is
- * the root block.
+ * Also remember that the parent of the new child page is the
+ * root block.
*/
gistMemorizeParent(buildstate, childblkno, GIST_ROOT_BLKNO);
}
@@ -789,8 +789,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
* Remember the parent of each new child page in our parent map.
* This assumes that the downlinks fit on the parent page. If the
* parent page is split, too, when we recurse up to insert the
- * downlinks, the recursive gistbufferinginserttuples() call
- * will update the map again.
+ * downlinks, the recursive gistbufferinginserttuples() call will
+ * update the map again.
*/
if (level > 0)
gistMemorizeParent(buildstate,
@@ -879,8 +879,9 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
if (parent == *parentblkno && *parentblkno != InvalidBlockNumber &&
*downlinkoffnum != InvalidOffsetNumber && *downlinkoffnum <= maxoff)
{
- ItemId iid = PageGetItemId(page, *downlinkoffnum);
- IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+ ItemId iid = PageGetItemId(page, *downlinkoffnum);
+ IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+
if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
{
/* Still there */
@@ -889,16 +890,17 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
}
/*
- * Downlink was not at the offset where it used to be. Scan the page
- * to find it. During normal gist insertions, it might've moved to another
- * page, to the right, but during a buffering build, we keep track of
- * the parent of each page in the lookup table so we should always know
- * what page it's on.
+ * Downlink was not at the offset where it used to be. Scan the page to
+ * find it. During normal gist insertions, it might've moved to another
+ * page, to the right, but during a buffering build, we keep track of the
+ * parent of each page in the lookup table so we should always know what
+ * page it's on.
*/
for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
{
- ItemId iid = PageGetItemId(page, off);
- IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+ ItemId iid = PageGetItemId(page, off);
+ IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+
if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
{
/* yes!!, found it */
@@ -908,7 +910,7 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
}
elog(ERROR, "failed to re-find parent for block %u", childblkno);
- return InvalidBuffer; /* keep compiler quiet */
+ return InvalidBuffer; /* keep compiler quiet */
}
/*
@@ -1129,7 +1131,7 @@ gistGetMaxLevel(Relation index)
typedef struct
{
- BlockNumber childblkno; /* hash key */
+ BlockNumber childblkno; /* hash key */
BlockNumber parentblkno;
} ParentMapEntry;
@@ -1156,9 +1158,9 @@ gistMemorizeParent(GISTBuildState *buildstate, BlockNumber child, BlockNumber pa
bool found;
entry = (ParentMapEntry *) hash_search(buildstate->parentMap,
- (const void *) &child,
- HASH_ENTER,
- &found);
+ (const void *) &child,
+ HASH_ENTER,
+ &found);
entry->parentblkno = parent;
}
@@ -1171,16 +1173,17 @@ gistMemorizeAllDownlinks(GISTBuildState *buildstate, Buffer parentbuf)
OffsetNumber maxoff;
OffsetNumber off;
BlockNumber parentblkno = BufferGetBlockNumber(parentbuf);
- Page page = BufferGetPage(parentbuf);
+ Page page = BufferGetPage(parentbuf);
Assert(!GistPageIsLeaf(page));
maxoff = PageGetMaxOffsetNumber(page);
for (off = FirstOffsetNumber; off <= maxoff; off++)
{
- ItemId iid = PageGetItemId(page, off);
- IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+ ItemId iid = PageGetItemId(page, off);
+ IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
+
gistMemorizeParent(buildstate, childblkno, parentblkno);
}
}
@@ -1193,9 +1196,9 @@ gistGetParent(GISTBuildState *buildstate, BlockNumber child)
/* Find node buffer in hash table */
entry = (ParentMapEntry *) hash_search(buildstate->parentMap,
- (const void *) &child,
- HASH_FIND,
- &found);
+ (const void *) &child,
+ HASH_FIND,
+ &found);
if (!found)
elog(ERROR, "could not find parent of block %d in lookup table", child);
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 3feca263a7..39aec856f9 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -528,7 +528,7 @@ typedef struct
bool isnull[INDEX_MAX_KEYS];
GISTPageSplitInfo *splitinfo;
GISTNodeBuffer *nodeBuffer;
-} RelocationBufferInfo;
+} RelocationBufferInfo;
/*
* At page split, distribute tuples from the buffer of the split page to
diff --git a/src/backend/access/gist/gistproc.c b/src/backend/access/gist/gistproc.c
index d97c64ede3..09e911d098 100644
--- a/src/backend/access/gist/gistproc.c
+++ b/src/backend/access/gist/gistproc.c
@@ -244,7 +244,7 @@ typedef struct
int index;
/* Delta between penalties of entry insertion into different groups */
double delta;
-} CommonEntry;
+} CommonEntry;
/*
* Context for g_box_consider_split. Contains information about currently
@@ -267,7 +267,7 @@ typedef struct
int dim; /* axis of this split */
double range; /* width of general MBR projection to the
* selected axis */
-} ConsiderSplitContext;
+} ConsiderSplitContext;
/*
* Interval represents projection of box to axis.
@@ -276,7 +276,7 @@ typedef struct
{
double lower,
upper;
-} SplitInterval;
+} SplitInterval;
/*
* Interval comparison function by lower bound of the interval;
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index bf139de824..c9fc9ba97f 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -124,7 +124,7 @@ gistbeginscan(PG_FUNCTION_ARGS)
so->giststate = giststate;
giststate->tempCxt = createTempGistContext();
so->queue = NULL;
- so->queueCxt = giststate->scanCxt; /* see gistrescan */
+ so->queueCxt = giststate->scanCxt; /* see gistrescan */
/* workspaces with size dependent on numberOfOrderBys: */
so->tmpTreeItem = palloc(GSTIHDRSZ + sizeof(double) * scan->numberOfOrderBys);
diff --git a/src/backend/access/gist/gistsplit.c b/src/backend/access/gist/gistsplit.c
index 2ec69a60d4..739fc597ce 100644
--- a/src/backend/access/gist/gistsplit.c
+++ b/src/backend/access/gist/gistsplit.c
@@ -581,8 +581,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *gist
if (v->spl_equiv == NULL)
{
/*
- * simple case: left and right keys for attno column are
- * equal
+ * simple case: left and right keys for attno column are equal
*/
gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1);
}
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 96dabdb48a..bbea5e4eac 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -391,7 +391,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
uint32 ovflbitno;
int32 bitmappage,
bitmapbit;
- Bucket bucket PG_USED_FOR_ASSERTS_ONLY;
+ Bucket bucket PG_USED_FOR_ASSERTS_ONLY;
/* Get information from the doomed page */
_hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE);
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 2d81383ae8..9519e73e54 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -223,9 +223,9 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
}
/*
- * Be sure to check for interrupts at least once per page. Checks at
- * higher code levels won't be able to stop a seqscan that encounters
- * many pages' worth of consecutive dead tuples.
+ * Be sure to check for interrupts at least once per page. Checks at
+ * higher code levels won't be able to stop a seqscan that encounters many
+ * pages' worth of consecutive dead tuples.
*/
CHECK_FOR_INTERRUPTS();
@@ -997,8 +997,8 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
*
* Same as relation_openrv, but with an additional missing_ok argument
* allowing a NULL return rather than an error if the relation is not
- * found. (Note that some other causes, such as permissions problems,
- * will still result in an ereport.)
+ * found. (Note that some other causes, such as permissions problems,
+ * will still result in an ereport.)
* ----------------
*/
Relation
@@ -1105,7 +1105,7 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
* by a RangeVar node
*
* As above, but optionally return NULL instead of failing for
- * relation-not-found.
+ * relation-not-found.
* ----------------
*/
Relation
@@ -1588,10 +1588,10 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
/*
* When first_call is true (and thus, skip is initially false) we'll
- * return the first tuple we find. But on later passes, heapTuple
+ * return the first tuple we find. But on later passes, heapTuple
* will initially be pointing to the tuple we returned last time.
- * Returning it again would be incorrect (and would loop forever),
- * so we skip it and return the next match we find.
+ * Returning it again would be incorrect (and would loop forever), so
+ * we skip it and return the next match we find.
*/
if (!skip)
{
@@ -1651,7 +1651,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
{
bool result;
Buffer buffer;
- HeapTupleData heapTuple;
+ HeapTupleData heapTuple;
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1885,14 +1885,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
/*
- * We're about to do the actual insert -- but check for conflict first,
- * to avoid possibly having to roll back work we've just done.
+ * We're about to do the actual insert -- but check for conflict first, to
+ * avoid possibly having to roll back work we've just done.
*
- * For a heap insert, we only need to check for table-level SSI locks.
- * Our new tuple can't possibly conflict with existing tuple locks, and
- * heap page locks are only consolidated versions of tuple locks; they do
- * not lock "gaps" as index page locks do. So we don't need to identify
- * a buffer before making the call.
+ * For a heap insert, we only need to check for table-level SSI locks. Our
+ * new tuple can't possibly conflict with existing tuple locks, and heap
+ * page locks are only consolidated versions of tuple locks; they do not
+ * lock "gaps" as index page locks do. So we don't need to identify a
+ * buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2123,11 +2123,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
* We're about to do the actual inserts -- but check for conflict first,
* to avoid possibly having to roll back work we've just done.
*
- * For a heap insert, we only need to check for table-level SSI locks.
- * Our new tuple can't possibly conflict with existing tuple locks, and
- * heap page locks are only consolidated versions of tuple locks; they do
- * not lock "gaps" as index page locks do. So we don't need to identify
- * a buffer before making the call.
+ * For a heap insert, we only need to check for table-level SSI locks. Our
+ * new tuple can't possibly conflict with existing tuple locks, and heap
+ * page locks are only consolidated versions of tuple locks; they do not
+ * lock "gaps" as index page locks do. So we don't need to identify a
+ * buffer before making the call.
*/
CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
@@ -2137,12 +2137,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
Buffer buffer;
Buffer vmbuffer = InvalidBuffer;
bool all_visible_cleared = false;
- int nthispage;
+ int nthispage;
/*
- * Find buffer where at least the next tuple will fit. If the page
- * is all-visible, this will also pin the requisite visibility map
- * page.
+ * Find buffer where at least the next tuple will fit. If the page is
+ * all-visible, this will also pin the requisite visibility map page.
*/
buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
InvalidBuffer, options, bistate,
@@ -2358,7 +2357,7 @@ heap_delete(Relation relation, ItemPointer tid,
ItemId lp;
HeapTupleData tp;
Page page;
- BlockNumber block;
+ BlockNumber block;
Buffer buffer;
Buffer vmbuffer = InvalidBuffer;
bool have_tuple_lock = false;
@@ -2372,10 +2371,10 @@ heap_delete(Relation relation, ItemPointer tid,
page = BufferGetPage(buffer);
/*
- * Before locking the buffer, pin the visibility map page if it appears
- * to be necessary. Since we haven't got the lock yet, someone else might
- * be in the middle of changing this, so we'll need to recheck after
- * we have the lock.
+ * Before locking the buffer, pin the visibility map page if it appears to
+ * be necessary. Since we haven't got the lock yet, someone else might be
+ * in the middle of changing this, so we'll need to recheck after we have
+ * the lock.
*/
if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer);
@@ -2717,7 +2716,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
HeapTupleData oldtup;
HeapTuple heaptup;
Page page;
- BlockNumber block;
+ BlockNumber block;
Buffer buffer,
newbuf,
vmbuffer = InvalidBuffer,
@@ -2753,10 +2752,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
page = BufferGetPage(buffer);
/*
- * Before locking the buffer, pin the visibility map page if it appears
- * to be necessary. Since we haven't got the lock yet, someone else might
- * be in the middle of changing this, so we'll need to recheck after
- * we have the lock.
+ * Before locking the buffer, pin the visibility map page if it appears to
+ * be necessary. Since we haven't got the lock yet, someone else might be
+ * in the middle of changing this, so we'll need to recheck after we have
+ * the lock.
*/
if (PageIsAllVisible(page))
visibilitymap_pin(relation, block, &vmbuffer);
@@ -2900,11 +2899,11 @@ l2:
/*
* If we didn't pin the visibility map page and the page has become all
- * visible while we were busy locking the buffer, or during some subsequent
- * window during which we had it unlocked, we'll have to unlock and
- * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
- * unfortunate, esepecially since we'll now have to recheck whether the
- * tuple has been locked or updated under us, but hopefully it won't
+ * visible while we were busy locking the buffer, or during some
+ * subsequent window during which we had it unlocked, we'll have to unlock
+ * and re-lock, to avoid holding the buffer lock across an I/O. That's a
+	 * bit unfortunate, especially since we'll now have to recheck whether
+ * the tuple has been locked or updated under us, but hopefully it won't
* happen very often.
*/
if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@@ -3196,11 +3195,11 @@ l2:
/*
* Mark old tuple for invalidation from system caches at next command
- * boundary, and mark the new tuple for invalidation in case we abort.
- * We have to do this before releasing the buffer because oldtup is in
- * the buffer. (heaptup is all in local memory, but it's necessary to
- * process both tuple versions in one call to inval.c so we can avoid
- * redundant sinval messages.)
+ * boundary, and mark the new tuple for invalidation in case we abort. We
+ * have to do this before releasing the buffer because oldtup is in the
+ * buffer. (heaptup is all in local memory, but it's necessary to process
+ * both tuple versions in one call to inval.c so we can avoid redundant
+ * sinval messages.)
*/
CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
@@ -4069,7 +4068,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid)
*/
bool
heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
- Buffer buf)
+ Buffer buf)
{
TransactionId xid;
@@ -4368,9 +4367,9 @@ log_heap_freeze(Relation reln, Buffer buffer,
}
/*
- * Perform XLogInsert for a heap-visible operation. 'block' is the block
+ * Perform XLogInsert for a heap-visible operation. 'block' is the block
* being marked all-visible, and vm_buffer is the buffer containing the
- * corresponding visibility map block. Both should have already been modified
+ * corresponding visibility map block. Both should have already been modified
* and dirtied.
*/
XLogRecPtr
@@ -4705,7 +4704,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
Page page;
/*
- * Read the heap page, if it still exists. If the heap file has been
+ * Read the heap page, if it still exists. If the heap file has been
* dropped or truncated later in recovery, this might fail. In that case,
* there's no point in doing anything further, since the visibility map
* will have to be cleared out at the same time.
@@ -4731,17 +4730,16 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
/*
- * We don't bump the LSN of the heap page when setting the visibility
- * map bit, because that would generate an unworkable volume of
- * full-page writes. This exposes us to torn page hazards, but since
- * we're not inspecting the existing page contents in any way, we
- * don't care.
+ * We don't bump the LSN of the heap page when setting the visibility map
+ * bit, because that would generate an unworkable volume of full-page
+ * writes. This exposes us to torn page hazards, but since we're not
+ * inspecting the existing page contents in any way, we don't care.
*
- * However, all operations that clear the visibility map bit *do* bump
- * the LSN, and those operations will only be replayed if the XLOG LSN
- * follows the page LSN. Thus, if the page LSN has advanced past our
- * XLOG record's LSN, we mustn't mark the page all-visible, because
- * the subsequent update won't be replayed to clear the flag.
+ * However, all operations that clear the visibility map bit *do* bump the
+ * LSN, and those operations will only be replayed if the XLOG LSN follows
+ * the page LSN. Thus, if the page LSN has advanced past our XLOG
+ * record's LSN, we mustn't mark the page all-visible, because the
+ * subsequent update won't be replayed to clear the flag.
*/
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@@ -4772,10 +4770,10 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
* Don't set the bit if replay has already passed this point.
*
* It might be safe to do this unconditionally; if replay has passed
- * this point, we'll replay at least as far this time as we did before,
- * and if this bit needs to be cleared, the record responsible for
- * doing so should be again replayed, and clear it. For right now,
- * out of an abundance of conservatism, we use the same test here
+ * this point, we'll replay at least as far this time as we did
+ * before, and if this bit needs to be cleared, the record responsible
+	 * for doing so will be replayed again and will clear it. For right
+ * now, out of an abundance of conservatism, we use the same test here
* we did for the heap page; if this results in a dropped bit, no real
* harm is done; and the next VACUUM will fix it.
*/
@@ -5183,7 +5181,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
if (xlrec->all_visible_cleared)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
- BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
+ BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, block, &vmbuffer);
@@ -5267,7 +5265,7 @@ newt:;
if (xlrec->new_all_visible_cleared)
{
Relation reln = CreateFakeRelcacheEntry(xlrec->target.node);
- BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
+ BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, block, &vmbuffer);
@@ -5690,7 +5688,7 @@ heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
else
appendStringInfo(buf, "multi-insert: ");
appendStringInfo(buf, "rel %u/%u/%u; blk %u; %d tuples",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
xlrec->blkno, xlrec->ntuples);
}
else
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 30ef1bf7e0..19a34923c7 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -109,8 +109,8 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
BlockNumber block1, BlockNumber block2,
Buffer *vmbuffer1, Buffer *vmbuffer2)
{
- bool need_to_pin_buffer1;
- bool need_to_pin_buffer2;
+ bool need_to_pin_buffer1;
+ bool need_to_pin_buffer2;
Assert(BufferIsValid(buffer1));
Assert(buffer2 == InvalidBuffer || buffer1 <= buffer2);
@@ -145,7 +145,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
/*
* If there are two buffers involved and we pinned just one of them,
* it's possible that the second one became all-visible while we were
- * busy pinning the first one. If it looks like that's a possible
+ * busy pinning the first one. If it looks like that's a possible
* scenario, we'll need to make a second pass through this loop.
*/
if (buffer2 == InvalidBuffer || buffer1 == buffer2
@@ -302,11 +302,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
* block if one was given, taking suitable care with lock ordering and
* the possibility they are the same block.
*
- * If the page-level all-visible flag is set, caller will need to clear
- * both that and the corresponding visibility map bit. However, by the
- * time we return, we'll have x-locked the buffer, and we don't want to
- * do any I/O while in that state. So we check the bit here before
- * taking the lock, and pin the page if it appears necessary.
+ * If the page-level all-visible flag is set, caller will need to
+ * clear both that and the corresponding visibility map bit. However,
+ * by the time we return, we'll have x-locked the buffer, and we don't
+ * want to do any I/O while in that state. So we check the bit here
+ * before taking the lock, and pin the page if it appears necessary.
* Checking without the lock creates a risk of getting the wrong
* answer, so we'll have to recheck after acquiring the lock.
*/
@@ -347,23 +347,24 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* We now have the target page (and the other buffer, if any) pinned
- * and locked. However, since our initial PageIsAllVisible checks
- * were performed before acquiring the lock, the results might now
- * be out of date, either for the selected victim buffer, or for the
- * other buffer passed by the caller. In that case, we'll need to give
- * up our locks, go get the pin(s) we failed to get earlier, and
+ * and locked. However, since our initial PageIsAllVisible checks
+ * were performed before acquiring the lock, the results might now be
+ * out of date, either for the selected victim buffer, or for the
+ * other buffer passed by the caller. In that case, we'll need to
+ * give up our locks, go get the pin(s) we failed to get earlier, and
* re-lock. That's pretty painful, but hopefully shouldn't happen
* often.
*
- * Note that there's a small possibility that we didn't pin the
- * page above but still have the correct page pinned anyway, either
- * because we've already made a previous pass through this loop, or
- * because caller passed us the right page anyway.
+ * Note that there's a small possibility that we didn't pin the page
+ * above but still have the correct page pinned anyway, either because
+ * we've already made a previous pass through this loop, or because
+ * caller passed us the right page anyway.
*
* Note also that it's possible that by the time we get the pin and
* retake the buffer locks, the visibility map bit will have been
- * cleared by some other backend anyway. In that case, we'll have done
- * a bit of extra work for no gain, but there's no real harm done.
+ * cleared by some other backend anyway. In that case, we'll have
+ * done a bit of extra work for no gain, but there's no real harm
+ * done.
*/
if (otherBuffer == InvalidBuffer || buffer <= otherBuffer)
GetVisibilityMapPins(relation, buffer, otherBuffer,
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index 28b5a20ae7..050f048a9b 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -75,7 +75,7 @@ do { \
static void toast_delete_datum(Relation rel, Datum value);
static Datum toast_save_datum(Relation rel, Datum value,
- struct varlena *oldexternal, int options);
+ struct varlena * oldexternal, int options);
static bool toastrel_valueid_exists(Relation toastrel, Oid valueid);
static bool toastid_valueid_exists(Oid toastrelid, Oid valueid);
static struct varlena *toast_fetch_datum(struct varlena * attr);
@@ -1233,7 +1233,7 @@ toast_compress_datum(Datum value)
*/
static Datum
toast_save_datum(Relation rel, Datum value,
- struct varlena *oldexternal, int options)
+ struct varlena * oldexternal, int options)
{
Relation toastrel;
Relation toastidx;
@@ -1353,7 +1353,7 @@ toast_save_datum(Relation rel, Datum value,
* those versions could easily reference the same toast value.
* When we copy the second or later version of such a row,
* reusing the OID will mean we select an OID that's already
- * in the new toast table. Check for that, and if so, just
+ * in the new toast table. Check for that, and if so, just
* fall through without writing the data again.
*
* While annoying and ugly-looking, this is a good thing
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 9152c7d151..eb5625906f 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -16,7 +16,7 @@
* visibilitymap_pin_ok - check whether correct map page is already pinned
* visibilitymap_set - set a bit in a previously pinned page
* visibilitymap_test - test if a bit is set
- * visibilitymap_count - count number of bits set in visibility map
+ * visibilitymap_count - count number of bits set in visibility map
* visibilitymap_truncate - truncate the visibility map
*
* NOTES
@@ -27,7 +27,7 @@
* the sense that we make sure that whenever a bit is set, we know the
* condition is true, but if a bit is not set, it might or might not be true.
*
- * Clearing a visibility map bit is not separately WAL-logged. The callers
+ * Clearing a visibility map bit is not separately WAL-logged. The callers
* must make sure that whenever a bit is cleared, the bit is cleared on WAL
* replay of the updating operation as well.
*
@@ -36,9 +36,9 @@
* it may still be the case that every tuple on the page is visible to all
* transactions; we just don't know that for certain. The difficulty is that
* there are two bits which are typically set together: the PD_ALL_VISIBLE bit
- * on the page itself, and the visibility map bit. If a crash occurs after the
+ * on the page itself, and the visibility map bit. If a crash occurs after the
* visibility map page makes it to disk and before the updated heap page makes
- * it to disk, redo must set the bit on the heap page. Otherwise, the next
+ * it to disk, redo must set the bit on the heap page. Otherwise, the next
* insert, update, or delete on the heap page will fail to realize that the
* visibility map bit must be cleared, possibly causing index-only scans to
* return wrong answers.
@@ -59,10 +59,10 @@
* the buffer lock over any I/O that may be required to read in the visibility
* map page. To avoid this, we examine the heap page before locking it;
* if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
- * bit. Then, we lock the buffer. But this creates a race condition: there
+ * bit. Then, we lock the buffer. But this creates a race condition: there
* is a possibility that in the time it takes to lock the buffer, the
* PD_ALL_VISIBLE bit gets set. If that happens, we have to unlock the
- * buffer, pin the visibility map page, and relock the buffer. This shouldn't
+ * buffer, pin the visibility map page, and relock the buffer. This shouldn't
* happen often, because only VACUUM currently sets visibility map bits,
* and the race will only occur if VACUUM processes a given page at almost
* exactly the same time that someone tries to further modify it.
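
The header comment above describes a pin-before-lock dance: peek at the heap page unlocked, pin the map page if PD_ALL_VISIBLE appears set, then lock and recheck, redoing the pin if the flag was set in between. A pthread-based sketch of the same recheck pattern; all names here are invented and the single global "pin" is purely illustrative:

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct
    {
        pthread_mutex_t lock;
        bool        all_visible;    /* stand-in for PD_ALL_VISIBLE */
    } HeapPage;

    static bool map_pinned = false;

    static void
    pin_map_page(void)
    {
        map_pinned = true;          /* stand-in for visibilitymap_pin() */
    }

    /* Lock the page, making sure the map page is pinned if we'll need it. */
    static void
    lock_page_with_map(HeapPage *page)
    {
        /* unlocked peek: pin the map page first if the bit seems set */
        if (page->all_visible)
            pin_map_page();

        pthread_mutex_lock(&page->lock);

        /* recheck: the bit may have been set while we were acquiring the lock */
        while (page->all_visible && !map_pinned)
        {
            pthread_mutex_unlock(&page->lock);
            pin_map_page();         /* may do I/O; must not hold the lock */
            pthread_mutex_lock(&page->lock);
        }
    }

    int
    main(void)
    {
        HeapPage    page = {PTHREAD_MUTEX_INITIALIZER, true};

        lock_page_with_map(&page);
        pthread_mutex_unlock(&page.lock);
        return 0;
    }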
@@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
* visibilitymap_set - set a bit on a previously pinned page
*
* recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
- * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
+ * or InvalidXLogRecPtr in normal running. The page LSN is advanced to the
* one provided; in normal running, we generate a new XLOG record and set the
- * page LSN to that value. cutoff_xid is the largest xmin on the page being
+ * page LSN to that value. cutoff_xid is the largest xmin on the page being
* marked all-visible; it is needed for Hot Standby, and can be
* InvalidTransactionId if the page contains no tuples.
*
@@ -295,10 +295,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
* releasing *buf after it's done testing and setting bits.
*
* NOTE: This function is typically called without a lock on the heap page,
- * so somebody else could change the bit just after we look at it. In fact,
+ * so somebody else could change the bit just after we look at it. In fact,
* since we don't lock the visibility map page either, it's even possible that
* someone else could have changed the bit just before we look at it, but yet
- * we might see the old value. It is the caller's responsibility to deal with
+ * we might see the old value. It is the caller's responsibility to deal with
* all concurrency issues!
*/
bool
@@ -344,7 +344,7 @@ visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *buf)
}
/*
- * visibilitymap_count - count number of bits set in visibility map
+ * visibilitymap_count - count number of bits set in visibility map
*
* Note: we ignore the possibility of race conditions when the table is being
* extended concurrently with the call. New pages added to the table aren't
@@ -356,16 +356,16 @@ visibilitymap_count(Relation rel)
BlockNumber result = 0;
BlockNumber mapBlock;
- for (mapBlock = 0; ; mapBlock++)
+ for (mapBlock = 0;; mapBlock++)
{
Buffer mapBuffer;
unsigned char *map;
int i;
/*
- * Read till we fall off the end of the map. We assume that any
- * extra bytes in the last page are zeroed, so we don't bother
- * excluding them from the count.
+ * Read till we fall off the end of the map. We assume that any extra
+ * bytes in the last page are zeroed, so we don't bother excluding
+ * them from the count.
*/
mapBuffer = vm_readbuf(rel, mapBlock, false);
if (!BufferIsValid(mapBuffer))
@@ -496,11 +496,11 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
Buffer buf;
/*
- * We might not have opened the relation at the smgr level yet, or we might
- * have been forced to close it by a sinval message. The code below won't
- * necessarily notice relation extension immediately when extend = false,
- * so we rely on sinval messages to ensure that our ideas about the size of
- * the map aren't too far out of date.
+ * We might not have opened the relation at the smgr level yet, or we
+ * might have been forced to close it by a sinval message. The code below
+ * won't necessarily notice relation extension immediately when extend =
+ * false, so we rely on sinval messages to ensure that our ideas about the
+ * size of the map aren't too far out of date.
*/
RelationOpenSmgr(rel);
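
visibilitymap_count() above simply walks the map pages and totals the set bits, relying on the convention that unused trailing bytes are zeroed. A standalone sketch of that counting step using a byte-popcount lookup table (PostgreSQL's table is precomputed; this one is filled at startup):

    #include <stdio.h>

    static int popcount_tab[256];

    static void
    init_tab(void)
    {
        for (int i = 0; i < 256; i++)
            popcount_tab[i] = (i & 1) + popcount_tab[i / 2];
    }

    /* count set bits in a map buffer; trailing zero bytes contribute nothing */
    static long
    count_visible(const unsigned char *map, int len)
    {
        long        result = 0;

        for (int i = 0; i < len; i++)
            result += popcount_tab[map[i]];
        return result;
    }

    int
    main(void)
    {
        unsigned char map[8] = {0xFF, 0x0F, 0, 0, 0, 0, 0, 0};

        init_tab();
        printf("%ld\n", count_visible(map, 8));     /* prints 12 */
        return 0;
    }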
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index d54b669bf3..26fd9b6e11 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -93,7 +93,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
else
scan->orderByData = NULL;
- scan->xs_want_itup = false; /* may be set later */
+ scan->xs_want_itup = false; /* may be set later */
/*
* During recovery we ignore killed tuples and don't bother to kill them
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 16ac4e1b9f..d64df319c5 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -435,7 +435,7 @@ index_restrpos(IndexScanDesc scan)
ItemPointer
index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
{
- FmgrInfo *procedure;
+ FmgrInfo *procedure;
bool found;
SCAN_CHECKS;
@@ -495,7 +495,7 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
HeapTuple
index_fetch_heap(IndexScanDesc scan)
{
- ItemPointer tid = &scan->xs_ctup.t_self;
+ ItemPointer tid = &scan->xs_ctup.t_self;
bool all_dead = false;
bool got_heap_tuple;
@@ -530,8 +530,8 @@ index_fetch_heap(IndexScanDesc scan)
if (got_heap_tuple)
{
/*
- * Only in a non-MVCC snapshot can more than one member of the
- * HOT chain be visible.
+ * Only in a non-MVCC snapshot can more than one member of the HOT
+ * chain be visible.
*/
scan->xs_continue_hot = !IsMVCCSnapshot(scan->xs_snapshot);
pgstat_count_heap_fetch(scan->indexRelation);
@@ -544,7 +544,7 @@ index_fetch_heap(IndexScanDesc scan)
/*
* If we scanned a whole HOT chain and found only dead tuples, tell index
* AM to kill its entry for that TID (this will take effect in the next
- * amgettuple call, in index_getnext_tid). We do not do this when in
+ * amgettuple call, in index_getnext_tid). We do not do this when in
* recovery because it may violate MVCC to do so. See comments in
* RelationGetIndexScan().
*/
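
index_fetch_heap() above walks a HOT chain and, under an MVCC snapshot, can stop at the first visible member, since at most one chain member is visible to such a snapshot. A toy linked-list sketch of that early-exit walk; the types and the visibility rule are invented simplifications, not the real heap code:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    typedef struct Tuple
    {
        int         xmin;           /* creating "transaction" id */
        int         xmax;           /* deleting "transaction" id, 0 = live */
        struct Tuple *next;         /* next member of the HOT chain */
    } Tuple;

    /* toy visibility: created before the snapshot, not yet deleted at it */
    static bool
    visible(const Tuple *t, int snap)
    {
        return t->xmin < snap && (t->xmax == 0 || t->xmax >= snap);
    }

    /*
     * Return the first visible chain member.  Under an MVCC snapshot the
     * caller can stop here; only a non-MVCC snapshot may see further visible
     * members (xs_continue_hot in the real code).
     */
    static const Tuple *
    first_visible(const Tuple *chain, int snap)
    {
        for (const Tuple *t = chain; t != NULL; t = t->next)
            if (visible(t, snap))
                return t;
        return NULL;
    }

    int
    main(void)
    {
        Tuple       v2 = {5, 0, NULL};      /* newest version, created at 5 */
        Tuple       v1 = {1, 5, &v2};       /* old version, deleted at 5 */
        const Tuple *t = first_visible(&v1, 4);

        printf("%s\n", t ? "found old version" : "none");
        return 0;
    }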
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index fedde934a3..d610bef798 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -82,7 +82,7 @@ btint2fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btint2sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btint2fastcmp;
PG_RETURN_VOID();
@@ -119,7 +119,7 @@ btint4fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btint4sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btint4fastcmp;
PG_RETURN_VOID();
@@ -156,7 +156,7 @@ btint8fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btint8sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btint8fastcmp;
PG_RETURN_VOID();
@@ -277,7 +277,7 @@ btoidfastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btoidsortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btoidfastcmp;
PG_RETURN_VOID();
@@ -338,7 +338,7 @@ btnamefastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btnamesortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btnamefastcmp;
PG_RETURN_VOID();
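
The sortsupport functions above all follow one pattern: the SQL-callable entry point does nothing but install a fast raw-datum comparator into the SortSupport struct. A stripped-down sketch of that install-a-comparator pattern with invented types (long standing in for Datum):

    #include <stdio.h>

    typedef struct SortSupportData
    {
        int         (*comparator) (long a, long b);
    } SortSupportData, *SortSupport;

    static int
    int8fastcmp(long a, long b)
    {
        if (a > b)
            return 1;
        if (a == b)
            return 0;
        return -1;
    }

    /* analogue of btint8sortsupport(): just plug in the raw comparator */
    static void
    int8sortsupport(SortSupport ssup)
    {
        ssup->comparator = int8fastcmp;
    }

    int
    main(void)
    {
        SortSupportData ssup;

        int8sortsupport(&ssup);
        printf("%d\n", ssup.comparator(3, 5));      /* prints -1 */
        return 0;
    }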
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index e6dec618c7..016ce2283c 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -1362,7 +1362,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
* we're in VACUUM and would not otherwise have an XID. Having already
* updated links to the target, ReadNewTransactionId() suffices as an
* upper bound. Any scan having retained a now-stale link is advertising
- * in its PGXACT an xmin less than or equal to the value we read here. It
+ * in its PGXACT an xmin less than or equal to the value we read here. It
* will continue to do so, holding back RecentGlobalXmin, for the duration
* of that scan.
*/
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 184fc3bb79..41d06edb15 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -433,7 +433,7 @@ btbeginscan(PG_FUNCTION_ARGS)
/*
* We don't know yet whether the scan will be index-only, so we do not
- * allocate the tuple workspace arrays until btrescan. However, we set up
+ * allocate the tuple workspace arrays until btrescan. However, we set up
* scan->xs_itupdesc whether we'll need it or not, since that's so cheap.
*/
so->currTuples = so->markTuples = NULL;
@@ -478,7 +478,7 @@ btrescan(PG_FUNCTION_ARGS)
/*
* Allocate tuple workspace arrays, if needed for an index-only scan and
- * not already done in a previous rescan call. To save on palloc
+ * not already done in a previous rescan call. To save on palloc
* overhead, both workspaces are allocated as one palloc block; only this
* function and btendscan know that.
*
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index b701c3f819..e0c952368b 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -564,11 +564,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ScanKeyEntryInitialize(chosen,
(SK_SEARCHNOTNULL | SK_ISNULL |
(impliesNN->sk_flags &
- (SK_BT_DESC | SK_BT_NULLS_FIRST))),
+ (SK_BT_DESC | SK_BT_NULLS_FIRST))),
curattr,
- ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
- BTGreaterStrategyNumber :
- BTLessStrategyNumber),
+ ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
+ BTGreaterStrategyNumber :
+ BTLessStrategyNumber),
InvalidOid,
InvalidOid,
InvalidOid,
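
The hunk above re-indents the construction of a SK_SEARCHNOTNULL key whose strategy depends on the column's NULLS FIRST/LAST setting: with NULLS FIRST the non-null values lie above the NULL region, so the boundary condition is "greater"; with NULLS LAST it is "less". A tiny sketch of that strategy selection; the flag name is invented, the strategy numbers are btree's:

    #include <stdio.h>

    #define NULLS_FIRST 0x01        /* hypothetical per-column flag */

    enum Strategy { LESS = 1, GREATER = 5 };    /* btree strategy numbers */

    /* pick the boundary strategy for an "IS NOT NULL" probe key */
    static enum Strategy
    notnull_strategy(int flags)
    {
        return (flags & NULLS_FIRST) ? GREATER : LESS;
    }

    int
    main(void)
    {
        printf("%d\n", notnull_strategy(NULLS_FIRST));  /* 5: > all NULLs */
        printf("%d\n", notnull_strategy(0));            /* 1: < all NULLs */
        return 0;
    }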
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index f79ce552b6..33ad8915f5 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -37,10 +37,10 @@ typedef struct BTSortArrayContext
static Datum _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
StrategyNumber strat,
Datum *elems, int nelems);
-static int _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
+static int _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
bool reverse,
Datum *elems, int nelems);
-static int _bt_compare_array_elements(const void *a, const void *b, void *arg);
+static int _bt_compare_array_elements(const void *a, const void *b, void *arg);
static bool _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
ScanKey leftarg, ScanKey rightarg,
bool *result);
@@ -227,8 +227,8 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
}
/*
- * Make a scan-lifespan context to hold array-associated data, or reset
- * it if we already have one from a previous rescan cycle.
+ * Make a scan-lifespan context to hold array-associated data, or reset it
+ * if we already have one from a previous rescan cycle.
*/
if (so->arrayContext == NULL)
so->arrayContext = AllocSetContextCreate(CurrentMemoryContext,
@@ -269,7 +269,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
continue;
/*
- * First, deconstruct the array into elements. Anything allocated
+ * First, deconstruct the array into elements. Anything allocated
* here (including a possibly detoasted array value) is in the
* workspace context.
*/
@@ -283,7 +283,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
&elem_values, &elem_nulls, &num_elems);
/*
- * Compress out any null elements. We can ignore them since we assume
+ * Compress out any null elements. We can ignore them since we assume
* all btree operators are strict.
*/
num_nonnulls = 0;
@@ -338,7 +338,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
* successive primitive indexscans produce data in index order.
*/
num_elems = _bt_sort_array_elements(scan, cur,
- (indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0,
+ (indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0,
elem_values, num_nonnulls);
/*
@@ -387,9 +387,10 @@ _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
/*
* Look up the appropriate comparison operator in the opfamily.
*
- * Note: it's possible that this would fail, if the opfamily is incomplete,
- * but it seems quite unlikely that an opfamily would omit non-cross-type
- * comparison operators for any datatype that it supports at all.
+ * Note: it's possible that this would fail, if the opfamily is
+ * incomplete, but it seems quite unlikely that an opfamily would omit
+ * non-cross-type comparison operators for any datatype that it supports
+ * at all.
*/
cmp_op = get_opfamily_member(rel->rd_opfamily[skey->sk_attno - 1],
elemtype,
@@ -455,9 +456,10 @@ _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
/*
* Look up the appropriate comparison function in the opfamily.
*
- * Note: it's possible that this would fail, if the opfamily is incomplete,
- * but it seems quite unlikely that an opfamily would omit non-cross-type
- * support functions for any datatype that it supports at all.
+ * Note: it's possible that this would fail, if the opfamily is
+ * incomplete, but it seems quite unlikely that an opfamily would omit
+ * non-cross-type support functions for any datatype that it supports at
+ * all.
*/
cmp_proc = get_opfamily_proc(rel->rd_opfamily[skey->sk_attno - 1],
elemtype,
@@ -515,7 +517,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg)
* _bt_start_array_keys() -- Initialize array keys at start of a scan
*
* Set up the cur_elem counters and fill in the first sk_argument value for
- * each array scankey. We can't do this until we know the scan direction.
+ * each array scankey. We can't do this until we know the scan direction.
*/
void
_bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
@@ -609,8 +611,8 @@ _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
* so that the index sorts in the desired direction.
*
* One key purpose of this routine is to discover which scan keys must be
- * satisfied to continue the scan. It also attempts to eliminate redundant
- * keys and detect contradictory keys. (If the index opfamily provides
+ * satisfied to continue the scan. It also attempts to eliminate redundant
+ * keys and detect contradictory keys. (If the index opfamily provides
* incomplete sets of cross-type operators, we may fail to detect redundant
* or contradictory keys, but we can survive that.)
*
@@ -676,7 +678,7 @@ _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
* Note: the reason we have to copy the preprocessed scan keys into private
* storage is that we are modifying the array based on comparisons of the
* key argument values, which could change on a rescan or after moving to
- * new elements of array keys. Therefore we can't overwrite the source data.
+ * new elements of array keys. Therefore we can't overwrite the source data.
*/
void
_bt_preprocess_keys(IndexScanDesc scan)
@@ -781,8 +783,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
* set qual_ok to false and abandon further processing.
*
* We also have to deal with the case of "key IS NULL", which is
- * unsatisfiable in combination with any other index condition.
- * By the time we get here, that's been classified as an equality
+ * unsatisfiable in combination with any other index condition. By
+ * the time we get here, that's been classified as an equality
* check, and we've rejected any combination of it with a regular
* equality condition; but not with other types of conditions.
*/
@@ -1421,12 +1423,12 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
- * because it's not possible for any future tuples to pass.
- * On a forward scan, however, we must keep going, because we
- * may have initially positioned to the start of the index.
+ * because it's not possible for any future tuples to pass. On
+ * a forward scan, however, we must keep going, because we may
+ * have initially positioned to the start of the index.
*/
if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsBackward(dir))
@@ -1437,11 +1439,11 @@ _bt_checkkeys(IndexScanDesc scan,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
- * because it's not possible for any future tuples to pass.
- * On a backward scan, however, we must keep going, because we
+ * because it's not possible for any future tuples to pass. On
+ * a backward scan, however, we must keep going, because we
* may have initially positioned to the end of the index.
*/
if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
@@ -1532,12 +1534,12 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
- * index attr. On a backward scan, we can stop if this qual
+ * index attr. On a backward scan, we can stop if this qual
* is one of the "must match" subset. We can stop regardless
* of whether the qual is > or <, so long as it's required,
- * because it's not possible for any future tuples to pass.
- * On a forward scan, however, we must keep going, because we
- * may have initially positioned to the start of the index.
+ * because it's not possible for any future tuples to pass. On
+ * a forward scan, however, we must keep going, because we may
+ * have initially positioned to the start of the index.
*/
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
ScanDirectionIsBackward(dir))
@@ -1548,11 +1550,11 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
/*
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
- * index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
+ * index attr. On a forward scan, we can stop if this qual is
+ * one of the "must match" subset. We can stop regardless of
* whether the qual is > or <, so long as it's required,
- * because it's not possible for any future tuples to pass.
- * On a backward scan, however, we must keep going, because we
+ * because it's not possible for any future tuples to pass. On
+ * a backward scan, however, we must keep going, because we
* may have initially positioned to the end of the index.
*/
if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
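
_bt_preprocess_array_keys() above deconstructs each scankey array, drops NULL elements (all btree operators are assumed strict), then sorts and de-duplicates the remainder so successive primitive scans produce data in index order. A standalone sketch of that strip/sort/unique step over a plain int array:

    #include <stdio.h>
    #include <stdlib.h>

    static int
    cmp_int(const void *a, const void *b)
    {
        int         x = *(const int *) a;
        int         y = *(const int *) b;

        return (x > y) - (x < y);
    }

    /* strip "nulls", sort ascending, and squeeze out duplicates in place */
    static int
    prep_array_keys(int *elems, const int *isnull, int n)
    {
        int         m = 0;

        for (int i = 0; i < n; i++)         /* compress out null elements */
            if (!isnull[i])
                elems[m++] = elems[i];

        qsort(elems, m, sizeof(int), cmp_int);

        int         k = (m > 0) ? 1 : 0;    /* unique-ify adjacent duplicates */

        for (int i = 1; i < m; i++)
            if (elems[i] != elems[k - 1])
                elems[k++] = elems[i];
        return k;
    }

    int
    main(void)
    {
        int         elems[] = {5, 3, 5, 1, 3};
        int         isnull[] = {0, 0, 0, 1, 0};
        int         n = prep_array_keys(elems, isnull, 5);

        for (int i = 0; i < n; i++)
            printf("%d ", elems[i]);        /* prints "3 5 " */
        printf("\n");
        return 0;
    }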
diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c
index 98a7bea742..b3f8f6a231 100644
--- a/src/backend/access/spgist/spgdoinsert.c
+++ b/src/backend/access/spgist/spgdoinsert.c
@@ -24,7 +24,7 @@
/*
* SPPageDesc tracks all info about a page we are inserting into. In some
* situations it actually identifies a tuple, or even a specific node within
- * an inner tuple. But any of the fields can be invalid. If the buffer
+ * an inner tuple. But any of the fields can be invalid. If the buffer
* field is valid, it implies we hold pin and exclusive lock on that buffer.
* page pointer should be valid exactly when buffer is.
*/
@@ -129,8 +129,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page,
int firststate, int reststate,
BlockNumber blkno, OffsetNumber offnum)
{
- OffsetNumber firstItem;
- OffsetNumber *sortednos;
+ OffsetNumber firstItem;
+ OffsetNumber *sortednos;
SpGistDeadTuple tuple = NULL;
int i;
@@ -155,8 +155,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page,
for (i = 0; i < nitems; i++)
{
- OffsetNumber itemno = sortednos[i];
- int tupstate;
+ OffsetNumber itemno = sortednos[i];
+ int tupstate;
tupstate = (itemno == firstItem) ? firststate : reststate;
if (tuple == NULL || tuple->tupstate != tupstate)
@@ -200,7 +200,7 @@ saveNodeLink(Relation index, SPPageDesc *parent,
*/
static void
addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
- SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew)
+ SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew)
{
XLogRecData rdata[4];
spgxlogAddLeaf xlrec;
@@ -230,7 +230,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
/* Tuple is not part of a chain */
leafTuple->nextOffset = InvalidOffsetNumber;
current->offnum = SpGistPageAddNewItem(state, current->page,
- (Item) leafTuple, leafTuple->size,
+ (Item) leafTuple, leafTuple->size,
NULL, false);
xlrec.offnumLeaf = current->offnum;
@@ -250,9 +250,9 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
else
{
/*
- * Tuple must be inserted into existing chain. We mustn't change
- * the chain's head address, but we don't need to chase the entire
- * chain to put the tuple at the end; we can insert it second.
+ * Tuple must be inserted into existing chain. We mustn't change the
+ * chain's head address, but we don't need to chase the entire chain
+ * to put the tuple at the end; we can insert it second.
*
* Also, it's possible that the "chain" consists only of a DEAD tuple,
* in which case we should replace the DEAD tuple in-place.
@@ -261,7 +261,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
OffsetNumber offnum;
head = (SpGistLeafTuple) PageGetItem(current->page,
- PageGetItemId(current->page, current->offnum));
+ PageGetItemId(current->page, current->offnum));
if (head->tupstate == SPGIST_LIVE)
{
leafTuple->nextOffset = head->nextOffset;
@@ -274,7 +274,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
* and set new second element
*/
head = (SpGistLeafTuple) PageGetItem(current->page,
- PageGetItemId(current->page, current->offnum));
+ PageGetItemId(current->page, current->offnum));
head->nextOffset = offnum;
xlrec.offnumLeaf = offnum;
@@ -483,7 +483,7 @@ moveLeafs(Relation index, SpGistState *state,
for (i = 0; i < nDelete; i++)
{
it = (SpGistLeafTuple) PageGetItem(current->page,
- PageGetItemId(current->page, toDelete[i]));
+ PageGetItemId(current->page, toDelete[i]));
Assert(it->tupstate == SPGIST_LIVE);
/*
@@ -516,12 +516,12 @@ moveLeafs(Relation index, SpGistState *state,
leafptr += newLeafTuple->size;
/*
- * Now delete the old tuples, leaving a redirection pointer behind for
- * the first one, unless we're doing an index build; in which case there
- * can't be any concurrent scan so we need not provide a redirect.
+ * Now delete the old tuples, leaving a redirection pointer behind for the
+ * first one, unless we're doing an index build; in which case there can't
+ * be any concurrent scan so we need not provide a redirect.
*/
spgPageIndexMultiDelete(state, current->page, toDelete, nDelete,
- state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
+ state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
SPGIST_PLACEHOLDER,
nblkno, r);
@@ -575,7 +575,7 @@ setRedirectionTuple(SPPageDesc *current, OffsetNumber position,
SpGistDeadTuple dt;
dt = (SpGistDeadTuple) PageGetItem(current->page,
- PageGetItemId(current->page, position));
+ PageGetItemId(current->page, position));
Assert(dt->tupstate == SPGIST_REDIRECT);
Assert(ItemPointerGetBlockNumber(&dt->pointer) == SPGIST_METAPAGE_BLKNO);
ItemPointerSet(&dt->pointer, blkno, offnum);
@@ -640,7 +640,7 @@ checkAllTheSame(spgPickSplitIn *in, spgPickSplitOut *out, bool tooBig,
/* The opclass may not use node labels, but if it does, duplicate 'em */
if (out->nodeLabels)
{
- Datum theLabel = out->nodeLabels[theNode];
+ Datum theLabel = out->nodeLabels[theNode];
out->nodeLabels = (Datum *) palloc(sizeof(Datum) * out->nNodes);
for (i = 0; i < out->nNodes; i++)
@@ -754,8 +754,8 @@ doPickSplit(Relation index, SpGistState *state,
{
/*
* We are splitting the root (which up to now is also a leaf page).
- * Its tuples are not linked, so scan sequentially to get them all.
- * We ignore the original value of current->offnum.
+ * Its tuples are not linked, so scan sequentially to get them all. We
+ * ignore the original value of current->offnum.
*/
for (i = FirstOffsetNumber; i <= max; i++)
{
@@ -773,7 +773,7 @@ doPickSplit(Relation index, SpGistState *state,
/* we will delete the tuple altogether, so count full space */
spaceToDelete += it->size + sizeof(ItemIdData);
}
- else /* tuples on root should be live */
+ else /* tuples on root should be live */
elog(ERROR, "unexpected SPGiST tuple state: %d", it->tupstate);
}
}
@@ -820,7 +820,7 @@ doPickSplit(Relation index, SpGistState *state,
* We may not actually insert new tuple because another picksplit may be
* necessary due to too large value, but we will try to allocate enough
* space to include it; and in any case it has to be included in the input
- * for the picksplit function. So don't increment nToInsert yet.
+ * for the picksplit function. So don't increment nToInsert yet.
*/
in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state);
heapPtrs[in.nTuples] = newLeafTuple->heapPtr;
@@ -878,7 +878,7 @@ doPickSplit(Relation index, SpGistState *state,
/*
* Check to see if the picksplit function failed to separate the values,
* ie, it put them all into the same child node. If so, select allTheSame
- * mode and create a random split instead. See comments for
+ * mode and create a random split instead. See comments for
* checkAllTheSame as to why we need to know if the new leaf tuples could
* fit on one page.
*/
@@ -924,8 +924,8 @@ doPickSplit(Relation index, SpGistState *state,
innerTuple->allTheSame = allTheSame;
/*
- * Update nodes[] array to point into the newly formed innerTuple, so
- * that we can adjust their downlinks below.
+ * Update nodes[] array to point into the newly formed innerTuple, so that
+ * we can adjust their downlinks below.
*/
SGITITERATE(innerTuple, i, node)
{
@@ -944,13 +944,13 @@ doPickSplit(Relation index, SpGistState *state,
}
/*
- * To perform the split, we must insert a new inner tuple, which can't
- * go on a leaf page; and unless we are splitting the root page, we
- * must then update the parent tuple's downlink to point to the inner
- * tuple. If there is room, we'll put the new inner tuple on the same
- * page as the parent tuple, otherwise we need another non-leaf buffer.
- * But if the parent page is the root, we can't add the new inner tuple
- * there, because the root page must have only one inner tuple.
+ * To perform the split, we must insert a new inner tuple, which can't go
+ * on a leaf page; and unless we are splitting the root page, we must then
+ * update the parent tuple's downlink to point to the inner tuple. If
+ * there is room, we'll put the new inner tuple on the same page as the
+ * parent tuple, otherwise we need another non-leaf buffer. But if the
+ * parent page is the root, we can't add the new inner tuple there,
+ * because the root page must have only one inner tuple.
*/
xlrec.initInner = false;
if (parent->buffer != InvalidBuffer &&
@@ -965,9 +965,9 @@ doPickSplit(Relation index, SpGistState *state,
{
/* Send tuple to page with next triple parity (see README) */
newInnerBuffer = SpGistGetBuffer(index,
- GBUF_INNER_PARITY(parent->blkno + 1) |
+ GBUF_INNER_PARITY(parent->blkno + 1) |
(isNulls ? GBUF_NULLS : 0),
- innerTuple->size + sizeof(ItemIdData),
+ innerTuple->size + sizeof(ItemIdData),
&xlrec.initInner);
}
else
@@ -977,22 +977,22 @@ doPickSplit(Relation index, SpGistState *state,
}
/*
- * Because a WAL record can't involve more than four buffers, we can
- * only afford to deal with two leaf pages in each picksplit action,
- * ie the current page and at most one other.
+ * Because a WAL record can't involve more than four buffers, we can only
+ * afford to deal with two leaf pages in each picksplit action, ie the
+ * current page and at most one other.
*
- * The new leaf tuples converted from the existing ones should require
- * the same or less space, and therefore should all fit onto one page
+ * The new leaf tuples converted from the existing ones should require the
+ * same or less space, and therefore should all fit onto one page
* (although that's not necessarily the current page, since we can't
* delete the old tuples but only replace them with placeholders).
- * However, the incoming new tuple might not also fit, in which case
- * we might need another picksplit cycle to reduce it some more.
+ * However, the incoming new tuple might not also fit, in which case we
+ * might need another picksplit cycle to reduce it some more.
*
- * If there's not room to put everything back onto the current page,
- * then we decide on a per-node basis which tuples go to the new page.
- * (We do it like that because leaf tuple chains can't cross pages,
- * so we must place all leaf tuples belonging to the same parent node
- * on the same page.)
+ * If there's not room to put everything back onto the current page, then
+ * we decide on a per-node basis which tuples go to the new page. (We do
+ * it like that because leaf tuple chains can't cross pages, so we must
+ * place all leaf tuples belonging to the same parent node on the same
+ * page.)
*
* If we are splitting the root page (turning it from a leaf page into an
* inner page), then no leaf tuples can go back to the current page; they
@@ -1037,12 +1037,13 @@ doPickSplit(Relation index, SpGistState *state,
int newspace;
newLeafBuffer = SpGistGetBuffer(index,
- GBUF_LEAF | (isNulls ? GBUF_NULLS : 0),
+ GBUF_LEAF | (isNulls ? GBUF_NULLS : 0),
Min(totalLeafSizes,
SPGIST_PAGE_CAPACITY),
&xlrec.initDest);
+
/*
- * Attempt to assign node groups to the two pages. We might fail to
+ * Attempt to assign node groups to the two pages. We might fail to
* do so, even if totalLeafSizes is less than the available space,
* because we can't split a group across pages.
*/
@@ -1054,12 +1055,12 @@ doPickSplit(Relation index, SpGistState *state,
{
if (leafSizes[i] <= curspace)
{
- nodePageSelect[i] = 0; /* signifies current page */
+ nodePageSelect[i] = 0; /* signifies current page */
curspace -= leafSizes[i];
}
else
{
- nodePageSelect[i] = 1; /* signifies new leaf page */
+ nodePageSelect[i] = 1; /* signifies new leaf page */
newspace -= leafSizes[i];
}
}
@@ -1075,7 +1076,7 @@ doPickSplit(Relation index, SpGistState *state,
else if (includeNew)
{
/* We must exclude the new leaf tuple from the split */
- int nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1];
+ int nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1];
leafSizes[nodeOfNewTuple] -=
newLeafs[in.nTuples - 1]->size + sizeof(ItemIdData);
@@ -1087,12 +1088,12 @@ doPickSplit(Relation index, SpGistState *state,
{
if (leafSizes[i] <= curspace)
{
- nodePageSelect[i] = 0; /* signifies current page */
+ nodePageSelect[i] = 0; /* signifies current page */
curspace -= leafSizes[i];
}
else
{
- nodePageSelect[i] = 1; /* signifies new leaf page */
+ nodePageSelect[i] = 1; /* signifies new leaf page */
newspace -= leafSizes[i];
}
}
@@ -1204,7 +1205,7 @@ doPickSplit(Relation index, SpGistState *state,
for (i = 0; i < nToInsert; i++)
{
SpGistLeafTuple it = newLeafs[i];
- Buffer leafBuffer;
+ Buffer leafBuffer;
BlockNumber leafBlock;
OffsetNumber newoffset;
@@ -1584,12 +1585,12 @@ spgAddNodeAction(Relation index, SpGistState *state,
xlrec.nodeI = parent->node;
/*
- * obtain new buffer with the same parity as current, since it will
- * be a child of same parent tuple
+ * obtain new buffer with the same parity as current, since it will be
+ * a child of same parent tuple
*/
current->buffer = SpGistGetBuffer(index,
GBUF_INNER_PARITY(current->blkno),
- newInnerTuple->size + sizeof(ItemIdData),
+ newInnerTuple->size + sizeof(ItemIdData),
&xlrec.newPage);
current->blkno = BufferGetBlockNumber(current->buffer);
current->page = BufferGetPage(current->buffer);
@@ -1597,15 +1598,15 @@ spgAddNodeAction(Relation index, SpGistState *state,
xlrec.blknoNew = current->blkno;
/*
- * Let's just make real sure new current isn't same as old. Right
- * now that's impossible, but if SpGistGetBuffer ever got smart enough
- * to delete placeholder tuples before checking space, maybe it
- * wouldn't be impossible. The case would appear to work except that
- * WAL replay would be subtly wrong, so I think a mere assert isn't
- * enough here.
+ * Let's just make real sure new current isn't same as old. Right now
+ * that's impossible, but if SpGistGetBuffer ever got smart enough to
+ * delete placeholder tuples before checking space, maybe it wouldn't
+ * be impossible. The case would appear to work except that WAL
+ * replay would be subtly wrong, so I think a mere assert isn't enough
+ * here.
*/
- if (xlrec.blknoNew == xlrec.blkno)
- elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");
+ if (xlrec.blknoNew == xlrec.blkno)
+ elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");
/*
* New current and parent buffer will both be modified; but note that
@@ -1707,9 +1708,9 @@ spgSplitNodeAction(Relation index, SpGistState *state,
Assert(!SpGistPageStoresNulls(current->page));
/*
- * Construct new prefix tuple, containing a single node with the
- * specified label. (We'll update the node's downlink to point to the
- * new postfix tuple, below.)
+ * Construct new prefix tuple, containing a single node with the specified
+ * label. (We'll update the node's downlink to point to the new postfix
+ * tuple, below.)
*/
node = spgFormNodeTuple(state, out->result.splitTuple.nodeLabel, false);
@@ -1888,9 +1889,9 @@ spgdoinsert(Relation index, SpGistState *state,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
(unsigned long) (leafSize - sizeof(ItemIdData)),
- (unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
+ (unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
RelationGetRelationName(index)),
- errhint("Values larger than a buffer page cannot be indexed.")));
+ errhint("Values larger than a buffer page cannot be indexed.")));
/* Initialize "current" to the appropriate root page */
current.blkno = isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO;
@@ -1920,7 +1921,7 @@ spgdoinsert(Relation index, SpGistState *state,
if (current.blkno == InvalidBlockNumber)
{
/*
- * Create a leaf page. If leafSize is too large to fit on a page,
+ * Create a leaf page. If leafSize is too large to fit on a page,
* we won't actually use the page yet, but it simplifies the API
* for doPickSplit to always have a leaf page at hand; so just
* quietly limit our request to a page size.
@@ -1968,7 +1969,7 @@ spgdoinsert(Relation index, SpGistState *state,
}
else if ((sizeToSplit =
checkSplitConditions(index, state, &current,
- &nToSplit)) < SPGIST_PAGE_CAPACITY / 2 &&
+ &nToSplit)) < SPGIST_PAGE_CAPACITY / 2 &&
nToSplit < 64 &&
leafTuple->size + sizeof(ItemIdData) + sizeToSplit <= SPGIST_PAGE_CAPACITY)
{
@@ -2077,8 +2078,8 @@ spgdoinsert(Relation index, SpGistState *state,
}
/*
- * Loop around and attempt to insert the new leafDatum
- * at "current" (which might reference an existing child
+ * Loop around and attempt to insert the new leafDatum at
+ * "current" (which might reference an existing child
* tuple, or might be invalid to force us to find a new
* page for the tuple).
*
@@ -2102,8 +2103,8 @@ spgdoinsert(Relation index, SpGistState *state,
out.result.addNode.nodeLabel);
/*
- * Retry insertion into the enlarged node. We assume
- * that we'll get a MatchNode result this time.
+ * Retry insertion into the enlarged node. We assume that
+ * we'll get a MatchNode result this time.
*/
goto process_inner_tuple;
break;
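
doPickSplit() above assigns whole node groups to one of two leaf pages, because a leaf tuple chain may not cross pages: each group goes to the current page if it still fits, else to the new one. A first-fit sketch of that per-node assignment, with invented names and an added feasibility check that the real code handles elsewhere:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Assign each node group to page 0 (current) or page 1 (new).
     * Returns false if some group fits on neither page.
     */
    static bool
    assign_groups(const int *sizes, int n, int curspace, int newspace, int *sel)
    {
        for (int i = 0; i < n; i++)
        {
            if (sizes[i] <= curspace)
            {
                sel[i] = 0;                 /* signifies current page */
                curspace -= sizes[i];
            }
            else if (sizes[i] <= newspace)
            {
                sel[i] = 1;                 /* signifies new leaf page */
                newspace -= sizes[i];
            }
            else
                return false;   /* can't split a group across pages */
        }
        return true;
    }

    int
    main(void)
    {
        int         sizes[] = {400, 300, 500};
        int         sel[3];

        if (assign_groups(sizes, 3, 600, 800, sel))
            printf("%d %d %d\n", sel[0], sel[1], sel[2]);   /* 0 1 1 */
        return 0;
    }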
diff --git a/src/backend/access/spgist/spginsert.c b/src/backend/access/spgist/spginsert.c
index 8ff9245e17..456a71fbba 100644
--- a/src/backend/access/spgist/spginsert.c
+++ b/src/backend/access/spgist/spginsert.c
@@ -123,7 +123,7 @@ spgbuild(PG_FUNCTION_ARGS)
buildstate.spgstate.isBuild = true;
buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
- "SP-GiST build temporary context",
+ "SP-GiST build temporary context",
ALLOCSET_DEFAULT_MINSIZE,
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
diff --git a/src/backend/access/spgist/spgkdtreeproc.c b/src/backend/access/spgist/spgkdtreeproc.c
index adfe287581..db472db9d6 100644
--- a/src/backend/access/spgist/spgkdtreeproc.c
+++ b/src/backend/access/spgist/spgkdtreeproc.c
@@ -135,12 +135,12 @@ spg_kd_picksplit(PG_FUNCTION_ARGS)
/*
* Note: points that have coordinates exactly equal to coord may get
- * classified into either node, depending on where they happen to fall
- * in the sorted list. This is okay as long as the inner_consistent
- * function descends into both sides for such cases. This is better
- * than the alternative of trying to have an exact boundary, because
- * it keeps the tree balanced even when we have many instances of the
- * same point value. So we should never trigger the allTheSame logic.
+ * classified into either node, depending on where they happen to fall in
+ * the sorted list. This is okay as long as the inner_consistent function
+ * descends into both sides for such cases. This is better than the
+ * alternative of trying to have an exact boundary, because it keeps the
+ * tree balanced even when we have many instances of the same point value.
+ * So we should never trigger the allTheSame logic.
*/
for (i = 0; i < in->nTuples; i++)
{
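
spg_kd_picksplit() above sorts the points on one coordinate and splits at the middle element, letting exact ties fall on either side as long as the search descends into both sides; that keeps the tree balanced under heavy duplication. A sketch of that median split over plain doubles:

    #include <stdio.h>
    #include <stdlib.h>

    static int
    cmp_double(const void *a, const void *b)
    {
        double      x = *(const double *) a;
        double      y = *(const double *) b;

        return (x > y) - (x < y);
    }

    /* sort and return the median as the discriminating coordinate */
    static double
    kd_split(double *coords, int n)
    {
        qsort(coords, n, sizeof(double), cmp_double);
        return coords[n / 2];
    }

    int
    main(void)
    {
        double      xs[] = {2.0, 9.0, 4.0, 4.0, 7.0};
        double      coord = kd_split(xs, 5);

        /* points below coord go one way, the rest the other; ties either way */
        printf("split at %.1f\n", coord);   /* split at 4.0 */
        return 0;
    }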
diff --git a/src/backend/access/spgist/spgquadtreeproc.c b/src/backend/access/spgist/spgquadtreeproc.c
index 10fafe5864..5da265025e 100644
--- a/src/backend/access/spgist/spgquadtreeproc.c
+++ b/src/backend/access/spgist/spgquadtreeproc.c
@@ -253,8 +253,8 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS)
boxQuery = DatumGetBoxP(in->scankeys[i].sk_argument);
if (DatumGetBool(DirectFunctionCall2(box_contain_pt,
- PointerGetDatum(boxQuery),
- PointerGetDatum(centroid))))
+ PointerGetDatum(boxQuery),
+ PointerGetDatum(centroid))))
{
/* centroid is in box, so all quadrants are OK */
}
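
The quadtree code above partitions points into four quadrants around a centroid; a box query that contains the centroid matches all four quadrants at once. A sketch of the quadrant computation (the numbering here is illustrative, not necessarily the opclass's):

    #include <stdio.h>

    typedef struct { double x, y; } Point;

    /* quadrant of p relative to centroid c: 1=NE, 2=NW, 3=SW, 4=SE */
    static int
    quadrant(Point c, Point p)
    {
        if (p.x >= c.x)
            return (p.y >= c.y) ? 1 : 4;
        return (p.y >= c.y) ? 2 : 3;
    }

    int
    main(void)
    {
        Point       c = {0, 0};
        Point       p = {-1, 2};

        printf("%d\n", quadrant(c, p));     /* prints 2 */
        return 0;
    }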
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 7a3a96230d..2a083b7c38 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -24,7 +24,7 @@
typedef void (*storeRes_func) (SpGistScanOpaque so, ItemPointer heapPtr,
- Datum leafValue, bool isnull, bool recheck);
+ Datum leafValue, bool isnull, bool recheck);
typedef struct ScanStackEntry
{
@@ -88,7 +88,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
if (so->want_itup)
{
/* Must pfree IndexTuples to avoid memory leak */
- int i;
+ int i;
for (i = 0; i < so->nPtrs; i++)
pfree(so->indexTups[i]);
@@ -102,7 +102,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
* Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so.
*
* The point here is to eliminate null-related considerations from what the
- * opclass consistent functions need to deal with. We assume all SPGiST-
+ * opclass consistent functions need to deal with. We assume all SPGiST-
* indexable operators are strict, so any null RHS value makes the scan
* condition unsatisfiable. We also pull out any IS NULL/IS NOT NULL
* conditions; their effect is reflected into searchNulls/searchNonNulls.
@@ -177,6 +177,7 @@ spgbeginscan(PG_FUNCTION_ARGS)
{
Relation rel = (Relation) PG_GETARG_POINTER(0);
int keysz = PG_GETARG_INT32(1);
+
/* ScanKey scankey = (ScanKey) PG_GETARG_POINTER(2); */
IndexScanDesc scan;
SpGistScanOpaque so;
@@ -457,7 +458,7 @@ redirect:
MemoryContext oldCtx;
innerTuple = (SpGistInnerTuple) PageGetItem(page,
- PageGetItemId(page, offset));
+ PageGetItemId(page, offset));
if (innerTuple->tupstate != SPGIST_LIVE)
{
@@ -522,7 +523,7 @@ redirect:
for (i = 0; i < out.nNodes; i++)
{
- int nodeN = out.nodeNumbers[i];
+ int nodeN = out.nodeNumbers[i];
Assert(nodeN >= 0 && nodeN < in.nNodes);
if (ItemPointerIsValid(&nodes[nodeN]->t_tid))
@@ -598,7 +599,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
if (so->want_itup)
{
/*
- * Reconstruct desired IndexTuple. We have to copy the datum out of
+ * Reconstruct desired IndexTuple. We have to copy the datum out of
* the temp context anyway, so we may as well create the tuple here.
*/
so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc,
@@ -636,7 +637,7 @@ spggettuple(PG_FUNCTION_ARGS)
if (so->want_itup)
{
/* Must pfree IndexTuples to avoid memory leak */
- int i;
+ int i;
for (i = 0; i < so->nPtrs; i++)
pfree(so->indexTups[i]);
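
The scan-key preprocessing described above assumes every SP-GiST-indexable operator is strict, so a NULL comparison value makes the whole scan condition unsatisfiable, while IS NULL / IS NOT NULL conditions are folded into the searchNulls/searchNonNulls flags. A sketch of that classification with invented key and flag structs:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct
    {
        bool        is_null_test;   /* IS NULL / IS NOT NULL condition? */
        bool        want_null;      /* for null tests: IS NULL? */
        bool        arg_is_null;    /* for ordinary keys: NULL RHS value */
    } Key;

    typedef struct
    {
        bool        searchNulls;    /* scan should return NULL entries */
        bool        searchNonNulls; /* scan should return non-NULL entries */
        bool        qual_ok;        /* false => scan cannot match anything */
    } ScanFlags;

    static ScanFlags
    classify(const Key *keys, int n)
    {
        ScanFlags   f = {true, true, true};

        for (int i = 0; i < n; i++)
        {
            if (keys[i].is_null_test)
            {
                if (keys[i].want_null)
                    f.searchNonNulls = false;   /* IS NULL: nulls only */
                else
                    f.searchNulls = false;      /* IS NOT NULL */
            }
            else
            {
                f.searchNulls = false;          /* strict operator kills nulls */
                if (keys[i].arg_is_null)
                    f.qual_ok = false;          /* NULL RHS: unsatisfiable */
            }
        }
        return f;
    }

    int
    main(void)
    {
        Key         keys[] = {{false, false, true}};
        ScanFlags   f = classify(keys, 1);

        printf("qual_ok=%d\n", f.qual_ok);      /* 0: nothing can match */
        return 0;
    }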
diff --git a/src/backend/access/spgist/spgtextproc.c b/src/backend/access/spgist/spgtextproc.c
index 656015ea7e..520d7b24c5 100644
--- a/src/backend/access/spgist/spgtextproc.c
+++ b/src/backend/access/spgist/spgtextproc.c
@@ -26,7 +26,7 @@
* In the worst case, an inner tuple in a text suffix tree could have as many
* as 256 nodes (one for each possible byte value). Each node can take 16
* bytes on MAXALIGN=8 machines. The inner tuple must fit on an index page
- * of size BLCKSZ. Rather than assuming we know the exact amount of overhead
+ * of size BLCKSZ. Rather than assuming we know the exact amount of overhead
* imposed by page headers, tuple headers, etc, we leave 100 bytes for that
* (the actual overhead should be no more than 56 bytes at this writing, so
* there is slop in this number). The upshot is that the maximum safe prefix
@@ -209,9 +209,9 @@ spg_text_choose(PG_FUNCTION_ARGS)
{
/*
* Descend to existing node. (If in->allTheSame, the core code will
- * ignore our nodeN specification here, but that's OK. We still
- * have to provide the correct levelAdd and restDatum values, and
- * those are the same regardless of which node gets chosen by core.)
+ * ignore our nodeN specification here, but that's OK. We still have
+ * to provide the correct levelAdd and restDatum values, and those are
+ * the same regardless of which node gets chosen by core.)
*/
out->resultType = spgMatchNode;
out->result.matchNode.nodeN = i;
@@ -227,10 +227,10 @@ spg_text_choose(PG_FUNCTION_ARGS)
else if (in->allTheSame)
{
/*
- * Can't use AddNode action, so split the tuple. The upper tuple
- * has the same prefix as before and uses an empty node label for
- * the lower tuple. The lower tuple has no prefix and the same
- * node labels as the original tuple.
+ * Can't use AddNode action, so split the tuple. The upper tuple has
+ * the same prefix as before and uses an empty node label for the
+ * lower tuple. The lower tuple has no prefix and the same node
+ * labels as the original tuple.
*/
out->resultType = spgSplitTuple;
out->result.splitTuple.prefixHasPrefix = in->hasPrefix;
@@ -315,13 +315,13 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
if (commonLen < VARSIZE_ANY_EXHDR(texti))
nodes[i].c = *(uint8 *) (VARDATA_ANY(texti) + commonLen);
else
- nodes[i].c = '\0'; /* use \0 if string is all common */
+ nodes[i].c = '\0'; /* use \0 if string is all common */
nodes[i].i = i;
nodes[i].d = in->datums[i];
}
/*
- * Sort by label bytes so that we can group the values into nodes. This
+ * Sort by label bytes so that we can group the values into nodes. This
* also ensures that the nodes are ordered by label value, allowing the
* use of binary search in searchChar.
*/
@@ -371,7 +371,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
/*
* Reconstruct values represented at this tuple, including parent data,
- * prefix of this tuple if any, and the node label if any. in->level
+ * prefix of this tuple if any, and the node label if any. in->level
* should be the length of the previously reconstructed value, and the
* number of bytes added here is prefixSize or prefixSize + 1.
*
@@ -381,7 +381,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
* long-format reconstructed values.
*/
Assert(in->level == 0 ? DatumGetPointer(in->reconstructedValue) == NULL :
- VARSIZE_ANY_EXHDR(DatumGetPointer(in->reconstructedValue)) == in->level);
+ VARSIZE_ANY_EXHDR(DatumGetPointer(in->reconstructedValue)) == in->level);
maxReconstrLen = in->level + 1;
if (in->hasPrefix)
@@ -530,7 +530,7 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS)
}
else
{
- text *fullText = palloc(VARHDRSZ + fullLen);
+ text *fullText = palloc(VARHDRSZ + fullLen);
SET_VARSIZE(fullText, VARHDRSZ + fullLen);
fullValue = VARDATA(fullText);
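
spg_text_picksplit() above extracts the longest common prefix of the strings and then groups them by the byte that follows it, using '\0' as the label when a string is entirely common; sorting by that label byte is what allows binary search in the parent. A sketch of the prefix-and-label step:

    #include <stdio.h>
    #include <string.h>

    /* length of the longest prefix shared by all n strings */
    static int
    common_prefix_len(const char *const *strs, int n)
    {
        int         len = 0;

        for (;; len++)
        {
            char        c = strs[0][len];

            if (c == '\0')
                return len;
            for (int i = 1; i < n; i++)
                if (strs[i][len] != c)
                    return len;
        }
    }

    int
    main(void)
    {
        const char *strs[] = {"carrot", "carry", "car"};
        int         plen = common_prefix_len(strs, 3);

        printf("prefix len %d\n", plen);    /* 3: "car" */
        for (int i = 0; i < 3; i++)
        {
            /* node label: next byte after the common prefix, or '\0' */
            unsigned char label = (unsigned char) strs[i][plen];

            printf("label %d\n", label);    /* 114, 114, 0 */
        }
        return 0;
    }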
diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c
index 46a10f6a20..d56c2325fe 100644
--- a/src/backend/access/spgist/spgutils.c
+++ b/src/backend/access/spgist/spgutils.c
@@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When requesting an inner page, if we get one with the wrong parity,
* we just release the buffer and try again. We will get a different page
- * because GetFreeIndexPage will have marked the page used in FSM. The page
+ * because GetFreeIndexPage will have marked the page used in FSM. The page
* is entered in our local lastUsedPages cache, so there's some hope of
* making use of it later in this session, but otherwise we rely on VACUUM
* to eventually re-enter the page in FSM, making it available for recycling.
@@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index)
*
* When we return a buffer to the caller, the page is *not* entered into
* the lastUsedPages cache; we expect the caller will do so after it's taken
- * whatever space it will use. This is because after the caller has used up
+ * whatever space it will use. This is because after the caller has used up
* some space, the page might have less space than whatever was cached already
* so we'd rather not trash the old cache entry.
*/
@@ -275,7 +275,7 @@ allocNewBuffer(Relation index, int flags)
else
{
BlockNumber blkno = BufferGetBlockNumber(buffer);
- int blkFlags = GBUF_INNER_PARITY(blkno);
+ int blkFlags = GBUF_INNER_PARITY(blkno);
if ((flags & GBUF_PARITY_MASK) == blkFlags)
{
@@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
/*
* If possible, increase the space request to include relation's
- * fillfactor. This ensures that when we add unrelated tuples to a page,
+ * fillfactor. This ensures that when we add unrelated tuples to a page,
* we try to keep 100-fillfactor% available for adding tuples that are
* related to the ones already on it. But fillfactor mustn't cause an
* error for requests that would otherwise be legal.
@@ -664,7 +664,7 @@ spgFormInnerTuple(SpGistState *state, bool hasPrefix, Datum prefix,
errmsg("SPGiST inner tuple size %lu exceeds maximum %lu",
(unsigned long) size,
(unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData))),
- errhint("Values larger than a buffer page cannot be indexed.")));
+ errhint("Values larger than a buffer page cannot be indexed.")));
/*
* Check for overflow of header fields --- probably can't fail if the
@@ -801,7 +801,7 @@ SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size,
for (; i <= maxoff; i++)
{
SpGistDeadTuple it = (SpGistDeadTuple) PageGetItem(page,
- PageGetItemId(page, i));
+ PageGetItemId(page, i));
if (it->tupstate == SPGIST_PLACEHOLDER)
{
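
SpGistGetBuffer() above rejects inner pages with the wrong "parity": SP-GiST spreads inner tuples over three page classes by block number modulo 3, and a page obtained from the free list is retried until its class matches the request. A sketch of that parity check; the macro is modeled on GBUF_INNER_PARITY and the retry loop is a simplification of the release-and-re-ask dance:

    #include <stdio.h>

    #define INNER_PARITY(blkno)  ((blkno) % 3)

    /* keep drawing block numbers until one has the requested parity */
    static unsigned
    get_inner_block(unsigned wanted_parity, unsigned next_free)
    {
        while (INNER_PARITY(next_free) != wanted_parity)
            next_free++;        /* real code releases and re-asks the FSM */
        return next_free;
    }

    int
    main(void)
    {
        printf("%u\n", get_inner_block(2, 6));      /* prints 8 */
        return 0;
    }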
diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c
index 856790ee2a..27b55170cb 100644
--- a/src/backend/access/spgist/spgvacuum.c
+++ b/src/backend/access/spgist/spgvacuum.c
@@ -31,8 +31,8 @@
/* Entry in pending-list of TIDs we need to revisit */
typedef struct spgVacPendingItem
{
- ItemPointerData tid; /* redirection target to visit */
- bool done; /* have we dealt with this? */
+ ItemPointerData tid; /* redirection target to visit */
+ bool done; /* have we dealt with this? */
struct spgVacPendingItem *next; /* list link */
} spgVacPendingItem;
@@ -46,10 +46,10 @@ typedef struct spgBulkDeleteState
void *callback_state;
/* Additional working state */
- SpGistState spgstate; /* for SPGiST operations that need one */
- spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */
- TransactionId myXmin; /* for detecting newly-added redirects */
- TransactionId OldestXmin; /* for deciding a redirect is obsolete */
+ SpGistState spgstate; /* for SPGiST operations that need one */
+ spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */
+ TransactionId myXmin; /* for detecting newly-added redirects */
+ TransactionId OldestXmin; /* for deciding a redirect is obsolete */
BlockNumber lastFilledBlock; /* last non-deletable block */
} spgBulkDeleteState;
@@ -213,7 +213,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
* Figure out exactly what we have to do. We do this separately from
* actually modifying the page, mainly so that we have a representation
* that can be dumped into WAL and then the replay code can do exactly
- * the same thing. The output of this step consists of six arrays
+ * the same thing. The output of this step consists of six arrays
* describing four kinds of operations, to be performed in this order:
*
* toDead[]: tuple numbers to be replaced with DEAD tuples
@@ -276,8 +276,8 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
else if (prevLive == InvalidOffsetNumber)
{
/*
- * This is the first live tuple in the chain. It has
- * to move to the head position.
+ * This is the first live tuple in the chain. It has to move
+ * to the head position.
*/
moveSrc[xlrec.nMove] = j;
moveDest[xlrec.nMove] = i;
@@ -289,7 +289,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
else
{
/*
- * Second or later live tuple. Arrange to re-chain it to the
+ * Second or later live tuple. Arrange to re-chain it to the
* previous live one, if there was a gap.
*/
if (interveningDeletable)
@@ -353,11 +353,11 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
InvalidBlockNumber, InvalidOffsetNumber);
/*
- * We implement the move step by swapping the item pointers of the
- * source and target tuples, then replacing the newly-source tuples
- * with placeholders. This is perhaps unduly friendly with the page
- * data representation, but it's fast and doesn't risk page overflow
- * when a tuple to be relocated is large.
+ * We implement the move step by swapping the item pointers of the source
+ * and target tuples, then replacing the newly-source tuples with
+ * placeholders. This is perhaps unduly friendly with the page data
+ * representation, but it's fast and doesn't risk page overflow when a
+ * tuple to be relocated is large.
*/
for (i = 0; i < xlrec.nMove; i++)
{
@@ -518,7 +518,7 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer,
*/
for (i = max;
i >= FirstOffsetNumber &&
- (opaque->nRedirection > 0 || !hasNonPlaceholder);
+ (opaque->nRedirection > 0 || !hasNonPlaceholder);
i--)
{
SpGistDeadTuple dt;
@@ -651,9 +651,9 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno)
/*
* The root pages must never be deleted, nor marked as available in FSM,
- * because we don't want them ever returned by a search for a place to
- * put a new tuple. Otherwise, check for empty/deletable page, and
- * make sure FSM knows about it.
+ * because we don't want them ever returned by a search for a place to put
+ * a new tuple. Otherwise, check for empty/deletable page, and make sure
+ * FSM knows about it.
*/
if (!SpGistBlockIsRoot(blkno))
{
@@ -688,7 +688,7 @@ spgprocesspending(spgBulkDeleteState *bds)
Relation index = bds->info->index;
spgVacPendingItem *pitem;
spgVacPendingItem *nitem;
- BlockNumber blkno;
+ BlockNumber blkno;
Buffer buffer;
Page page;
@@ -741,11 +741,11 @@ spgprocesspending(spgBulkDeleteState *bds)
else
{
/*
- * On an inner page, visit the referenced inner tuple and add
- * all its downlinks to the pending list. We might have pending
- * items for more than one inner tuple on the same page (in fact
- * this is pretty likely given the way space allocation works),
- * so get them all while we are here.
+ * On an inner page, visit the referenced inner tuple and add all
+ * its downlinks to the pending list. We might have pending items
+ * for more than one inner tuple on the same page (in fact this is
+ * pretty likely given the way space allocation works), so get
+ * them all while we are here.
*/
for (nitem = pitem; nitem != NULL; nitem = nitem->next)
{
@@ -774,7 +774,7 @@ spgprocesspending(spgBulkDeleteState *bds)
{
/* transfer attention to redirect point */
spgAddPendingTID(bds,
- &((SpGistDeadTuple) innerTuple)->pointer);
+ &((SpGistDeadTuple) innerTuple)->pointer);
}
else
elog(ERROR, "unexpected SPGiST tuple state: %d",
@@ -825,8 +825,8 @@ spgvacuumscan(spgBulkDeleteState *bds)
* physical order (we hope the kernel will cooperate in providing
* read-ahead for speed). It is critical that we visit all leaf pages,
* including ones added after we start the scan, else we might fail to
- * delete some deletable tuples. See more extensive comments about
- * this in btvacuumscan().
+ * delete some deletable tuples. See more extensive comments about this
+ * in btvacuumscan().
*/
blkno = SPGIST_METAPAGE_BLKNO + 1;
for (;;)
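
spgprocesspending() above drains a pending list of redirection targets, and visiting one item may enqueue more; the loop therefore keeps rescanning until every entry is marked done. A generic worklist sketch of that pattern, with invented types and a fake "visit" that enqueues a successor (memory is deliberately leaked for brevity):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct PendingItem
    {
        int         target;     /* stand-in for a redirection TID */
        bool        done;
        struct PendingItem *next;
    } PendingItem;

    static PendingItem *pending = NULL;

    static void
    add_pending(int target)
    {
        PendingItem *item = malloc(sizeof(PendingItem));

        item->target = target;
        item->done = false;
        item->next = pending;
        pending = item;
    }

    /* visit items until none are left undone; visiting may add new items */
    static void
    process_pending(void)
    {
        bool        progress = true;

        while (progress)
        {
            progress = false;
            for (PendingItem *it = pending; it != NULL; it = it->next)
            {
                if (it->done)
                    continue;
                printf("visit %d\n", it->target);
                if (it->target > 0)     /* pretend we found a downlink */
                    add_pending(it->target - 1);
                it->done = true;
                progress = true;
                break;          /* restart scan: list head may have changed */
            }
        }
    }

    int
    main(void)
    {
        add_pending(2);
        process_pending();      /* visits 2, 1, 0 */
        return 0;
    }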
diff --git a/src/backend/access/spgist/spgxlog.c b/src/backend/access/spgist/spgxlog.c
index 8e87e2adc9..82f8c8b978 100644
--- a/src/backend/access/spgist/spgxlog.c
+++ b/src/backend/access/spgist/spgxlog.c
@@ -40,7 +40,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc)
}
/*
- * Add a leaf tuple, or replace an existing placeholder tuple. This is used
+ * Add a leaf tuple, or replace an existing placeholder tuple. This is used
* to replay SpGistPageAddNewItem() operations. If the offset points at an
* existing tuple, it had better be a placeholder tuple.
*/
@@ -50,7 +50,7 @@ addOrReplaceTuple(Page page, Item tuple, int size, OffsetNumber offset)
if (offset <= PageGetMaxOffsetNumber(page))
{
SpGistDeadTuple dt = (SpGistDeadTuple) PageGetItem(page,
- PageGetItemId(page, offset));
+ PageGetItemId(page, offset));
if (dt->tupstate != SPGIST_PLACEHOLDER)
elog(ERROR, "SPGiST tuple to be replaced is not a placeholder");
@@ -126,7 +126,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
if (xldata->newPage)
SpGistInitBuffer(buffer,
- SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+ SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@@ -143,7 +143,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
SpGistLeafTuple head;
head = (SpGistLeafTuple) PageGetItem(page,
- PageGetItemId(page, xldata->offnumHeadLeaf));
+ PageGetItemId(page, xldata->offnumHeadLeaf));
Assert(head->nextOffset == leafTuple->nextOffset);
head->nextOffset = xldata->offnumLeaf;
}
@@ -154,7 +154,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
PageIndexTupleDelete(page, xldata->offnumLeaf);
if (PageAddItem(page,
(Item) leafTuple, leafTuple->size,
- xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
+ xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
leafTuple->size);
}
@@ -180,7 +180,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple tuple;
tuple = (SpGistInnerTuple) PageGetItem(page,
- PageGetItemId(page, xldata->offnumParent));
+ PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(tuple, xldata->nodeI,
xldata->blknoLeaf, xldata->offnumLeaf);
@@ -229,7 +229,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
if (xldata->newPage)
SpGistInitBuffer(buffer,
- SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+ SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
if (!XLByteLE(lsn, PageGetLSN(page)))
{
@@ -261,7 +261,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
if (!XLByteLE(lsn, PageGetLSN(page)))
{
spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves,
- state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
+ state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
SPGIST_PLACEHOLDER,
xldata->blknoDst,
toInsert[nInsert - 1]);
@@ -286,7 +286,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple tuple;
tuple = (SpGistInnerTuple) PageGetItem(page,
- PageGetItemId(page, xldata->offnumParent));
+ PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(tuple, xldata->nodeI,
xldata->blknoDst, toInsert[nInsert - 1]);
@@ -413,7 +413,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
}
/*
- * Update parent downlink. Since parent could be in either of the
+ * Update parent downlink. Since parent could be in either of the
* previous two buffers, it's a bit tricky to determine which BKP bit
* applies.
*/
@@ -435,7 +435,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple innerTuple;
innerTuple = (SpGistInnerTuple) PageGetItem(page,
- PageGetItemId(page, xldata->offnumParent));
+ PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(innerTuple, xldata->nodeI,
xldata->blknoNew, xldata->offnumNew);
@@ -504,7 +504,7 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
{
PageIndexTupleDelete(page, xldata->offnumPrefix);
if (PageAddItem(page, (Item) prefixTuple, prefixTuple->size,
- xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
+ xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
elog(ERROR, "failed to add item of size %u to SPGiST index page",
prefixTuple->size);
@@ -571,7 +571,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
page = (Page) BufferGetPage(srcBuffer);
SpGistInitBuffer(srcBuffer,
- SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+ SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
/* don't update LSN etc till we're done with it */
}
else
@@ -587,8 +587,8 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
{
/*
* We have it a bit easier here than in doPickSplit(),
- * because we know the inner tuple's location already,
- * so we can inject the correct redirection tuple now.
+ * because we know the inner tuple's location already, so
+ * we can inject the correct redirection tuple now.
*/
if (!state.isBuild)
spgPageIndexMultiDelete(&state, page,
@@ -627,7 +627,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
page = (Page) BufferGetPage(destBuffer);
SpGistInitBuffer(destBuffer,
- SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+ SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
/* don't update LSN etc till we're done with it */
}
else
@@ -707,9 +707,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple parent;
parent = (SpGistInnerTuple) PageGetItem(page,
- PageGetItemId(page, xldata->offnumParent));
+ PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(parent, xldata->nodeI,
- xldata->blknoInner, xldata->offnumInner);
+ xldata->blknoInner, xldata->offnumInner);
}
PageSetLSN(page, lsn);
@@ -742,9 +742,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
SpGistInnerTuple parent;
parent = (SpGistInnerTuple) PageGetItem(page,
- PageGetItemId(page, xldata->offnumParent));
+ PageGetItemId(page, xldata->offnumParent));
spgUpdateNodeLink(parent, xldata->nodeI,
- xldata->blknoInner, xldata->offnumInner);
+ xldata->blknoInner, xldata->offnumInner);
PageSetLSN(page, lsn);
PageSetTLI(page, ThisTimeLineID);
@@ -803,7 +803,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
spgPageIndexMultiDelete(&state, page,
toPlaceholder, xldata->nPlaceholder,
- SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
+ SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
InvalidBlockNumber,
InvalidOffsetNumber);
@@ -821,7 +821,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
spgPageIndexMultiDelete(&state, page,
moveSrc, xldata->nMove,
- SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
+ SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
InvalidBlockNumber,
InvalidOffsetNumber);
@@ -906,7 +906,7 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
SpGistDeadTuple dt;
dt = (SpGistDeadTuple) PageGetItem(page,
- PageGetItemId(page, itemToPlaceholder[i]));
+ PageGetItemId(page, itemToPlaceholder[i]));
Assert(dt->tupstate == SPGIST_REDIRECT);
dt->tupstate = SPGIST_PLACEHOLDER;
ItemPointerSetInvalid(&dt->pointer);
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 33b5ca2d36..7f2f6921d5 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
* Testing during the PostgreSQL 9.2 development cycle revealed that on a
* large multi-processor system, it was possible to have more CLOG page
 * requests in flight at one time than the number of CLOG buffers which existed
- * at that time, which was hardcoded to 8. Further testing revealed that
+ * at that time, which was hardcoded to 8. Further testing revealed that
* performance dropped off with more than 32 CLOG buffers, possibly because
* the linear buffer search algorithm doesn't scale well.
*
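The scaling claim in that comment is easy to see in miniature: in a linearly searched buffer pool every lookup touches up to N slots, so misses and victim selection both get slower as N grows. A self-contained sketch with invented slot and constant names (this is not the slru.c code):

#include <stddef.h>

#define NUM_CLOG_BUFFERS 32     /* illustrative ceiling from the comment */

typedef struct
{
    int   pageno;               /* page held in this slot, or -1 if empty */
    char *data;
} ClogSlot;

static ClogSlot slots[NUM_CLOG_BUFFERS];

/* Linear search: O(NUM_CLOG_BUFFERS) per lookup, which is why raising
 * the buffer count past ~32 stops paying off. */
static char *
clog_lookup(int pageno)
{
    for (int i = 0; i < NUM_CLOG_BUFFERS; i++)
    {
        if (slots[i].pageno == pageno)
            return slots[i].data;   /* hit */
    }
    return NULL;                    /* miss: caller must read the page in */
}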
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index a8e3f19119..dd69c232eb 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -903,12 +903,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
{
int slotno;
int cur_count;
- int bestvalidslot = 0; /* keep compiler quiet */
+ int bestvalidslot = 0; /* keep compiler quiet */
int best_valid_delta = -1;
- int best_valid_page_number = 0; /* keep compiler quiet */
- int bestinvalidslot = 0; /* keep compiler quiet */
+ int best_valid_page_number = 0; /* keep compiler quiet */
+ int bestinvalidslot = 0; /* keep compiler quiet */
int best_invalid_delta = -1;
- int best_invalid_page_number = 0; /* keep compiler quiet */
+ int best_invalid_page_number = 0; /* keep compiler quiet */
/* See if page already has a buffer assigned */
for (slotno = 0; slotno < shared->num_slots; slotno++)
@@ -920,15 +920,15 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If we find any EMPTY slot, just select that one. Else choose a
- * victim page to replace. We normally take the least recently used
+ * victim page to replace. We normally take the least recently used
* valid page, but we will never take the slot containing
- * latest_page_number, even if it appears least recently used. We
+ * latest_page_number, even if it appears least recently used. We
* will select a slot that is already I/O busy only if there is no
* other choice: a read-busy slot will not be least recently used once
* the read finishes, and waiting for an I/O on a write-busy slot is
* inferior to just picking some other slot. Testing shows the slot
- * we pick instead will often be clean, allowing us to begin a read
- * at once.
+ * we pick instead will often be clean, allowing us to begin a read at
+ * once.
*
* Normally the page_lru_count values will all be different and so
* there will be a well-defined LRU page. But since we allow
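A condensed sketch of the victim-selection policy this comment describes, with simplified stand-ins for the shared bookkeeping (the real function tracks recency with cur_lru_count deltas and retries after I/O waits):

typedef enum { SLOT_EMPTY, SLOT_VALID, SLOT_IO_BUSY } SlotStatus;

typedef struct
{
    SlotStatus status;
    int        pageno;
    int        age;             /* larger = less recently used (simplified) */
} Slot;

/* Returns the victim slot index, or -1 if every candidate is I/O busy
 * (in which case the real code waits for an I/O and retries). */
static int
select_victim(const Slot *slot, int nslots, int latest_page_number)
{
    int best = -1, best_age = -1;

    for (int i = 0; i < nslots; i++)
    {
        if (slot[i].status == SLOT_EMPTY)
            return i;                   /* an empty slot always wins */
        if (slot[i].pageno == latest_page_number)
            continue;                   /* never evict the latest page */
        if (slot[i].status == SLOT_IO_BUSY)
            continue;                   /* avoid I/O-busy slots if possible */
        if (slot[i].age > best_age)
        {
            best = i;
            best_age = slot[i].age;
        }
    }
    return best;
}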
@@ -997,10 +997,10 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* If all pages (except possibly the latest one) are I/O busy, we'll
- * have to wait for an I/O to complete and then retry. In that unhappy
- * case, we choose to wait for the I/O on the least recently used slot,
- * on the assumption that it was likely initiated first of all the I/Os
- * in progress and may therefore finish first.
+ * have to wait for an I/O to complete and then retry. In that
+ * unhappy case, we choose to wait for the I/O on the least recently
+ * used slot, on the assumption that it was likely initiated first of
+ * all the I/Os in progress and may therefore finish first.
*/
if (best_valid_delta < 0)
{
@@ -1168,20 +1168,20 @@ restart:;
/*
* SlruScanDirectory callback
- * This callback reports true if there's any segment prior to the one
- * containing the page passed as "data".
+ * This callback reports true if there's any segment prior to the one
+ * containing the page passed as "data".
*/
bool
SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
{
- int cutoffPage = *(int *) data;
+ int cutoffPage = *(int *) data;
cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
if (ctl->PagePrecedes(segpage, cutoffPage))
- return true; /* found one; don't iterate any more */
+ return true; /* found one; don't iterate any more */
- return false; /* keep going */
+ return false; /* keep going */
}
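For context, these callbacks are driven by SlruScanDirectory(), which stops the scan as soon as a callback returns true and passes that result back to the caller. Assuming the 9.2 slru.h signatures, a caller-side use of the presence check above might look like this (the wrapper function is invented for illustration):

#include "access/slru.h"

/* Hypothetical caller: does any segment precede the cutoff page? */
static bool
earlier_segment_exists(SlruCtl ctl, int cutoffPage)
{
    /* true as soon as SlruScanDirCbReportPresence reports a match */
    return SlruScanDirectory(ctl, SlruScanDirCbReportPresence, &cutoffPage);
}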
/*
@@ -1191,8 +1191,8 @@ SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data
static bool
SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
{
- char path[MAXPGPATH];
- int cutoffPage = *(int *) data;
+ char path[MAXPGPATH];
+ int cutoffPage = *(int *) data;
if (ctl->PagePrecedes(segpage, cutoffPage))
{
@@ -1202,7 +1202,7 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
unlink(path);
}
- return false; /* keep going */
+ return false; /* keep going */
}
/*
@@ -1212,14 +1212,14 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
bool
SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
snprintf(path, MAXPGPATH, "%s/%s", ctl->Dir, filename);
ereport(DEBUG2,
(errmsg("removing file \"%s\"", path)));
unlink(path);
- return false; /* keep going */
+ return false; /* keep going */
}
/*
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 0b41a76a32..b94fae3740 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -360,8 +360,9 @@ static void
GXactLoadSubxactData(GlobalTransaction gxact, int nsubxacts,
TransactionId *children)
{
- PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+
/* We need no extra lock since the GXACT isn't valid yet */
if (nsubxacts > PGPROC_MAX_CACHED_SUBXIDS)
{
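The proc/pgxact pair fetched above is the pattern repeated throughout this file: 9.2 split the hot transaction fields out of PGPROC into a dense, parallel PGXACT array indexed by the same pgprocno. A toy model of why the split helps (struct contents invented; not the real definitions):

typedef struct { unsigned xid, xmin; } ToyPgXact;    /* hot, scanned often */
typedef struct { int pid; /* ...many cold fields... */ } ToyPgProc;

static ToyPgProc allProcs[64];
static ToyPgXact allPgXact[64];  /* same index ("pgprocno") as allProcs */

/* Snapshot-style scans touch only the small hot array, so far fewer
 * cache lines move than if xid/xmin still lived inside ToyPgProc. */
static unsigned
toy_oldest_xmin(int nprocs)
{
    unsigned oldest = ~0u;

    for (int i = 0; i < nprocs; i++)
        if (allPgXact[i].xmin != 0 && allPgXact[i].xmin < oldest)
            oldest = allPgXact[i].xmin;
    return oldest;
}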
@@ -410,7 +411,7 @@ LockGXact(const char *gid, Oid user)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+ PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
/* Ignore not-yet-valid GIDs */
if (!gxact->valid)
@@ -523,7 +524,7 @@ TransactionIdIsPrepared(TransactionId xid)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (gxact->valid && pgxact->xid == xid)
{
@@ -648,8 +649,8 @@ pg_prepared_xact(PG_FUNCTION_ARGS)
while (status->array != NULL && status->currIdx < status->ngxacts)
{
GlobalTransaction gxact = &status->array[status->currIdx++];
- PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
Datum values[5];
bool nulls[5];
HeapTuple tuple;
@@ -719,7 +720,7 @@ TwoPhaseGetDummyProc(TransactionId xid)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (pgxact->xid == xid)
{
@@ -850,8 +851,8 @@ save_state_data(const void *data, uint32 len)
void
StartPrepare(GlobalTransaction gxact)
{
- PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
TransactionId xid = pgxact->xid;
TwoPhaseFileHeader hdr;
TransactionId *children;
@@ -1063,9 +1064,9 @@ EndPrepare(GlobalTransaction gxact)
errmsg("could not close two-phase state file: %m")));
/*
- * Mark the prepared transaction as valid. As soon as xact.c marks MyPgXact
- * as not running our XID (which it will do immediately after this
- * function returns), others can commit/rollback the xact.
+ * Mark the prepared transaction as valid. As soon as xact.c marks
+ * MyPgXact as not running our XID (which it will do immediately after
+ * this function returns), others can commit/rollback the xact.
*
* NB: a side effect of this is to make a dummy ProcArray entry for the
* prepared XID. This must happen before we clear the XID from MyPgXact,
@@ -1551,7 +1552,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+ PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (gxact->valid &&
XLByteLE(gxact->prepare_lsn, redo_horizon))
@@ -1707,7 +1708,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
* XID, and they may force us to advance nextXid.
*
* We don't expect anyone else to modify nextXid, hence we don't
- * need to hold a lock while examining it. We still acquire the
+ * need to hold a lock while examining it. We still acquire the
* lock to modify it, though.
*/
subxids = (TransactionId *)
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 892a46abc3..7abf9343be 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -174,8 +174,8 @@ GetNewTransactionId(bool isSubXact)
* latestCompletedXid is present in the ProcArray, which is essential for
* correct OldestXmin tracking; see src/backend/access/transam/README.
*
- * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we are
- * relying on fetch/store of an xid to be atomic, else other backends
+ * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we
+ * are relying on fetch/store of an xid to be atomic, else other backends
* might see a partially-set xid here. But holding both locks at once
* would be a nasty concurrency hit. So for now, assume atomicity.
*
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 659b53524c..8f00186dd7 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1019,6 +1019,7 @@ RecordTransactionCommit(void)
XLogRecData rdata[4];
int lastrdata = 0;
xl_xact_commit xlrec;
+
/*
* Set flags required for recovery processing of commits.
*/
@@ -1073,7 +1074,8 @@ RecordTransactionCommit(void)
{
XLogRecData rdata[2];
int lastrdata = 0;
- xl_xact_commit_compact xlrec;
+ xl_xact_commit_compact xlrec;
+
xlrec.xact_time = xactStopTimestamp;
xlrec.nsubxacts = nchildren;
rdata[0].data = (char *) (&xlrec);
@@ -2102,7 +2104,7 @@ PrepareTransaction(void)
if (XactHasExportedSnapshots())
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot PREPARE a transaction that has exported snapshots")));
+ errmsg("cannot PREPARE a transaction that has exported snapshots")));
/* Prevent cancel/die interrupt while cleaning up */
HOLD_INTERRUPTS();
@@ -2602,10 +2604,10 @@ CommitTransactionCommand(void)
break;
/*
- * We were issued a RELEASE command, so we end the
- * current subtransaction and return to the parent transaction.
- * The parent might be ended too, so repeat till we find an
- * INPROGRESS transaction or subtransaction.
+ * We were issued a RELEASE command, so we end the current
+ * subtransaction and return to the parent transaction. The parent
+ * might be ended too, so repeat till we find an INPROGRESS
+ * transaction or subtransaction.
*/
case TBLOCK_SUBRELEASE:
do
@@ -2623,9 +2625,9 @@ CommitTransactionCommand(void)
* hierarchy and perform final commit. We do this by rolling up
* any subtransactions into their parent, which leads to O(N^2)
* operations with respect to resource owners - this isn't that
- * bad until we approach a thousands of savepoints but is necessary
- * for correctness should after triggers create new resource
- * owners.
+ * bad until we approach thousands of savepoints but is
+ * necessary for correctness should after triggers create new
+ * resource owners.
*/
case TBLOCK_SUBCOMMIT:
do
@@ -4551,11 +4553,11 @@ xactGetCommittedChildren(TransactionId **ptr)
*/
static void
xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
- TransactionId *sub_xids, int nsubxacts,
- SharedInvalidationMessage *inval_msgs, int nmsgs,
- RelFileNode *xnodes, int nrels,
- Oid dbId, Oid tsId,
- uint32 xinfo)
+ TransactionId *sub_xids, int nsubxacts,
+ SharedInvalidationMessage *inval_msgs, int nmsgs,
+ RelFileNode *xnodes, int nrels,
+ Oid dbId, Oid tsId,
+ uint32 xinfo)
{
TransactionId max_xid;
int i;
@@ -4659,12 +4661,13 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
XLogFlush(lsn);
}
+
/*
* Utility function to call xact_redo_commit_internal after breaking down xlrec
*/
static void
xact_redo_commit(xl_xact_commit *xlrec,
- TransactionId xid, XLogRecPtr lsn)
+ TransactionId xid, XLogRecPtr lsn)
{
TransactionId *subxacts;
SharedInvalidationMessage *inval_msgs;
@@ -4675,11 +4678,11 @@ xact_redo_commit(xl_xact_commit *xlrec,
inval_msgs = (SharedInvalidationMessage *) &(subxacts[xlrec->nsubxacts]);
xact_redo_commit_internal(xid, lsn, subxacts, xlrec->nsubxacts,
- inval_msgs, xlrec->nmsgs,
- xlrec->xnodes, xlrec->nrels,
- xlrec->dbId,
- xlrec->tsId,
- xlrec->xinfo);
+ inval_msgs, xlrec->nmsgs,
+ xlrec->xnodes, xlrec->nrels,
+ xlrec->dbId,
+ xlrec->tsId,
+ xlrec->xinfo);
}
/*
@@ -4687,14 +4690,14 @@ xact_redo_commit(xl_xact_commit *xlrec,
*/
static void
xact_redo_commit_compact(xl_xact_commit_compact *xlrec,
- TransactionId xid, XLogRecPtr lsn)
+ TransactionId xid, XLogRecPtr lsn)
{
xact_redo_commit_internal(xid, lsn, xlrec->subxacts, xlrec->nsubxacts,
- NULL, 0, /* inval msgs */
- NULL, 0, /* relfilenodes */
- InvalidOid, /* dbId */
- InvalidOid, /* tsId */
- 0); /* xinfo */
+ NULL, 0, /* inval msgs */
+ NULL, 0, /* relfilenodes */
+ InvalidOid, /* dbId */
+ InvalidOid, /* tsId */
+ 0); /* xinfo */
}
/*
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 0f2678cfda..bcb71c45b2 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -344,10 +344,10 @@ typedef struct XLogCtlInsert
/*
* fullPageWrites is the master copy used by all backends to determine
- * whether to write full-page to WAL, instead of using process-local
- * one. This is required because, when full_page_writes is changed
- * by SIGHUP, we must WAL-log it before it actually affects
- * WAL-logging by backends. Checkpointer sets at startup or after SIGHUP.
+ * whether to write full-page images to WAL, instead of using the
+ * process-local copy. This is required because, when full_page_writes is
+ * changed by SIGHUP, we must WAL-log it before it actually affects
+ * WAL-logging by backends. The checkpointer sets it at startup or after
+ * SIGHUP.
*/
bool fullPageWrites;
@@ -455,8 +455,11 @@ typedef struct XLogCtlData
XLogRecPtr recoveryLastRecPtr;
/* timestamp of last COMMIT/ABORT record replayed (or being replayed) */
TimestampTz recoveryLastXTime;
- /* timestamp of when we started replaying the current chunk of WAL data,
- * only relevant for replication or archive recovery */
+
+ /*
+ * timestamp of when we started replaying the current chunk of WAL data,
+ * only relevant for replication or archive recovery
+ */
TimestampTz currentChunkStartTime;
/* end of the last record restored from the archive */
XLogRecPtr restoreLastRecPtr;
@@ -580,7 +583,7 @@ static bool updateMinRecoveryPoint = true;
* to replay all the WAL, so reachedConsistency is never set. During archive
* recovery, the database is consistent once minRecoveryPoint is reached.
*/
-bool reachedConsistency = false;
+bool reachedConsistency = false;
static bool InRedo = false;
@@ -750,8 +753,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* insert lock, but it seems better to avoid doing CRC calculations while
* holding the lock.
*
- * We add entries for backup blocks to the chain, so that they don't
- * need any special treatment in the critical section where the chunks are
+ * We add entries for backup blocks to the chain, so that they don't need
+ * any special treatment in the critical section where the chunks are
* copied into the WAL buffers. Those entries have to be unlinked from the
* chain if we have to loop back here.
*/
@@ -896,10 +899,10 @@ begin:;
/*
* Calculate CRC of the data, including all the backup blocks
*
- * Note that the record header isn't added into the CRC initially since
- * we don't know the prev-link yet. Thus, the CRC will represent the CRC
- * of the whole record in the order: rdata, then backup blocks, then
- * record header.
+ * Note that the record header isn't added into the CRC initially since we
+ * don't know the prev-link yet. Thus, the CRC will represent the CRC of
+ * the whole record in the order: rdata, then backup blocks, then record
+ * header.
*/
INIT_CRC32(rdata_crc);
for (rdt = rdata; rdt != NULL; rdt = rdt->next)
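Concretely, the order the reworded comment describes is: rdata chunks first, backup blocks second, record header last (only then is the prev-link known). A compressed sketch of that sequence, abbreviating the backup-block step and assuming the 9.2 header layout in which xl_crc is the first field (not the verbatim xlog.c code):

    INIT_CRC32(rdata_crc);
    for (rdt = rdata; rdt != NULL; rdt = rdt->next)
        COMP_CRC32(rdata_crc, rdt->data, rdt->len);   /* 1: payload chunks */

    /* 2: backup-block headers and page images are accumulated here ... */

    /* 3: header last, skipping the CRC field itself, once xl_prev is set */
    COMP_CRC32(rdata_crc, (char *) record + sizeof(pg_crc32),
               SizeOfXLogRecord - sizeof(pg_crc32));
    FIN_CRC32(rdata_crc);
    record->xl_crc = rdata_crc;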
@@ -948,10 +951,10 @@ begin:;
}
/*
- * Also check to see if fullPageWrites or forcePageWrites was just turned on;
- * if we weren't already doing full-page writes then go back and recompute.
- * (If it was just turned off, we could recompute the record without full pages,
- * but we choose not to bother.)
+ * Also check to see if fullPageWrites or forcePageWrites was just turned
+ * on; if we weren't already doing full-page writes then go back and
+ * recompute. (If it was just turned off, we could recompute the record
+ * without full pages, but we choose not to bother.)
*/
if ((Insert->fullPageWrites || Insert->forcePageWrites) && !doPageWrites)
{
@@ -1575,15 +1578,15 @@ AdvanceXLInsertBuffer(bool new_segment)
* WAL records beginning in this page have removable backup blocks. This
* allows the WAL archiver to know whether it is safe to compress archived
* WAL data by transforming full-block records into the non-full-block
- * format. It is sufficient to record this at the page level because we
+ * format. It is sufficient to record this at the page level because we
* force a page switch (in fact a segment switch) when starting a backup,
* so the flag will be off before any records can be written during the
- * backup. At the end of a backup, the last page will be marked as all
+ * backup. At the end of a backup, the last page will be marked as all
* unsafe when perhaps only part is unsafe, but at worst the archiver
* would miss the opportunity to compress a few records.
*/
if (!Insert->forcePageWrites)
- NewPage->xlp_info |= XLP_BKP_REMOVABLE;
+ NewPage->xlp_info |= XLP_BKP_REMOVABLE;
/*
* If first page of an XLOG segment file, make it a long header.
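XLP_BKP_REMOVABLE, set just above, is page-level metadata aimed at external archive-compression tools. A hypothetical consumer-side check (the helper function is invented for illustration, though the flag and the page-header field are the real names from xlog_internal.h):

#include "access/xlog_internal.h"   /* XLogPageHeaderData, XLP_BKP_REMOVABLE */

/* May full-page images on this WAL page be stripped when compressing
 * the archived segment?  (Invented helper, not a PostgreSQL API.) */
static bool
page_backup_blocks_removable(const XLogPageHeaderData *hdr)
{
    return (hdr->xlp_info & XLP_BKP_REMOVABLE) != 0;
}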
@@ -1827,11 +1830,11 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
Write->lastSegSwitchTime = (pg_time_t) time(NULL);
/*
- * Request a checkpoint if we've consumed too
- * much xlog since the last one. For speed, we first check
- * using the local copy of RedoRecPtr, which might be out of
- * date; if it looks like a checkpoint is needed, forcibly
- * update RedoRecPtr and recheck.
+ * Request a checkpoint if we've consumed too much xlog since
+ * the last one. For speed, we first check using the local
+ * copy of RedoRecPtr, which might be out of date; if it looks
+ * like a checkpoint is needed, forcibly update RedoRecPtr and
+ * recheck.
*/
if (IsUnderPostmaster &&
XLogCheckpointNeeded(openLogId, openLogSeg))
@@ -1931,7 +1934,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
/*
* If the WALWriter is sleeping, we should kick it to make it come out of
- * low-power mode. Otherwise, determine whether there's a full page of
+ * low-power mode. Otherwise, determine whether there's a full page of
* WAL available to write.
*/
if (!sleeping)
@@ -1945,9 +1948,9 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
}
/*
- * Nudge the WALWriter: it has a full page of WAL to write, or we want
- * it to come out of low-power mode so that this async commit will reach
- * disk within the expected amount of time.
+ * Nudge the WALWriter: it has a full page of WAL to write, or we want it
+ * to come out of low-power mode so that this async commit will reach disk
+ * within the expected amount of time.
*/
if (ProcGlobal->walwriterLatch)
SetLatch(ProcGlobal->walwriterLatch);
@@ -2076,8 +2079,8 @@ XLogFlush(XLogRecPtr record)
WriteRqstPtr = record;
/*
- * Now wait until we get the write lock, or someone else does the
- * flush for us.
+ * Now wait until we get the write lock, or someone else does the flush
+ * for us.
*/
for (;;)
{
@@ -2182,7 +2185,7 @@ XLogFlush(XLogRecPtr record)
* block, and flush through the latest one of those. Thus, if async commits
* are not being used, we will flush complete blocks only. We can guarantee
* that async commits reach disk after at most three cycles; normally only
- * one or two. (When flushing complete blocks, we allow XLogWrite to write
+ * one or two. (When flushing complete blocks, we allow XLogWrite to write
* "flexibly", meaning it can stop at the end of the buffer ring; this makes a
* difference only with very high load or long wal_writer_delay, but imposes
* one extra cycle for the worst case for async commits.)
@@ -2273,7 +2276,8 @@ XLogBackgroundFlush(void)
/*
* If we wrote something then we have something to send to standbys also,
- * otherwise the replication delay become around 7s with just async commit.
+ * otherwise the replication delay becomes around 7s with just async
+ * commit.
*/
if (wrote_something)
WalSndWakeup();
@@ -2776,17 +2780,17 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
}
/*
- * If the segment was fetched from archival storage, replace
- * the existing xlog segment (if any) with the archival version.
+ * If the segment was fetched from archival storage, replace the existing
+ * xlog segment (if any) with the archival version.
*/
if (source == XLOG_FROM_ARCHIVE)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- XLogRecPtr endptr;
- char xlogfpath[MAXPGPATH];
- bool reload = false;
- struct stat statbuf;
+ XLogRecPtr endptr;
+ char xlogfpath[MAXPGPATH];
+ bool reload = false;
+ struct stat statbuf;
XLogFilePath(xlogfpath, tli, log, seg);
if (stat(xlogfpath, &statbuf) == 0)
@@ -2801,9 +2805,9 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
if (rename(path, xlogfpath) < 0)
ereport(ERROR,
- (errcode_for_file_access(),
- errmsg("could not rename file \"%s\" to \"%s\": %m",
- path, xlogfpath)));
+ (errcode_for_file_access(),
+ errmsg("could not rename file \"%s\" to \"%s\": %m",
+ path, xlogfpath)));
/*
* If the existing segment was replaced, since walsenders might have
@@ -3812,7 +3816,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt)
RecPtr = &tmpRecPtr;
/*
- * RecPtr is pointing to end+1 of the previous WAL record. We must
+ * RecPtr is pointing to end+1 of the previous WAL record. We must
* advance it if necessary to where the next record starts. First,
* align to next page if no more records can fit on the current page.
*/
@@ -5389,10 +5393,10 @@ readRecoveryCommandFile(void)
}
if (rtli)
ereport(DEBUG2,
- (errmsg_internal("recovery_target_timeline = %u", rtli)));
+ (errmsg_internal("recovery_target_timeline = %u", rtli)));
else
ereport(DEBUG2,
- (errmsg_internal("recovery_target_timeline = latest")));
+ (errmsg_internal("recovery_target_timeline = latest")));
}
else if (strcmp(item->name, "recovery_target_xid") == 0)
{
@@ -5404,7 +5408,7 @@ readRecoveryCommandFile(void)
item->value)));
ereport(DEBUG2,
(errmsg_internal("recovery_target_xid = %u",
- recoveryTargetXid)));
+ recoveryTargetXid)));
recoveryTarget = RECOVERY_TARGET_XID;
}
else if (strcmp(item->name, "recovery_target_time") == 0)
@@ -5428,7 +5432,7 @@ readRecoveryCommandFile(void)
Int32GetDatum(-1)));
ereport(DEBUG2,
(errmsg_internal("recovery_target_time = '%s'",
- timestamptz_to_str(recoveryTargetTime))));
+ timestamptz_to_str(recoveryTargetTime))));
}
else if (strcmp(item->name, "recovery_target_name") == 0)
{
@@ -5576,13 +5580,13 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
}
/*
- * If we are establishing a new timeline, we have to copy data from
- * the last WAL segment of the old timeline to create a starting WAL
- * segment for the new timeline.
+ * If we are establishing a new timeline, we have to copy data from the
+ * last WAL segment of the old timeline to create a starting WAL segment
+ * for the new timeline.
*
- * Notify the archiver that the last WAL segment of the old timeline
- * is ready to copy to archival storage. Otherwise, it is not archived
- * for a while.
+ * Notify the archiver that the last WAL segment of the old timeline is
+ * ready to copy to archival storage. Otherwise, it is not archived for a
+ * while.
*/
if (endTLI != ThisTimeLineID)
{
@@ -5604,8 +5608,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
XLogArchiveCleanup(xlogpath);
/*
- * Since there might be a partial WAL segment named RECOVERYXLOG,
- * get rid of it.
+ * Since there might be a partial WAL segment named RECOVERYXLOG, get rid
+ * of it.
*/
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
unlink(recoveryPath); /* ignore any error */
@@ -6323,11 +6327,11 @@ StartupXLOG(void)
/*
* Set backupStartPoint if we're starting recovery from a base backup.
*
- * Set backupEndPoint and use minRecoveryPoint as the backup end location
- * if we're starting recovery from a base backup which was taken from
- * the standby. In this case, the database system status in pg_control must
- * indicate DB_IN_ARCHIVE_RECOVERY. If not, which means that backup
- * is corrupted, so we cancel recovery.
+ * Set backupEndPoint and use minRecoveryPoint as the backup end
+ * location if we're starting recovery from a base backup which was
+ * taken from the standby. In this case, the database system status in
+ * pg_control must indicate DB_IN_ARCHIVE_RECOVERY. If not, the
+ * backup is corrupted, so we cancel recovery.
*/
if (haveBackupLabel)
{
@@ -6340,7 +6344,7 @@ StartupXLOG(void)
ereport(FATAL,
(errmsg("backup_label contains inconsistent data with control file"),
errhint("This means that the backup is corrupted and you will "
- "have to use another backup for recovery.")));
+ "have to use another backup for recovery.")));
ControlFile->backupEndPoint = ControlFile->minRecoveryPoint;
}
}
@@ -6383,15 +6387,15 @@ StartupXLOG(void)
/*
* We're in recovery, so unlogged relations may be trashed and must be
- * reset. This should be done BEFORE allowing Hot Standby connections,
- * so that read-only backends don't try to read whatever garbage is
- * left over from before.
+ * reset. This should be done BEFORE allowing Hot Standby
+ * connections, so that read-only backends don't try to read whatever
+ * garbage is left over from before.
*/
ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
/*
- * Likewise, delete any saved transaction snapshot files that got
- * left behind by crashed backends.
+ * Likewise, delete any saved transaction snapshot files that got left
+ * behind by crashed backends.
*/
DeleteAllExportedSnapshotFiles();
@@ -6489,10 +6493,11 @@ StartupXLOG(void)
/*
* Let postmaster know we've started redo now, so that it can launch
- * checkpointer to perform restartpoints. We don't bother during crash
- * recovery as restartpoints can only be performed during archive
- * recovery. And we'd like to keep crash recovery simple, to avoid
- * introducing bugs that could affect you when recovering after crash.
+ * checkpointer to perform restartpoints. We don't bother during
+ * crash recovery as restartpoints can only be performed during
+ * archive recovery. And we'd like to keep crash recovery simple, to
+ * avoid introducing bugs that could affect you when recovering after a
+ * crash.
*
* After this point, we can no longer assume that we're the only
* process in addition to postmaster! Also, fsync requests are
@@ -6649,8 +6654,8 @@ StartupXLOG(void)
{
/*
* We have reached the end of base backup, the point where
- * the minimum recovery point in pg_control indicates.
- * The data on disk is now consistent. Reset backupStartPoint
+ * the minimum recovery point in pg_control indicates. The
+ * data on disk is now consistent. Reset backupStartPoint
* and backupEndPoint.
*/
elog(DEBUG1, "end of backup reached");
@@ -6863,9 +6868,9 @@ StartupXLOG(void)
oldestActiveXID = PrescanPreparedTransactions(NULL, NULL);
/*
- * Update full_page_writes in shared memory and write an
- * XLOG_FPW_CHANGE record before resource manager writes cleanup
- * WAL records or checkpoint record is written.
+ * Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
+ * record before resource manager writes cleanup WAL records or checkpoint
+ * record is written.
*/
Insert->fullPageWrites = lastFullPageWrites;
LocalSetXLogInsertAllowed();
@@ -6954,8 +6959,8 @@ StartupXLOG(void)
LWLockRelease(ProcArrayLock);
/*
- * Start up the commit log and subtrans, if not already done for
- * hot standby.
+ * Start up the commit log and subtrans, if not already done for hot
+ * standby.
*/
if (standbyState == STANDBY_DISABLED)
{
@@ -7705,9 +7710,9 @@ CreateCheckPoint(int flags)
checkPoint.time = (pg_time_t) time(NULL);
/*
- * For Hot Standby, derive the oldestActiveXid before we fix the redo pointer.
- * This allows us to begin accumulating changes to assemble our starting
- * snapshot of locks and transactions.
+ * For Hot Standby, derive the oldestActiveXid before we fix the redo
+ * pointer. This allows us to begin accumulating changes to assemble our
+ * starting snapshot of locks and transactions.
*/
if (!shutdown && XLogStandbyInfoActive())
checkPoint.oldestActiveXid = GetOldestActiveTransactionId();
@@ -8062,7 +8067,7 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
volatile XLogCtlData *xlogctl = XLogCtl;
/*
- * Is it safe to restartpoint? We must ask each of the resource managers
+ * Is it safe to restartpoint? We must ask each of the resource managers
* whether they have any partial state information that might prevent a
* correct restart from this point. If so, we skip this opportunity, but
* return at the next checkpoint record for another try.
@@ -8082,10 +8087,11 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
}
/*
- * Also refrain from creating a restartpoint if we have seen any references
- * to non-existent pages. Restarting recovery from the restartpoint would
- * not see the references, so we would lose the cross-check that the pages
- * belonged to a relation that was dropped later.
+ * Also refrain from creating a restartpoint if we have seen any
+ * references to non-existent pages. Restarting recovery from the
+ * restartpoint would not see the references, so we would lose the
+ * cross-check that the pages belonged to a relation that was dropped
+ * later.
*/
if (XLogHaveInvalidPages())
{
@@ -8098,8 +8104,8 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
}
/*
- * Copy the checkpoint record to shared memory, so that checkpointer
- * can work out the next time it wants to perform a restartpoint.
+ * Copy the checkpoint record to shared memory, so that checkpointer can
+ * work out the next time it wants to perform a restartpoint.
*/
SpinLockAcquire(&xlogctl->info_lck);
XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
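Putting the two guards together, the control flow these comments describe is roughly the following (abbreviated; rm_safe_restartpoint was a per-resource-manager hook in this era, and the full function does more):

    /* Abbreviated RecoveryRestartPoint() flow; not the complete function. */
    for (rmid = 0; rmid <= RM_MAX_ID; rmid++)
    {
        if (RmgrTable[rmid].rm_safe_restartpoint != NULL &&
            !RmgrTable[rmid].rm_safe_restartpoint())
            return;             /* some rmgr holds partial state: skip */
    }

    if (XLogHaveInvalidPages())
        return;                 /* invalid-page references seen: skip too */

    /* Otherwise stash the checkpoint record in shared memory (as above)
     * so the checkpointer can decide when to perform the restartpoint. */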
@@ -8493,8 +8499,8 @@ UpdateFullPageWrites(void)
* Do nothing if full_page_writes has not been changed.
*
* It's safe to check the shared full_page_writes without the lock,
- * because we assume that there is no concurrently running process
- * which can update it.
+ * because we assume that there is no concurrently running process which
+ * can update it.
*/
if (fullPageWrites == Insert->fullPageWrites)
return;
@@ -8505,8 +8511,8 @@ UpdateFullPageWrites(void)
* It's always safe to take full page images, even when not strictly
 * required, but not the other way round. So if we're setting full_page_writes
* to true, first set it true and then write the WAL record. If we're
- * setting it to false, first write the WAL record and then set the
- * global flag.
+ * setting it to false, first write the WAL record and then set the global
+ * flag.
*/
if (fullPageWrites)
{
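The rule in that comment keeps the shared flag conservative at every instant: when enabling, the flag may lead the WAL record, since extra full-page images are always safe; when disabling, the record must lead the flag, so recovery never believes full pages stopped earlier than they did. Schematically (branch structure only; locking elided):

    if (fullPageWrites)
        Insert->fullPageWrites = true;    /* enable flag first, then log */

    /* ... the XLOG_FPW_CHANGE record is written at this point ... */

    if (!fullPageWrites)
        Insert->fullPageWrites = false;   /* log first, then disable flag */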
@@ -8516,12 +8522,12 @@ UpdateFullPageWrites(void)
}
/*
- * Write an XLOG_FPW_CHANGE record. This allows us to keep
- * track of full_page_writes during archive recovery, if required.
+ * Write an XLOG_FPW_CHANGE record. This allows us to keep track of
+ * full_page_writes during archive recovery, if required.
*/
if (XLogStandbyInfoActive() && !RecoveryInProgress())
{
- XLogRecData rdata;
+ XLogRecData rdata;
rdata.data = (char *) (&fullPageWrites);
rdata.len = sizeof(bool);
@@ -8561,7 +8567,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* We used to try to take the maximum of ShmemVariableCache->nextOid
* and the recorded nextOid, but that fails if the OID counter wraps
- * around. Since no OID allocation should be happening during replay
+ * around. Since no OID allocation should be happening during replay
* anyway, better to just believe the record exactly. We still take
* OidGenLock while setting the variable, just in case.
*/
@@ -8597,7 +8603,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
!XLogRecPtrIsInvalid(ControlFile->backupStartPoint) &&
XLogRecPtrIsInvalid(ControlFile->backupEndPoint))
ereport(PANIC,
- (errmsg("online backup was canceled, recovery cannot continue")));
+ (errmsg("online backup was canceled, recovery cannot continue")));
/*
* If we see a shutdown checkpoint, we know that nothing was running
@@ -8797,9 +8803,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
/*
- * Update the LSN of the last replayed XLOG_FPW_CHANGE record
- * so that do_pg_start_backup() and do_pg_stop_backup() can check
- * whether full_page_writes has been disabled during online backup.
+ * Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
+ * do_pg_start_backup() and do_pg_stop_backup() can check whether
+ * full_page_writes has been disabled during online backup.
*/
if (!fpw)
{
@@ -8825,7 +8831,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
CheckPoint *checkpoint = (CheckPoint *) rec;
appendStringInfo(buf, "checkpoint: redo %X/%X; "
- "tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
+ "tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
"oldest xid %u in DB %u; oldest running xid %u; %s",
checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
checkpoint->ThisTimeLineID,
@@ -9115,8 +9121,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
errhint("WAL control functions cannot be executed during recovery.")));
/*
- * During recovery, we don't need to check WAL level. Because, if WAL level
- * is not sufficient, it's impossible to get here during recovery.
+ * During recovery, we don't need to check the WAL level, because if the
+ * WAL level is not sufficient it's impossible to get here during recovery.
*/
if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR,
@@ -9179,7 +9185,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
* old timeline IDs. That would otherwise happen if you called
* pg_start_backup() right after restoring from a PITR archive: the
* first WAL segment containing the startup checkpoint has pages in
- * the beginning with the old timeline ID. That can cause trouble at
+ * the beginning with the old timeline ID. That can cause trouble at
* recovery: we won't have a history file covering the old timeline if
* pg_xlog directory was not included in the base backup and the WAL
* archive was cleared too before starting the backup.
@@ -9202,17 +9208,18 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
bool checkpointfpw;
/*
- * Force a CHECKPOINT. Aside from being necessary to prevent torn
+ * Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs
* will have different checkpoint positions and hence different
* history file names, even if nothing happened in between.
*
- * During recovery, establish a restartpoint if possible. We use the last
- * restartpoint as the backup starting checkpoint. This means that two
- * successive backup runs can have same checkpoint positions.
+ * During recovery, establish a restartpoint if possible. We use
+ * the last restartpoint as the backup starting checkpoint. This
+ * means that two successive backup runs can have same checkpoint
+ * positions.
*
- * Since the fact that we are executing do_pg_start_backup() during
- * recovery means that checkpointer is running, we can use
+ * Since the fact that we are executing do_pg_start_backup()
+ * during recovery means that checkpointer is running, we can use
* RequestCheckpoint() to establish a restartpoint.
*
* We use CHECKPOINT_IMMEDIATE only if requested by user (via
@@ -9237,12 +9244,12 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
{
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
/*
- * Check to see if all WAL replayed during online backup (i.e.,
- * since last restartpoint used as backup starting checkpoint)
- * contain full-page writes.
+ * Check to see if all WAL replayed during online backup
+ * (i.e., since last restartpoint used as backup starting
+ * checkpoint) contain full-page writes.
*/
SpinLockAcquire(&xlogctl->info_lck);
recptr = xlogctl->lastFpwDisableRecPtr;
@@ -9250,20 +9257,20 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
if (!checkpointfpw || XLByteLE(startpoint, recptr))
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL generated with full_page_writes=off was replayed "
- "since last restartpoint"),
- errhint("This means that the backup being taken on standby "
- "is corrupt and should not be used. "
- "Enable full_page_writes and run CHECKPOINT on the master, "
- "and then try an online backup again.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("WAL generated with full_page_writes=off was replayed "
+ "since last restartpoint"),
+ errhint("This means that the backup being taken on standby "
+ "is corrupt and should not be used. "
+ "Enable full_page_writes and run CHECKPOINT on the master, "
+ "and then try an online backup again.")));
/*
* During recovery, since we don't use the end-of-backup WAL
- * record and don't write the backup history file, the starting WAL
- * location doesn't need to be unique. This means that two base
- * backups started at the same time might use the same checkpoint
- * as starting locations.
+ * record and don't write the backup history file, the
+ * starting WAL location doesn't need to be unique. This means
+ * that two base backups started at the same time might use
+ * the same checkpoint as starting locations.
*/
gotUniqueStartpoint = true;
}
@@ -9443,8 +9450,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
errhint("WAL control functions cannot be executed during recovery.")));
/*
- * During recovery, we don't need to check WAL level. Because, if WAL level
- * is not sufficient, it's impossible to get here during recovery.
+ * During recovery, we don't need to check the WAL level, because if the
+ * WAL level is not sufficient it's impossible to get here during recovery.
*/
if (!backup_started_in_recovery && !XLogIsNeeded())
ereport(ERROR,
@@ -9537,9 +9544,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */
/*
- * Parse the BACKUP FROM line. If we are taking an online backup from
- * the standby, we confirm that the standby has not been promoted
- * during the backup.
+ * Parse the BACKUP FROM line. If we are taking an online backup from the
+ * standby, we confirm that the standby has not been promoted during the
+ * backup.
*/
ptr = strstr(remaining, "BACKUP FROM:");
if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1)
@@ -9555,30 +9562,30 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
"Try taking another online backup.")));
/*
- * During recovery, we don't write an end-of-backup record. We assume
- * that pg_control was backed up last and its minimum recovery
- * point can be available as the backup end location. Since we don't
- * have an end-of-backup record, we use the pg_control value to check
- * whether we've reached the end of backup when starting recovery from
- * this backup. We have no way of checking if pg_control wasn't backed
- * up last however.
+ * During recovery, we don't write an end-of-backup record. We assume that
+ * pg_control was backed up last and its minimum recovery point can be
+ * used as the backup end location. Since we don't have an
+ * end-of-backup record, we use the pg_control value to check whether
+ * we've reached the end of backup when starting recovery from this
+ * backup. We have no way of checking whether pg_control was in fact
+ * backed up last, however.
*
* We don't force a switch to new WAL file and wait for all the required
- * files to be archived. This is okay if we use the backup to start
- * the standby. But, if it's for an archive recovery, to ensure all the
- * required files are available, a user should wait for them to be archived,
- * or include them into the backup.
+ * files to be archived. This is okay if we use the backup to start the
+ * standby. But, if it's for an archive recovery, to ensure all the
+ * required files are available, a user should wait for them to be
+ * archived, or include them into the backup.
*
* We return the current minimum recovery point as the backup end
 * location. Note that it would be bigger than the exact backup end
- * location if the minimum recovery point is updated since the backup
- * of pg_control. This is harmless for current uses.
+ * location if the minimum recovery point is updated since the backup of
+ * pg_control. This is harmless for current uses.
*
* XXX currently a backup history file is for informational and debug
* purposes only. It's not essential for an online backup. Furthermore,
* even if it's created, it will not be archived during recovery because
- * an archiver is not invoked. So it doesn't seem worthwhile to write
- * a backup history file during recovery.
+ * an archiver is not invoked. So it doesn't seem worthwhile to write a
+ * backup history file during recovery.
*/
if (backup_started_in_recovery)
{
@@ -9597,12 +9604,12 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
if (XLByteLE(startpoint, recptr))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL generated with full_page_writes=off was replayed "
- "during online backup"),
- errhint("This means that the backup being taken on standby "
- "is corrupt and should not be used. "
- "Enable full_page_writes and run CHECKPOINT on the master, "
- "and then try an online backup again.")));
+ errmsg("WAL generated with full_page_writes=off was replayed "
+ "during online backup"),
+ errhint("This means that the backup being taken on standby "
+ "is corrupt and should not be used. "
+ "Enable full_page_writes and run CHECKPOINT on the master, "
+ "and then try an online backup again.")));
LWLockAcquire(ControlFileLock, LW_SHARED);
@@ -9905,10 +9912,11 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
+
/*
- * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't
- * restore from an older backup anyway, but since the information on it
- * is not strictly required, don't error out if it's missing for some reason.
+ * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't restore
+ * from an older backup anyway, but since the information on it is not
+ * strictly required, don't error out if it's missing for some reason.
*/
if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
{
@@ -10050,8 +10058,8 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
{
/*
- * Request a restartpoint if we've replayed too much
- * xlog since the last one.
+ * Request a restartpoint if we've replayed too much xlog since the
+ * last one.
*/
if (StandbyMode && bgwriterLaunched)
{
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index f286cdfc07..6ddcc59b37 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -80,10 +80,10 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
/*
* Once recovery has reached a consistent state, the invalid-page table
* should be empty and remain so. If a reference to an invalid page is
- * found after consistency is reached, PANIC immediately. This might
- * seem aggressive, but it's better than letting the invalid reference
- * linger in the hash table until the end of recovery and PANIC there,
- * which might come only much later if this is a standby server.
+ * found after consistency is reached, PANIC immediately. This might seem
+ * aggressive, but it's better than letting the invalid reference linger
+ * in the hash table until the end of recovery and PANIC there, which
+ * might come only much later if this is a standby server.
*/
if (reachedConsistency)
{
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 9315e79c99..45cd0808ce 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -186,10 +186,10 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
foreach(j, grantees)
{
- AclItem aclitem;
+ AclItem aclitem;
Acl *newer_acl;
- aclitem. ai_grantee = lfirst_oid(j);
+ aclitem.ai_grantee = lfirst_oid(j);
/*
* Grant options can only be granted to individual roles, not PUBLIC.
@@ -202,7 +202,7 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to roles")));
- aclitem. ai_grantor = grantorId;
+ aclitem.ai_grantor = grantorId;
/*
* The asymmetry in the conditions here comes from the spec. In
@@ -3073,7 +3073,7 @@ ExecGrant_Type(InternalGrant *istmt)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("cannot set privileges of array types"),
- errhint("Set the privileges of the element type instead.")));
+ errhint("Set the privileges of the element type instead.")));
/* Used GRANT DOMAIN on a non-domain? */
if (istmt->objtype == ACL_OBJECT_DOMAIN &&
@@ -4184,7 +4184,7 @@ pg_type_aclmask(Oid type_oid, Oid roleid, AclMode mask, AclMaskHow how)
/* "True" array types don't manage permissions of their own */
if (typeForm->typelem != 0 && typeForm->typlen == -1)
{
- Oid elttype_oid = typeForm->typelem;
+ Oid elttype_oid = typeForm->typelem;
ReleaseSysCache(tuple);
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index db6769cb90..d4e1f76f31 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -173,7 +173,7 @@ static void reportDependentObjects(const ObjectAddresses *targetObjects,
int msglevel,
const ObjectAddress *origObject);
static void deleteOneObject(const ObjectAddress *object,
- Relation depRel, int32 flags);
+ Relation depRel, int32 flags);
static void doDeletion(const ObjectAddress *object, int flags);
static void AcquireDeletionLock(const ObjectAddress *object, int flags);
static void ReleaseDeletionLock(const ObjectAddress *object);
@@ -352,7 +352,8 @@ performMultipleDeletions(const ObjectAddresses *objects,
free_object_addresses(targetObjects);
/*
- * We closed depRel earlier in deleteOneObject if doing a drop concurrently
+ * We closed depRel earlier in deleteOneObject if doing a drop
+ * concurrently
*/
if ((flags & PERFORM_DELETION_CONCURRENTLY) != PERFORM_DELETION_CONCURRENTLY)
heap_close(depRel, RowExclusiveLock);
@@ -424,7 +425,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
* Since this function is currently only used to clean out temporary
* schemas, we pass PERFORM_DELETION_INTERNAL here, indicating that
* the operation is an automatic system operation rather than a user
- * action. If, in the future, this function is used for other
+ * action. If, in the future, this function is used for other
* purposes, we might need to revisit this.
*/
deleteOneObject(thisobj, depRel, PERFORM_DELETION_INTERNAL);
@@ -514,12 +515,12 @@ findDependentObjects(const ObjectAddress *object,
/*
* The target object might be internally dependent on some other object
* (its "owner"), and/or be a member of an extension (also considered its
- * owner). If so, and if we aren't recursing from the owning object, we
+ * owner). If so, and if we aren't recursing from the owning object, we
* have to transform this deletion request into a deletion request of the
* owning object. (We'll eventually recurse back to this object, but the
- * owning object has to be visited first so it will be deleted after.)
- * The way to find out about this is to scan the pg_depend entries that
- * show what this object depends on.
+ * owning object has to be visited first so it will be deleted after.) The
+ * way to find out about this is to scan the pg_depend entries that show
+ * what this object depends on.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
@@ -577,7 +578,7 @@ findDependentObjects(const ObjectAddress *object,
/*
* Exception 1a: if the owning object is listed in
* pendingObjects, just release the caller's lock and
- * return. We'll eventually complete the DROP when we
+ * return. We'll eventually complete the DROP when we
* reach that entry in the pending list.
*/
if (pendingObjects &&
@@ -593,8 +594,8 @@ findDependentObjects(const ObjectAddress *object,
* Exception 1b: if the owning object is the extension
* currently being created/altered, it's okay to continue
* with the deletion. This allows dropping of an
- * extension's objects within the extension's scripts,
- * as well as corner cases such as dropping a transient
+ * extension's objects within the extension's scripts, as
+ * well as corner cases such as dropping a transient
* object created within such a script.
*/
if (creating_extension &&
@@ -618,8 +619,8 @@ findDependentObjects(const ObjectAddress *object,
* it's okay to continue with the deletion. This holds when
* recursing from a whole object that includes the nominal
* other end as a component, too. Since there can be more
- * than one "owning" object, we have to allow matches that
- * are more than one level down in the stack.
+ * than one "owning" object, we have to allow matches that are
+ * more than one level down in the stack.
*/
if (stack_address_present_add_flags(&otherObject, 0, stack))
break;
@@ -630,7 +631,7 @@ findDependentObjects(const ObjectAddress *object,
* owning object.
*
* First, release caller's lock on this object and get
- * deletion lock on the owning object. (We must release
+ * deletion lock on the owning object. (We must release
* caller's lock to avoid deadlock against a concurrent
* deletion of the owning object.)
*/
@@ -999,7 +1000,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
/* DROP hook of the objects being removed */
if (object_access_hook)
{
- ObjectAccessDrop drop_arg;
+ ObjectAccessDrop drop_arg;
+
drop_arg.dropflags = flags;
InvokeObjectAccessHook(OAT_DROP, object->classId, object->objectId,
object->objectSubId, &drop_arg);
@@ -1049,8 +1051,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
object->objectSubId);
/*
- * Close depRel if we are doing a drop concurrently because it
- * commits the transaction, so we don't want dangling references.
+ * Close depRel if we are doing a drop concurrently because it commits the
+ * transaction, so we don't want dangling references.
*/
if ((flags & PERFORM_DELETION_CONCURRENTLY) == PERFORM_DELETION_CONCURRENTLY)
heap_close(depRel, RowExclusiveLock);
@@ -1093,8 +1095,8 @@ doDeletion(const ObjectAddress *object, int flags)
if (relKind == RELKIND_INDEX)
{
- bool concurrent = ((flags & PERFORM_DELETION_CONCURRENTLY)
- == PERFORM_DELETION_CONCURRENTLY);
+ bool concurrent = ((flags & PERFORM_DELETION_CONCURRENTLY)
+ == PERFORM_DELETION_CONCURRENTLY);
Assert(object->objectSubId == 0);
index_drop(object->objectId, concurrent);
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 8fc69ae720..49e7644699 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -1957,7 +1957,7 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr,
ccsrc, /* Source form of check constraint */
is_local, /* conislocal */
inhcount, /* coninhcount */
- is_no_inherit); /* connoinherit */
+ is_no_inherit); /* connoinherit */
pfree(ccbin);
pfree(ccsrc);
@@ -1998,7 +1998,7 @@ StoreConstraints(Relation rel, List *cooked_constraints)
break;
case CONSTR_CHECK:
StoreRelCheck(rel, con->name, con->expr, !con->skip_validation,
- con->is_local, con->inhcount, con->is_no_inherit);
+ con->is_local, con->inhcount, con->is_no_inherit);
numchecks++;
break;
default:
@@ -2345,8 +2345,8 @@ MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr,
}
/* OK to update the tuple */
ereport(NOTICE,
- (errmsg("merging constraint \"%s\" with inherited definition",
- ccname)));
+ (errmsg("merging constraint \"%s\" with inherited definition",
+ ccname)));
simple_heap_update(conDesc, &tup->t_self, tup);
CatalogUpdateIndexes(conDesc, tup);
break;
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 998379c8af..9e8b1cc49b 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1155,7 +1155,7 @@ index_constraint_create(Relation heapRelation,
NULL,
NULL,
true, /* islocal */
- 0, /* inhcount */
+ 0, /* inhcount */
false); /* noinherit */
/*
@@ -1324,8 +1324,8 @@ index_drop(Oid indexId, bool concurrent)
CheckTableNotInUse(userIndexRelation, "DROP INDEX");
/*
- * Drop Index concurrently is similar in many ways to creating an
- * index concurrently, so some actions are similar to DefineIndex()
+ * Drop Index concurrently is similar in many ways to creating an index
+ * concurrently, so some actions are similar to DefineIndex()
*/
if (concurrent)
{
@@ -1339,7 +1339,7 @@ index_drop(Oid indexId, bool concurrent)
indexRelation = heap_open(IndexRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy1(INDEXRELID,
- ObjectIdGetDatum(indexId));
+ ObjectIdGetDatum(indexId));
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for index %u", indexId);
indexForm = (Form_pg_index) GETSTRUCT(tuple);
@@ -1373,15 +1373,15 @@ index_drop(Oid indexId, bool concurrent)
* will be marked not indisvalid, so that no one else tries to either
* insert into it or use it for queries.
*
- * We must commit our current transaction so that the index update becomes
- * visible; then start another. Note that all the data structures we just
- * built are lost in the commit. The only data we keep past here are the
- * relation IDs.
+ * We must commit our current transaction so that the index update
+ * becomes visible; then start another. Note that all the data
+ * structures we just built are lost in the commit. The only data we
+ * keep past here are the relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
* that neither it nor the index can be dropped before we finish. This
- * cannot block, even if someone else is waiting for access, because we
- * already have the same lock within our transaction.
+ * cannot block, even if someone else is waiting for access, because
+ * we already have the same lock within our transaction.
*/
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
LockRelationIdForSession(&indexrelid, ShareUpdateExclusiveLock);
@@ -1391,23 +1391,23 @@ index_drop(Oid indexId, bool concurrent)
StartTransactionCommand();
/*
- * Now we must wait until no running transaction could have the table open
- * with the old list of indexes. To do this, inquire which xacts
- * currently would conflict with AccessExclusiveLock on the table -- ie,
- * which ones have a lock of any kind on the table. Then wait for each of
- * these xacts to commit or abort. Note we do not need to worry about
- * xacts that open the table for writing after this point; they will see
- * the index as invalid when they open the relation.
+ * Now we must wait until no running transaction could have the table
+ * open with the old list of indexes. To do this, inquire which xacts
+ * currently would conflict with AccessExclusiveLock on the table --
+ * ie, which ones have a lock of any kind on the table. Then wait for
+ * each of these xacts to commit or abort. Note we do not need to
+ * worry about xacts that open the table for writing after this point;
+ * they will see the index as invalid when they open the relation.
*
- * Note: the reason we use actual lock acquisition here, rather than just
- * checking the ProcArray and sleeping, is that deadlock is possible if
- * one of the transactions in question is blocked trying to acquire an
- * exclusive lock on our table. The lock code will detect deadlock and
- * error out properly.
+ * Note: the reason we use actual lock acquisition here, rather than
+ * just checking the ProcArray and sleeping, is that deadlock is
+ * possible if one of the transactions in question is blocked trying
+ * to acquire an exclusive lock on our table. The lock code will
+ * detect deadlock and error out properly.
*
- * Note: GetLockConflicts() never reports our own xid, hence we need not
- * check for that. Also, prepared xacts are not reported, which is fine
- * since they certainly aren't going to do anything more.
+ * Note: GetLockConflicts() never reports our own xid, hence we need
+ * not check for that. Also, prepared xacts are not reported, which
+ * is fine since they certainly aren't going to do anything more.
*/
old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock);
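
The loop that consumes old_lockholders lies outside this hunk; a sketch of it, assuming the 9.2-era blocking call VirtualXactLock(vxid, true):

    VirtualTransactionId *old_lockholders;

    old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock);
    while (VirtualTransactionIdIsValid(*old_lockholders))
    {
        /* blocks until that xact ends; the lock manager detects deadlock */
        VirtualXactLock(*old_lockholders, true);
        old_lockholders++;
    }
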
@@ -1786,7 +1786,7 @@ index_update_stats(Relation rel,
if (rd_rel->relkind != RELKIND_INDEX)
relallvisible = visibilitymap_count(rel);
- else /* don't bother for indexes */
+ else /* don't bother for indexes */
relallvisible = 0;
if (rd_rel->relpages != (int32) relpages)
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index e92efd863e..1b6bb3bb6d 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -226,7 +226,7 @@ Datum pg_is_other_temp_schema(PG_FUNCTION_ARGS);
Oid
RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
bool missing_ok, bool nowait,
- RangeVarGetRelidCallback callback, void *callback_arg)
+ RangeVarGetRelidCallback callback, void *callback_arg)
{
uint64 inval_count;
Oid relId;
@@ -247,20 +247,20 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
}
/*
- * DDL operations can change the results of a name lookup. Since all
- * such operations will generate invalidation messages, we keep track
- * of whether any such messages show up while we're performing the
- * operation, and retry until either (1) no more invalidation messages
- * show up or (2) the answer doesn't change.
+ * DDL operations can change the results of a name lookup. Since all such
+ * operations will generate invalidation messages, we keep track of
+ * whether any such messages show up while we're performing the operation,
+ * and retry until either (1) no more invalidation messages show up or (2)
+ * the answer doesn't change.
*
* But if lockmode = NoLock, then we assume that either the caller is OK
* with the answer changing under them, or that they already hold some
* appropriate lock, and therefore return the first answer we get without
- * checking for invalidation messages. Also, if the requested lock is
+ * checking for invalidation messages. Also, if the requested lock is
 * already held, LockRelationOid will not AcceptInvalidationMessages,
 * so we may fail to notice a change.  We could protect against that case
-     * by calling AcceptInvalidationMessages() before beginning this loop,
-     * but that would add a significant amount of overhead, so for now we don't.
+     * by calling AcceptInvalidationMessages() before beginning this loop, but
+     * that would add a significant amount of overhead, so for now we don't.
*/
for (;;)
{
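
A runnable toy (not backend code) of the retry loop this comment governs: keep looking up until either no invalidation messages arrived during the attempt or the answer stops changing. Here shared_inval_counter and lookup_relid are stand-ins for SharedInvalidMessageCounter and the real catalog lookup.

    #include <stdio.h>

    static unsigned shared_inval_counter;   /* stand-in counter */
    static int      current_oid = 1000;

    /* Simulated lookup: the first call races with concurrent DDL,
     * which bumps the counter and changes the answer. */
    static int
    lookup_relid(void)
    {
        static int  calls = 0;

        if (calls++ == 0)
        {
            shared_inval_counter++;         /* concurrent DDL happened */
            current_oid = 2000;
        }
        return current_oid;
    }

    int
    main(void)
    {
        int         relid;
        int         old_relid = -1;

        for (;;)
        {
            unsigned    inval_count = shared_inval_counter;
            int         retry = (old_relid != -1);

            relid = lookup_relid();
            /* ... a lock on relid would be taken here ... */
            if (inval_count == shared_inval_counter ||
                (retry && relid == old_relid))
                break;          /* counter stable, or answer stable */
            /* ... release the lock on the old answer and retry ... */
            old_relid = relid;
        }
        printf("settled on OID %d\n", relid);
        return 0;
    }
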
@@ -282,17 +282,18 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
if (relation->relpersistence == RELPERSISTENCE_TEMP)
{
if (!OidIsValid(myTempNamespace))
- relId = InvalidOid; /* this probably can't happen? */
+ relId = InvalidOid; /* this probably can't happen? */
else
{
if (relation->schemaname)
{
- Oid namespaceId;
+ Oid namespaceId;
+
namespaceId = LookupExplicitNamespace(relation->schemaname);
if (namespaceId != myTempNamespace)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("temporary tables cannot specify a schema name")));
+ errmsg("temporary tables cannot specify a schema name")));
}
relId = get_relname_relid(relation->relname, myTempNamespace);
@@ -315,12 +316,12 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
/*
* Invoke caller-supplied callback, if any.
*
- * This callback is a good place to check permissions: we haven't taken
- * the table lock yet (and it's really best to check permissions before
- * locking anything!), but we've gotten far enough to know what OID we
- * think we should lock. Of course, concurrent DDL might change things
- * while we're waiting for the lock, but in that case the callback will
- * be invoked again for the new OID.
+ * This callback is a good place to check permissions: we haven't
+ * taken the table lock yet (and it's really best to check permissions
+ * before locking anything!), but we've gotten far enough to know what
+ * OID we think we should lock. Of course, concurrent DDL might
+ * change things while we're waiting for the lock, but in that case
+ * the callback will be invoked again for the new OID.
*/
if (callback)
callback(relation, relId, oldRelId, callback_arg);
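
A sketch of what such a callback might do, shaped like RangeVarCallbackForLockTable later in this patch; the callback itself is hypothetical:

    /* Hypothetical callback; the signature matches lockcmds.c below. */
    static void
    permission_check_callback(const RangeVar *rv, Oid relid, Oid oldrelid,
                              void *arg)
    {
        AclResult   aclresult;

        if (!OidIsValid(relid))
            return;             /* relation vanished; nothing to check */

        aclresult = pg_class_aclcheck(relid, GetUserId(), ACL_SELECT);
        if (aclresult != ACLCHECK_OK)
            aclcheck_error(aclresult, ACL_KIND_CLASS, rv->relname);
    }
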
@@ -328,21 +329,21 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
/*
* If no lock requested, we assume the caller knows what they're
* doing. They should have already acquired a heavyweight lock on
- * this relation earlier in the processing of this same statement,
- * so it wouldn't be appropriate to AcceptInvalidationMessages()
- * here, as that might pull the rug out from under them.
+ * this relation earlier in the processing of this same statement, so
+ * it wouldn't be appropriate to AcceptInvalidationMessages() here, as
+ * that might pull the rug out from under them.
*/
if (lockmode == NoLock)
break;
/*
- * If, upon retry, we get back the same OID we did last time, then
- * the invalidation messages we processed did not change the final
- * answer. So we're done.
+ * If, upon retry, we get back the same OID we did last time, then the
+ * invalidation messages we processed did not change the final answer.
+ * So we're done.
*
* If we got a different OID, we've locked the relation that used to
- * have this name rather than the one that does now. So release
- * the lock.
+ * have this name rather than the one that does now. So release the
+ * lock.
*/
if (retry)
{
@@ -384,8 +385,8 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
break;
/*
- * Something may have changed. Let's repeat the name lookup, to
- * make sure this name still references the same relation it did
+ * Something may have changed. Let's repeat the name lookup, to make
+ * sure this name still references the same relation it did
* previously.
*/
retry = true;
@@ -550,8 +551,8 @@ RangeVarGetAndCheckCreationNamespace(RangeVar *relation,
relid = InvalidOid;
/*
- * In bootstrap processing mode, we don't bother with permissions
- * or locking. Permissions might not be working yet, and locking is
+ * In bootstrap processing mode, we don't bother with permissions or
+ * locking. Permissions might not be working yet, and locking is
* unnecessary.
*/
if (IsBootstrapProcessingMode())
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index d133f64776..5a06fcbf41 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -75,10 +75,10 @@
*/
typedef struct
{
- Oid class_oid; /* oid of catalog */
- Oid oid_index_oid; /* oid of index on system oid column */
- int oid_catcache_id; /* id of catcache on system oid column */
- AttrNumber attnum_namespace; /* attnum of namespace field */
+ Oid class_oid; /* oid of catalog */
+ Oid oid_index_oid; /* oid of index on system oid column */
+ int oid_catcache_id; /* id of catcache on system oid column */
+ AttrNumber attnum_namespace; /* attnum of namespace field */
} ObjectPropertyType;
static ObjectPropertyType ObjectProperty[] =
@@ -286,13 +286,13 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
for (;;)
{
/*
- * Remember this value, so that, after looking up the object name
- * and locking it, we can check whether any invalidation messages
- * have been processed that might require a do-over.
+ * Remember this value, so that, after looking up the object name and
+ * locking it, we can check whether any invalidation messages have
+ * been processed that might require a do-over.
*/
inval_count = SharedInvalidMessageCounter;
- /* Look up object address. */
+ /* Look up object address. */
switch (objtype)
{
case OBJECT_INDEX:
@@ -367,7 +367,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
case OBJECT_OPCLASS:
case OBJECT_OPFAMILY:
address = get_object_address_opcf(objtype,
- objname, objargs, missing_ok);
+ objname, objargs, missing_ok);
break;
case OBJECT_LARGEOBJECT:
Assert(list_length(objname) == 1);
@@ -377,10 +377,10 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
if (!LargeObjectExists(address.objectId))
{
if (!missing_ok)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("large object %u does not exist",
- address.objectId)));
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("large object %u does not exist",
+ address.objectId)));
}
break;
case OBJECT_CAST:
@@ -475,8 +475,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
* At this point, we've resolved the name to an OID and locked the
* corresponding database object. However, it's possible that by the
* time we acquire the lock on the object, concurrent DDL has modified
- * the database in such a way that the name we originally looked up
- * no longer resolves to that OID.
+ * the database in such a way that the name we originally looked up no
+ * longer resolves to that OID.
*
* We can be certain that this isn't an issue if (a) no shared
* invalidation messages have been processed or (b) we've locked a
@@ -488,12 +488,12 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
* the relation, which is enough to freeze out any concurrent DDL.
*
* In all other cases, however, it's possible that the name we looked
- * up no longer refers to the object we locked, so we retry the
- * lookup and see whether we get the same answer.
+ * up no longer refers to the object we locked, so we retry the lookup
+ * and see whether we get the same answer.
*/
- if (inval_count == SharedInvalidMessageCounter || relation != NULL)
- break;
- old_address = address;
+ if (inval_count == SharedInvalidMessageCounter || relation != NULL)
+ break;
+ old_address = address;
}
/* Return the object address and the relation. */
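
Saving old_address only makes sense alongside an equality test on the three ObjectAddress fields; a sketch of that comparison (the actual test sits outside this hunk):

    /* Sketch: "same answer as last time" means all three fields match. */
    static bool
    object_address_equal(ObjectAddress a, ObjectAddress b)
    {
        return a.classId == b.classId &&
               a.objectId == b.objectId &&
               a.objectSubId == b.objectSubId;
    }
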
@@ -621,7 +621,7 @@ get_relation_by_qualified_name(ObjectType objtype, List *objname,
bool missing_ok)
{
Relation relation;
- ObjectAddress address;
+ ObjectAddress address;
address.classId = RelationRelationId;
address.objectId = InvalidOid;
@@ -721,8 +721,8 @@ get_object_address_relobject(ObjectType objtype, List *objname,
address.objectSubId = 0;
/*
- * Caller is expecting to get back the relation, even though we
- * didn't end up using it to find the rule.
+ * Caller is expecting to get back the relation, even though we didn't
+ * end up using it to find the rule.
*/
if (OidIsValid(address.objectId))
relation = heap_open(reloid, AccessShareLock);
@@ -768,7 +768,7 @@ get_object_address_relobject(ObjectType objtype, List *objname,
if (!OidIsValid(address.objectId))
{
heap_close(relation, AccessShareLock);
- relation = NULL; /* department of accident prevention */
+ relation = NULL; /* department of accident prevention */
return address;
}
}
@@ -834,9 +834,10 @@ static ObjectAddress
get_object_address_type(ObjectType objtype,
List *objname, bool missing_ok)
{
- ObjectAddress address;
+ ObjectAddress address;
TypeName *typename;
- Type tup;
+ Type tup;
+
typename = makeTypeNameFromNameList(objname);
address.classId = TypeRelationId;
@@ -1083,7 +1084,7 @@ get_object_namespace(const ObjectAddress *address)
HeapTuple tuple;
bool isnull;
Oid oid;
- ObjectPropertyType *property;
+ ObjectPropertyType *property;
/* If not owned by a namespace, just return InvalidOid. */
property = get_object_property_data(address->classId);
@@ -1122,5 +1123,5 @@ get_object_property_data(Oid class_id)
return &ObjectProperty[index];
elog(ERROR, "unrecognized class id: %u", class_id);
- return NULL; /* not reached */
+ return NULL; /* not reached */
}
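
Read together with the ObjectPropertyType struct above, the lookup is a linear scan keyed on class_oid; a sketch of the whole function, of which the hunk shows only the tail:

    /* Sketch of the full lookup whose tail appears in the hunk above. */
    static ObjectPropertyType *
    get_object_property_data(Oid class_id)
    {
        int         index;

        for (index = 0; index < lengthof(ObjectProperty); index++)
            if (ObjectProperty[index].class_oid == class_id)
                return &ObjectProperty[index];

        elog(ERROR, "unrecognized class id: %u", class_id);
        return NULL;            /* not reached */
    }
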
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index dca5d09ee6..224859d76e 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -831,8 +831,8 @@ get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok)
if (OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("domain \"%s\" has multiple constraints named \"%s\"",
- format_type_be(typid), conname)));
+ errmsg("domain \"%s\" has multiple constraints named \"%s\"",
+ format_type_be(typid), conname)));
conOid = HeapTupleGetOid(tuple);
}
}
diff --git a/src/backend/catalog/pg_depend.c b/src/backend/catalog/pg_depend.c
index 843f03d2c3..8e58435606 100644
--- a/src/backend/catalog/pg_depend.c
+++ b/src/backend/catalog/pg_depend.c
@@ -150,7 +150,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object,
/* Only need to check for existing membership if isReplace */
if (isReplace)
{
- Oid oldext;
+ Oid oldext;
oldext = getExtensionOfObject(object->classId, object->objectId);
if (OidIsValid(oldext))
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index ae71b93917..599f04242f 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -228,7 +228,7 @@ ProcedureCreate(const char *procedureName,
/*
* Do not allow polymorphic return type unless at least one input argument
- * is polymorphic. ANYRANGE return type is even stricter: must have an
+ * is polymorphic. ANYRANGE return type is even stricter: must have an
* ANYRANGE input (since we can't deduce the specific range type from
* ANYELEMENT). Also, do not allow return type INTERNAL unless at least
* one input argument is INTERNAL.
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index a67aebbdb6..1edf950c56 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -1287,7 +1287,7 @@ shdepReassignOwned(List *roleids, Oid newrole)
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
errmsg("cannot reassign ownership of objects owned by %s because they are required by the database system",
- getObjectDescription(&obj))));
+ getObjectDescription(&obj))));
/*
* There's no need to tell the whole truth, which is that we
diff --git a/src/backend/catalog/storage.c b/src/backend/catalog/storage.c
index 97ca95b6c8..993bc49c2a 100644
--- a/src/backend/catalog/storage.c
+++ b/src/backend/catalog/storage.c
@@ -500,8 +500,8 @@ smgr_redo(XLogRecPtr lsn, XLogRecord *record)
/*
* Forcibly create relation if it doesn't exist (which suggests that
* it was dropped somewhere later in the WAL sequence). As in
- * XLogReadBuffer, we prefer to recreate the rel and replay the log
- * as best we can until the drop is seen.
+ * XLogReadBuffer, we prefer to recreate the rel and replay the log as
+ * best we can until the drop is seen.
*/
smgrcreate(reln, MAIN_FORKNUM, true);
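
Passing true for isRedo is what makes this idempotent: smgrcreate silently succeeds if the file already exists, so replay works whether or not the relation survives to the end of WAL. A sketch of the surrounding redo step; the xlrec field name is an assumption, not shown in this hunk:

    /* Sketch; the record layout is assumed. */
    xl_smgr_create *xlrec = (xl_smgr_create *) XLogRecGetData(record);
    SMgrRelation reln = smgropen(xlrec->rnode, InvalidBackendId);

    smgrcreate(reln, MAIN_FORKNUM, true);   /* isRedo: ok if it exists */
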
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 225ea866bf..9612a276f3 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -96,11 +96,11 @@ static void compute_index_stats(Relation onerel, double totalrows,
MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum,
Node *index_expr);
-static int acquire_sample_rows(Relation onerel, int elevel,
+static int acquire_sample_rows(Relation onerel, int elevel,
HeapTuple *rows, int targrows,
double *totalrows, double *totaldeadrows);
static int compare_rows(const void *a, const void *b);
-static int acquire_inherited_sample_rows(Relation onerel, int elevel,
+static int acquire_inherited_sample_rows(Relation onerel, int elevel,
HeapTuple *rows, int targrows,
double *totalrows, double *totaldeadrows);
static void update_attstats(Oid relid, bool inh,
@@ -118,7 +118,7 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
Relation onerel;
int elevel;
AcquireSampleRowsFunc acquirefunc = NULL;
- BlockNumber relpages = 0;
+ BlockNumber relpages = 0;
/* Select logging level */
if (vacstmt->options & VACOPT_VERBOSE)
@@ -205,8 +205,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
}
/*
-     * Check that it's a plain table or foreign table; we used to do this
-     * in get_rel_oids() but it seems safer to check after we've locked the
+     * Check that it's a plain table or foreign table; we used to do this in
+     * get_rel_oids() but it seems safer to check after we've locked the
* relation.
*/
if (onerel->rd_rel->relkind == RELKIND_RELATION)
@@ -235,8 +235,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
if (!ok)
{
ereport(WARNING,
- (errmsg("skipping \"%s\" --- cannot analyze this foreign table",
- RelationGetRelationName(onerel))));
+ (errmsg("skipping \"%s\" --- cannot analyze this foreign table",
+ RelationGetRelationName(onerel))));
relation_close(onerel, ShareUpdateExclusiveLock);
return;
}
@@ -464,8 +464,8 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt,
/*
* Determine how many rows we need to sample, using the worst case from
* all analyzable columns. We use a lower bound of 100 rows to avoid
- * possible overflow in Vitter's algorithm. (Note: that will also be
- * the target in the corner case where there are no analyzable columns.)
+ * possible overflow in Vitter's algorithm. (Note: that will also be the
+ * target in the corner case where there are no analyzable columns.)
*/
targrows = 100;
for (i = 0; i < attr_cnt; i++)
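
For background, a runnable toy of the sampling idea behind acquire_sample_rows(): Vitter's Algorithm R keeps a fixed-size, uniformly random sample of a stream in bounded memory. (The backend uses the faster Algorithm Z, whose numerics motivate the 100-row floor above.)

    #include <stdlib.h>

    /* Toy reservoir sampler: every row seen so far ends up in the
     * sample with equal probability. */
    static void
    reservoir_sample(const int *rows, int nrows, int *sample, int targrows)
    {
        int         seen;

        for (seen = 0; seen < nrows; seen++)
        {
            if (seen < targrows)
                sample[seen] = rows[seen];  /* fill the reservoir */
            else
            {
                int         k = rand() % (seen + 1);    /* 0 .. seen */

                if (k < targrows)
                    sample[k] = rows[seen]; /* replace a random slot */
            }
        }
    }
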
@@ -1337,7 +1337,7 @@ anl_get_next_S(double t, int n, double *stateptr)
double V,
quot;
- V = anl_random_fract(); /* Generate V */
+ V = anl_random_fract(); /* Generate V */
S = 0;
t += 1;
/* Note: "num" in Vitter's code is always equal to t - n */
@@ -1398,7 +1398,7 @@ anl_get_next_S(double t, int n, double *stateptr)
y *= numer / denom;
denom -= 1;
}
- W = exp(-log(anl_random_fract()) / n); /* Generate W in advance */
+ W = exp(-log(anl_random_fract()) / n); /* Generate W in advance */
if (exp(log(y) / n) <= (t + X) / t)
break;
}
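
The W computed above is the standard Algorithm Z draw: for uniform U in (0,1), exp(-log(U)/n) is U raised to the power -1/n. A standalone equivalent, with the same formula written out:

    #include <math.h>
    #include <stdlib.h>

    /* Draw U uniformly in (0,1), avoiding 0, and return U^(-1/n). */
    static double
    next_W(int n)
    {
        double      u = ((double) rand() + 1.0) / ((double) RAND_MAX + 2.0);

        return exp(-log(u) / n);    /* == pow(u, -1.0 / n) */
    }
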
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 349d13034e..a72b0ad5ff 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -594,10 +594,10 @@ make_new_heap(Oid OIDOldHeap, Oid NewTableSpace)
OldHeapDesc = RelationGetDescr(OldHeap);
/*
- * Note that the NewHeap will not
- * receive any of the defaults or constraints associated with the OldHeap;
- * we don't need 'em, and there's no reason to spend cycles inserting them
- * into the catalogs only to delete them.
+ * Note that the NewHeap will not receive any of the defaults or
+ * constraints associated with the OldHeap; we don't need 'em, and there's
+ * no reason to spend cycles inserting them into the catalogs only to
+ * delete them.
*/
/*
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 1d1eacd3fb..98bcb2fcf3 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -150,7 +150,7 @@ typedef struct CopyStateData
Oid *typioparams; /* array of element types for in_functions */
int *defmap; /* array of default att numbers */
ExprState **defexprs; /* array of default att expressions */
- bool volatile_defexprs; /* is any of defexprs volatile? */
+ bool volatile_defexprs; /* is any of defexprs volatile? */
/*
* These variables are used to reduce overhead in textual COPY FROM.
@@ -566,11 +566,11 @@ CopyGetData(CopyState cstate, void *databuf, int minread, int maxread)
if (mtype == EOF)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection with an open transaction")));
+ errmsg("unexpected EOF on client connection with an open transaction")));
if (pq_getmessage(cstate->fe_msgbuf, 0))
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection with an open transaction")));
+ errmsg("unexpected EOF on client connection with an open transaction")));
switch (mtype)
{
case 'd': /* CopyData */
@@ -1861,6 +1861,7 @@ CopyFrom(CopyState cstate)
uint64 processed = 0;
bool useHeapMultiInsert;
int nBufferedTuples = 0;
+
#define MAX_BUFFERED_TUPLES 1000
HeapTuple *bufferedTuples = NULL; /* initialize to silence warning */
Size bufferedTuplesSize = 0;
@@ -1968,8 +1969,8 @@ CopyFrom(CopyState cstate)
* processed and prepared for insertion are not there.
*/
if ((resultRelInfo->ri_TrigDesc != NULL &&
- (resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
- resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) ||
+ (resultRelInfo->ri_TrigDesc->trig_insert_before_row ||
+ resultRelInfo->ri_TrigDesc->trig_insert_instead_row)) ||
cstate->volatile_defexprs)
{
useHeapMultiInsert = false;
@@ -2162,8 +2163,8 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
int i;
/*
- * heap_multi_insert leaks memory, so switch to short-lived memory
- * context before calling it.
+ * heap_multi_insert leaks memory, so switch to short-lived memory context
+ * before calling it.
*/
oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
heap_multi_insert(cstate->rel,
@@ -2175,14 +2176,14 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
MemoryContextSwitchTo(oldcontext);
/*
- * If there are any indexes, update them for all the inserted tuples,
- * and run AFTER ROW INSERT triggers.
+ * If there are any indexes, update them for all the inserted tuples, and
+ * run AFTER ROW INSERT triggers.
*/
if (resultRelInfo->ri_NumIndices > 0)
{
for (i = 0; i < nBufferedTuples; i++)
{
- List *recheckIndexes;
+ List *recheckIndexes;
ExecStoreTuple(bufferedTuples[i], myslot, InvalidBuffer, false);
recheckIndexes =
@@ -2194,6 +2195,7 @@ CopyFromInsertBatch(CopyState cstate, EState *estate, CommandId mycid,
list_free(recheckIndexes);
}
}
+
/*
 * There are no indexes, but see if we need to run AFTER ROW INSERT triggers
* anyway.
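
A runnable toy of the buffering strategy these copy.c hunks touch: accumulate rows and insert them in batches, with heap_multi_insert playing the role of flush() in the backend; MAX_BUFFERED mirrors MAX_BUFFERED_TUPLES above.

    #include <stdio.h>

    #define MAX_BUFFERED 1000

    static int  buffered[MAX_BUFFERED];
    static int  nbuffered;

    static void
    flush(void)                 /* heap_multi_insert's role */
    {
        printf("flushing %d rows in one batch\n", nbuffered);
        nbuffered = 0;
    }

    static void
    buffer_row(int row)
    {
        buffered[nbuffered++] = row;
        if (nbuffered >= MAX_BUFFERED)
            flush();
    }

    int
    main(void)
    {
        int         i;

        for (i = 0; i < 2500; i++)
            buffer_row(i);
        if (nbuffered > 0)
            flush();            /* final partial batch */
        return 0;
    }
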
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index 5173f5a308..dc0665e2a4 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -62,12 +62,12 @@ void
ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
ParamListInfo params, char *completionTag)
{
- Query *query = (Query *) stmt->query;
+ Query *query = (Query *) stmt->query;
IntoClause *into = stmt->into;
DestReceiver *dest;
- List *rewritten;
+ List *rewritten;
PlannedStmt *plan;
- QueryDesc *queryDesc;
+ QueryDesc *queryDesc;
ScanDirection dir;
/*
@@ -98,9 +98,9 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
* plancache.c.
*
* Because the rewriter and planner tend to scribble on the input, we make
- * a preliminary copy of the source querytree. This prevents problems in
+ * a preliminary copy of the source querytree. This prevents problems in
* the case that CTAS is in a portal or plpgsql function and is executed
- * repeatedly. (See also the same hack in EXPLAIN and PREPARE.)
+ * repeatedly. (See also the same hack in EXPLAIN and PREPARE.)
*/
rewritten = QueryRewrite((Query *) copyObject(stmt->query));
@@ -115,10 +115,10 @@ ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
/*
* Use a snapshot with an updated command ID to ensure this query sees
- * results of any previously executed queries. (This could only matter
- * if the planner executed an allegedly-stable function that changed
- * the database contents, but let's do it anyway to be parallel to the
- * EXPLAIN code path.)
+ * results of any previously executed queries. (This could only matter if
+ * the planner executed an allegedly-stable function that changed the
+ * database contents, but let's do it anyway to be parallel to the EXPLAIN
+ * code path.)
*/
PushCopiedSnapshot(GetActiveSnapshot());
UpdateActiveSnapshotCommandId();
@@ -211,12 +211,12 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
DR_intorel *myState = (DR_intorel *) self;
IntoClause *into = myState->into;
CreateStmt *create;
- Oid intoRelationId;
- Relation intoRelationDesc;
+ Oid intoRelationId;
+ Relation intoRelationDesc;
RangeTblEntry *rte;
Datum toast_options;
- ListCell *lc;
- int attnum;
+ ListCell *lc;
+ int attnum;
static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
Assert(into != NULL); /* else somebody forgot to set it */
@@ -237,8 +237,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
create->if_not_exists = false;
/*
- * Build column definitions using "pre-cooked" type and collation info.
- * If a column name list was specified in CREATE TABLE AS, override the
+ * Build column definitions using "pre-cooked" type and collation info. If
+ * a column name list was specified in CREATE TABLE AS, override the
* column names derived from the query. (Too few column names are OK, too
* many are not.)
*/
@@ -246,8 +246,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
for (attnum = 0; attnum < typeinfo->natts; attnum++)
{
Form_pg_attribute attribute = typeinfo->attrs[attnum];
- ColumnDef *col = makeNode(ColumnDef);
- TypeName *coltype = makeNode(TypeName);
+ ColumnDef *col = makeNode(ColumnDef);
+ TypeName *coltype = makeNode(TypeName);
if (lc)
{
@@ -280,9 +280,9 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
/*
* It's possible that the column is of a collatable type but the
- * collation could not be resolved, so double-check. (We must
- * check this here because DefineRelation would adopt the type's
- * default collation rather than complaining.)
+ * collation could not be resolved, so double-check. (We must check
+ * this here because DefineRelation would adopt the type's default
+ * collation rather than complaining.)
*/
if (!OidIsValid(col->collOid) &&
type_is_collatable(coltype->typeOid))
@@ -297,8 +297,8 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
if (lc != NULL)
ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("CREATE TABLE AS specifies too many column names")));
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("CREATE TABLE AS specifies too many column names")));
/*
* Actually create the target table
@@ -342,7 +342,7 @@ intorel_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
for (attnum = 1; attnum <= intoRelationDesc->rd_att->natts; attnum++)
rte->modifiedCols = bms_add_member(rte->modifiedCols,
- attnum - FirstLowInvalidHeapAttributeNumber);
+ attnum - FirstLowInvalidHeapAttributeNumber);
ExecCheckRTPerms(list_make1(rte), true);
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 90155b9c14..b7224bde87 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -695,8 +695,8 @@ check_encoding_locale_matches(int encoding, const char *collate, const char *cty
errmsg("encoding \"%s\" does not match locale \"%s\"",
pg_encoding_to_char(encoding),
ctype),
- errdetail("The chosen LC_CTYPE setting requires encoding \"%s\".",
- pg_encoding_to_char(ctype_encoding))));
+ errdetail("The chosen LC_CTYPE setting requires encoding \"%s\".",
+ pg_encoding_to_char(ctype_encoding))));
if (!(collate_encoding == encoding ||
collate_encoding == PG_SQL_ASCII ||
@@ -710,8 +710,8 @@ check_encoding_locale_matches(int encoding, const char *collate, const char *cty
errmsg("encoding \"%s\" does not match locale \"%s\"",
pg_encoding_to_char(encoding),
collate),
- errdetail("The chosen LC_COLLATE setting requires encoding \"%s\".",
- pg_encoding_to_char(collate_encoding))));
+ errdetail("The chosen LC_COLLATE setting requires encoding \"%s\".",
+ pg_encoding_to_char(collate_encoding))));
}
/* Error cleanup callback for createdb */
@@ -784,7 +784,8 @@ dropdb(const char *dbname, bool missing_ok)
/* DROP hook for the database being removed */
if (object_access_hook)
{
- ObjectAccessDrop drop_arg;
+ ObjectAccessDrop drop_arg;
+
memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP,
DatabaseRelationId, db_id, 0, &drop_arg);
@@ -831,8 +832,7 @@ dropdb(const char *dbname, bool missing_ok)
ReleaseSysCache(tup);
/*
- * Delete any comments or security labels associated with
- * the database.
+ * Delete any comments or security labels associated with the database.
*/
DeleteSharedComments(db_id, DatabaseRelationId);
DeleteSharedSecurityLabel(db_id, DatabaseRelationId);
@@ -860,18 +860,18 @@ dropdb(const char *dbname, bool missing_ok)
pgstat_drop_database(db_id);
/*
- * Tell checkpointer to forget any pending fsync and unlink requests for files
- * in the database; else the fsyncs will fail at next checkpoint, or
+ * Tell checkpointer to forget any pending fsync and unlink requests for
+ * files in the database; else the fsyncs will fail at next checkpoint, or
* worse, it will delete files that belong to a newly created database
* with the same OID.
*/
ForgetDatabaseFsyncRequests(db_id);
/*
- * Force a checkpoint to make sure the checkpointer has received the message
- * sent by ForgetDatabaseFsyncRequests. On Windows, this also ensures that
- * background procs don't hold any open files, which would cause rmdir() to
- * fail.
+ * Force a checkpoint to make sure the checkpointer has received the
+ * message sent by ForgetDatabaseFsyncRequests. On Windows, this also
+ * ensures that background procs don't hold any open files, which would
+ * cause rmdir() to fail.
*/
RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c
index 298940c7c4..1b8529ed84 100644
--- a/src/backend/commands/dropcmds.c
+++ b/src/backend/commands/dropcmds.c
@@ -30,7 +30,7 @@
#include "utils/syscache.h"
static void does_not_exist_skipping(ObjectType objtype,
- List *objname, List *objargs);
+ List *objname, List *objargs);
/*
* Drop one or more objects.
@@ -54,7 +54,7 @@ RemoveObjects(DropStmt *stmt)
foreach(cell1, stmt->objects)
{
- ObjectAddress address;
+ ObjectAddress address;
List *objname = lfirst(cell1);
List *objargs = NIL;
Relation relation = NULL;
@@ -97,8 +97,8 @@ RemoveObjects(DropStmt *stmt)
if (((Form_pg_proc) GETSTRUCT(tup))->proisagg)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is an aggregate function",
- NameListToString(objname)),
+ errmsg("\"%s\" is an aggregate function",
+ NameListToString(objname)),
errhint("Use DROP AGGREGATE to drop aggregate functions.")));
ReleaseSysCache(tup);
@@ -149,7 +149,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
break;
case OBJECT_CONVERSION:
msg = gettext_noop("conversion \"%s\" does not exist, skipping");
- name = NameListToString(objname);
+ name = NameListToString(objname);
break;
case OBJECT_SCHEMA:
msg = gettext_noop("schema \"%s\" does not exist, skipping");
@@ -196,9 +196,9 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
case OBJECT_CAST:
msg = gettext_noop("cast from type %s to type %s does not exist, skipping");
name = format_type_be(typenameTypeId(NULL,
- (TypeName *) linitial(objname)));
+ (TypeName *) linitial(objname)));
args = format_type_be(typenameTypeId(NULL,
- (TypeName *) linitial(objargs)));
+ (TypeName *) linitial(objargs)));
break;
case OBJECT_TRIGGER:
msg = gettext_noop("trigger \"%s\" for table \"%s\" does not exist, skipping");
@@ -231,7 +231,7 @@ does_not_exist_skipping(ObjectType objtype, List *objname, List *objargs)
args = strVal(linitial(objargs));
break;
default:
- elog(ERROR, "unexpected object type (%d)", (int)objtype);
+ elog(ERROR, "unexpected object type (%d)", (int) objtype);
break;
}
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index e2b4b994b4..1e8f618a34 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -117,7 +117,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
TupOutputState *tstate;
List *rewritten;
ListCell *lc;
- bool timing_set = false;
+ bool timing_set = false;
/* Initialize ExplainState. */
ExplainInitState(&es);
@@ -169,7 +169,7 @@ ExplainQuery(ExplainStmt *stmt, const char *queryString,
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("EXPLAIN option BUFFERS requires ANALYZE")));
-
+
/* if the timing was not set explicitly, set default value */
es.timing = (timing_set) ? es.timing : es.analyze;
@@ -340,9 +340,9 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
if (IsA(utilityStmt, CreateTableAsStmt))
{
/*
- * We have to rewrite the contained SELECT and then pass it back
- * to ExplainOneQuery. It's probably not really necessary to copy
- * the contained parsetree another time, but let's be safe.
+ * We have to rewrite the contained SELECT and then pass it back to
+ * ExplainOneQuery. It's probably not really necessary to copy the
+ * contained parsetree another time, but let's be safe.
*/
CreateTableAsStmt *ctas = (CreateTableAsStmt *) utilityStmt;
List *rewritten;
@@ -1021,7 +1021,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
{
if (planstate->instrument->need_timer)
appendStringInfo(es->str,
- " (actual time=%.3f..%.3f rows=%.0f loops=%.0f)",
+ " (actual time=%.3f..%.3f rows=%.0f loops=%.0f)",
startup_sec, total_sec, rows, nloops);
else
appendStringInfo(es->str,
@@ -1095,7 +1095,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
planstate, es);
if (es->analyze)
ExplainPropertyLong("Heap Fetches",
- ((IndexOnlyScanState *) planstate)->ioss_HeapFetches, es);
+ ((IndexOnlyScanState *) planstate)->ioss_HeapFetches, es);
break;
case T_BitmapIndexScan:
show_scan_qual(((BitmapIndexScan *) plan)->indexqualorig,
@@ -1237,7 +1237,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
bool has_temp = (usage->temp_blks_read > 0 ||
usage->temp_blks_written > 0);
bool has_timing = (!INSTR_TIME_IS_ZERO(usage->blk_read_time) ||
- !INSTR_TIME_IS_ZERO(usage->blk_write_time));
+ !INSTR_TIME_IS_ZERO(usage->blk_write_time));
/* Show only positive counter values. */
if (has_shared || has_local || has_temp)
@@ -1301,10 +1301,10 @@ ExplainNode(PlanState *planstate, List *ancestors,
appendStringInfoString(es->str, "I/O Timings:");
if (!INSTR_TIME_IS_ZERO(usage->blk_read_time))
appendStringInfo(es->str, " read=%0.3f",
- INSTR_TIME_GET_MILLISEC(usage->blk_read_time));
+ INSTR_TIME_GET_MILLISEC(usage->blk_read_time));
if (!INSTR_TIME_IS_ZERO(usage->blk_write_time))
appendStringInfo(es->str, " write=%0.3f",
- INSTR_TIME_GET_MILLISEC(usage->blk_write_time));
+ INSTR_TIME_GET_MILLISEC(usage->blk_write_time));
appendStringInfoChar(es->str, '\n');
}
}
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index 732791cc41..cde3d60ee8 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -899,8 +899,8 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
{
t_sql = DirectFunctionCall3(replace_text,
t_sql,
- CStringGetTextDatum("MODULE_PATHNAME"),
- CStringGetTextDatum(control->module_pathname));
+ CStringGetTextDatum("MODULE_PATHNAME"),
+ CStringGetTextDatum(control->module_pathname));
}
/* And now back to C string */
@@ -1585,14 +1585,14 @@ RemoveExtensionById(Oid extId)
* might write "DROP EXTENSION foo" in foo's own script files, as because
* errors in dependency management in extension script files could give
* rise to cases where an extension is dropped as a result of recursing
- * from some contained object. Because of that, we must test for the case
+ * from some contained object. Because of that, we must test for the case
* here, not at some higher level of the DROP EXTENSION command.
*/
if (extId == CurrentExtensionObject)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot drop extension \"%s\" because it is being modified",
- get_extension_name(extId))));
+ errmsg("cannot drop extension \"%s\" because it is being modified",
+ get_extension_name(extId))));
rel = heap_open(ExtensionRelationId, RowExclusiveLock);
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index 30135e6de8..342ecc2931 100644
--- a/src/backend/commands/foreigncmds.c
+++ b/src/backend/commands/foreigncmds.c
@@ -166,7 +166,7 @@ transformGenericOptions(Oid catalogId,
if (OidIsValid(fdwvalidator))
{
- Datum valarg = result;
+ Datum valarg = result;
/*
* Pass a null options list as an empty array, so that validators
@@ -215,13 +215,13 @@ RenameForeignDataWrapper(const char *oldname, const char *newname)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("foreign-data wrapper \"%s\" does not exist", oldname)));
+ errmsg("foreign-data wrapper \"%s\" does not exist", oldname)));
/* make sure the new name doesn't exist */
if (SearchSysCacheExists1(FOREIGNDATAWRAPPERNAME, CStringGetDatum(newname)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("foreign-data wrapper \"%s\" already exists", newname)));
+ errmsg("foreign-data wrapper \"%s\" already exists", newname)));
/* must be owner of FDW */
if (!pg_foreign_data_wrapper_ownercheck(HeapTupleGetOid(tup), GetUserId()))
@@ -364,7 +364,7 @@ AlterForeignDataWrapperOwner_oid(Oid fwdId, Oid newOwnerId)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("foreign-data wrapper with OID %u does not exist", fwdId)));
+ errmsg("foreign-data wrapper with OID %u does not exist", fwdId)));
AlterForeignDataWrapperOwner_internal(rel, tup, newOwnerId);
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index ff0836c141..13e30f4a55 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -890,9 +890,9 @@ CreateFunction(CreateFunctionStmt *stmt, const char *queryString)
ReleaseSysCache(languageTuple);
/*
-     * Only superuser is allowed to create leakproof functions, because
-     * such a function could let unprivileged users see invisible tuples
-     * that views used for row-level security should have filtered out.
+     * Only superuser is allowed to create leakproof functions, because such
+     * a function could let unprivileged users see invisible tuples that
+     * views used for row-level security should have filtered out.
*/
if (isLeakProof && !superuser())
ereport(ERROR,
@@ -1320,7 +1320,7 @@ AlterFunction(AlterFunctionStmt *stmt)
if (intVal(leakproof_item->arg) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("only superuser can define a leakproof function")));
+ errmsg("only superuser can define a leakproof function")));
procForm->proleakproof = intVal(leakproof_item->arg);
}
if (cost_item)
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 6c909298b7..a68d500e5b 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -96,7 +96,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
* concrete benefit for core types.
* When a comparison or exclusion operator has a polymorphic input type, the
- * actual input types must also match. This defends against the possibility
+ * actual input types must also match. This defends against the possibility
* that operators could vary behavior in response to get_fn_expr_argtype().
* At present, this hazard is theoretical: check_exclusion_constraint() and
* all core index access methods decline to set fn_expr for such calls.
@@ -134,6 +134,7 @@ CheckIndexCompatible(Oid oldId,
/* Caller should already have the relation locked in some way. */
relationId = RangeVarGetRelid(heapRelation, NoLock, false);
+
/*
* We can pretend isconstraint = false unconditionally. It only serves to
* decide the text of an error message that should never happen for us.
@@ -157,10 +158,10 @@ CheckIndexCompatible(Oid oldId,
ReleaseSysCache(tuple);
/*
- * Compute the operator classes, collations, and exclusion operators
- * for the new index, so we can test whether it's compatible with the
- * existing one. Note that ComputeIndexAttrs might fail here, but that's
- * OK: DefineIndex would have called this function with the same arguments
+ * Compute the operator classes, collations, and exclusion operators for
+ * the new index, so we can test whether it's compatible with the existing
+ * one. Note that ComputeIndexAttrs might fail here, but that's OK:
+ * DefineIndex would have called this function with the same arguments
* later on, and it would have failed then anyway.
*/
indexInfo = makeNode(IndexInfo);
@@ -218,11 +219,11 @@ CheckIndexCompatible(Oid oldId,
return false;
/* For polymorphic opcintype, column type changes break compatibility. */
- irel = index_open(oldId, AccessShareLock); /* caller probably has a lock */
+ irel = index_open(oldId, AccessShareLock); /* caller probably has a lock */
for (i = 0; i < old_natts; i++)
{
if (IsPolymorphicType(get_opclass_input_type(classObjectId[i])) &&
- irel->rd_att->attrs[i]->atttypid != typeObjectId[i])
+ irel->rd_att->attrs[i]->atttypid != typeObjectId[i])
{
ret = false;
break;
@@ -232,7 +233,8 @@ CheckIndexCompatible(Oid oldId,
/* Any change in exclusion operator selections breaks compatibility. */
if (ret && indexInfo->ii_ExclusionOps != NULL)
{
- Oid *old_operators, *old_procs;
+ Oid *old_operators,
+ *old_procs;
uint16 *old_strats;
RelationGetExclusionInfo(irel, &old_operators, &old_procs, &old_strats);
@@ -249,7 +251,7 @@ CheckIndexCompatible(Oid oldId,
op_input_types(indexInfo->ii_ExclusionOps[i], &left, &right);
if ((IsPolymorphicType(left) || IsPolymorphicType(right)) &&
- irel->rd_att->attrs[i]->atttypid != typeObjectId[i])
+ irel->rd_att->attrs[i]->atttypid != typeObjectId[i])
{
ret = false;
break;
@@ -1778,9 +1780,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
return;
/*
- * If the relation does exist, check whether it's an index. But note
- * that the relation might have been dropped between the time we did the
- * name lookup and now. In that case, there's nothing to do.
+ * If the relation does exist, check whether it's an index. But note that
+ * the relation might have been dropped between the time we did the name
+ * lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
@@ -1798,9 +1800,9 @@ RangeVarCallbackForReindexIndex(const RangeVar *relation,
if (relId != oldRelId)
{
/*
-         * Lock level here should match reindex_index() heap lock.
-         * If the OID isn't valid, it means the index was concurrently dropped,
-         * which is not a problem for us; just return normally.
+         * Lock level here should match reindex_index() heap lock. If the OID
+         * isn't valid, it means the index was concurrently dropped, which is
+         * not a problem for us; just return normally.
*/
*heapOid = IndexGetRelation(relId, true);
if (OidIsValid(*heapOid))
diff --git a/src/backend/commands/lockcmds.c b/src/backend/commands/lockcmds.c
index fd3dcc3643..ab13a45900 100644
--- a/src/backend/commands/lockcmds.c
+++ b/src/backend/commands/lockcmds.c
@@ -40,9 +40,9 @@ LockTableCommand(LockStmt *lockstmt)
/*
* During recovery we only accept these variations: LOCK TABLE foo IN
-     * ACCESS SHARE MODE, LOCK TABLE foo IN ROW SHARE MODE, LOCK TABLE foo
-     * IN ROW EXCLUSIVE MODE.  This test must match the restrictions defined
-     * in LockAcquire().
+     * ACCESS SHARE MODE, LOCK TABLE foo IN ROW SHARE MODE, and LOCK TABLE
+     * foo IN ROW EXCLUSIVE MODE.  This test must match the restrictions
+     * defined in LockAcquire().
*/
if (lockstmt->mode > RowExclusiveLock)
PreventCommandDuringRecovery("LOCK TABLE");
@@ -74,15 +74,16 @@ static void
RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid,
void *arg)
{
- LOCKMODE lockmode = * (LOCKMODE *) arg;
+ LOCKMODE lockmode = *(LOCKMODE *) arg;
char relkind;
AclResult aclresult;
if (!OidIsValid(relid))
- return; /* doesn't exist, so no permissions check */
+ return; /* doesn't exist, so no permissions check */
relkind = get_rel_relkind(relid);
if (!relkind)
- return; /* woops, concurrently dropped; no permissions check */
+ return; /* woops, concurrently dropped; no permissions
+ * check */
/* Currently, we only allow plain tables to be locked */
if (relkind != RELKIND_RELATION)
@@ -122,9 +123,10 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
if (aclresult != ACLCHECK_OK)
{
char *relname = get_rel_name(childreloid);
+
if (!relname)
- continue; /* child concurrently dropped, just skip it */
- aclcheck_error(aclresult, ACL_KIND_CLASS, relname);
+ continue; /* child concurrently dropped, just skip it */
+ aclcheck_error(aclresult, ACL_KIND_CLASS, relname);
}
/* We have enough rights to lock the relation; do so. */
@@ -134,17 +136,18 @@ LockTableRecurse(Oid reloid, LOCKMODE lockmode, bool nowait)
{
/* try to throw error by name; relation could be deleted... */
char *relname = get_rel_name(childreloid);
+
if (!relname)
- continue; /* child concurrently dropped, just skip it */
+ continue; /* child concurrently dropped, just skip it */
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on relation \"%s\"",
- relname)));
+ relname)));
}
/*
- * Even if we got the lock, child might have been concurrently dropped.
- * If so, we can skip it.
+ * Even if we got the lock, child might have been concurrently
+ * dropped. If so, we can skip it.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(childreloid)))
{
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 87c889604e..460b1d9ae2 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -1167,7 +1167,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
if (procform->prorettype != INT4OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree comparison procedures must return integer")));
+ errmsg("btree comparison procedures must return integer")));
/*
* If lefttype/righttype isn't specified, use the proc's input
@@ -1188,7 +1188,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
if (procform->prorettype != VOIDOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree sort support procedures must return void")));
+ errmsg("btree sort support procedures must return void")));
/*
* Can't infer lefttype/righttype from proc, so use default rule
@@ -1217,7 +1217,7 @@ assignProcTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
/*
* The default in CREATE OPERATOR CLASS is to use the class' opcintype as
- * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype
+ * lefttype and righttype. In CREATE or ALTER OPERATOR FAMILY, opcintype
* isn't available, so make the user specify the types.
*/
if (!OidIsValid(member->lefttype))
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index edd646e7c3..2d87b1c690 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -174,7 +174,7 @@ PrepareQuery(PrepareStmt *stmt, const char *queryString)
* ExecuteQuery --- implement the 'EXECUTE' utility statement.
*
* This code also supports CREATE TABLE ... AS EXECUTE. That case is
- * indicated by passing a non-null intoClause. The DestReceiver is already
+ * indicated by passing a non-null intoClause. The DestReceiver is already
* set up correctly for CREATE TABLE AS, but we still have to make a few
* other adjustments here.
*
@@ -211,7 +211,7 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
{
/*
* Need an EState to evaluate parameters; must not delete it till end
- * of query, in case parameters are pass-by-reference. Note that the
+ * of query, in case parameters are pass-by-reference. Note that the
* passed-in "params" could possibly be referenced in the parameter
* expressions.
*/
@@ -237,15 +237,15 @@ ExecuteQuery(ExecuteStmt *stmt, IntoClause *intoClause,
/*
* For CREATE TABLE ... AS EXECUTE, we must verify that the prepared
* statement is one that produces tuples. Currently we insist that it be
- * a plain old SELECT. In future we might consider supporting other
+ * a plain old SELECT. In future we might consider supporting other
* things such as INSERT ... RETURNING, but there are a couple of issues
* to be settled first, notably how WITH NO DATA should be handled in such
* a case (do we really want to suppress execution?) and how to pass down
* the OID-determining eflags (PortalStart won't handle them in such a
* case, and for that matter it's not clear the executor will either).
*
- * For CREATE TABLE ... AS EXECUTE, we also have to ensure that the
- * proper eflags and fetch count are passed to PortalStart/PortalRun.
+ * For CREATE TABLE ... AS EXECUTE, we also have to ensure that the proper
+ * eflags and fetch count are passed to PortalStart/PortalRun.
*/
if (intoClause)
{
@@ -658,7 +658,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
{
/*
* Need an EState to evaluate parameters; must not delete it till end
- * of query, in case parameters are pass-by-reference. Note that the
+ * of query, in case parameters are pass-by-reference. Note that the
* passed-in "params" could possibly be referenced in the parameter
* expressions.
*/
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 5d2e7dc195..354389c617 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -133,7 +133,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
false, /* isAgg */
false, /* isWindowFunc */
false, /* security_definer */
- false, /* isLeakProof */
+ false, /* isLeakProof */
false, /* isStrict */
PROVOLATILE_VOLATILE,
buildoidvector(funcargtypes, 0),
@@ -210,7 +210,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
false, /* isAgg */
false, /* isWindowFunc */
false, /* security_definer */
- false, /* isLeakProof */
+ false, /* isLeakProof */
true, /* isStrict */
PROVOLATILE_VOLATILE,
buildoidvector(funcargtypes, 1),
diff --git a/src/backend/commands/seclabel.c b/src/backend/commands/seclabel.c
index 2129f62e51..c09a96e9f6 100644
--- a/src/backend/commands/seclabel.c
+++ b/src/backend/commands/seclabel.c
@@ -237,7 +237,7 @@ GetSecurityLabel(const ObjectAddress *object, const char *provider)
return seclabel;
}
-/*
+/*
* SetSharedSecurityLabel is a helper function of SetSecurityLabel to
* handle shared database objects.
*/
@@ -246,8 +246,8 @@ SetSharedSecurityLabel(const ObjectAddress *object,
const char *provider, const char *label)
{
Relation pg_shseclabel;
- ScanKeyData keys[4];
- SysScanDesc scan;
+ ScanKeyData keys[4];
+ SysScanDesc scan;
HeapTuple oldtup;
HeapTuple newtup = NULL;
Datum values[Natts_pg_shseclabel];
@@ -414,8 +414,8 @@ void
DeleteSharedSecurityLabel(Oid objectId, Oid classId)
{
Relation pg_shseclabel;
- ScanKeyData skey[2];
- SysScanDesc scan;
+ ScanKeyData skey[2];
+ SysScanDesc scan;
HeapTuple oldtup;
ScanKeyInit(&skey[0],
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 718658995e..34b74f6c38 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -430,7 +430,7 @@ AlterSequence(AlterSeqStmt *stmt)
{
ereport(NOTICE,
(errmsg("relation \"%s\" does not exist, skipping",
- stmt->sequence->relname)));
+ stmt->sequence->relname)));
return;
}
@@ -514,12 +514,12 @@ nextval(PG_FUNCTION_ARGS)
sequence = makeRangeVarFromNameList(textToQualifiedNameList(seqin));
/*
- * XXX: This is not safe in the presence of concurrent DDL, but
- * acquiring a lock here is more expensive than letting nextval_internal
- * do it, since the latter maintains a cache that keeps us from hitting
- * the lock manager more than once per transaction. It's not clear
- * whether the performance penalty is material in practice, but for now,
- * we do it this way.
+ * XXX: This is not safe in the presence of concurrent DDL, but acquiring
+ * a lock here is more expensive than letting nextval_internal do it,
+ * since the latter maintains a cache that keeps us from hitting the lock
+ * manager more than once per transaction. It's not clear whether the
+ * performance penalty is material in practice, but for now, we do it this
+ * way.
*/
relid = RangeVarGetRelid(sequence, NoLock, false);
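
A toy of the once-per-transaction cache the comment alludes to: remember which relations were already locked and skip the lock manager on repeat calls. (The backend keeps this state in a private per-session list; the fixed array here is only illustrative.)

    #include <stdbool.h>

    #define MAX_CACHED 64

    static int  locked_relids[MAX_CACHED];
    static int  nlocked;

    /* Returns true only when the lock manager actually had to be hit. */
    static bool
    lock_once(int relid)
    {
        int         i;

        for (i = 0; i < nlocked; i++)
            if (locked_relids[i] == relid)
                return false;   /* already locked this transaction */

        /* ... the real lock-manager call would go here ... */
        if (nlocked < MAX_CACHED)
            locked_relids[nlocked++] = relid;
        return true;
    }
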
@@ -1543,9 +1543,9 @@ seq_redo(XLogRecPtr lsn, XLogRecord *record)
* is also used for updating sequences, it's possible that a hot-standby
* backend is examining the page concurrently; so we mustn't transiently
* trash the buffer. The solution is to build the correct new page
- * contents in local workspace and then memcpy into the buffer. Then
- * only bytes that are supposed to change will change, even transiently.
- * We must palloc the local page for alignment reasons.
+ * contents in local workspace and then memcpy into the buffer. Then only
+ * bytes that are supposed to change will change, even transiently. We
+ * must palloc the local page for alignment reasons.
*/
localpage = (Page) palloc(BufferGetPageSize(buffer));
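
A toy model of the technique: scribble on a private copy of the page, then publish it with a single memcpy, so a byte whose old and new values are equal keeps the same value at every instant a hot-standby reader could look.

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 8192

    static void
    publish_page(char *shared_page, void (*build)(char *page))
    {
        char       *local = malloc(PAGE_SIZE);  /* palloc'd in the backend,
                                                 * which also guarantees
                                                 * alignment */

        memcpy(local, shared_page, PAGE_SIZE);  /* start from current image */
        build(local);                           /* scribble privately */
        memcpy(shared_page, local, PAGE_SIZE);  /* one-pass publish */
        free(local);
    }
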
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 6148bd62da..5c69cfb85a 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -237,9 +237,9 @@ static const struct dropmsgstrings dropmsgstringarray[] = {
struct DropRelationCallbackState
{
- char relkind;
- Oid heapOid;
- bool concurrent;
+ char relkind;
+ Oid heapOid;
+ bool concurrent;
};
/* Alter table target-type flags for ATSimplePermissions */
@@ -372,8 +372,8 @@ static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel,
char *tablespacename, LOCKMODE lockmode);
static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode);
static void ATExecSetRelOptions(Relation rel, List *defList,
- AlterTableType operation,
- LOCKMODE lockmode);
+ AlterTableType operation,
+ LOCKMODE lockmode);
static void ATExecEnableDisableTrigger(Relation rel, char *trigname,
char fires_when, bool skip_system, LOCKMODE lockmode);
static void ATExecEnableDisableRule(Relation rel, char *rulename,
@@ -752,7 +752,7 @@ RemoveRelations(DropStmt *drop)
if (drop->behavior == DROP_CASCADE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DROP INDEX CONCURRENTLY does not support CASCADE")));
+ errmsg("DROP INDEX CONCURRENTLY does not support CASCADE")));
}
/*
@@ -799,7 +799,7 @@ RemoveRelations(DropStmt *drop)
RangeVar *rel = makeRangeVarFromNameList((List *) lfirst(cell));
Oid relOid;
ObjectAddress obj;
- struct DropRelationCallbackState state;
+ struct DropRelationCallbackState state;
/*
* These next few steps are a great deal like relation_openrv, but we
@@ -914,9 +914,9 @@ RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid, Oid oldRelOid,
/*
* In DROP INDEX, attempt to acquire lock on the parent table before
* locking the index. index_drop() will need this anyway, and since
- * regular queries lock tables before their indexes, we risk deadlock
- * if we do it the other way around. No error if we don't find a
- * pg_index entry, though --- the relation may have been droppd.
+ * regular queries lock tables before their indexes, we risk deadlock if
+ * we do it the other way around. No error if we don't find a pg_index
+ * entry, though --- the relation may have been dropped.
*/
if (relkind == RELKIND_INDEX && relOid != oldRelOid)
{
@@ -2322,12 +2322,12 @@ static void
RangeVarCallbackForRenameAttribute(const RangeVar *rv, Oid relid, Oid oldrelid,
void *arg)
{
- HeapTuple tuple;
- Form_pg_class form;
+ HeapTuple tuple;
+ Form_pg_class form;
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
- return; /* concurrently dropped */
+ return; /* concurrently dropped */
form = (Form_pg_class) GETSTRUCT(tuple);
renameatt_check(relid, form, false);
ReleaseSysCache(tuple);
@@ -2351,7 +2351,7 @@ renameatt(RenameStmt *stmt)
{
ereport(NOTICE,
(errmsg("relation \"%s\" does not exist, skipping",
- stmt->relation->relname)));
+ stmt->relation->relname)));
return;
}
@@ -2379,7 +2379,7 @@ rename_constraint_internal(Oid myrelid,
{
Relation targetrelation = NULL;
Oid constraintOid;
- HeapTuple tuple;
+ HeapTuple tuple;
Form_pg_constraint con;
AssertArg(!myrelid || !mytypid);
@@ -2391,7 +2391,11 @@ rename_constraint_internal(Oid myrelid,
else
{
targetrelation = relation_open(myrelid, AccessExclusiveLock);
- /* don't tell it whether we're recursing; we allow changing typed tables here */
+
+ /*
+ * don't tell it whether we're recursing; we allow changing typed
+ * tables here
+ */
renameatt_check(myrelid, RelationGetForm(targetrelation), false);
constraintOid = get_relation_constraint_oid(myrelid, oldconname, false);
@@ -2408,9 +2412,9 @@ rename_constraint_internal(Oid myrelid,
if (recurse)
{
List *child_oids,
- *child_numparents;
+ *child_numparents;
ListCell *lo,
- *li;
+ *li;
child_oids = find_all_inheritors(myrelid, AccessExclusiveLock,
&child_numparents);
@@ -2455,7 +2459,7 @@ rename_constraint_internal(Oid myrelid,
ReleaseSysCache(tuple);
if (targetrelation)
- relation_close(targetrelation, NoLock); /* close rel but keep lock */
+ relation_close(targetrelation, NoLock); /* close rel but keep lock */
}
void
@@ -2469,7 +2473,7 @@ RenameConstraint(RenameStmt *stmt)
Relation rel;
HeapTuple tup;
- typid = typenameTypeId(NULL, makeTypeNameFromNameList(stmt->object));
+ typid = typenameTypeId(NULL, makeTypeNameFromNameList(stmt->object));
rel = heap_open(TypeRelationId, RowExclusiveLock);
tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
if (!HeapTupleIsValid(tup))
@@ -2490,9 +2494,9 @@ RenameConstraint(RenameStmt *stmt)
rename_constraint_internal(relid, typid,
stmt->subname,
stmt->newname,
- stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */
+ stmt->relation ? interpretInhOption(stmt->relation->inhOpt) : false, /* recursive? */
false, /* recursing? */
- 0 /* expected inhcount */);
+ 0 /* expected inhcount */ );
}
/*
@@ -2507,8 +2511,8 @@ RenameRelation(RenameStmt *stmt)
* Grab an exclusive lock on the target table, index, sequence or view,
* which we will NOT release until end of transaction.
*
- * Lock level used here should match RenameRelationInternal, to avoid
- * lock escalation.
+ * Lock level used here should match RenameRelationInternal, to avoid lock
+ * escalation.
*/
relid = RangeVarGetRelidExtended(stmt->relation, AccessExclusiveLock,
stmt->missing_ok, false,
@@ -2519,7 +2523,7 @@ RenameRelation(RenameStmt *stmt)
{
ereport(NOTICE,
(errmsg("relation \"%s\" does not exist, skipping",
- stmt->relation->relname)));
+ stmt->relation->relname)));
return;
}
@@ -2702,11 +2706,11 @@ AlterTableLookupRelation(AlterTableStmt *stmt, LOCKMODE lockmode)
* Thanks to the magic of MVCC, an error anywhere along the way rolls back
* the whole operation; we don't have to do anything special to clean up.
*
- * The caller must lock the relation, with an appropriate lock level
+ * The caller must lock the relation, with an appropriate lock level
* for the subcommands requested. Any subcommand that needs to rewrite
* tuples in the table forces the whole command to be executed with
* AccessExclusiveLock (actually, that is currently required always, but
- * we hope to relax it at some point). We pass the lock level down
+ * we hope to relax it at some point). We pass the lock level down
* so that we can apply it recursively to inherited tables. Note that the
* lock level we want as we recurse might well be higher than required for
* that specific subcommand. So we pass down the overall lock requirement,
@@ -2773,22 +2777,22 @@ LOCKMODE
AlterTableGetLockLevel(List *cmds)
{
/*
- * Late in 9.1 dev cycle a number of issues were uncovered with access
- * to catalog relations, leading to the decision to re-enforce all DDL
- * at AccessExclusiveLock level by default.
+ * Late in 9.1 dev cycle a number of issues were uncovered with access to
+ * catalog relations, leading to the decision to re-enforce all DDL at
+ * AccessExclusiveLock level by default.
*
* The issues are that there is a pervasive assumption in the code that
- * the catalogs will not be read unless an AccessExclusiveLock is held.
- * If that rule is relaxed, we must protect against a number of potential
+ * the catalogs will not be read unless an AccessExclusiveLock is held. If
+ * that rule is relaxed, we must protect against a number of potential
* effects - infrequent, but proven possible with test cases where
* multiple DDL operations occur in a stream against frequently accessed
* tables.
*
- * 1. Catalog tables are read using SnapshotNow, which has a race bug
- * that allows a scan to return no valid rows even when one is present
- * in the case of a commit of a concurrent update of the catalog table.
- * SnapshotNow also ignores transactions in progress, so takes the
- * latest committed version without waiting for the latest changes.
+ * 1. Catalog tables are read using SnapshotNow, which has a race bug that
+ * allows a scan to return no valid rows even when one is present in the
+ * case of a commit of a concurrent update of the catalog table.
+ * SnapshotNow also ignores transactions in progress, so takes the latest
+ * committed version without waiting for the latest changes.
*
* 2. Relcache needs to be internally consistent, so unless we lock the
* definition during reads we have no way to guarantee that.
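Even though this comment explains why 9.1/9.2 pinned most DDL back to AccessExclusiveLock, the mechanism AlterTableGetLockLevel retains is "strongest lock across all subcommands". A toy version of that selection, with an invented subcommand table (the real function switches on AlterTableType):

    /* "Strongest lock wins" over a list of subcommands; the enum values
     * and the table are invented for this sketch. */
    #include <stdio.h>

    typedef enum
    {
        ShareUpdateExclusive = 4,
        ShareRowExclusive = 6,
        AccessExclusive = 8
    } LockLevel;

    typedef struct
    {
        const char *name;
        LockLevel   needs;
    } Subcommand;

    static LockLevel alter_table_lock_level(const Subcommand *cmds, int n)
    {
        LockLevel lockmode = ShareUpdateExclusive;  /* weakest candidate */

        for (int i = 0; i < n; i++)
            if (cmds[i].needs > lockmode)
                lockmode = cmds[i].needs;   /* escalate, never downgrade */
        return lockmode;
    }

    int main(void)
    {
        Subcommand cmds[] = {
            {"SET STATISTICS", ShareUpdateExclusive},
            {"ADD FOREIGN KEY", ShareRowExclusive},
            {"ALTER COLUMN TYPE", AccessExclusive},     /* forces a rewrite */
        };

        printf("lock level: %d\n", alter_table_lock_level(cmds, 3));
        return 0;
    }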
@@ -3156,8 +3160,8 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
pass = AT_PASS_MISC; /* doesn't actually matter */
break;
case AT_SetRelOptions: /* SET (...) */
- case AT_ResetRelOptions: /* RESET (...) */
- case AT_ReplaceRelOptions: /* reset them all, then set just these */
+ case AT_ResetRelOptions: /* RESET (...) */
+ case AT_ReplaceRelOptions: /* reset them all, then set just these */
ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX | ATT_VIEW);
/* This command never recurses */
/* No command-specific prep needed */
@@ -3344,8 +3348,8 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
case AT_ValidateConstraint: /* VALIDATE CONSTRAINT */
ATExecValidateConstraint(rel, cmd->name, false, false, lockmode);
break;
- case AT_ValidateConstraintRecurse: /* VALIDATE CONSTRAINT with
- * recursion */
+ case AT_ValidateConstraintRecurse: /* VALIDATE CONSTRAINT with
+ * recursion */
ATExecValidateConstraint(rel, cmd->name, true, false, lockmode);
break;
case AT_DropConstraint: /* DROP CONSTRAINT */
@@ -3361,7 +3365,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
case AT_AlterColumnType: /* ALTER COLUMN TYPE */
ATExecAlterColumnType(tab, rel, cmd, lockmode);
break;
- case AT_AlterColumnGenericOptions: /* ALTER COLUMN OPTIONS */
+ case AT_AlterColumnGenericOptions: /* ALTER COLUMN OPTIONS */
ATExecAlterColumnGenericOptions(rel, cmd->name, (List *) cmd->def, lockmode);
break;
case AT_ChangeOwner: /* ALTER OWNER */
@@ -4725,7 +4729,7 @@ static void
check_for_column_name_collision(Relation rel, const char *colname)
{
HeapTuple attTuple;
- int attnum;
+ int attnum;
/*
* this test is deliberately not attisdropped-aware, since if one tries to
@@ -4737,7 +4741,7 @@ check_for_column_name_collision(Relation rel, const char *colname)
if (!HeapTupleIsValid(attTuple))
return;
- attnum = ((Form_pg_attribute) GETSTRUCT(attTuple))->attnum;
+ attnum = ((Form_pg_attribute) GETSTRUCT(attTuple))->attnum;
ReleaseSysCache(attTuple);
/*
@@ -4745,16 +4749,16 @@ check_for_column_name_collision(Relation rel, const char *colname)
* names, since they are normally not shown and the user might otherwise
* be confused about the reason for the conflict.
*/
- if (attnum <= 0)
- ereport(ERROR,
- (errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column name \"%s\" conflicts with a system column name",
- colname)));
- else
- ereport(ERROR,
- (errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" already exists",
- colname, RelationGetRelationName(rel))));
+ if (attnum <= 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_DUPLICATE_COLUMN),
+ errmsg("column name \"%s\" conflicts with a system column name",
+ colname)));
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_DUPLICATE_COLUMN),
+ errmsg("column \"%s\" of relation \"%s\" already exists",
+ colname, RelationGetRelationName(rel))));
}
/*
@@ -4999,8 +5003,8 @@ ATExecColumnDefault(Relation rel, const char *colName,
* safety, but at present we do not expect anything to depend on the
* default.
*
- * We treat removing the existing default as an internal operation when
- * it is preparatory to adding a new default, but as a user-initiated
+ * We treat removing the existing default as an internal operation when it
+ * is preparatory to adding a new default, but as a user-initiated
* operation when the user asked for a drop.
*/
RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, false,
@@ -5507,13 +5511,14 @@ ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
/*
* If TryReuseIndex() stashed a relfilenode for us, we used it for the new
- * index instead of building from scratch. The DROP of the old edition of
+ * index instead of building from scratch. The DROP of the old edition of
* this index will have scheduled the storage for deletion at commit, so
* cancel that pending deletion.
*/
if (OidIsValid(stmt->oldNode))
{
Relation irel = index_open(new_index, NoLock);
+
RelationPreserveStorage(irel->rd_node, true);
index_close(irel, NoLock);
}
@@ -5687,8 +5692,8 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
*/
newcons = AddRelationNewConstraints(rel, NIL,
list_make1(copyObject(constr)),
- recursing, /* allow_merge */
- !recursing); /* is_local */
+ recursing, /* allow_merge */
+ !recursing); /* is_local */
/* Add each to-be-validated constraint to Phase 3's queue */
foreach(lcon, newcons)
@@ -5743,7 +5748,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
/*
* Check if ONLY was specified with ALTER TABLE. If so, allow the
- * contraint creation only if there are no children currently. Error out
+ * constraint creation only if there are no children currently. Error out
* otherwise.
*/
if (!recurse && children != NIL)
@@ -6064,11 +6069,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Upon a change to the cast from the FK column to its pfeqop
- * operand, revalidate the constraint. For this evaluation, a
+ * operand, revalidate the constraint. For this evaluation, a
* binary coercion cast is equivalent to no cast at all. While
* type implementors should design implicit casts with an eye
- * toward consistency of operations like equality, we cannot assume
- * here that they have done so.
+ * toward consistency of operations like equality, we cannot
+ * assume here that they have done so.
*
* A function with a polymorphic argument could change behavior
* arbitrarily in response to get_fn_expr_argtype(). Therefore,
@@ -6082,7 +6087,7 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* Necessarily, the primary key column must then be of the domain
* type. Since the constraint was previously valid, all values on
* the foreign side necessarily exist on the primary side and in
- * turn conform to the domain. Consequently, we need not treat
+ * turn conform to the domain. Consequently, we need not treat
* domains specially here.
*
* Since we require that all collations share the same notion of
@@ -6091,8 +6096,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
*
* We need not directly consider the PK type. It's necessarily
* binary coercible to the opcintype of the unique index column,
- * and ri_triggers.c will only deal with PK datums in terms of that
- * opcintype. Changing the opcintype also changes pfeqop.
+ * and ri_triggers.c will only deal with PK datums in terms of
+ * that opcintype. Changing the opcintype also changes pfeqop.
*/
old_check_ok = (new_pathtype == old_pathtype &&
new_castfunc == old_castfunc &&
@@ -6144,11 +6149,11 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
createForeignKeyTriggers(rel, fkconstraint, constrOid, indexOid);
/*
- * Tell Phase 3 to check that the constraint is satisfied by existing rows.
- * We can skip this during table creation, when requested explicitly by
- * specifying NOT VALID in an ADD FOREIGN KEY command, and when we're
- * recreating a constraint following a SET DATA TYPE operation that did not
- * impugn its validity.
+ * Tell Phase 3 to check that the constraint is satisfied by existing
+ * rows. We can skip this during table creation, when requested explicitly
+ * by specifying NOT VALID in an ADD FOREIGN KEY command, and when we're
+ * recreating a constraint following a SET DATA TYPE operation that did
+ * not impugn its validity.
*/
if (!old_check_ok && !fkconstraint->skip_validation)
{
@@ -6236,12 +6241,12 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse,
Relation refrel;
/*
- * Triggers are already in place on both tables, so a concurrent write
- * that alters the result here is not possible. Normally we can run a
- * query here to do the validation, which would only require
- * AccessShareLock. In some cases, it is possible that we might need
- * to fire triggers to perform the check, so we take a lock at
- * RowShareLock level just in case.
+ * Triggers are already in place on both tables, so a concurrent
+ * write that alters the result here is not possible. Normally we
+ * can run a query here to do the validation, which would only
+ * require AccessShareLock. In some cases, it is possible that we
+ * might need to fire triggers to perform the check, so we take a
+ * lock at RowShareLock level just in case.
*/
refrel = heap_open(con->confrelid, RowShareLock);
@@ -6278,7 +6283,7 @@ ATExecValidateConstraint(Relation rel, char *constrName, bool recurse,
*/
foreach(child, children)
{
- Oid childoid = lfirst_oid(child);
+ Oid childoid = lfirst_oid(child);
Relation childrel;
if (childoid == RelationGetRelid(rel))
@@ -6662,27 +6667,28 @@ checkFkeyPermissions(Relation rel, int16 *attnums, int natts)
static void
validateCheckConstraint(Relation rel, HeapTuple constrtup)
{
- EState *estate;
- Datum val;
- char *conbin;
- Expr *origexpr;
- List *exprstate;
- TupleDesc tupdesc;
- HeapScanDesc scan;
- HeapTuple tuple;
- ExprContext *econtext;
- MemoryContext oldcxt;
+ EState *estate;
+ Datum val;
+ char *conbin;
+ Expr *origexpr;
+ List *exprstate;
+ TupleDesc tupdesc;
+ HeapScanDesc scan;
+ HeapTuple tuple;
+ ExprContext *econtext;
+ MemoryContext oldcxt;
TupleTableSlot *slot;
Form_pg_constraint constrForm;
- bool isnull;
+ bool isnull;
constrForm = (Form_pg_constraint) GETSTRUCT(constrtup);
estate = CreateExecutorState();
+
/*
* XXX this tuple doesn't really come from a syscache, but this doesn't
- * matter to SysCacheGetAttr, because it only wants to be able to fetch the
- * tupdesc
+ * matter to SysCacheGetAttr, because it only wants to be able to fetch
+ * the tupdesc
*/
val = SysCacheGetAttr(CONSTROID, constrtup, Anum_pg_constraint_conbin,
&isnull);
@@ -7132,7 +7138,7 @@ ATExecDropConstraint(Relation rel, const char *constrName,
con = (Form_pg_constraint) GETSTRUCT(copy_tuple);
- if (con->coninhcount <= 0) /* shouldn't happen */
+ if (con->coninhcount <= 0) /* shouldn't happen */
elog(ERROR, "relation %u has non-inherited constraint \"%s\"",
childrelid, constrName);
@@ -7140,8 +7146,7 @@ ATExecDropConstraint(Relation rel, const char *constrName,
{
/*
* If the child constraint has other definition sources, just
- * decrement its inheritance count; if not, recurse to delete
- * it.
+ * decrement its inheritance count; if not, recurse to delete it.
*/
if (con->coninhcount == 1 && !con->conislocal)
{
@@ -7164,9 +7169,9 @@ ATExecDropConstraint(Relation rel, const char *constrName,
else
{
/*
- * If we were told to drop ONLY in this table (no recursion),
- * we need to mark the inheritors' constraints as locally
- * defined rather than inherited.
+ * If we were told to drop ONLY in this table (no recursion), we
+ * need to mark the inheritors' constraints as locally defined
+ * rather than inherited.
*/
con->coninhcount--;
con->conislocal = true;
@@ -7315,8 +7320,8 @@ ATPrepAlterColumnType(List **wqueue,
if (transform == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" cannot be cast automatically to type %s",
- colName, format_type_be(targettype)),
+ errmsg("column \"%s\" cannot be cast automatically to type %s",
+ colName, format_type_be(targettype)),
errhint("Specify a USING expression to perform the conversion.")));
/* Fix collations after all else */
@@ -7483,8 +7488,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
if (defaultexpr == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("default for column \"%s\" cannot be cast automatically to type %s",
- colName, format_type_be(targettype))));
+ errmsg("default for column \"%s\" cannot be cast automatically to type %s",
+ colName, format_type_be(targettype))));
}
else
defaultexpr = NULL;
@@ -8060,7 +8065,8 @@ TryReuseIndex(Oid oldId, IndexStmt *stmt)
stmt->indexParams,
stmt->excludeOpNames))
{
- Relation irel = index_open(oldId, NoLock);
+ Relation irel = index_open(oldId, NoLock);
+
stmt->oldNode = irel->rd_node.relNode;
index_close(irel, NoLock);
}
@@ -8085,7 +8091,7 @@ TryReuseForeignKey(Oid oldId, Constraint *con)
int i;
Assert(con->contype == CONSTR_FOREIGN);
- Assert(con->old_conpfeqop == NIL); /* already prepared this node */
+ Assert(con->old_conpfeqop == NIL); /* already prepared this node */
tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(oldId));
if (!HeapTupleIsValid(tup)) /* should not happen */
@@ -8587,8 +8593,8 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
/* Generate new proposed reloptions (text array) */
newOptions = transformRelOptions(isnull ? (Datum) 0 : datum,
- defList, NULL, validnsps, false,
- operation == AT_ResetRelOptions);
+ defList, NULL, validnsps, false,
+ operation == AT_ResetRelOptions);
/* Validate */
switch (rel->rd_rel->relkind)
@@ -8665,8 +8671,8 @@ ATExecSetRelOptions(Relation rel, List *defList, AlterTableType operation,
}
newOptions = transformRelOptions(isnull ? (Datum) 0 : datum,
- defList, "toast", validnsps, false,
- operation == AT_ResetRelOptions);
+ defList, "toast", validnsps, false,
+ operation == AT_ResetRelOptions);
(void) heap_reloptions(RELKIND_TOASTVALUE, newOptions, true);
@@ -9831,7 +9837,7 @@ AlterTableNamespace(AlterObjectSchemaStmt *stmt)
{
ereport(NOTICE,
(errmsg("relation \"%s\" does not exist, skipping",
- stmt->relation->relname)));
+ stmt->relation->relname)));
return;
}
@@ -9848,10 +9854,10 @@ AlterTableNamespace(AlterObjectSchemaStmt *stmt)
if (sequenceIsOwned(relid, &tableId, &colId))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move an owned sequence into another schema"),
- errdetail("Sequence \"%s\" is linked to table \"%s\".",
- RelationGetRelationName(rel),
- get_rel_name(tableId))));
+ errmsg("cannot move an owned sequence into another schema"),
+ errdetail("Sequence \"%s\" is linked to table \"%s\".",
+ RelationGetRelationName(rel),
+ get_rel_name(tableId))));
}
/* Get and lock schema OID and check its permissions. */
@@ -10267,9 +10273,9 @@ RangeVarCallbackOwnsTable(const RangeVar *relation,
return;
/*
- * If the relation does exist, check whether it's an index. But note
- * that the relation might have been dropped between the time we did the
- * name lookup and now. In that case, there's nothing to do.
+ * If the relation does exist, check whether it's an index. But note that
+ * the relation might have been dropped between the time we did the name
+ * lookup and now. In that case, there's nothing to do.
*/
relkind = get_rel_relkind(relId);
if (!relkind)
@@ -10292,16 +10298,16 @@ static void
RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
void *arg)
{
- Node *stmt = (Node *) arg;
- ObjectType reltype;
- HeapTuple tuple;
- Form_pg_class classform;
- AclResult aclresult;
- char relkind;
+ Node *stmt = (Node *) arg;
+ ObjectType reltype;
+ HeapTuple tuple;
+ Form_pg_class classform;
+ AclResult aclresult;
+ char relkind;
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
- return; /* concurrently dropped */
+ return; /* concurrently dropped */
classform = (Form_pg_class) GETSTRUCT(tuple);
relkind = classform->relkind;
@@ -10324,7 +10330,7 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
*/
if (IsA(stmt, RenameStmt))
{
- aclresult = pg_namespace_aclcheck(classform->relnamespace,
+ aclresult = pg_namespace_aclcheck(classform->relnamespace,
GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
@@ -10333,20 +10339,21 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
}
else if (IsA(stmt, AlterObjectSchemaStmt))
reltype = ((AlterObjectSchemaStmt *) stmt)->objectType;
else if (IsA(stmt, AlterTableStmt))
reltype = ((AlterTableStmt *) stmt)->relkind;
else
{
- reltype = OBJECT_TABLE; /* placate compiler */
+ reltype = OBJECT_TABLE; /* placate compiler */
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(stmt));
}
/*
- * For compatibility with prior releases, we allow ALTER TABLE to be
- * used with most other types of relations (but not composite types).
- * We allow similar flexibility for ALTER INDEX in the case of RENAME,
- * but not otherwise. Otherwise, the user must select the correct form
- * of the command for the relation at issue.
+ * For compatibility with prior releases, we allow ALTER TABLE to be used
+ * with most other types of relations (but not composite types). We allow
+ * similar flexibility for ALTER INDEX in the case of RENAME, but not
+ * otherwise. Otherwise, the user must select the correct form of the
+ * command for the relation at issue.
*/
if (reltype == OBJECT_SEQUENCE && relkind != RELKIND_SEQUENCE)
ereport(ERROR,
@@ -10391,10 +10398,10 @@ RangeVarCallbackForAlterRelation(const RangeVar *rv, Oid relid, Oid oldrelid,
errhint("Use ALTER FOREIGN TABLE instead.")));
/*
- * Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be
- * moved to a different schema, such as indexes and TOAST tables.
+ * Don't allow ALTER TABLE .. SET SCHEMA on relations that can't be moved
+ * to a different schema, such as indexes and TOAST tables.
*/
- if (IsA(stmt, AlterObjectSchemaStmt) && relkind != RELKIND_RELATION
+ if (IsA(stmt, AlterObjectSchemaStmt) && relkind != RELKIND_RELATION
&& relkind != RELKIND_VIEW && relkind != RELKIND_SEQUENCE
&& relkind != RELKIND_FOREIGN_TABLE)
ereport(ERROR,
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index 708bebb54d..da9cb2f30e 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -437,7 +437,8 @@ DropTableSpace(DropTableSpaceStmt *stmt)
/* DROP hook for the tablespace being removed */
if (object_access_hook)
{
- ObjectAccessDrop drop_arg;
+ ObjectAccessDrop drop_arg;
+
memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP, TableSpaceRelationId,
tablespaceoid, 0, &drop_arg);
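The OAT_DROP invocation above is the server side of the object_access_hook contract; on the extension side the hook is chained, much as sepgsql does. A hedged sketch of such an extension, assuming the 9.2-era hook signature from catalog/objectaccess.h (the logging body is invented):

    /* Hypothetical extension observing drops via object_access_hook. */
    #include "postgres.h"
    #include "fmgr.h"
    #include "catalog/objectaccess.h"

    PG_MODULE_MAGIC;

    static object_access_hook_type prev_object_access_hook = NULL;

    static void
    drop_watcher(ObjectAccessType access, Oid classId, Oid objectId,
                 int subId, void *arg)
    {
        /* always chain to any previously installed hook */
        if (prev_object_access_hook)
            prev_object_access_hook(access, classId, objectId, subId, arg);

        if (access == OAT_DROP)
            elog(LOG, "object %u (catalog %u) is being dropped",
                 objectId, classId);
    }

    void
    _PG_init(void)
    {
        prev_object_access_hook = object_access_hook;
        object_access_hook = drop_watcher;
    }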
@@ -638,7 +639,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
* Attempt to remove filesystem infrastructure for the tablespace.
*
* 'redo' indicates we are redoing a drop from XLOG; in that case we should
- * not throw an ERROR for problems, just LOG them. The worst consequence of
+ * not throw an ERROR for problems, just LOG them. The worst consequence of
* not removing files here would be failure to release some disk space, which
* does not justify throwing an error that would require manual intervention
* to get the database running again.
@@ -678,7 +679,7 @@ destroy_tablespace_directories(Oid tablespaceoid, bool redo)
* with a warning. This is because even though ProcessUtility disallows
* DROP TABLESPACE in a transaction block, it's possible that a previous
* DROP failed and rolled back after removing the tablespace directories
- * and/or symlink. We want to allow a new DROP attempt to succeed at
+ * and/or symlink. We want to allow a new DROP attempt to succeed at
* removing the catalog entries (and symlink if still present), so we
* should not give a hard error here.
*/
@@ -1199,14 +1200,14 @@ check_temp_tablespaces(char **newval, void **extra, GucSource source)
}
/*
- * In an interactive SET command, we ereport for bad info. When
+ * In an interactive SET command, we ereport for bad info. When
* source == PGC_S_TEST, we are checking the argument of an ALTER
- * DATABASE SET or ALTER USER SET command. pg_dumpall dumps all
+ * DATABASE SET or ALTER USER SET command. pg_dumpall dumps all
* roles before tablespaces, so if we're restoring a pg_dumpall
* script the tablespace might not yet exist, but will be created
- * later. Because of that, issue a NOTICE if source == PGC_S_TEST,
- * but accept the value anyway. Otherwise, silently ignore any
- * bad list elements.
+ * later. Because of that, issue a NOTICE if source ==
+ * PGC_S_TEST, but accept the value anyway. Otherwise, silently
+ * ignore any bad list elements.
*/
curoid = get_tablespace_oid(curname, source <= PGC_S_TEST);
if (curoid == InvalidOid)
@@ -1493,10 +1494,10 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record)
* files then do conflict processing and try again, if currently
* enabled.
*
- * Other possible reasons for failure include bollixed file permissions
- * on a standby server when they were okay on the primary, etc etc.
- * There's not much we can do about that, so just remove what we can
- * and press on.
+ * Other possible reasons for failure include bollixed file
+ * permissions on a standby server when they were okay on the primary,
+ * etc etc. There's not much we can do about that, so just remove what
+ * we can and press on.
*/
if (!destroy_tablespace_directories(xlrec->ts_id, true))
{
@@ -1513,8 +1514,8 @@ tblspc_redo(XLogRecPtr lsn, XLogRecord *record)
if (!destroy_tablespace_directories(xlrec->ts_id, true))
ereport(LOG,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("directories for tablespace %u could not be removed",
- xlrec->ts_id),
+ errmsg("directories for tablespace %u could not be removed",
+ xlrec->ts_id),
errhint("You can remove the directories manually if necessary.")));
}
}
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 1218d033d1..4399a27446 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -199,8 +199,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
/*
* We must take a lock on the target relation to protect against
* concurrent drop. It's not clear that AccessShareLock is strong
- * enough, but we certainly need at least that much... otherwise,
- * we might end up creating a pg_constraint entry referencing a
+ * enough, but we certainly need at least that much... otherwise, we
+ * might end up creating a pg_constraint entry referencing a
* nonexistent table.
*/
constrrelid = RangeVarGetRelid(stmt->constrrel, AccessShareLock, false);
@@ -494,8 +494,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
* can skip this for internally generated triggers, since the name
* modification above should be sufficient.
*
- * NOTE that this is cool only because we have AccessExclusiveLock on
- * the relation, so the trigger set won't be changing underneath us.
+ * NOTE that this is cool only because we have AccessExclusiveLock on the
+ * relation, so the trigger set won't be changing underneath us.
*/
if (!isInternal)
{
@@ -1168,27 +1168,27 @@ static void
RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
void *arg)
{
- HeapTuple tuple;
- Form_pg_class form;
+ HeapTuple tuple;
+ Form_pg_class form;
tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid));
if (!HeapTupleIsValid(tuple))
- return; /* concurrently dropped */
+ return; /* concurrently dropped */
form = (Form_pg_class) GETSTRUCT(tuple);
/* only tables and views can have triggers */
- if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW)
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table or view", rv->relname)));
+ if (form->relkind != RELKIND_RELATION && form->relkind != RELKIND_VIEW)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is not a table or view", rv->relname)));
/* you must own the table to rename one of its triggers */
- if (!pg_class_ownercheck(relid, GetUserId()))
- aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
- if (!allowSystemTableMods && IsSystemClass(form))
- ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied: \"%s\" is a system catalog",
+ if (!pg_class_ownercheck(relid, GetUserId()))
+ aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS, rv->relname);
+ if (!allowSystemTableMods && IsSystemClass(form))
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("permission denied: \"%s\" is a system catalog",
rv->relname)));
ReleaseSysCache(tuple);
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 77559842e5..fdb5bdbc11 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -609,7 +609,7 @@ DefineType(List *names, List *parameters)
F_ARRAY_SEND, /* send procedure */
typmodinOid, /* typmodin procedure */
typmodoutOid, /* typmodout procedure */
- F_ARRAY_TYPANALYZE, /* analyze procedure */
+ F_ARRAY_TYPANALYZE, /* analyze procedure */
typoid, /* element type ID */
true, /* yes this is an array type */
InvalidOid, /* no further array type */
@@ -1140,7 +1140,7 @@ DefineEnum(CreateEnumStmt *stmt)
F_ARRAY_SEND, /* send procedure */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
- F_ARRAY_TYPANALYZE, /* analyze procedure */
+ F_ARRAY_TYPANALYZE, /* analyze procedure */
enumTypeOid, /* element type ID */
true, /* yes this is an array type */
InvalidOid, /* no further array type */
@@ -1450,7 +1450,7 @@ DefineRange(CreateRangeStmt *stmt)
F_ARRAY_SEND, /* send procedure */
InvalidOid, /* typmodin procedure - none */
InvalidOid, /* typmodout procedure - none */
- F_ARRAY_TYPANALYZE, /* analyze procedure */
+ F_ARRAY_TYPANALYZE, /* analyze procedure */
typoid, /* element type ID */
true, /* yes this is an array type */
InvalidOid, /* no further array type */
@@ -1477,15 +1477,15 @@ DefineRange(CreateRangeStmt *stmt)
* impossible to define a polymorphic constructor; we have to generate new
* constructor functions explicitly for each range type.
*
- * We actually define 4 functions, with 0 through 3 arguments. This is just
+ * We actually define 4 functions, with 0 through 3 arguments. This is just
* to offer more convenience for the user.
*/
static void
makeRangeConstructors(const char *name, Oid namespace,
Oid rangeOid, Oid subtype)
{
- static const char * const prosrc[2] = {"range_constructor2",
- "range_constructor3"};
+ static const char *const prosrc[2] = {"range_constructor2",
+ "range_constructor3"};
static const int pronargs[2] = {2, 3};
Oid constructorArgTypes[3];
@@ -1509,7 +1509,7 @@ makeRangeConstructors(const char *name, Oid namespace,
constructorArgTypesVector = buildoidvector(constructorArgTypes,
pronargs[i]);
- procOid = ProcedureCreate(name, /* name: same as range type */
+ procOid = ProcedureCreate(name, /* name: same as range type */
namespace, /* namespace */
false, /* replace */
false, /* returns set */
@@ -1518,7 +1518,7 @@ makeRangeConstructors(const char *name, Oid namespace,
INTERNALlanguageId, /* language */
F_FMGR_INTERNAL_VALIDATOR, /* language validator */
prosrc[i], /* prosrc */
- NULL, /* probin */
+ NULL, /* probin */
false, /* isAgg */
false, /* isWindowFunc */
false, /* security_definer */
@@ -1834,9 +1834,9 @@ findRangeSubOpclass(List *opcname, Oid subtype)
if (!IsBinaryCoercible(subtype, opInputType))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator class \"%s\" does not accept data type %s",
- NameListToString(opcname),
- format_type_be(subtype))));
+ errmsg("operator class \"%s\" does not accept data type %s",
+ NameListToString(opcname),
+ format_type_be(subtype))));
}
else
{
@@ -2335,8 +2335,8 @@ AlterDomainDropConstraint(List *names, const char *constrName,
if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" of domain \"%s\" does not exist",
- constrName, TypeNameToString(typename))));
+ errmsg("constraint \"%s\" of domain \"%s\" does not exist",
+ constrName, TypeNameToString(typename))));
else
ereport(NOTICE,
(errmsg("constraint \"%s\" of domain \"%s\" does not exist, skipping",
@@ -2958,7 +2958,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
ccsrc, /* Source form of check constraint */
true, /* is local */
0, /* inhcount */
- false); /* is only */
+ false); /* is only */
/*
* Return the compiled constraint expression so the calling routine can
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 2edbabe754..a22092c202 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -936,7 +936,8 @@ DropRole(DropRoleStmt *stmt)
/* DROP hook for the role being removed */
if (object_access_hook)
{
- ObjectAccessDrop drop_arg;
+ ObjectAccessDrop drop_arg;
+
memset(&drop_arg, 0, sizeof(ObjectAccessDrop));
InvokeObjectAccessHook(OAT_DROP,
AuthIdRelationId, roleid, 0, &drop_arg);
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index c43cd8e017..710c2afc9f 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -322,13 +322,13 @@ get_rel_oids(Oid relid, const RangeVar *vacrel)
Oid relid;
/*
- * Since we don't take a lock here, the relation might be gone,
- * or the RangeVar might no longer refer to the OID we look up
- * here. In the former case, VACUUM will do nothing; in the
- * latter case, it will process the OID we looked up here, rather
- * than the new one. Neither is ideal, but there's little practical
- * alternative, since we're going to commit this transaction and
- * begin a new one between now and then.
+ * Since we don't take a lock here, the relation might be gone, or the
+ * RangeVar might no longer refer to the OID we look up here. In the
+ * former case, VACUUM will do nothing; in the latter case, it will
+ * process the OID we looked up here, rather than the new one.
+ * Neither is ideal, but there's little practical alternative, since
+ * we're going to commit this transaction and begin a new one between
+ * now and then.
*/
relid = RangeVarGetRelid(vacrel, NoLock, false);
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 3ff56a7366..5e90221164 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -155,9 +155,9 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
BlockNumber possibly_freeable;
PGRUsage ru0;
TimestampTz starttime = 0;
- long secs;
- int usecs;
- double read_rate,
+ long secs;
+ int usecs;
+ double read_rate,
write_rate;
bool scan_all;
TransactionId freezeTableLimit;
@@ -222,17 +222,17 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
*
* A corner case here is that if we scanned no pages at all because every
* page is all-visible, we should not update relpages/reltuples, because
- * we have no new information to contribute. In particular this keeps
- * us from replacing relpages=reltuples=0 (which means "unknown tuple
+ * we have no new information to contribute. In particular this keeps us
+ * from replacing relpages=reltuples=0 (which means "unknown tuple
* density") with nonzero relpages and reltuples=0 (which means "zero
* tuple density") unless there's some actual evidence for the latter.
*
- * We do update relallvisible even in the corner case, since if the
- * table is all-visible we'd definitely like to know that. But clamp
- * the value to be not more than what we're setting relpages to.
+ * We do update relallvisible even in the corner case, since if the table
+ * is all-visible we'd definitely like to know that. But clamp the value
+ * to be not more than what we're setting relpages to.
*
- * Also, don't change relfrozenxid if we skipped any pages, since then
- * we don't know for certain that all tuples have a newer xmin.
+ * Also, don't change relfrozenxid if we skipped any pages, since then we
+ * don't know for certain that all tuples have a newer xmin.
*/
new_rel_pages = vacrelstats->rel_pages;
new_rel_tuples = vacrelstats->new_rel_tuples;
@@ -265,7 +265,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
/* and log the action if appropriate */
if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
{
- TimestampTz endtime = GetCurrentTimestamp();
+ TimestampTz endtime = GetCurrentTimestamp();
if (Log_autovacuum_min_duration == 0 ||
TimestampDifferenceExceeds(starttime, endtime,
@@ -277,17 +277,17 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
write_rate = 0;
if ((secs > 0) || (usecs > 0))
{
- read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
- (secs + usecs / 1000000.0);
- write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
- (secs + usecs / 1000000.0);
+ read_rate = (double) BLCKSZ * VacuumPageMiss / (1024 * 1024) /
+ (secs + usecs / 1000000.0);
+ write_rate = (double) BLCKSZ * VacuumPageDirty / (1024 * 1024) /
+ (secs + usecs / 1000000.0);
}
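With the spacing fixed, the rate formula reads as bytes per elapsed second scaled to MiB. A standalone check of the arithmetic, with invented page counts and timings:

    /* Worked example of the autovacuum I/O rate computation; the
     * miss/dirty counts and elapsed time are made-up inputs. */
    #include <stdio.h>

    int main(void)
    {
        const double blcksz = 8192.0;   /* bytes per buffer page */
        long    secs = 2;
        int     usecs = 500000;         /* 2.5 s elapsed */
        long    page_miss = 1280;       /* pages read from disk */
        long    page_dirty = 640;       /* pages dirtied */

        double  elapsed = secs + usecs / 1000000.0;
        double  read_rate = blcksz * page_miss / (1024 * 1024) / elapsed;
        double  write_rate = blcksz * page_dirty / (1024 * 1024) / elapsed;

        /* prints: avg read rate: 4.000 MiB/s, avg write rate: 2.000 MiB/s */
        printf("avg read rate: %.3f MiB/s, avg write rate: %.3f MiB/s\n",
               read_rate, write_rate);
        return 0;
    }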
ereport(LOG,
(errmsg("automatic vacuum of table \"%s.%s.%s\": index scans: %d\n"
"pages: %d removed, %d remain\n"
"tuples: %.0f removed, %.0f remain\n"
"buffer usage: %d hits, %d misses, %d dirtied\n"
- "avg read rate: %.3f MiB/s, avg write rate: %.3f MiB/s\n"
+ "avg read rate: %.3f MiB/s, avg write rate: %.3f MiB/s\n"
"system usage: %s",
get_database_name(MyDatabaseId),
get_namespace_name(RelationGetNamespace(onerel)),
@@ -300,7 +300,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
VacuumPageHit,
VacuumPageMiss,
VacuumPageDirty,
- read_rate,write_rate,
+ read_rate, write_rate,
pg_rusage_show(&ru0))));
}
}
@@ -501,10 +501,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
vacrelstats->num_dead_tuples > 0)
{
/*
- * Before beginning index vacuuming, we release any pin we may hold
- * on the visibility map page. This isn't necessary for correctness,
- * but we do it anyway to avoid holding the pin across a lengthy,
- * unrelated operation.
+ * Before beginning index vacuuming, we release any pin we may
+ * hold on the visibility map page. This isn't necessary for
+ * correctness, but we do it anyway to avoid holding the pin
+ * across a lengthy, unrelated operation.
*/
if (BufferIsValid(vmbuffer))
{
@@ -535,10 +535,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/*
* Pin the visibility map page in case we need to mark the page
* all-visible. In most cases this will be very cheap, because we'll
- * already have the correct page pinned anyway. However, it's possible
- * that (a) next_not_all_visible_block is covered by a different VM page
- * than the current block or (b) we released our pin and did a cycle of
- * index vacuuming.
+ * already have the correct page pinned anyway. However, it's
+ * possible that (a) next_not_all_visible_block is covered by a
+ * different VM page than the current block or (b) we released our pin
+ * and did a cycle of index vacuuming.
*/
visibilitymap_pin(onerel, blkno, &vmbuffer);
@@ -873,10 +873,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
else if (!all_visible_according_to_vm)
{
/*
- * It should never be the case that the visibility map page
- * is set while the page-level bit is clear, but the reverse
- * is allowed. Set the visibility map bit as well so that
- * we get back in sync.
+ * It should never be the case that the visibility map page is
+ * set while the page-level bit is clear, but the reverse is
+ * allowed. Set the visibility map bit as well so that we get
+ * back in sync.
*/
visibilitymap_set(onerel, blkno, InvalidXLogRecPtr, vmbuffer,
visibility_cutoff_xid);
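The invariant restated in this comment is one-directional: the page-level bit may be set while the map bit is clear (the map may lag), never the reverse. A small sketch of the corresponding resync rule over a toy page state; the struct and helper are invented:

    /* Toy resync following the comment: a map bit lagging the page bit
     * is repaired by setting it; the reverse state is treated as a bug. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct
    {
        bool page_all_visible;  /* page-level bit */
        bool vm_all_visible;    /* visibility map bit */
    } PageState;

    static void resync_visibility(PageState *s)
    {
        /* "should never be the case": map set while page bit clear */
        assert(!(s->vm_all_visible && !s->page_all_visible));

        if (s->page_all_visible && !s->vm_all_visible)
            s->vm_all_visible = true;   /* get back in sync */
    }

    int main(void)
    {
        PageState s = {true, false};

        resync_visibility(&s);
        printf("vm bit: %d\n", s.vm_all_visible);
        return 0;
    }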
@@ -1152,7 +1152,7 @@ lazy_check_needs_freeze(Buffer buf)
if (heap_tuple_needs_freeze(tupleheader, FreezeLimit, buf))
return true;
- } /* scan along page */
+ } /* scan along page */
return false;
}
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index c887961bc9..3e7e39d8ec 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -204,8 +204,8 @@ DefineVirtualRelation(RangeVar *relation, List *tlist, bool replace,
checkViewTupleDesc(descriptor, rel->rd_att);
/*
- * The new options list replaces the existing options list, even
- * if it's empty.
+ * The new options list replaces the existing options list, even if
+ * it's empty.
*/
atcmd = makeNode(AlterTableCmd);
atcmd->subtype = AT_ReplaceRelOptions;
@@ -504,7 +504,7 @@ DefineView(ViewStmt *stmt, const char *queryString)
* long as the CREATE command is consistent with that --- no explicit
* schema name.
*/
- view = copyObject(stmt->view); /* don't corrupt original command */
+ view = copyObject(stmt->view); /* don't corrupt original command */
if (view->relpersistence == RELPERSISTENCE_PERMANENT
&& isViewOnTempTable(viewParse))
{
diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c
index 03790bbe06..2c8929b588 100644
--- a/src/backend/executor/execCurrent.c
+++ b/src/backend/executor/execCurrent.c
@@ -151,7 +151,7 @@ execCurrentOf(CurrentOfExpr *cexpr,
{
ScanState *scanstate;
bool lisnull;
- Oid tuple_tableoid PG_USED_FOR_ASSERTS_ONLY;
+ Oid tuple_tableoid PG_USED_FOR_ASSERTS_ONLY;
ItemPointer tuple_tid;
/*
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index fbb36fa6dc..440438b180 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -80,7 +80,7 @@ static void ExecutePlan(EState *estate, PlanState *planstate,
static bool ExecCheckRTEPerms(RangeTblEntry *rte);
static void ExecCheckXactReadOnly(PlannedStmt *plannedstmt);
static char *ExecBuildSlotValueDescription(TupleTableSlot *slot,
- int maxfieldlen);
+ int maxfieldlen);
static void EvalPlanQualStart(EPQState *epqstate, EState *parentestate,
Plan *planTree);
@@ -1520,7 +1520,7 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value in column \"%s\" violates not-null constraint",
- NameStr(rel->rd_att->attrs[attrChk - 1]->attname)),
+ NameStr(rel->rd_att->attrs[attrChk - 1]->attname)),
errdetail("Failing row contains %s.",
ExecBuildSlotValueDescription(slot, 64))));
}
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index a1193a8dc3..0ea21ca5f9 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -578,15 +578,15 @@ ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
/* Get the input slot and attribute number we want */
switch (variable->varno)
{
- case INNER_VAR: /* get the tuple from the inner node */
+ case INNER_VAR: /* get the tuple from the inner node */
slot = econtext->ecxt_innertuple;
break;
- case OUTER_VAR: /* get the tuple from the outer node */
+ case OUTER_VAR: /* get the tuple from the outer node */
slot = econtext->ecxt_outertuple;
break;
- /* INDEX_VAR is handled by default case */
+ /* INDEX_VAR is handled by default case */
default: /* get the tuple from the relation being
* scanned */
@@ -763,15 +763,15 @@ ExecEvalScalarVar(ExprState *exprstate, ExprContext *econtext,
/* Get the input slot and attribute number we want */
switch (variable->varno)
{
- case INNER_VAR: /* get the tuple from the inner node */
+ case INNER_VAR: /* get the tuple from the inner node */
slot = econtext->ecxt_innertuple;
break;
- case OUTER_VAR: /* get the tuple from the outer node */
+ case OUTER_VAR: /* get the tuple from the outer node */
slot = econtext->ecxt_outertuple;
break;
- /* INDEX_VAR is handled by default case */
+ /* INDEX_VAR is handled by default case */
default: /* get the tuple from the relation being
* scanned */
@@ -808,15 +808,15 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
/* Get the input slot we want */
switch (variable->varno)
{
- case INNER_VAR: /* get the tuple from the inner node */
+ case INNER_VAR: /* get the tuple from the inner node */
slot = econtext->ecxt_innertuple;
break;
- case OUTER_VAR: /* get the tuple from the outer node */
+ case OUTER_VAR: /* get the tuple from the outer node */
slot = econtext->ecxt_outertuple;
break;
- /* INDEX_VAR is handled by default case */
+ /* INDEX_VAR is handled by default case */
default: /* get the tuple from the relation being
* scanned */
@@ -879,15 +879,15 @@ ExecEvalWholeRowSlow(ExprState *exprstate, ExprContext *econtext,
/* Get the input slot we want */
switch (variable->varno)
{
- case INNER_VAR: /* get the tuple from the inner node */
+ case INNER_VAR: /* get the tuple from the inner node */
slot = econtext->ecxt_innertuple;
break;
- case OUTER_VAR: /* get the tuple from the outer node */
+ case OUTER_VAR: /* get the tuple from the outer node */
slot = econtext->ecxt_outertuple;
break;
- /* INDEX_VAR is handled by default case */
+ /* INDEX_VAR is handled by default case */
default: /* get the tuple from the relation being
* scanned */
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 40cd5ce5d1..2bd8b42835 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -578,7 +578,7 @@ ExecBuildProjectionInfo(List *targetList,
projInfo->pi_lastOuterVar = attnum;
break;
- /* INDEX_VAR is handled by default case */
+ /* INDEX_VAR is handled by default case */
default:
varSlotOffsets[numSimpleVars] = offsetof(ExprContext,
@@ -638,7 +638,7 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
projInfo->pi_lastOuterVar = attnum;
break;
- /* INDEX_VAR is handled by default case */
+ /* INDEX_VAR is handled by default case */
default:
if (projInfo->pi_lastScanVar < attnum)
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index ae8d374db2..bf2f5c6882 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -128,11 +128,11 @@ typedef struct SQLFunctionParseInfo
/* non-export function prototypes */
static Node *sql_fn_param_ref(ParseState *pstate, ParamRef *pref);
static Node *sql_fn_post_column_ref(ParseState *pstate,
- ColumnRef *cref, Node *var);
+ ColumnRef *cref, Node *var);
static Node *sql_fn_make_param(SQLFunctionParseInfoPtr pinfo,
- int paramno, int location);
+ int paramno, int location);
static Node *sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo,
- const char *paramname, int location);
+ const char *paramname, int location);
static List *init_execution_state(List *queryTree_list,
SQLFunctionCachePtr fcache,
bool lazyEvalOK);
@@ -227,13 +227,13 @@ prepare_sql_fn_parse_info(HeapTuple procedureTuple,
Anum_pg_proc_proargnames,
&isNull);
if (isNull)
- proargnames = PointerGetDatum(NULL); /* just to be sure */
+ proargnames = PointerGetDatum(NULL); /* just to be sure */
proargmodes = SysCacheGetAttr(PROCNAMEARGSNSP, procedureTuple,
Anum_pg_proc_proargmodes,
&isNull);
if (isNull)
- proargmodes = PointerGetDatum(NULL); /* just to be sure */
+ proargmodes = PointerGetDatum(NULL); /* just to be sure */
n_arg_names = get_func_input_arg_names(proargnames, proargmodes,
&pinfo->argnames);
@@ -422,7 +422,7 @@ static Node *
sql_fn_resolve_param_name(SQLFunctionParseInfoPtr pinfo,
const char *paramname, int location)
{
- int i;
+ int i;
if (pinfo->argnames == NULL)
return NULL;
diff --git a/src/backend/executor/nodeBitmapHeapscan.c b/src/backend/executor/nodeBitmapHeapscan.c
index 849665d4e2..702e704098 100644
--- a/src/backend/executor/nodeBitmapHeapscan.c
+++ b/src/backend/executor/nodeBitmapHeapscan.c
@@ -66,6 +66,7 @@ BitmapHeapNext(BitmapHeapScanState *node)
TIDBitmap *tbm;
TBMIterator *tbmiterator;
TBMIterateResult *tbmres;
+
#ifdef USE_PREFETCH
TBMIterator *prefetch_iterator;
#endif
@@ -355,7 +356,7 @@ bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
{
OffsetNumber offnum = tbmres->offsets[curslot];
ItemPointerData tid;
- HeapTupleData heapTuple;
+ HeapTupleData heapTuple;
ItemPointerSet(&tid, page, offnum);
if (heap_hot_search_buffer(&tid, scan->rs_rd, buffer, snapshot,
diff --git a/src/backend/executor/nodeIndexonlyscan.c b/src/backend/executor/nodeIndexonlyscan.c
index af31671b3e..38078763f5 100644
--- a/src/backend/executor/nodeIndexonlyscan.c
+++ b/src/backend/executor/nodeIndexonlyscan.c
@@ -86,7 +86,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
* Note on Memory Ordering Effects: visibilitymap_test does not lock
* the visibility map buffer, and therefore the result we read here
* could be slightly stale. However, it can't be stale enough to
- * matter. It suffices to show that (1) there is a read barrier
+ * matter. It suffices to show that (1) there is a read barrier
* between the time we read the index TID and the time we test the
* visibility map; and (2) there is a write barrier between the time
* some other concurrent process clears the visibility map bit and the
@@ -106,12 +106,12 @@ IndexOnlyNext(IndexOnlyScanState *node)
node->ioss_HeapFetches++;
tuple = index_fetch_heap(scandesc);
if (tuple == NULL)
- continue; /* no visible tuple, try next index entry */
+ continue; /* no visible tuple, try next index entry */
/*
* Only MVCC snapshots are supported here, so there should be no
* need to keep following the HOT chain once a visible entry has
- * been found. If we did want to allow that, we'd need to keep
+ * been found. If we did want to allow that, we'd need to keep
* more state to remember not to call index_getnext_tid next time.
*/
if (scandesc->xs_continue_hot)
@@ -120,7 +120,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
/*
* Note: at this point we are holding a pin on the heap page, as
* recorded in scandesc->xs_cbuf. We could release that pin now,
- * but it's not clear whether it's a win to do so. The next index
+ * but it's not clear whether it's a win to do so. The next index
* entry might require a visit to the same heap page.
*/
}
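The memory-ordering note earlier in this file's hunks relies on a read barrier on the scan side pairing with a write barrier on the side that clears the visibility map bit. A schematic of that pairing using C11 fences in place of pg_read_barrier()/pg_write_barrier(); the tuple and map state are a toy model, not the real data structures:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic bool vm_all_visible = true;  /* visibility map bit */
    static _Atomic int  heap_tid = 1;           /* newest TID for a key */

    /* Writer: clear the bit, then publish the new tuple's TID. */
    static void updater(void)
    {
        atomic_store_explicit(&vm_all_visible, false, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);  /* "write barrier" */
        atomic_store_explicit(&heap_tid, 2, memory_order_relaxed);
    }

    /* Reader: fetch the TID first, then test the bit.  If the new TID
     * was seen, the cleared bit is guaranteed visible too, so the scan
     * falls back to a heap fetch instead of trusting a stale map. */
    static bool index_only_visible(int *tid)
    {
        *tid = atomic_load_explicit(&heap_tid, memory_order_relaxed);
        atomic_thread_fence(memory_order_acquire);  /* "read barrier" */
        return atomic_load_explicit(&vm_all_visible, memory_order_relaxed);
    }

    int main(void)
    {
        int     tid;
        bool    vis;

        updater();
        vis = index_only_visible(&tid);
        printf("tid %d, map says all-visible: %d\n", tid, vis);
        return 0;
    }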
@@ -176,8 +176,8 @@ StoreIndexTuple(TupleTableSlot *slot, IndexTuple itup, TupleDesc itupdesc)
* Note: we must use the tupdesc supplied by the AM in index_getattr, not
* the slot's tupdesc, in case the latter has different datatypes (this
* happens for btree name_ops in particular). They'd better have the same
- * number of columns though, as well as being datatype-compatible which
- * is something we can't so easily check.
+ * number of columns though, as well as being datatype-compatible which is
+ * something we can't so easily check.
*/
Assert(slot->tts_tupleDescriptor->natts == nindexatts);
@@ -494,10 +494,10 @@ ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
* Initialize scan descriptor.
*/
indexstate->ioss_ScanDesc = index_beginscan(currentRelation,
- indexstate->ioss_RelationDesc,
- estate->es_snapshot,
- indexstate->ioss_NumScanKeys,
- indexstate->ioss_NumOrderByKeys);
+ indexstate->ioss_RelationDesc,
+ estate->es_snapshot,
+ indexstate->ioss_NumScanKeys,
+ indexstate->ioss_NumOrderByKeys);
/* Set it up for index-only scan */
indexstate->ioss_ScanDesc->xs_want_itup = true;
diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c
index 06137c6ba8..3a6bfec0db 100644
--- a/src/backend/executor/nodeMaterial.c
+++ b/src/backend/executor/nodeMaterial.c
@@ -66,7 +66,7 @@ ExecMaterial(MaterialState *node)
* Allocate a second read pointer to serve as the mark. We know it
* must have index 1, so needn't store that.
*/
- int ptrno PG_USED_FOR_ASSERTS_ONLY;
+ int ptrno PG_USED_FOR_ASSERTS_ONLY;
ptrno = tuplestore_alloc_read_pointer(tuplestorestate,
node->eflags);
diff --git a/src/backend/executor/nodeMergeAppend.c b/src/backend/executor/nodeMergeAppend.c
index d755109a33..d5141ba54e 100644
--- a/src/backend/executor/nodeMergeAppend.c
+++ b/src/backend/executor/nodeMergeAppend.c
@@ -130,7 +130,7 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
for (i = 0; i < node->numCols; i++)
{
- SortSupport sortKey = mergestate->ms_sortkeys + i;
+ SortSupport sortKey = mergestate->ms_sortkeys + i;
sortKey->ssup_cxt = CurrentMemoryContext;
sortKey->ssup_collation = node->collations[i];
@@ -276,7 +276,7 @@ heap_compare_slots(MergeAppendState *node, SlotNumber slot1, SlotNumber slot2)
for (nkey = 0; nkey < node->ms_nkeys; nkey++)
{
- SortSupport sortKey = node->ms_sortkeys + nkey;
+ SortSupport sortKey = node->ms_sortkeys + nkey;
AttrNumber attno = sortKey->ssup_attno;
Datum datum1,
datum2;
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index a1e55646c6..bc0b20bf82 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -247,7 +247,7 @@ MJExamineQuals(List *mergeclauses,
op_lefttype,
op_righttype,
BTORDER_PROC);
- if (!OidIsValid(sortfunc)) /* should not happen */
+ if (!OidIsValid(sortfunc)) /* should not happen */
elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
BTORDER_PROC, op_lefttype, op_righttype, opfamily);
/* We'll use a shim to call the old-style btree comparator */
@@ -405,7 +405,7 @@ MJCompare(MergeJoinState *mergestate)
*/
if (clause->lisnull && clause->risnull)
{
- nulleqnull = true; /* NULL "=" NULL */
+ nulleqnull = true; /* NULL "=" NULL */
continue;
}
@@ -419,8 +419,8 @@ MJCompare(MergeJoinState *mergestate)
/*
* If we had any NULL-vs-NULL inputs, we do not want to report that the
- * tuples are equal. Instead, if result is still 0, change it to +1.
- * This will result in advancing the inner side of the join.
+ * tuples are equal. Instead, if result is still 0, change it to +1. This
+ * will result in advancing the inner side of the join.
*
* Likewise, if there was a constant-false joinqual, do not report
* equality. We have to check this as part of the mergequals, else the
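Though the hunk is cut off here, the rule the rewrapped comment states is self-contained: if some clause compared NULL against NULL and nothing else ordered the tuples, report +1 so the inner side advances instead of declaring a match. In isolation, with an invented clause representation:

    /* Toy MJCompare: per-clause comparisons are precomputed ints; a
     * NULL-vs-NULL pair never yields equality, it advances the inner. */
    #include <stdbool.h>

    typedef struct
    {
        bool lisnull;   /* outer key is NULL */
        bool risnull;   /* inner key is NULL */
        int  cmp;       /* comparator result when neither is NULL */
    } MergeClause;

    int mj_compare(const MergeClause *clauses, int nclauses)
    {
        bool    nulleqnull = false;
        int     result = 0;

        for (int i = 0; i < nclauses; i++)
        {
            if (clauses[i].lisnull && clauses[i].risnull)
            {
                nulleqnull = true;  /* NULL "=" NULL: defer judgment */
                continue;
            }
            if (clauses[i].cmp != 0)
            {
                result = clauses[i].cmp;
                break;
            }
        }

        /* Any NULL-vs-NULL input with nothing else deciding the order
         * becomes +1, advancing the join's inner side. */
        if (result == 0 && nulleqnull)
            result = 1;
        return result;
    }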
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index dfdcb20b1d..a7bce75f0c 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -950,8 +950,8 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* If there are indices on the result relation, open them and save
* descriptors in the result relation info, so that we can add new
* index entries for the tuples we add/update. We need not do this
- * for a DELETE, however, since deletion doesn't affect indexes.
- * Also, inside an EvalPlanQual operation, the indexes might be open
+ * for a DELETE, however, since deletion doesn't affect indexes. Also,
+ * inside an EvalPlanQual operation, the indexes might be open
* already, since we share the resultrel state with the original
* query.
*/
diff --git a/src/backend/executor/nodeSetOp.c b/src/backend/executor/nodeSetOp.c
index 85590445cc..362f4466e4 100644
--- a/src/backend/executor/nodeSetOp.c
+++ b/src/backend/executor/nodeSetOp.c
@@ -344,7 +344,7 @@ setop_fill_hash_table(SetOpState *setopstate)
SetOp *node = (SetOp *) setopstate->ps.plan;
PlanState *outerPlan;
int firstFlag;
- bool in_first_rel PG_USED_FOR_ASSERTS_ONLY;
+ bool in_first_rel PG_USED_FOR_ASSERTS_ONLY;
/*
* get state info from node
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 5e4ae426b1..e222365d11 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -1674,8 +1674,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
raw_parsetree_list = pg_parse_query(src);
/*
- * Do parse analysis and rule rewrite for each raw parsetree, storing
- * the results into unsaved plancache entries.
+ * Do parse analysis and rule rewrite for each raw parsetree, storing the
+ * results into unsaved plancache entries.
*/
plancache_list = NIL;
@@ -1686,8 +1686,8 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
CachedPlanSource *plansource;
/*
- * Create the CachedPlanSource before we do parse analysis, since
- * it needs to see the unmodified raw parse tree.
+ * Create the CachedPlanSource before we do parse analysis, since it
+ * needs to see the unmodified raw parse tree.
*/
plansource = CreateCachedPlan(parsetree,
src,
@@ -1722,7 +1722,7 @@ _SPI_prepare_plan(const char *src, SPIPlanPtr plan, ParamListInfo boundParams)
plan->parserSetup,
plan->parserSetupArg,
cursor_options,
- false); /* not fixed result */
+ false); /* not fixed result */
plancache_list = lappend(plancache_list, plansource);
}
@@ -1907,7 +1907,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
}
else
{
- char completionTag[COMPLETION_TAG_BUFSIZE];
+ char completionTag[COMPLETION_TAG_BUFSIZE];
ProcessUtility(stmt,
plansource->query_string,
@@ -2335,9 +2335,9 @@ _SPI_make_plan_non_temp(SPIPlanPtr plan)
/*
* Reparent all the CachedPlanSources into the procedure context. In
- * theory this could fail partway through due to the pallocs, but we
- * don't care too much since both the procedure context and the executor
- * context would go away on error.
+ * theory this could fail partway through due to the pallocs, but we don't
+ * care too much since both the procedure context and the executor context
+ * would go away on error.
*/
foreach(lc, plan->plancache_list)
{
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 5853b068da..9cdee2bb3e 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -316,8 +316,8 @@ ClientAuthentication(Port *port)
/*
* Get the authentication method to use for this frontend/database
* combination. Note: we do not parse the file at this point; this has
- * already been done elsewhere. hba.c dropped an error message
- * into the server logfile if parsing the hba config file failed.
+ * already been done elsewhere. hba.c dropped an error message into the
+ * server logfile if parsing the hba config file failed.
*/
hba_getauthmethod(port);
@@ -1365,10 +1365,10 @@ pg_SSPI_recvauth(Port *port)
}
/*
- * Overwrite the current context with the one we just received.
- * If sspictx is NULL it was the first loop and we need to allocate
- * a buffer for it. On subsequent runs, we can just overwrite the
- * buffer contents since the size does not change.
+ * Overwrite the current context with the one we just received. If
+ * sspictx is NULL it was the first loop and we need to allocate a
+ * buffer for it. On subsequent runs, we can just overwrite the buffer
+ * contents since the size does not change.
*/
if (sspictx == NULL)
{
@@ -1437,8 +1437,8 @@ pg_SSPI_recvauth(Port *port)
if (!GetTokenInformation(token, TokenUser, NULL, 0, &retlen) && GetLastError() != 122)
ereport(ERROR,
- (errmsg_internal("could not get token user size: error code %lu",
- GetLastError())));
+ (errmsg_internal("could not get token user size: error code %lu",
+ GetLastError())));
tokenuser = malloc(retlen);
if (tokenuser == NULL)
@@ -1453,8 +1453,8 @@ pg_SSPI_recvauth(Port *port)
if (!LookupAccountSid(NULL, tokenuser->User.Sid, accountname, &accountnamesize,
domainname, &domainnamesize, &accountnameuse))
ereport(ERROR,
- (errmsg_internal("could not look up account SID: error code %lu",
- GetLastError())));
+ (errmsg_internal("could not look up account SID: error code %lu",
+ GetLastError())));
free(tokenuser);
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index dce0eaa20e..e0ab5997fb 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -89,10 +89,10 @@ static void close_SSL(Port *);
static const char *SSLerrmessage(void);
#endif
-char *ssl_cert_file;
-char *ssl_key_file;
-char *ssl_ca_file;
-char *ssl_crl_file;
+char *ssl_cert_file;
+char *ssl_key_file;
+char *ssl_ca_file;
+char *ssl_crl_file;
/*
* How much data can be sent across a secure connection
@@ -845,8 +845,8 @@ initialize_SSL(void)
{
/*
* Always ask for SSL client cert, but don't fail if it's not
- * presented. We might fail such connections later, depending on
- * what we find in pg_hba.conf.
+ * presented. We might fail such connections later, depending on what
+ * we find in pg_hba.conf.
*/
SSL_CTX_set_verify(SSL_context,
(SSL_VERIFY_PEER |
@@ -953,7 +953,7 @@ aloop:
port->peer_cn = NULL;
if (port->peer != NULL)
{
- int len;
+ int len;
len = X509_NAME_get_text_by_NID(X509_get_subject_name(port->peer),
NID_commonName, NULL, 0);
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index 56229cb4df..828f6dcc8e 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -61,8 +61,8 @@ typedef struct check_network_data
*/
typedef struct HbaToken
{
- char *string;
- bool quoted;
+ char *string;
+ bool quoted;
} HbaToken;
/*
@@ -76,9 +76,9 @@ static MemoryContext parsed_hba_context = NULL;
* These variables hold the pre-parsed contents of the ident usermap
* configuration file. ident_lines is a triple-nested list of lines, fields
* and tokens, as returned by tokenize_file. There will be one line in
- * ident_lines for each (non-empty, non-comment) line of the file. Note there
+ * ident_lines for each (non-empty, non-comment) line of the file. Note there
* will always be at least one field, since blank lines are not entered in the
- * data structure. ident_line_nums is an integer list containing the actual
+ * data structure. ident_line_nums is an integer list containing the actual
* line number for each line represented in ident_lines. ident_context is
* the memory context holding all this.
*/
@@ -246,7 +246,7 @@ make_hba_token(char *token, bool quoted)
static HbaToken *
copy_hba_token(HbaToken *in)
{
- HbaToken *out = make_hba_token(in->string, in->quoted);
+ HbaToken *out = make_hba_token(in->string, in->quoted);
return out;
}
@@ -283,12 +283,12 @@ next_field_expand(const char *filename, FILE *file)
/*
* tokenize_inc_file
- * Expand a file included from another file into an hba "field"
+ * Expand a file included from another file into an hba "field"
*
* Opens and tokenises a file included from another HBA config file with @,
* and returns all values found therein as a flat list of HbaTokens. If a
* @-token is found, recursively expand it. The given token list is used as
- * initial contents of list (so foo,bar,@baz does what you expect).
+ * initial contents of list (so foo,bar,@baz does what you expect).
*/
static List *
tokenize_inc_file(List *tokens,
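
The "foo,bar,@baz does what you expect" remark refers to @-file expansion inside a single hba field. A hypothetical pg_hba.conf fragment (file name and contents invented for illustration):

    # suppose the file "baz" contains the line:  alice, bob
    host    all    foo,bar,@baz    127.0.0.1/32    trust

Because the already-collected tokens seed the list, the user field expands to the flat list foo, bar, alice, bob, with the included file's tokens appended after the literal ones.
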
@@ -377,8 +377,8 @@ tokenize_file(const char *filename, FILE *file,
List *current_line = NIL;
List *current_field = NIL;
int line_number = 1;
- MemoryContext linecxt;
- MemoryContext oldcxt;
+ MemoryContext linecxt;
+ MemoryContext oldcxt;
linecxt = AllocSetContextCreate(TopMemoryContext,
"tokenize file cxt",
@@ -442,11 +442,10 @@ is_member(Oid userid, const char *role)
if (!OidIsValid(roleid))
return false; /* if target role not exist, say "no" */
- /*
- * See if user is directly or indirectly a member of role.
- * For this purpose, a superuser is not considered to be automatically
- * a member of the role, so group auth only applies to explicit
- * membership.
+ /*
+ * See if user is directly or indirectly a member of role. For this
+ * purpose, a superuser is not considered to be automatically a member of
+ * the role, so group auth only applies to explicit membership.
*/
return is_member_of_role_nosuper(userid, roleid);
}
@@ -457,8 +456,8 @@ is_member(Oid userid, const char *role)
static bool
check_role(const char *role, Oid roleid, List *tokens)
{
- ListCell *cell;
- HbaToken *tok;
+ ListCell *cell;
+ HbaToken *tok;
foreach(cell, tokens)
{
@@ -481,8 +480,8 @@ check_role(const char *role, Oid roleid, List *tokens)
static bool
check_db(const char *dbname, const char *role, Oid roleid, List *tokens)
{
- ListCell *cell;
- HbaToken *tok;
+ ListCell *cell;
+ HbaToken *tok;
foreach(cell, tokens)
{
@@ -825,7 +824,7 @@ parse_hba_line(List *line, int line_num)
List *tokens;
ListCell *tokencell;
HbaToken *token;
- HbaLine *parsedline;
+ HbaLine *parsedline;
parsedline = palloc0(sizeof(HbaLine));
parsedline->linenumber = line_num;
@@ -1042,8 +1041,8 @@ parse_hba_line(List *line, int line_num)
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("specifying both host name and CIDR mask is invalid: \"%s\"",
token->string),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
return NULL;
}
@@ -1080,9 +1079,9 @@ parse_hba_line(List *line, int line_num)
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("multiple values specified for netmask"),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errmsg("multiple values specified for netmask"),
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
return NULL;
}
token = linitial(tokens);
@@ -1293,6 +1292,7 @@ parse_hba_line(List *line, int line_num)
foreach(tokencell, tokens)
{
char *val;
+
token = lfirst(tokencell);
str = pstrdup(token->string);
@@ -1310,7 +1310,7 @@ parse_hba_line(List *line, int line_num)
return NULL;
}
- *val++ = '\0'; /* str now holds "name", val holds "value" */
+ *val++ = '\0'; /* str now holds "name", val holds "value" */
if (!parse_hba_auth_opt(str, val, parsedline, line_num))
/* parse_hba_auth_opt already logged the error message */
return NULL;
@@ -1397,17 +1397,16 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
else if (strcmp(name, "clientcert") == 0)
{
/*
- * Since we require ctHostSSL, this really can never happen
- * on non-SSL-enabled builds, so don't bother checking for
- * USE_SSL.
+ * Since we require ctHostSSL, this really can never happen on
+ * non-SSL-enabled builds, so don't bother checking for USE_SSL.
*/
if (hbaline->conntype != ctHostSSL)
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("clientcert can only be configured for \"hostssl\" rows"),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errmsg("clientcert can only be configured for \"hostssl\" rows"),
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
return false;
}
if (strcmp(val, "1") == 0)
@@ -1418,8 +1417,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("client certificates can only be checked if a root certificate store is available"),
errhint("Make sure the configuration parameter \"ssl_ca_file\" is set."),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
return false;
}
hbaline->clientcert = true;
@@ -1431,8 +1430,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("clientcert can not be set to 0 when using \"cert\" authentication"),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
return false;
}
hbaline->clientcert = false;
@@ -1465,8 +1464,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("invalid LDAP port number: \"%s\"", val),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
return false;
}
}
@@ -1528,7 +1527,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
{
struct addrinfo *gai_result;
struct addrinfo hints;
- int ret;
+ int ret;
REQUIRE_AUTH_OPTION(uaRADIUS, "radiusserver", "radius");
@@ -1543,8 +1542,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("could not translate RADIUS server name \"%s\" to address: %s",
val, gai_strerror(ret)),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
if (gai_result)
pg_freeaddrinfo_all(hints.ai_family, gai_result);
return false;
@@ -1561,8 +1560,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("invalid RADIUS port number: \"%s\"", val),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
return false;
}
}
@@ -1580,8 +1579,8 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline, int line_num)
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("unrecognized authentication option name: \"%s\"",
- name),
+ errmsg("unrecognized authentication option name: \"%s\"",
+ name),
errcontext("line %d of configuration file \"%s\"",
line_num, HbaFileName)));
return false;
@@ -1693,7 +1692,7 @@ check_hba(hbaPort *port)
* Read the config file and create a List of HbaLine records for the contents.
*
* The configuration is read into a temporary list, and if any parse error
- * occurs the old list is kept in place and false is returned. Only if the
+ * occurs the old list is kept in place and false is returned. Only if the
* whole file parses OK is the list replaced, and the function returns true.
*
* On a false result, caller will take care of reporting a FATAL error in case
@@ -1710,9 +1709,9 @@ load_hba(void)
*line_num;
List *new_parsed_lines = NIL;
bool ok = true;
- MemoryContext linecxt;
- MemoryContext oldcxt;
- MemoryContext hbacxt;
+ MemoryContext linecxt;
+ MemoryContext oldcxt;
+ MemoryContext hbacxt;
file = AllocateFile(HbaFileName, "r");
if (file == NULL)
@@ -1742,8 +1741,8 @@ load_hba(void)
{
/*
* Parse error in the file, so indicate there's a problem. NB: a
- * problem in a line will free the memory for all previous lines as
- * well!
+ * problem in a line will free the memory for all previous lines
+ * as well!
*/
MemoryContextReset(hbacxt);
new_parsed_lines = NIL;
@@ -1761,9 +1760,9 @@ load_hba(void)
}
/*
- * A valid HBA file must have at least one entry; else there's no way
- * to connect to the postmaster. But only complain about this if we
- * didn't already have parsing errors.
+ * A valid HBA file must have at least one entry; else there's no way to
+ * connect to the postmaster. But only complain about this if we didn't
+ * already have parsing errors.
*/
if (ok && new_parsed_lines == NIL)
{
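
Taken together, the comments above describe load_hba()'s all-or-nothing swap: parse everything into a fresh memory context, and only on complete success replace the live configuration. A minimal sketch of the pattern with hypothetical helper names (the real function also tracks line numbers and reports each error as it goes):

    MemoryContext newcxt = make_parse_context();
    List *newlines = parse_all_lines(file, newcxt); /* NIL if any line failed */

    if (newlines == NIL)
    {
        MemoryContextDelete(newcxt);    /* discard partial work ... */
        return false;                   /* ... and keep the old config */
    }
    if (parsed_hba_context != NULL)
        MemoryContextDelete(parsed_hba_context);
    parsed_hba_context = newcxt;        /* adopt the fully-parsed list */
    return true;
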
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 2082e3d4f6..5272811cc0 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -1247,9 +1247,9 @@ internal_flush(void)
/*
* We drop the buffered data anyway so that processing can
- * continue, even though we'll probably quit soon. We also
- * set a flag that'll cause the next CHECK_FOR_INTERRUPTS
- * to terminate the connection.
+ * continue, even though we'll probably quit soon. We also set a
+ * flag that'll cause the next CHECK_FOR_INTERRUPTS to terminate
+ * the connection.
*/
PqSendStart = PqSendPointer = 0;
ClientConnectionLost = 1;
@@ -1373,7 +1373,7 @@ fail:
void
pq_putmessage_noblock(char msgtype, const char *s, size_t len)
{
- int res PG_USED_FOR_ASSERTS_ONLY;
+ int res PG_USED_FOR_ASSERTS_ONLY;
int required;
/*
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index 4c904e0329..ba10840166 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -362,8 +362,8 @@ bms_subset_compare(const Bitmapset *a, const Bitmapset *b)
shortlen = Min(a->nwords, b->nwords);
for (i = 0; i < shortlen; i++)
{
- bitmapword aword = a->words[i];
- bitmapword bword = b->words[i];
+ bitmapword aword = a->words[i];
+ bitmapword bword = b->words[i];
if ((aword & ~bword) != 0)
{
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 0db60b161b..1743b8fdc8 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -381,7 +381,7 @@ _copyIndexScan(const IndexScan *from)
static IndexOnlyScan *
_copyIndexOnlyScan(const IndexOnlyScan *from)
{
- IndexOnlyScan *newnode = makeNode(IndexOnlyScan);
+ IndexOnlyScan *newnode = makeNode(IndexOnlyScan);
/*
* copy node superclass fields
@@ -4473,7 +4473,7 @@ copyObject(const void *from)
default:
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(from));
- retval = 0; /* keep compiler quiet */
+ retval = 0; /* keep compiler quiet */
break;
}
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 9d588feac2..f19ad77026 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -2360,8 +2360,8 @@ _equalXmlSerialize(const XmlSerialize *a, const XmlSerialize *b)
static bool
_equalList(const List *a, const List *b)
{
- const ListCell *item_a;
- const ListCell *item_b;
+ const ListCell *item_a;
+ const ListCell *item_b;
/*
* Try to reject by simple scalar checks before grovelling through all the
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index 209b72222e..4d19aed8f4 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -443,7 +443,7 @@ list_nth_oid(const List *list, int n)
bool
list_member(const List *list, const void *datum)
{
- const ListCell *cell;
+ const ListCell *cell;
Assert(IsPointerList(list));
check_list_invariants(list);
@@ -464,7 +464,7 @@ list_member(const List *list, const void *datum)
bool
list_member_ptr(const List *list, const void *datum)
{
- const ListCell *cell;
+ const ListCell *cell;
Assert(IsPointerList(list));
check_list_invariants(list);
@@ -484,7 +484,7 @@ list_member_ptr(const List *list, const void *datum)
bool
list_member_int(const List *list, int datum)
{
- const ListCell *cell;
+ const ListCell *cell;
Assert(IsIntegerList(list));
check_list_invariants(list);
@@ -504,7 +504,7 @@ list_member_int(const List *list, int datum)
bool
list_member_oid(const List *list, Oid datum)
{
- const ListCell *cell;
+ const ListCell *cell;
Assert(IsOidList(list));
check_list_invariants(list);
@@ -697,7 +697,7 @@ List *
list_union(const List *list1, const List *list2)
{
List *result;
- const ListCell *cell;
+ const ListCell *cell;
Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
@@ -721,7 +721,7 @@ List *
list_union_ptr(const List *list1, const List *list2)
{
List *result;
- const ListCell *cell;
+ const ListCell *cell;
Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
@@ -744,7 +744,7 @@ List *
list_union_int(const List *list1, const List *list2)
{
List *result;
- const ListCell *cell;
+ const ListCell *cell;
Assert(IsIntegerList(list1));
Assert(IsIntegerList(list2));
@@ -767,7 +767,7 @@ List *
list_union_oid(const List *list1, const List *list2)
{
List *result;
- const ListCell *cell;
+ const ListCell *cell;
Assert(IsOidList(list1));
Assert(IsOidList(list2));
@@ -800,7 +800,7 @@ List *
list_intersection(const List *list1, const List *list2)
{
List *result;
- const ListCell *cell;
+ const ListCell *cell;
if (list1 == NIL || list2 == NIL)
return NIL;
@@ -831,7 +831,7 @@ list_intersection(const List *list1, const List *list2)
List *
list_difference(const List *list1, const List *list2)
{
- const ListCell *cell;
+ const ListCell *cell;
List *result = NIL;
Assert(IsPointerList(list1));
@@ -857,7 +857,7 @@ list_difference(const List *list1, const List *list2)
List *
list_difference_ptr(const List *list1, const List *list2)
{
- const ListCell *cell;
+ const ListCell *cell;
List *result = NIL;
Assert(IsPointerList(list1));
@@ -882,7 +882,7 @@ list_difference_ptr(const List *list1, const List *list2)
List *
list_difference_int(const List *list1, const List *list2)
{
- const ListCell *cell;
+ const ListCell *cell;
List *result = NIL;
Assert(IsIntegerList(list1));
@@ -907,7 +907,7 @@ list_difference_int(const List *list1, const List *list2)
List *
list_difference_oid(const List *list1, const List *list2)
{
- const ListCell *cell;
+ const ListCell *cell;
List *result = NIL;
Assert(IsOidList(list1));
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 6f9e053669..813d1da1a2 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -59,7 +59,7 @@ exprType(const Node *expr)
break;
case T_ArrayRef:
{
- const ArrayRef *arrayref = (const ArrayRef *) expr;
+ const ArrayRef *arrayref = (const ArrayRef *) expr;
/* slice and/or store operations yield the array type */
if (arrayref->reflowerindexpr || arrayref->refassgnexpr)
@@ -91,7 +91,7 @@ exprType(const Node *expr)
break;
case T_SubLink:
{
- const SubLink *sublink = (const SubLink *) expr;
+ const SubLink *sublink = (const SubLink *) expr;
if (sublink->subLinkType == EXPR_SUBLINK ||
sublink->subLinkType == ARRAY_SUBLINK)
@@ -125,7 +125,7 @@ exprType(const Node *expr)
break;
case T_SubPlan:
{
- const SubPlan *subplan = (const SubPlan *) expr;
+ const SubPlan *subplan = (const SubPlan *) expr;
if (subplan->subLinkType == EXPR_SUBLINK ||
subplan->subLinkType == ARRAY_SUBLINK)
@@ -282,7 +282,7 @@ exprTypmod(const Node *expr)
break;
case T_SubLink:
{
- const SubLink *sublink = (const SubLink *) expr;
+ const SubLink *sublink = (const SubLink *) expr;
if (sublink->subLinkType == EXPR_SUBLINK ||
sublink->subLinkType == ARRAY_SUBLINK)
@@ -303,7 +303,7 @@ exprTypmod(const Node *expr)
break;
case T_SubPlan:
{
- const SubPlan *subplan = (const SubPlan *) expr;
+ const SubPlan *subplan = (const SubPlan *) expr;
if (subplan->subLinkType == EXPR_SUBLINK ||
subplan->subLinkType == ARRAY_SUBLINK)
@@ -341,7 +341,7 @@ exprTypmod(const Node *expr)
* If all the alternatives agree on type/typmod, return that
* typmod, else use -1
*/
- const CaseExpr *cexpr = (const CaseExpr *) expr;
+ const CaseExpr *cexpr = (const CaseExpr *) expr;
Oid casetype = cexpr->casetype;
int32 typmod;
ListCell *arg;
@@ -374,7 +374,7 @@ exprTypmod(const Node *expr)
* If all the elements agree on type/typmod, return that
* typmod, else use -1
*/
- const ArrayExpr *arrayexpr = (const ArrayExpr *) expr;
+ const ArrayExpr *arrayexpr = (const ArrayExpr *) expr;
Oid commontype;
int32 typmod;
ListCell *elem;
@@ -493,7 +493,7 @@ exprIsLengthCoercion(const Node *expr, int32 *coercedTypmod)
*/
if (expr && IsA(expr, FuncExpr))
{
- const FuncExpr *func = (const FuncExpr *) expr;
+ const FuncExpr *func = (const FuncExpr *) expr;
int nargs;
Const *second_arg;
@@ -707,7 +707,7 @@ exprCollation(const Node *expr)
break;
case T_SubLink:
{
- const SubLink *sublink = (const SubLink *) expr;
+ const SubLink *sublink = (const SubLink *) expr;
if (sublink->subLinkType == EXPR_SUBLINK ||
sublink->subLinkType == ARRAY_SUBLINK)
@@ -733,7 +733,7 @@ exprCollation(const Node *expr)
break;
case T_SubPlan:
{
- const SubPlan *subplan = (const SubPlan *) expr;
+ const SubPlan *subplan = (const SubPlan *) expr;
if (subplan->subLinkType == EXPR_SUBLINK ||
subplan->subLinkType == ARRAY_SUBLINK)
@@ -1137,7 +1137,7 @@ exprLocation(const Node *expr)
break;
case T_FuncExpr:
{
- const FuncExpr *fexpr = (const FuncExpr *) expr;
+ const FuncExpr *fexpr = (const FuncExpr *) expr;
/* consider both function name and leftmost arg */
loc = leftmostLoc(fexpr->location,
@@ -1157,7 +1157,7 @@ exprLocation(const Node *expr)
case T_DistinctExpr: /* struct-equivalent to OpExpr */
case T_NullIfExpr: /* struct-equivalent to OpExpr */
{
- const OpExpr *opexpr = (const OpExpr *) expr;
+ const OpExpr *opexpr = (const OpExpr *) expr;
/* consider both operator name and leftmost arg */
loc = leftmostLoc(opexpr->location,
@@ -1175,7 +1175,7 @@ exprLocation(const Node *expr)
break;
case T_BoolExpr:
{
- const BoolExpr *bexpr = (const BoolExpr *) expr;
+ const BoolExpr *bexpr = (const BoolExpr *) expr;
/*
* Same as above, to handle either NOT or AND/OR. We can't
@@ -1188,7 +1188,7 @@ exprLocation(const Node *expr)
break;
case T_SubLink:
{
- const SubLink *sublink = (const SubLink *) expr;
+ const SubLink *sublink = (const SubLink *) expr;
/* check the testexpr, if any, and the operator/keyword */
loc = leftmostLoc(exprLocation(sublink->testexpr),
@@ -1273,7 +1273,7 @@ exprLocation(const Node *expr)
break;
case T_XmlExpr:
{
- const XmlExpr *xexpr = (const XmlExpr *) expr;
+ const XmlExpr *xexpr = (const XmlExpr *) expr;
/* consider both function name and leftmost arg */
loc = leftmostLoc(xexpr->location,
@@ -1327,7 +1327,7 @@ exprLocation(const Node *expr)
break;
case T_A_Expr:
{
- const A_Expr *aexpr = (const A_Expr *) expr;
+ const A_Expr *aexpr = (const A_Expr *) expr;
/* use leftmost of operator or left operand (if any) */
/* we assume right operand can't be to left of operator */
@@ -1346,7 +1346,7 @@ exprLocation(const Node *expr)
break;
case T_FuncCall:
{
- const FuncCall *fc = (const FuncCall *) expr;
+ const FuncCall *fc = (const FuncCall *) expr;
/* consider both function name and leftmost arg */
/* (we assume any ORDER BY nodes must be to right of name) */
@@ -1364,7 +1364,7 @@ exprLocation(const Node *expr)
break;
case T_TypeCast:
{
- const TypeCast *tc = (const TypeCast *) expr;
+ const TypeCast *tc = (const TypeCast *) expr;
/*
* This could represent CAST(), ::, or TypeName 'literal', so
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index e690194b74..d6dff9de47 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -141,7 +141,7 @@ _outToken(StringInfo str, const char *s)
static void
_outList(StringInfo str, const List *node)
{
- const ListCell *lc;
+ const ListCell *lc;
appendStringInfoChar(str, '(');
diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c
index 20aeb085d8..8a5e59526d 100644
--- a/src/backend/nodes/print.c
+++ b/src/backend/nodes/print.c
@@ -251,7 +251,7 @@ pretty_format_node_dump(const char *dump)
void
print_rt(const List *rtable)
{
- const ListCell *l;
+ const ListCell *l;
int i = 1;
printf("resno\trefname \trelid\tinFromCl\n");
@@ -314,7 +314,7 @@ print_expr(const Node *expr, const List *rtable)
if (IsA(expr, Var))
{
- const Var *var = (const Var *) expr;
+ const Var *var = (const Var *) expr;
char *relname,
*attname;
@@ -348,7 +348,7 @@ print_expr(const Node *expr, const List *rtable)
}
else if (IsA(expr, Const))
{
- const Const *c = (const Const *) expr;
+ const Const *c = (const Const *) expr;
Oid typoutput;
bool typIsVarlena;
char *outputstr;
@@ -368,7 +368,7 @@ print_expr(const Node *expr, const List *rtable)
}
else if (IsA(expr, OpExpr))
{
- const OpExpr *e = (const OpExpr *) expr;
+ const OpExpr *e = (const OpExpr *) expr;
char *opname;
opname = get_opname(e->opno);
@@ -387,7 +387,7 @@ print_expr(const Node *expr, const List *rtable)
}
else if (IsA(expr, FuncExpr))
{
- const FuncExpr *e = (const FuncExpr *) expr;
+ const FuncExpr *e = (const FuncExpr *) expr;
char *funcname;
ListCell *l;
@@ -412,7 +412,7 @@ print_expr(const Node *expr, const List *rtable)
void
print_pathkeys(const List *pathkeys, const List *rtable)
{
- const ListCell *i;
+ const ListCell *i;
printf("(");
foreach(i, pathkeys)
@@ -452,7 +452,7 @@ print_pathkeys(const List *pathkeys, const List *rtable)
void
print_tl(const List *tlist, const List *rtable)
{
- const ListCell *tl;
+ const ListCell *tl;
printf("(\n");
foreach(tl, tlist)
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 7960793641..89ddf62d4d 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -49,7 +49,7 @@
#define READ_TEMP_LOCALS() \
char *token; \
int length; \
- (void) token /* possibly unused */
+ (void) token /* possibly unused */
/* ... but most need both */
#define READ_LOCALS(nodeTypeName) \
@@ -195,7 +195,7 @@ _readQuery(void)
READ_ENUM_FIELD(commandType, CmdType);
READ_ENUM_FIELD(querySource, QuerySource);
- local_node->queryId = 0; /* not saved in output format */
+ local_node->queryId = 0; /* not saved in output format */
READ_BOOL_FIELD(canSetTag);
READ_NODE_FIELD(utilityStmt);
READ_INT_FIELD(resultRelation);
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index 17dae0d1b9..728619e75d 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -956,7 +956,7 @@ tbm_lossify(TIDBitmap *tbm)
*
* Since we are called as soon as nentries exceeds maxentries, we should
* push nentries down to significantly less than maxentries, or else we'll
- * just end up doing this again very soon. We shoot for maxentries/2.
+ * just end up doing this again very soon. We shoot for maxentries/2.
*/
Assert(!tbm->iterating);
Assert(tbm->status == TBM_HASH);
@@ -992,14 +992,14 @@ tbm_lossify(TIDBitmap *tbm)
}
/*
- * With a big bitmap and small work_mem, it's possible that we cannot
- * get under maxentries. Again, if that happens, we'd end up uselessly
+ * With a big bitmap and small work_mem, it's possible that we cannot get
+ * under maxentries. Again, if that happens, we'd end up uselessly
* calling tbm_lossify over and over. To prevent this from becoming a
* performance sink, force maxentries up to at least double the current
* number of entries. (In essence, we're admitting inability to fit
- * within work_mem when we do this.) Note that this test will not fire
- * if we broke out of the loop early; and if we didn't, the current
- * number of entries is simply not reducible any further.
+ * within work_mem when we do this.) Note that this test will not fire if
+ * we broke out of the loop early; and if we didn't, the current number of
+ * entries is simply not reducible any further.
*/
if (tbm->nentries > tbm->maxentries / 2)
tbm->maxentries = Min(tbm->nentries, (INT_MAX - 1) / 2) * 2;
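
The assignment just above doubles maxentries while clamping against integer overflow: Min(tbm->nentries, (INT_MAX - 1) / 2) * 2 can never exceed INT_MAX - 1. A standalone check of the arithmetic (plain C, not backend code):

    #include <limits.h>
    #include <stdio.h>

    #define Min(x, y) ((x) < (y) ? (x) : (y))

    int main(void)
    {
        int nentries = INT_MAX - 5;     /* pathologically large entry count */
        int clamped = Min(nentries, (INT_MAX - 1) / 2) * 2;

        printf("%d\n", clamped);        /* prints 2147483646, i.e. INT_MAX - 1 */
        return 0;
    }
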
@@ -1011,8 +1011,8 @@ tbm_lossify(TIDBitmap *tbm)
static int
tbm_comparator(const void *left, const void *right)
{
- BlockNumber l = (*((PagetableEntry * const *) left))->blockno;
- BlockNumber r = (*((PagetableEntry * const *) right))->blockno;
+ BlockNumber l = (*((PagetableEntry *const *) left))->blockno;
+ BlockNumber r = (*((PagetableEntry *const *) right))->blockno;
if (l < r)
return -1;
diff --git a/src/backend/optimizer/geqo/geqo_selection.c b/src/backend/optimizer/geqo/geqo_selection.c
index be64576c2f..fbdcc5ff0c 100644
--- a/src/backend/optimizer/geqo/geqo_selection.c
+++ b/src/backend/optimizer/geqo/geqo_selection.c
@@ -65,8 +65,8 @@ geqo_selection(PlannerInfo *root, Chromosome *momma, Chromosome *daddy,
* one, when we can't.
*
* This code was observed to hang up in an infinite loop when the
- * platform's implementation of erand48() was broken. We now always
- * use our own version.
+ * platform's implementation of erand48() was broken. We now always use
+ * our own version.
*/
if (pool->size > 1)
{
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 0563cae1d7..f02954982a 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -50,19 +50,19 @@ join_search_hook_type join_search_hook = NULL;
static void set_base_rel_sizes(PlannerInfo *root);
static void set_base_rel_pathlists(PlannerInfo *root);
static void set_rel_size(PlannerInfo *root, RelOptInfo *rel,
- Index rti, RangeTblEntry *rte);
+ Index rti, RangeTblEntry *rte);
static void set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
Index rti, RangeTblEntry *rte);
static void set_plain_rel_size(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_foreign_size(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
- Index rti, RangeTblEntry *rte);
+ Index rti, RangeTblEntry *rte);
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
Index rti, RangeTblEntry *rte);
static void generate_mergeappend_paths(PlannerInfo *root, RelOptInfo *rel,
@@ -118,7 +118,7 @@ make_one_rel(PlannerInfo *root, List *joinlist)
if (brel == NULL)
continue;
- Assert(brel->relid == rti); /* sanity check on array */
+ Assert(brel->relid == rti); /* sanity check on array */
/* ignore RTEs that are "other rels" */
if (brel->reloptkind != RELOPT_BASEREL)
@@ -211,7 +211,7 @@ set_base_rel_pathlists(PlannerInfo *root)
*/
static void
set_rel_size(PlannerInfo *root, RelOptInfo *rel,
- Index rti, RangeTblEntry *rte)
+ Index rti, RangeTblEntry *rte)
{
if (rel->reloptkind == RELOPT_BASEREL &&
relation_excluded_by_constraints(root, rel, rte))
@@ -251,6 +251,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
}
break;
case RTE_SUBQUERY:
+
/*
* Subqueries don't support parameterized paths, so just go
* ahead and build their paths immediately.
@@ -264,6 +265,7 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel,
set_values_size_estimates(root, rel);
break;
case RTE_CTE:
+
/*
* CTEs don't support parameterized paths, so just go ahead
* and build their paths immediately.
@@ -574,8 +576,8 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* It is possible that constraint exclusion detected a contradiction
- * within a child subquery, even though we didn't prove one above.
- * If so, we can skip this child.
+ * within a child subquery, even though we didn't prove one above. If
+ * so, we can skip this child.
*/
if (IS_DUMMY_REL(childrel))
continue;
@@ -590,7 +592,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
/*
* Accumulate per-column estimates too. We need not do anything
- * for PlaceHolderVars in the parent list. If child expression
+ * for PlaceHolderVars in the parent list. If child expression
* isn't a Var, or we didn't record a width estimate for it, we
* have to fall back on a datatype-based estimate.
*
@@ -609,7 +611,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel,
if (IsA(childvar, Var))
{
- int cndx = ((Var *) childvar)->varattno - childrel->min_attr;
+ int cndx = ((Var *) childvar)->varattno - childrel->min_attr;
child_width = childrel->attr_widths[cndx];
}
@@ -664,7 +666,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Generate access paths for each member relation, and remember the
- * cheapest path for each one. Also, identify all pathkeys (orderings)
+ * cheapest path for each one. Also, identify all pathkeys (orderings)
* and parameterizations (required_outer sets) available for the member
* relations.
*/
@@ -708,7 +710,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/*
* Collect lists of all the available path orderings and
- * parameterizations for all the children. We use these as a
+ * parameterizations for all the children. We use these as a
* heuristic to indicate which sort orderings and parameterizations we
* should build Append and MergeAppend paths for.
*/
@@ -753,7 +755,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
/* Have we already seen this param set? */
foreach(lco, all_child_outers)
{
- Relids existing_outers = (Relids) lfirst(lco);
+ Relids existing_outers = (Relids) lfirst(lco);
if (bms_equal(existing_outers, childouter))
{
@@ -791,7 +793,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
* so that not that many cases actually get considered here.)
*
* The Append node itself cannot enforce quals, so all qual checking must
- * be done in the child paths. This means that to have a parameterized
+ * be done in the child paths. This means that to have a parameterized
* Append path, we must have the exact same parameterization for each
* child path; otherwise some children might be failing to check the
* moved-down quals. To make them match up, we can try to increase the
@@ -799,7 +801,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
*/
foreach(l, all_child_outers)
{
- Relids required_outer = (Relids) lfirst(l);
+ Relids required_outer = (Relids) lfirst(l);
bool ok = true;
ListCell *lcr;
@@ -1115,9 +1117,9 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
rel->subroot = subroot;
/*
- * It's possible that constraint exclusion proved the subquery empty.
- * If so, it's convenient to turn it back into a dummy path so that we
- * will recognize appropriate optimizations at this level.
+ * It's possible that constraint exclusion proved the subquery empty. If
+ * so, it's convenient to turn it back into a dummy path so that we will
+ * recognize appropriate optimizations at this level.
*/
if (is_dummy_plan(rel->subplan))
{
@@ -1639,7 +1641,7 @@ qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
/*
* It would be unsafe to push down window function calls, but at least for
- * the moment we could never see any in a qual anyhow. (The same applies
+ * the moment we could never see any in a qual anyhow. (The same applies
* to aggregates, which we check for in pull_var_clause below.)
*/
Assert(!contain_window_function(qual));
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index e45bc121e4..480c1b7425 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -432,7 +432,7 @@ cost_index(IndexPath *path, PlannerInfo *root, double loop_count)
* qual clauses that we have to evaluate as qpquals. We approximate that
* list as allclauses minus any clauses appearing in indexquals. (We
* assume that pointer equality is enough to recognize duplicate
- * RestrictInfos.) This method neglects some considerations such as
+ * RestrictInfos.) This method neglects some considerations such as
* clauses that needn't be checked because they are implied by a partial
* index's predicate. It does not seem worth the cycles to try to factor
* those things in at this stage, even though createplan.c will take pains
@@ -3135,7 +3135,7 @@ get_restriction_qual_cost(PlannerInfo *root, RelOptInfo *baserel,
* innerrel: inner relation under consideration
* jointype: must be JOIN_SEMI or JOIN_ANTI
* sjinfo: SpecialJoinInfo relevant to this join
- * restrictlist: join quals
+ * restrictlist: join quals
* Output parameters:
* *semifactors is filled in (see relation.h for field definitions)
*/
@@ -3221,8 +3221,8 @@ compute_semi_anti_join_factors(PlannerInfo *root,
*
* Note: it is correct to use the inner rel's "rows" count here, even
* though we might later be considering a parameterized inner path with
- * fewer rows. This is because we have included all the join clauses
- * in the selectivity estimate.
+ * fewer rows. This is because we have included all the join clauses in
+ * the selectivity estimate.
*/
if (jselec > 0) /* protect against zero divide */
{
@@ -3271,17 +3271,18 @@ has_indexed_join_quals(NestPath *joinpath)
indexclauses = ((IndexPath *) innerpath)->indexclauses;
break;
case T_BitmapHeapScan:
- {
- /* Accept only a simple bitmap scan, not AND/OR cases */
- Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
-
- if (IsA(bmqual, IndexPath))
- indexclauses = ((IndexPath *) bmqual)->indexclauses;
- else
- return false;
- break;
- }
+ {
+ /* Accept only a simple bitmap scan, not AND/OR cases */
+ Path *bmqual = ((BitmapHeapPath *) innerpath)->bitmapqual;
+
+ if (IsA(bmqual, IndexPath))
+ indexclauses = ((IndexPath *) bmqual)->indexclauses;
+ else
+ return false;
+ break;
+ }
default:
+
/*
* If it's not a simple indexscan, it probably doesn't run quickly
* for zero rows out, even if it's a parameterized path using all
@@ -3293,8 +3294,8 @@ has_indexed_join_quals(NestPath *joinpath)
/*
* Examine the inner path's param clauses. Any that are from the outer
* path must be found in the indexclauses list, either exactly or in an
- * equivalent form generated by equivclass.c. Also, we must find at
- * least one such clause, else it's a clauseless join which isn't fast.
+ * equivalent form generated by equivclass.c. Also, we must find at least
+ * one such clause, else it's a clauseless join which isn't fast.
*/
found_one = false;
foreach(lc, innerpath->param_info->ppi_clauses)
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index bb196b8f2a..e34b9553bd 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -494,11 +494,11 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
*
* If rel is not NULL, it identifies a specific relation we're considering
* a path for, and indicates that child EC members for that relation can be
- * considered. Otherwise child members are ignored. (Note: since child EC
+ * considered. Otherwise child members are ignored. (Note: since child EC
* members aren't guaranteed unique, a non-NULL value means that there could
* be more than one EC that matches the expression; if so it's order-dependent
* which one you get. This is annoying but it only happens in corner cases,
- * so for now we live with just reporting the first match. See also
+ * so for now we live with just reporting the first match. See also
* generate_implied_equalities_for_indexcol and match_pathkeys_to_index.)
*
* If create_it is TRUE, we'll build a new EquivalenceClass when there is no
@@ -922,8 +922,8 @@ generate_base_implied_equalities_broken(PlannerInfo *root,
* built any join RelOptInfos.
*
* An annoying special case for parameterized scans is that the inner rel can
- * be an appendrel child (an "other rel"). In this case we must generate
- * appropriate clauses using child EC members. add_child_rel_equivalences
+ * be an appendrel child (an "other rel"). In this case we must generate
+ * appropriate clauses using child EC members. add_child_rel_equivalences
* must already have been done for the child rel.
*
* The results are sufficient for use in merge, hash, and plain nestloop join
@@ -1002,9 +1002,9 @@ generate_join_implied_equalities(PlannerInfo *root,
if (ec->ec_broken)
sublist = generate_join_implied_equalities_broken(root,
ec,
- nominal_join_relids,
+ nominal_join_relids,
outer_relids,
- nominal_inner_relids,
+ nominal_inner_relids,
inner_appinfo);
result = list_concat(result, sublist);
@@ -1217,9 +1217,9 @@ generate_join_implied_equalities_broken(PlannerInfo *root,
/*
* If we have to translate, just brute-force apply adjust_appendrel_attrs
* to all the RestrictInfos at once. This will result in returning
- * RestrictInfos that are not listed in ec_derives, but there shouldn't
- * be any duplication, and it's a sufficiently narrow corner case that
- * we shouldn't sweat too much over it anyway.
+ * RestrictInfos that are not listed in ec_derives, but there shouldn't be
+ * any duplication, and it's a sufficiently narrow corner case that we
+ * shouldn't sweat too much over it anyway.
*/
if (inner_appinfo)
result = (List *) adjust_appendrel_attrs(root, (Node *) result,
@@ -1966,7 +1966,7 @@ mutate_eclass_expressions(PlannerInfo *root,
* is a redundant list of clauses equating the index column to each of
* the other-relation values it is known to be equal to. Any one of
* these clauses can be used to create a parameterized indexscan, and there
- * is no value in using more than one. (But it *is* worthwhile to create
+ * is no value in using more than one. (But it *is* worthwhile to create
* a separate parameterized path for each one, since that leads to different
* join orders.)
*/
@@ -2014,7 +2014,7 @@ generate_implied_equalities_for_indexcol(PlannerInfo *root,
* the target relation. (Unlike regular members, the same expression
* could be a child member of more than one EC. Therefore, it's
* potentially order-dependent which EC a child relation's index
- * column gets matched to. This is annoying but it only happens in
+ * column gets matched to. This is annoying but it only happens in
* corner cases, so for now we live with just reporting the first
* match. See also get_eclass_for_sort_expr.)
*/
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 05530054e1..2e8ccd0578 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -103,12 +103,12 @@ static List *build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
List *clauses, List *other_clauses);
static List *drop_indexable_join_clauses(RelOptInfo *rel, List *clauses);
static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
- List *paths);
+ List *paths);
static int path_usage_comparator(const void *a, const void *b);
static Cost bitmap_scan_cost_est(PlannerInfo *root, RelOptInfo *rel,
- Path *ipath);
+ Path *ipath);
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel,
- List *paths);
+ List *paths);
static PathClauseUsage *classify_index_clause_usage(Path *path,
List **clauselist);
static Relids get_bitmap_tree_required_outer(Path *bitmapqual);
@@ -117,15 +117,15 @@ static int find_list_position(Node *node, List **nodelist);
static bool check_index_only(RelOptInfo *rel, IndexOptInfo *index);
static double get_loop_count(PlannerInfo *root, Relids outer_relids);
static void match_restriction_clauses_to_index(RelOptInfo *rel,
- IndexOptInfo *index,
- IndexClauseSet *clauseset);
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset);
static void match_join_clauses_to_index(PlannerInfo *root,
RelOptInfo *rel, IndexOptInfo *index,
IndexClauseSet *clauseset,
List **joinorclauses);
static void match_eclass_clauses_to_index(PlannerInfo *root,
- IndexOptInfo *index,
- IndexClauseSet *clauseset);
+ IndexOptInfo *index,
+ IndexClauseSet *clauseset);
static void match_clauses_to_index(IndexOptInfo *index,
List *clauses,
IndexClauseSet *clauseset);
@@ -237,7 +237,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
match_restriction_clauses_to_index(rel, index, &rclauseset);
/*
- * Build index paths from the restriction clauses. These will be
+ * Build index paths from the restriction clauses. These will be
* non-parameterized paths. Plain paths go directly to add_path(),
* bitmap paths are added to bitindexpaths to be handled below.
*/
@@ -245,25 +245,25 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
&bitindexpaths);
/*
- * Identify the join clauses that can match the index. For the moment
- * we keep them separate from the restriction clauses. Note that
- * this finds only "loose" join clauses that have not been merged
- * into EquivalenceClasses. Also, collect join OR clauses for later.
+ * Identify the join clauses that can match the index. For the moment
+ * we keep them separate from the restriction clauses. Note that this
+ * finds only "loose" join clauses that have not been merged into
+ * EquivalenceClasses. Also, collect join OR clauses for later.
*/
MemSet(&jclauseset, 0, sizeof(jclauseset));
match_join_clauses_to_index(root, rel, index,
&jclauseset, &joinorclauses);
/*
- * Look for EquivalenceClasses that can generate joinclauses
- * matching the index.
+ * Look for EquivalenceClasses that can generate joinclauses matching
+ * the index.
*/
MemSet(&eclauseset, 0, sizeof(eclauseset));
match_eclass_clauses_to_index(root, index, &eclauseset);
/*
- * If we found any plain or eclass join clauses, decide what to
- * do with 'em.
+ * If we found any plain or eclass join clauses, decide what to do
+ * with 'em.
*/
if (jclauseset.nonempty || eclauseset.nonempty)
consider_index_join_clauses(root, rel, index,
@@ -287,7 +287,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
* the joinclause list. Add these to bitjoinpaths.
*/
indexpaths = generate_bitmap_or_paths(root, rel,
- joinorclauses, rel->baserestrictinfo,
+ joinorclauses, rel->baserestrictinfo,
false);
bitjoinpaths = list_concat(bitjoinpaths, indexpaths);
@@ -313,7 +313,7 @@ create_index_paths(PlannerInfo *root, RelOptInfo *rel)
* the most promising combination of join bitmap index paths. Note there
* will be only one such path no matter how many join clauses are
* available. (XXX is that good enough, or do we need to consider even
- * more paths for different subsets of possible join partners? Also,
+ * more paths for different subsets of possible join partners? Also,
* should we add in restriction bitmap paths as well?)
*/
if (bitjoinpaths != NIL)
@@ -366,19 +366,19 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
* We can always include any restriction clauses in the index clauses.
* However, it's not obvious which subsets of the join clauses are worth
* generating paths from, and it's unlikely that considering every
- * possible subset is worth the cycles. Our current heuristic is based
- * on the index columns, with the idea that later index columns are less
+ * possible subset is worth the cycles. Our current heuristic is based on
+ * the index columns, with the idea that later index columns are less
* useful than earlier ones; therefore it's unlikely to be worth trying
* combinations that would remove a clause from an earlier index column
- * while adding one to a later column. Also, we know that all the
- * eclass clauses for a particular column are redundant, so we should
- * use only one of them. However, eclass clauses will always represent
- * equality which is the strongest type of index constraint, so those
- * are high-value and we should try every available combination when we
- * have eclass clauses for more than one column. Furthermore, it's
- * unlikely to be useful to combine an eclass clause with non-eclass
- * clauses for the same index column. These considerations lead to the
- * following heuristics:
+ * while adding one to a later column. Also, we know that all the eclass
+ * clauses for a particular column are redundant, so we should use only
+ * one of them. However, eclass clauses will always represent equality
+ * which is the strongest type of index constraint, so those are
+ * high-value and we should try every available combination when we have
+ * eclass clauses for more than one column. Furthermore, it's unlikely to
+ * be useful to combine an eclass clause with non-eclass clauses for the
+ * same index column. These considerations lead to the following
+ * heuristics:
*
* First, start with the restriction clauses, and add on all simple join
* clauses for column 1. If there are any such join clauses, generate
@@ -387,7 +387,7 @@ consider_index_join_clauses(PlannerInfo *root, RelOptInfo *rel,
* any other clauses we have for column 1.
*
* Next, add on all simple join clauses for column 2. If there are any
- * such join clauses, generate paths with this collection. If there are
+ * such join clauses, generate paths with this collection. If there are
* eclass clauses for columns 1 or 2, generate paths with each such clause
* replacing other clauses for its index column, including cases where we
* use restriction or simple join clauses for one column and an eclass
@@ -519,7 +519,7 @@ expand_eclass_clause_combinations(PlannerInfo *root, RelOptInfo *rel,
* bitmap indexpaths are added to *bitindexpaths for later processing.
*
* This is a fairly simple frontend to build_index_paths(). Its reason for
- * existence is mainly to handle ScalarArrayOpExpr quals properly. If the
+ * existence is mainly to handle ScalarArrayOpExpr quals properly. If the
* index AM supports them natively, we should just include them in simple
* index paths. If not, we should exclude them while building simple index
* paths, and then make a separate attempt to include them in bitmap paths.
@@ -533,7 +533,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
ListCell *lc;
/*
- * Build simple index paths using the clauses. Allow ScalarArrayOpExpr
+ * Build simple index paths using the clauses. Allow ScalarArrayOpExpr
* clauses only if the index AM supports them natively.
*/
indexpaths = build_index_paths(root, rel,
@@ -542,17 +542,16 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
SAOP_PER_AM, ST_ANYSCAN);
/*
- * Submit all the ones that can form plain IndexScan plans to add_path.
- * (A plain IndexPath can represent either a plain IndexScan or an
+ * Submit all the ones that can form plain IndexScan plans to add_path. (A
+ * plain IndexPath can represent either a plain IndexScan or an
* IndexOnlyScan, but for our purposes here that distinction does not
- * matter. However, some of the indexes might support only bitmap scans,
+ * matter. However, some of the indexes might support only bitmap scans,
* and those we mustn't submit to add_path here.)
*
- * Also, pick out the ones that are usable as bitmap scans. For that,
- * we must discard indexes that don't support bitmap scans, and we
- * also are only interested in paths that have some selectivity; we
- * should discard anything that was generated solely for ordering
- * purposes.
+ * Also, pick out the ones that are usable as bitmap scans. For that, we
+ * must discard indexes that don't support bitmap scans, and we also are
+ * only interested in paths that have some selectivity; we should discard
+ * anything that was generated solely for ordering purposes.
*/
foreach(lc, indexpaths)
{
@@ -568,9 +567,9 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * If the index doesn't handle ScalarArrayOpExpr clauses natively,
- * check to see if there are any such clauses, and if so generate
- * bitmap scan paths relying on executor-managed ScalarArrayOpExpr.
+ * If the index doesn't handle ScalarArrayOpExpr clauses natively, check
+ * to see if there are any such clauses, and if so generate bitmap scan
+ * paths relying on executor-managed ScalarArrayOpExpr.
*/
if (!index->amsearcharray)
{
@@ -590,7 +589,7 @@ get_index_paths(PlannerInfo *root, RelOptInfo *rel,
* We return a list of paths because (1) this routine checks some cases
* that should cause us to not generate any IndexPath, and (2) in some
* cases we want to consider both a forward and a backward scan, so as
- * to obtain both sort orders. Note that the paths are just returned
+ * to obtain both sort orders. Note that the paths are just returned
* to the caller and not immediately fed to add_path().
*
* At top level, useful_predicate should be exactly the index's predOK flag
@@ -658,19 +657,19 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
/*
* 1. Collect the index clauses into a single list.
*
- * We build a list of RestrictInfo nodes for clauses to be used with
- * this index, along with an integer list of the index column numbers
- * (zero based) that each clause should be used with. The clauses are
- * ordered by index key, so that the column numbers form a nondecreasing
- * sequence. (This order is depended on by btree and possibly other
- * places.) The lists can be empty, if the index AM allows that.
+ * We build a list of RestrictInfo nodes for clauses to be used with this
+ * index, along with an integer list of the index column numbers (zero
+ * based) that each clause should be used with. The clauses are ordered
+ * by index key, so that the column numbers form a nondecreasing sequence.
+ * (This order is depended on by btree and possibly other places.) The
+ * lists can be empty, if the index AM allows that.
*
- * found_clause is set true only if there's at least one index clause;
- * and if saop_control is SAOP_REQUIRE, it has to be a ScalarArrayOpExpr
+ * found_clause is set true only if there's at least one index clause; and
+ * if saop_control is SAOP_REQUIRE, it has to be a ScalarArrayOpExpr
* clause.
*
- * We also build a Relids set showing which outer rels are required
- * by the selected clauses.
+ * We also build a Relids set showing which outer rels are required by the
+ * selected clauses.
*/
index_clauses = NIL;
clause_columns = NIL;
@@ -706,8 +705,8 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
* If no clauses match the first index column, check for amoptionalkey
* restriction. We can't generate a scan over an index with
* amoptionalkey = false unless there's at least one index clause.
- * (When working on columns after the first, this test cannot fail.
- * It is always okay for columns after the first to not have any
+ * (When working on columns after the first, this test cannot fail. It
+ * is always okay for columns after the first to not have any
* clauses.)
*/
if (index_clauses == NIL && !index->amoptionalkey)
@@ -759,7 +758,7 @@ build_index_paths(PlannerInfo *root, RelOptInfo *rel,
}
/*
- * 3. Check if an index-only scan is possible. If we're not building
+ * 3. Check if an index-only scan is possible. If we're not building
* plain indexscans, this isn't relevant since bitmap scans don't support
* index data retrieval anyway.
*/
@@ -865,8 +864,8 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
/*
* Ignore partial indexes that do not match the query. If a partial
- * index is marked predOK then we know it's OK. Otherwise, we have
- * to test whether the added clauses are sufficient to imply the
+ * index is marked predOK then we know it's OK. Otherwise, we have to
+ * test whether the added clauses are sufficient to imply the
* predicate. If so, we can use the index in the current context.
*
* We set useful_predicate to true iff the predicate was proven using
@@ -904,8 +903,8 @@ build_paths_for_OR(PlannerInfo *root, RelOptInfo *rel,
match_clauses_to_index(index, clauses, &clauseset);
/*
- * If no matches so far, and the index predicate isn't useful,
- * we don't want it.
+ * If no matches so far, and the index predicate isn't useful, we
+ * don't want it.
*/
if (!clauseset.nonempty && !useful_predicate)
continue;
@@ -997,7 +996,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
generate_bitmap_or_paths(root, rel,
andargs,
all_clauses,
- restriction_only));
+ restriction_only));
}
else
{
@@ -1053,7 +1052,7 @@ generate_bitmap_or_paths(PlannerInfo *root, RelOptInfo *rel,
*
* This is a helper for generate_bitmap_or_paths(). We leave OR clauses
* in the list whether they are joins or not, since we might be able to
- * extract a restriction item from an OR list. It's safe to leave such
+ * extract a restriction item from an OR list. It's safe to leave such
* clauses in the list because match_clauses_to_index() will ignore them,
* so there's no harm in passing such clauses to build_paths_for_OR().
*/
@@ -1361,7 +1360,7 @@ bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel, List *paths)
apath.path.type = T_BitmapAndPath;
apath.path.pathtype = T_BitmapAnd;
apath.path.parent = rel;
- apath.path.param_info = NULL; /* not used in bitmap trees */
+ apath.path.param_info = NULL; /* not used in bitmap trees */
apath.path.pathkeys = NIL;
apath.bitmapquals = paths;
cost_bitmap_and_node(&apath, root);
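
The hunk above shows bitmap_and_cost_est filling a throwaway BitmapAndPath on the stack purely to run the costing code. A toy sketch of the underlying selectivity idea, assuming independent clauses; the real cost_bitmap_and_node also charges per-input CPU costs and is not this simple:

#include <stdio.h>

/* Toy model: combined selectivity of ANDed bitmap quals as a product. */
static double
and_selectivity(const double *sels, int n)
{
	double		result = 1.0;

	for (int i = 0; i < n; i++)
		result *= sels[i];
	return result;
}

int
main(void)
{
	double		sels[] = {0.05, 0.10};

	printf("combined selectivity: %g\n", and_selectivity(sels, 2));
	return 0;
}
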
@@ -1464,7 +1463,7 @@ get_bitmap_tree_required_outer(Path *bitmapqual)
foreach(lc, ((BitmapAndPath *) bitmapqual)->bitmapquals)
{
result = bms_join(result,
- get_bitmap_tree_required_outer((Path *) lfirst(lc)));
+ get_bitmap_tree_required_outer((Path *) lfirst(lc)));
}
}
else if (IsA(bitmapqual, BitmapOrPath))
@@ -1472,7 +1471,7 @@ get_bitmap_tree_required_outer(Path *bitmapqual)
foreach(lc, ((BitmapOrPath *) bitmapqual)->bitmapquals)
{
result = bms_join(result,
- get_bitmap_tree_required_outer((Path *) lfirst(lc)));
+ get_bitmap_tree_required_outer((Path *) lfirst(lc)));
}
}
else
@@ -1581,16 +1580,16 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
return false;
/*
- * Check that all needed attributes of the relation are available from
- * the index.
+ * Check that all needed attributes of the relation are available from the
+ * index.
*
* XXX this is overly conservative for partial indexes, since we will
* consider attributes involved in the index predicate as required even
- * though the predicate won't need to be checked at runtime. (The same
- * is true for attributes used only in index quals, if we are certain
- * that the index is not lossy.) However, it would be quite expensive
- * to determine that accurately at this point, so for now we take the
- * easy way out.
+ * though the predicate won't need to be checked at runtime. (The same is
+ * true for attributes used only in index quals, if we are certain that
+ * the index is not lossy.) However, it would be quite expensive to
+ * determine that accurately at this point, so for now we take the easy
+ * way out.
*/
/*
@@ -1603,7 +1602,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
/* Add all the attributes used by restriction clauses. */
foreach(lc, rel->baserestrictinfo)
{
- RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
+ RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc);
pull_varattnos((Node *) rinfo->clause, rel->relid, &attrs_used);
}
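
check_index_only, per the comment above, boils down to a set-coverage test: every attribute the query needs must be available from the index. A minimal sketch using a plain bitmask; the backend uses Bitmapset and offsets attribute numbers to accommodate system columns, details elided here:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model: attribute numbers as bit positions in a uint64_t. */
static bool
index_covers(uint64_t attrs_needed, uint64_t attrs_in_index)
{
	return (attrs_needed & ~attrs_in_index) == 0;
}

int
main(void)
{
	uint64_t	needed = (1u << 1) | (1u << 3);	/* columns 1 and 3 */
	uint64_t	indexed = (1u << 1) | (1u << 2) | (1u << 3);

	printf("index-only scan possible: %s\n",
		   index_covers(needed, indexed) ? "yes" : "no");
	return 0;
}
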
@@ -1611,7 +1610,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
/* Construct a bitmapset of columns stored in the index. */
for (i = 0; i < index->ncolumns; i++)
{
- int attno = index->indexkeys[i];
+ int attno = index->indexkeys[i];
/*
* For the moment, we just ignore index expressions. It might be nice
@@ -1642,7 +1641,7 @@ check_index_only(RelOptInfo *rel, IndexOptInfo *index)
* Since we produce parameterized paths before we've begun to generate join
* relations, it's impossible to predict exactly how many times a parameterized
* path will be iterated; we don't know the size of the relation that will be
- * on the outside of the nestloop. However, we should try to account for
+ * on the outside of the nestloop. However, we should try to account for
* multiple iterations somehow in costing the path. The heuristic embodied
* here is to use the rowcount of the smallest other base relation needed in
* the join clauses used by the path. (We could alternatively consider the
@@ -1676,7 +1675,7 @@ get_loop_count(PlannerInfo *root, Relids outer_relids)
outer_rel = root->simple_rel_array[relid];
if (outer_rel == NULL)
continue;
- Assert(outer_rel->relid == relid); /* sanity check on array */
+ Assert(outer_rel->relid == relid); /* sanity check on array */
/* Other relation could be proven empty, if so ignore */
if (IS_DUMMY_REL(outer_rel))
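
The get_loop_count heuristic described above takes, among the base relations supplying parameter values, the smallest row count as the expected iteration count. A standalone sketch of just that reduction, with dummy (proven-empty) rels assumed already filtered out and all numbers invented:

#include <stdio.h>

static double
loop_count(const double *outer_rows, int n)
{
	double		result = 1.0;	/* default, as when there are no outer rels */

	for (int i = 0; i < n; i++)
	{
		if (result == 1.0 || outer_rows[i] < result)
			result = outer_rows[i];	/* smallest outer rel seen so far */
	}
	return result;
}

int
main(void)
{
	double		rows[] = {1000.0, 42.0, 500.0};

	printf("estimated iterations: %g\n", loop_count(rows, 3));
	return 0;
}
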
@@ -1851,7 +1850,7 @@ match_clause_to_index(IndexOptInfo *index,
* doesn't involve a volatile function or a Var of the index's relation.
* In particular, Vars belonging to other relations of the query are
* accepted here, since a clause of that form can be used in a
- * parameterized indexscan. It's the responsibility of higher code levels
+ * parameterized indexscan. It's the responsibility of higher code levels
* to manage restriction and join clauses appropriately.
*
* Note: we do need to check for Vars of the index's relation on the
@@ -2149,7 +2148,7 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
List *clause_columns = NIL;
ListCell *lc1;
- *orderby_clauses_p = NIL; /* set default results */
+ *orderby_clauses_p = NIL; /* set default results */
*clause_columns_p = NIL;
/* Only indexes with the amcanorderbyop property are interesting here */
@@ -2195,9 +2194,9 @@ match_pathkeys_to_index(IndexOptInfo *index, List *pathkeys,
/*
* We allow any column of the index to match each pathkey; they
- * don't have to match left-to-right as you might expect. This
- * is correct for GiST, which is the sole existing AM supporting
- * amcanorderbyop. We might need different logic in future for
+ * don't have to match left-to-right as you might expect. This is
+ * correct for GiST, which is the sole existing AM supporting
+ * amcanorderbyop. We might need different logic in future for
* other implementations.
*/
for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
@@ -2393,8 +2392,8 @@ eclass_member_matches_indexcol(EquivalenceClass *ec, EquivalenceMember *em,
* If it's a btree index, we can reject it if its opfamily isn't
* compatible with the EC, since no clause generated from the EC could be
* used with the index. For non-btree indexes, we can't easily tell
- * whether clauses generated from the EC could be used with the index,
- * so don't check the opfamily. This might mean we return "true" for a
+ * whether clauses generated from the EC could be used with the index, so
+ * don't check the opfamily. This might mean we return "true" for a
* useless EC, so we have to recheck the results of
* generate_implied_equalities_for_indexcol; see
* match_eclass_clauses_to_index.
@@ -2425,7 +2424,7 @@ eclass_member_matches_indexcol(EquivalenceClass *ec, EquivalenceMember *em,
* if it is true.
* 2. A list of expressions in this relation, and a corresponding list of
* equality operators. The caller must have already checked that the operators
- * represent equality. (Note: the operators could be cross-type; the
+ * represent equality. (Note: the operators could be cross-type; the
* expressions should correspond to their RHS inputs.)
*
* The caller need only supply equality conditions arising from joins;
@@ -2571,7 +2570,7 @@ relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel,
* notion of equality.
*/
- matched = true; /* column is unique */
+ matched = true; /* column is unique */
break;
}
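
relation_has_unique_index_for, as the hunk above suggests, declares the join output provably unique when every key column of some unique index is matched by an equality condition. A toy sketch of that coverage check; column numbers are illustrative, and the operator-compatibility check mentioned in the comment is elided:

#include <stdbool.h>
#include <stdio.h>

static bool
index_proves_unique(const int *indexkeys, int nkeys,
					const int *eq_cols, int neq)
{
	for (int k = 0; k < nkeys; k++)
	{
		bool		matched = false;

		for (int e = 0; e < neq; e++)
		{
			if (eq_cols[e] == indexkeys[k])
			{
				matched = true;	/* this key column is pinned by an equality */
				break;
			}
		}
		if (!matched)
			return false;		/* some key column has no equality clause */
	}
	return true;
}

int
main(void)
{
	int			unique_idx[] = {1, 2};	/* UNIQUE (col1, col2) */
	int			join_eq_cols[] = {2, 1, 5};

	printf("join rows provably unique: %s\n",
		   index_proves_unique(unique_idx, 2, join_eq_cols, 3) ? "yes" : "no");
	return 0;
}
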
@@ -3300,9 +3299,9 @@ adjust_rowcompare_for_index(RowCompareExpr *clause,
/*
* See how many of the remaining columns match some index column in the
- * same way. As in match_clause_to_indexcol(), the "other" side of
- * any potential index condition is OK as long as it doesn't use Vars from
- * the indexed relation.
+ * same way. As in match_clause_to_indexcol(), the "other" side of any
+ * potential index condition is OK as long as it doesn't use Vars from the
+ * indexed relation.
*/
matching_cols = 1;
largs_cell = lnext(list_head(clause->largs));
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 446319d135..65f86194e1 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -103,7 +103,7 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* If it's SEMI or ANTI join, compute correction factors for cost
- * estimation. These will be the same for all paths.
+ * estimation. These will be the same for all paths.
*/
if (jointype == JOIN_SEMI || jointype == JOIN_ANTI)
compute_semi_anti_join_factors(root, outerrel, innerrel,
@@ -118,7 +118,7 @@ add_paths_to_joinrel(PlannerInfo *root,
* to the parameter source rel instead of joining to the other input rel.
* This restriction reduces the number of parameterized paths we have to
* deal with at higher join levels, without compromising the quality of
- * the resulting plan. We express the restriction as a Relids set that
+ * the resulting plan. We express the restriction as a Relids set that
* must overlap the parameterization of any proposed join path.
*/
foreach(lc, root->join_info_list)
@@ -136,7 +136,7 @@ add_paths_to_joinrel(PlannerInfo *root,
!bms_overlap(joinrel->relids, sjinfo->min_lefthand))
param_source_rels = bms_join(param_source_rels,
bms_difference(root->all_baserels,
- sjinfo->min_righthand));
+ sjinfo->min_righthand));
/* full joins constrain both sides symmetrically */
if (sjinfo->jointype == JOIN_FULL &&
@@ -144,7 +144,7 @@ add_paths_to_joinrel(PlannerInfo *root,
!bms_overlap(joinrel->relids, sjinfo->min_righthand))
param_source_rels = bms_join(param_source_rels,
bms_difference(root->all_baserels,
- sjinfo->min_lefthand));
+ sjinfo->min_lefthand));
}
/*
@@ -216,11 +216,11 @@ try_nestloop_path(PlannerInfo *root,
List *pathkeys)
{
Relids required_outer;
- JoinCostWorkspace workspace;
+ JoinCostWorkspace workspace;
/*
- * Check to see if proposed path is still parameterized, and reject if
- * the parameterization wouldn't be sensible.
+ * Check to see if proposed path is still parameterized, and reject if the
+ * parameterization wouldn't be sensible.
*/
required_outer = calc_nestloop_required_outer(outer_path,
inner_path);
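
The parameterization sanity check reworded above has a compact shape: the join's required outer rels are the union of both inputs' requirements minus the rels the join itself supplies, and the path is rejected unless that set overlaps param_source_rels. A sketch with bitmask Relids; the real code uses Bitmapset and handles the inner-references-outer case a little more carefully:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t Relids;		/* toy Relids: bit i = base relation i */

int
main(void)
{
	Relids		outer_req = 0;			/* outer path unparameterized */
	Relids		inner_req = 1u << 3;	/* inner path needs rel 3 */
	Relids		join_relids = (1u << 1) | (1u << 2);	/* rels this join supplies */
	Relids		param_source_rels = 1u << 3;

	Relids		required_outer = (outer_req | inner_req) & ~join_relids;

	if (required_outer != 0 && (required_outer & param_source_rels) == 0)
		printf("reject: parameterization would not be sensible\n");
	else
		printf("keep path (required_outer = %#llx)\n",
			   (unsigned long long) required_outer);
	return 0;
}
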
@@ -289,14 +289,14 @@ try_mergejoin_path(PlannerInfo *root,
List *innersortkeys)
{
Relids required_outer;
- JoinCostWorkspace workspace;
+ JoinCostWorkspace workspace;
/*
- * Check to see if proposed path is still parameterized, and reject if
- * the parameterization wouldn't be sensible.
+ * Check to see if proposed path is still parameterized, and reject if the
+ * parameterization wouldn't be sensible.
*/
required_outer = calc_non_nestloop_required_outer(outer_path,
- inner_path);
+ inner_path);
if (required_outer &&
!bms_overlap(required_outer, param_source_rels))
{
@@ -368,14 +368,14 @@ try_hashjoin_path(PlannerInfo *root,
List *hashclauses)
{
Relids required_outer;
- JoinCostWorkspace workspace;
+ JoinCostWorkspace workspace;
/*
- * Check to see if proposed path is still parameterized, and reject if
- * the parameterization wouldn't be sensible.
+ * Check to see if proposed path is still parameterized, and reject if the
+ * parameterization wouldn't be sensible.
*/
required_outer = calc_non_nestloop_required_outer(outer_path,
- inner_path);
+ inner_path);
if (required_outer &&
!bms_overlap(required_outer, param_source_rels))
{
@@ -487,7 +487,7 @@ sort_inner_and_outer(PlannerInfo *root,
*
* This function intentionally does not consider parameterized input paths
* (implicit in the fact that it only looks at cheapest_total_path, which
- * is always unparameterized). If we did so, we'd have a combinatorial
+ * is always unparameterized). If we did so, we'd have a combinatorial
* explosion of mergejoin paths of dubious value. This interacts with
* decisions elsewhere that also discriminate against mergejoins with
* parameterized inputs; see comments in src/backend/optimizer/README.
@@ -582,8 +582,8 @@ sort_inner_and_outer(PlannerInfo *root,
* And now we can make the path.
*
* Note: it's possible that the cheapest paths will already be sorted
- * properly. try_mergejoin_path will detect that case and suppress
- * an explicit sort step, so we needn't do so here.
+ * properly. try_mergejoin_path will detect that case and suppress an
+ * explicit sort step, so we needn't do so here.
*/
try_mergejoin_path(root,
joinrel,
@@ -733,8 +733,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* If we need to unique-ify the outer path, it's pointless to consider
- * any but the cheapest outer. (XXX we don't consider parameterized
- * outers, nor inners, for unique-ified cases. Should we?)
+ * any but the cheapest outer. (XXX we don't consider parameterized
+ * outers, nor inners, for unique-ified cases. Should we?)
*/
if (save_jointype == JOIN_UNIQUE_OUTER)
{
@@ -774,9 +774,9 @@ match_unsorted_outer(PlannerInfo *root,
{
/*
* Consider nestloop joins using this outer path and various
- * available paths for the inner relation. We consider the
- * cheapest-total paths for each available parameterization of
- * the inner relation, including the unparameterized case.
+ * available paths for the inner relation. We consider the
+ * cheapest-total paths for each available parameterization of the
+ * inner relation, including the unparameterized case.
*/
ListCell *lc2;
@@ -847,8 +847,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Generate a mergejoin on the basis of sorting the cheapest inner.
* Since a sort will be needed, only cheapest total cost matters. (But
- * try_mergejoin_path will do the right thing if
- * inner_cheapest_total is already correctly sorted.)
+ * try_mergejoin_path will do the right thing if inner_cheapest_total
+ * is already correctly sorted.)
*/
try_mergejoin_path(root,
joinrel,
@@ -873,9 +873,9 @@ match_unsorted_outer(PlannerInfo *root,
* mergejoin using a subset of the merge clauses. Here, we consider
* both cheap startup cost and cheap total cost.
*
- * Currently we do not consider parameterized inner paths here.
- * This interacts with decisions elsewhere that also discriminate
- * against mergejoins with parameterized inputs; see comments in
+ * Currently we do not consider parameterized inner paths here. This
+ * interacts with decisions elsewhere that also discriminate against
+ * mergejoins with parameterized inputs; see comments in
* src/backend/optimizer/README.
*
* As we shorten the sortkey list, we should consider only paths that
@@ -1189,7 +1189,7 @@ hash_inner_and_outer(PlannerInfo *root,
if (outerpath == cheapest_startup_outer &&
innerpath == cheapest_total_inner)
- continue; /* already tried it */
+ continue; /* already tried it */
try_hashjoin_path(root,
joinrel,
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 24d4651507..e6a0f8dab6 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -90,7 +90,7 @@ join_search_one_level(PlannerInfo *root, int level)
if (level == 2) /* consider remaining initial rels */
other_rels = lnext(r);
- else /* consider all initial rels */
+ else /* consider all initial rels */
other_rels = list_head(joinrels[1]);
make_rels_by_clause_joins(root,
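
The level-2 special case above exists so each unordered pair of initial rels is built exactly once, while higher levels must pair a level-(N-1) rel with every initial rel. A toy illustration of that enumeration shape only; clause-connectedness pruning and overlap checks are elided:

#include <stdio.h>

#define NINITIAL 4

int
main(void)
{
	const char *initial[NINITIAL] = {"t1", "t2", "t3", "t4"};

	/* level 2: start the inner scan after the outer cell, no duplicates */
	for (int i = 0; i < NINITIAL; i++)
		for (int j = i + 1; j < NINITIAL; j++)
			printf("level 2: {%s %s}\n", initial[i], initial[j]);

	/*
	 * Level 3 would combine each level-2 rel with every initial rel,
	 * skipping combinations whose relid sets overlap.
	 */
	return 0;
}
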
@@ -180,7 +180,7 @@ join_search_one_level(PlannerInfo *root, int level)
/*----------
* Normally, we should always have made at least one join of the current
* level. However, when special joins are involved, there may be no legal
- * way to make an N-way join for some values of N. For example consider
+ * way to make an N-way join for some values of N. For example consider
*
* SELECT ... FROM t1 WHERE
* x IN (SELECT ... FROM t2,t3 WHERE ...) AND
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index cab7951891..c918c4e8da 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -95,8 +95,8 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
/*
* Find potentially interesting OR joinclauses. We can use any joinclause
* that is considered safe to move to this rel by the parameterized-path
- * machinery, even though what we are going to do with it is not exactly
- * a parameterized path.
+ * machinery, even though what we are going to do with it is not exactly a
+ * parameterized path.
*/
foreach(i, rel->joininfo)
{
@@ -109,7 +109,7 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
* Use the generate_bitmap_or_paths() machinery to estimate the
* value of each OR clause. We can use regular restriction
* clauses along with the OR clause contents to generate
- * indexquals. We pass restriction_only = true so that any
+ * indexquals. We pass restriction_only = true so that any
* sub-clauses that are actually joins will be ignored.
*/
List *orpaths;
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 0603a94e48..20a5644edd 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -223,7 +223,7 @@ canonicalize_pathkeys(PlannerInfo *root, List *pathkeys)
*
* If rel is not NULL, it identifies a specific relation we're considering
* a path for, and indicates that child EC members for that relation can be
- * considered. Otherwise child members are ignored. (See the comments for
+ * considered. Otherwise child members are ignored. (See the comments for
* get_eclass_for_sort_expr.)
*
* create_it is TRUE if we should create any missing EquivalenceClass
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index c34b9b8c38..65ad1694b0 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -1138,10 +1138,10 @@ create_indexscan_plan(PlannerInfo *root,
/*
* The qpqual list must contain all restrictions not automatically handled
* by the index, other than pseudoconstant clauses which will be handled
- * by a separate gating plan node. All the predicates in the indexquals
+ * by a separate gating plan node. All the predicates in the indexquals
* will be checked (either by the index itself, or by nodeIndexscan.c),
* but if there are any "special" operators involved then they must be
- * included in qpqual. The upshot is that qpqual must contain
+ * included in qpqual. The upshot is that qpqual must contain
* scan_clauses minus whatever appears in indexquals.
*
* In normal cases simple pointer equality checks will be enough to spot
@@ -1189,7 +1189,7 @@ create_indexscan_plan(PlannerInfo *root,
get_parse_rowmark(root->parse, baserelid) == NULL)
if (predicate_implied_by(clausel,
best_path->indexinfo->indpred))
- continue; /* implied by index predicate */
+ continue; /* implied by index predicate */
}
}
qpqual = lappend(qpqual, rinfo);
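
The qpqual rule restated above ("scan_clauses minus whatever appears in indexquals") is a set difference over clauses. A toy sketch using strings in place of clause trees; the planner normally relies on pointer equality of RestrictInfos, falling back to predicate proof for special cases:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *scan_clauses[] = {"a = 1", "b < 5", "c LIKE 'x%'"};
	const char *indexquals[] = {"a = 1"};
	int			nscan = 3, nidx = 1;

	for (int i = 0; i < nscan; i++)
	{
		int			handled = 0;

		for (int j = 0; j < nidx; j++)
			if (strcmp(scan_clauses[i], indexquals[j]) == 0)
				handled = 1;	/* the index already enforces this one */
		if (!handled)
			printf("qpqual: %s\n", scan_clauses[i]);
	}
	return 0;
}
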
@@ -1228,7 +1228,7 @@ create_indexscan_plan(PlannerInfo *root,
indexoid,
fixed_indexquals,
fixed_indexorderbys,
- best_path->indexinfo->indextlist,
+ best_path->indexinfo->indextlist,
best_path->indexscandir);
else
scan_plan = (Scan *) make_indexscan(tlist,
@@ -1278,15 +1278,15 @@ create_bitmap_scan_plan(PlannerInfo *root,
/*
* The qpqual list must contain all restrictions not automatically handled
* by the index, other than pseudoconstant clauses which will be handled
- * by a separate gating plan node. All the predicates in the indexquals
+ * by a separate gating plan node. All the predicates in the indexquals
* will be checked (either by the index itself, or by
* nodeBitmapHeapscan.c), but if there are any "special" operators
- * involved then they must be added to qpqual. The upshot is that qpqual
+ * involved then they must be added to qpqual. The upshot is that qpqual
* must contain scan_clauses minus whatever appears in indexquals.
*
* This loop is similar to the comparable code in create_indexscan_plan(),
* but with some differences because it has to compare the scan clauses to
- * stripped (no RestrictInfos) indexquals. See comments there for more
+ * stripped (no RestrictInfos) indexquals. See comments there for more
* info.
*
* In normal cases simple equal() checks will be enough to spot duplicate
@@ -1880,14 +1880,14 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
Assert(rte->rtekind == RTE_RELATION);
/*
- * Sort clauses into best execution order. We do this first since the
- * FDW might have more info than we do and wish to adjust the ordering.
+ * Sort clauses into best execution order. We do this first since the FDW
+ * might have more info than we do and wish to adjust the ordering.
*/
scan_clauses = order_qual_clauses(root, scan_clauses);
/*
* Let the FDW perform its processing on the restriction clauses and
- * generate the plan node. Note that the FDW might remove restriction
+ * generate the plan node. Note that the FDW might remove restriction
* clauses that it intends to execute remotely, or even add more (if it
* has selected some join clauses for remote use but also wants them
* rechecked locally).
@@ -2005,7 +2005,7 @@ create_nestloop_plan(PlannerInfo *root,
bms_overlap(((PlaceHolderVar *) nlp->paramval)->phrels,
outerrelids) &&
bms_is_subset(find_placeholder_info(root,
- (PlaceHolderVar *) nlp->paramval,
+ (PlaceHolderVar *) nlp->paramval,
false)->ph_eval_at,
outerrelids))
{
@@ -2523,9 +2523,9 @@ replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
/*
* If not to be replaced, just return the PlaceHolderVar unmodified.
- * We use bms_overlap as a cheap/quick test to see if the PHV might
- * be evaluated in the outer rels, and then grab its PlaceHolderInfo
- * to tell for sure.
+ * We use bms_overlap as a cheap/quick test to see if the PHV might be
+ * evaluated in the outer rels, and then grab its PlaceHolderInfo to
+ * tell for sure.
*/
if (!bms_overlap(phv->phrels, root->curOuterRels))
return node;
@@ -2612,7 +2612,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path)
/*
* Check to see if the indexkey is on the right; if so, commute
- * the clause. The indexkey should be the side that refers to
+ * the clause. The indexkey should be the side that refers to
* (only) the base relation.
*/
if (!bms_equal(rinfo->left_relids, index->rel->relids))
@@ -3690,13 +3690,12 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
{
/*
* If we are given a sort column number to match, only consider
- * the single TLE at that position. It's possible that there
- * is no such TLE, in which case fall through and generate a
- * resjunk targetentry (we assume this must have happened in the
- * parent plan as well). If there is a TLE but it doesn't match
- * the pathkey's EC, we do the same, which is probably the wrong
- * thing but we'll leave it to caller to complain about the
- * mismatch.
+ * the single TLE at that position. It's possible that there is
+ * no such TLE, in which case fall through and generate a resjunk
+ * targetentry (we assume this must have happened in the parent
+ * plan as well). If there is a TLE but it doesn't match the
+ * pathkey's EC, we do the same, which is probably the wrong thing
+ * but we'll leave it to caller to complain about the mismatch.
*/
tle = get_tle_by_resno(tlist, reqColIdx[numsortkeys]);
if (tle)
@@ -3746,11 +3745,11 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
if (!tle)
{
/*
- * No matching tlist item; look for a computable expression.
- * Note that we treat Aggrefs as if they were variables; this
- * is necessary when attempting to sort the output from an Agg
- * node for use in a WindowFunc (since grouping_planner will
- * have treated the Aggrefs as variables, too).
+ * No matching tlist item; look for a computable expression. Note
+ * that we treat Aggrefs as if they were variables; this is
+ * necessary when attempting to sort the output from an Agg node
+ * for use in a WindowFunc (since grouping_planner will have
+ * treated the Aggrefs as variables, too).
*/
Expr *sortexpr = NULL;
@@ -3769,7 +3768,8 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
continue;
/*
- * Ignore child members unless they match the rel being sorted.
+ * Ignore child members unless they match the rel being
+ * sorted.
*/
if (em->em_is_child &&
!bms_equal(em->em_relids, relids))
@@ -3817,7 +3817,7 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
NULL,
true);
tlist = lappend(tlist, tle);
- lefttree->targetlist = tlist; /* just in case NIL before */
+ lefttree->targetlist = tlist; /* just in case NIL before */
}
/*
@@ -3877,8 +3877,7 @@ find_ec_member_for_tle(EquivalenceClass *ec,
/*
* We shouldn't be trying to sort by an equivalence class that
- * contains a constant, so no need to consider such cases any
- * further.
+ * contains a constant, so no need to consider such cases any further.
*/
if (em->em_is_const)
continue;
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 231e835373..3c7fa632b8 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -192,9 +192,9 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars,
where_needed);
/*
- * If we are creating PlaceHolderInfos, mark them with the
- * correct maybe-needed locations. Otherwise, it's too late to
- * change that.
+ * If we are creating PlaceHolderInfos, mark them with the correct
+ * maybe-needed locations. Otherwise, it's too late to change
+ * that.
*/
if (create_new_ph)
mark_placeholder_maybe_needed(root, phinfo, where_needed);
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 7e2c6d2c31..be52d16ff0 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -116,9 +116,9 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
rtr = (RangeTblRef *) jtnode;
rte = planner_rt_fetch(rtr->rtindex, root);
if (rte->rtekind == RTE_RELATION)
- /* ordinary relation, ok */ ;
+ /* ordinary relation, ok */ ;
else if (rte->rtekind == RTE_SUBQUERY && rte->inh)
- /* flattened UNION ALL subquery, ok */ ;
+ /* flattened UNION ALL subquery, ok */ ;
else
return;
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index c439e9652c..9838dc45d5 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -146,8 +146,8 @@ query_planner(PlannerInfo *root, List *tlist,
/*
* Make a flattened version of the rangetable for faster access (this is
- * OK because the rangetable won't change any more), and set up an
- * empty array for indexing base relations.
+ * OK because the rangetable won't change any more), and set up an empty
+ * array for indexing base relations.
*/
setup_simple_rel_arrays(root);
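
The comment above motivates flattening the rangetable once it is frozen: list walks become O(1) array lookups. A minimal sketch of that idea with an invented RTNode type standing in for RangeTblEntry:

#include <stdio.h>

typedef struct RTNode
{
	const char *relname;
	struct RTNode *next;
} RTNode;

int
main(void)
{
	RTNode		c = {"t3", NULL};
	RTNode		b = {"t2", &c};
	RTNode		a = {"t1", &b};
	RTNode	   *flat[4] = {NULL};	/* slot 0 unused: RT indexes are 1-based */
	int			rti = 1;

	/* one pass over the frozen list builds the index array */
	for (RTNode *n = &a; n != NULL; n = n->next)
		flat[rti++] = n;

	printf("rte 2 is %s\n", flat[2]->relname);	/* O(1) access thereafter */
	return 0;
}
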
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 0b1ee971df..df76341c0a 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -766,9 +766,9 @@ inheritance_planner(PlannerInfo *root)
/*
* The rowMarks list might contain references to subquery RTEs, so
- * make a copy that we can apply ChangeVarNodes to. (Fortunately,
- * the executor doesn't need to see the modified copies --- we can
- * just pass it the original rowMarks list.)
+ * make a copy that we can apply ChangeVarNodes to. (Fortunately, the
+ * executor doesn't need to see the modified copies --- we can just
+ * pass it the original rowMarks list.)
*/
subroot.rowMarks = (List *) copyObject(root->rowMarks);
@@ -784,10 +784,11 @@ inheritance_planner(PlannerInfo *root)
/*
* If this isn't the first child Query, generate duplicates of all
- * subquery RTEs, and adjust Var numbering to reference the duplicates.
- * To simplify the loop logic, we scan the original rtable not the
- * copy just made by adjust_appendrel_attrs; that should be OK since
- * subquery RTEs couldn't contain any references to the target rel.
+ * subquery RTEs, and adjust Var numbering to reference the
+ * duplicates. To simplify the loop logic, we scan the original rtable
+ * not the copy just made by adjust_appendrel_attrs; that should be OK
+ * since subquery RTEs couldn't contain any references to the target
+ * rel.
*/
if (final_rtable != NIL)
{
@@ -800,7 +801,7 @@ inheritance_planner(PlannerInfo *root)
if (rte->rtekind == RTE_SUBQUERY)
{
- Index newrti;
+ Index newrti;
/*
* The RTE can't contain any references to its own RT
@@ -849,7 +850,7 @@ inheritance_planner(PlannerInfo *root)
else
final_rtable = list_concat(final_rtable,
list_copy_tail(subroot.parse->rtable,
- list_length(final_rtable)));
+ list_length(final_rtable)));
/*
* We need to collect all the RelOptInfos from all child plans into
@@ -1317,18 +1318,17 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
need_sort_for_grouping = true;
/*
- * Always override create_plan's tlist, so that we don't
- * sort useless data from a "physical" tlist.
+ * Always override create_plan's tlist, so that we don't sort
+ * useless data from a "physical" tlist.
*/
need_tlist_eval = true;
}
/*
- * create_plan returns a plan with just a "flat" tlist of
- * required Vars. Usually we need to insert the sub_tlist as the
- * tlist of the top plan node. However, we can skip that if we
- * determined that whatever create_plan chose to return will be
- * good enough.
+ * create_plan returns a plan with just a "flat" tlist of required
+ * Vars. Usually we need to insert the sub_tlist as the tlist of
+ * the top plan node. However, we can skip that if we determined
+ * that whatever create_plan chose to return will be good enough.
*/
if (need_tlist_eval)
{
@@ -1546,7 +1546,7 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
*
* Note: it's essential here to use PVC_INCLUDE_AGGREGATES so that
* Vars mentioned only in aggregate expressions aren't pulled out
- * as separate targetlist entries. Otherwise we could be putting
+ * as separate targetlist entries. Otherwise we could be putting
* ungrouped Vars directly into an Agg node's tlist, resulting in
* undefined behavior.
*/
@@ -2653,8 +2653,8 @@ make_subplanTargetList(PlannerInfo *root,
}
/*
- * Otherwise, we must build a tlist containing all grouping columns,
- * plus any other Vars mentioned in the targetlist and HAVING qual.
+ * Otherwise, we must build a tlist containing all grouping columns, plus
+ * any other Vars mentioned in the targetlist and HAVING qual.
*/
sub_tlist = NIL;
non_group_cols = NIL;
@@ -2705,8 +2705,8 @@ make_subplanTargetList(PlannerInfo *root,
else
{
/*
- * Non-grouping column, so just remember the expression
- * for later call to pull_var_clause. There's no need for
+ * Non-grouping column, so just remember the expression for
+ * later call to pull_var_clause. There's no need for
* pull_var_clause to examine the TargetEntry node itself.
*/
non_group_cols = lappend(non_group_cols, tle->expr);
@@ -2733,7 +2733,7 @@ make_subplanTargetList(PlannerInfo *root,
* add them to the result tlist if not already present. (A Var used
* directly as a GROUP BY item will be present already.) Note this
* includes Vars used in resjunk items, so we are covering the needs of
- * ORDER BY and window specifications. Vars used within Aggrefs will be
+ * ORDER BY and window specifications. Vars used within Aggrefs will be
* pulled out here, too.
*/
non_group_vars = pull_var_clause((Node *) non_group_cols,
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index db301e6c59..f375b5f76d 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -327,7 +327,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
break;
case T_IndexOnlyScan:
{
- IndexOnlyScan *splan = (IndexOnlyScan *) plan;
+ IndexOnlyScan *splan = (IndexOnlyScan *) plan;
return set_indexonlyscan_references(root, splan, rtoffset);
}
@@ -573,9 +573,9 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
lcrr, splan->resultRelations,
lcp, splan->plans)
{
- List *rlist = (List *) lfirst(lcrl);
- Index resultrel = lfirst_int(lcrr);
- Plan *subplan = (Plan *) lfirst(lcp);
+ List *rlist = (List *) lfirst(lcrl);
+ Index resultrel = lfirst_int(lcrr);
+ Plan *subplan = (Plan *) lfirst(lcp);
rlist = set_returning_clause_references(root,
rlist,
@@ -590,7 +590,7 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
* Set up the visible plan targetlist as being the same as
* the first RETURNING list. This is for the use of
* EXPLAIN; the executor won't pay any attention to the
- * targetlist. We postpone this step until here so that
+ * targetlist. We postpone this step until here so that
* we don't have to do set_returning_clause_references()
* twice on identical targetlists.
*/
@@ -1885,7 +1885,7 @@ record_plan_function_dependency(PlannerInfo *root, Oid funcid)
*/
inval_item->cacheId = PROCOID;
inval_item->hashValue = GetSysCacheHashValue1(PROCOID,
- ObjectIdGetDatum(funcid));
+ ObjectIdGetDatum(funcid));
root->glob->invalItems = lappend(root->glob->invalItems, inval_item);
}
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index fb6c704548..8ce6bee856 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -1822,8 +1822,8 @@ process_sublinks_mutator(Node *node, process_sublinks_context *context)
}
/*
- * Don't recurse into the arguments of an outer PHV or aggregate here.
- * Any SubLinks in the arguments have to be dealt with at the outer query
+ * Don't recurse into the arguments of an outer PHV or aggregate here. Any
+ * SubLinks in the arguments have to be dealt with at the outer query
* level; they'll be handled when build_subplan collects the PHV or Aggref
* into the arguments to be passed down to the current subplan.
*/
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 47ddae6992..be1219eb3d 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -332,6 +332,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -357,6 +358,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -384,6 +386,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -409,6 +412,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Any inserted
* joins can get stacked onto either j->larg or j->rarg,
@@ -439,7 +443,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
if (sublink->subLinkType == EXISTS_SUBLINK)
{
if ((j = convert_EXISTS_sublink_to_join(root, sublink, true,
- available_rels1)) != NULL)
+ available_rels1)) != NULL)
{
/* Yes; insert the new join node into the join tree */
j->larg = *jtlink1;
@@ -448,11 +452,12 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Because
- * we are underneath a NOT, we can't pull up sublinks
- * that reference the left-hand stuff, but it's still
- * okay to pull up sublinks referencing j->rarg.
+ * we are underneath a NOT, we can't pull up sublinks that
+ * reference the left-hand stuff, but it's still okay to
+ * pull up sublinks referencing j->rarg.
*/
j->quals = pull_up_sublinks_qual_recurse(root,
j->quals,
@@ -464,7 +469,7 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
}
if (available_rels2 != NULL &&
(j = convert_EXISTS_sublink_to_join(root, sublink, true,
- available_rels2)) != NULL)
+ available_rels2)) != NULL)
{
/* Yes; insert the new join node into the join tree */
j->larg = *jtlink2;
@@ -473,11 +478,12 @@ pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
j->rarg = pull_up_sublinks_jointree_recurse(root,
j->rarg,
&child_rels);
+
/*
* Now recursively process the pulled-up quals. Because
- * we are underneath a NOT, we can't pull up sublinks
- * that reference the left-hand stuff, but it's still
- * okay to pull up sublinks referencing j->rarg.
+ * we are underneath a NOT, we can't pull up sublinks that
+ * reference the left-hand stuff, but it's still okay to
+ * pull up sublinks referencing j->rarg.
*/
j->quals = pull_up_sublinks_qual_recurse(root,
j->quals,
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index 7b6d12de38..6475633ae7 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -150,9 +150,9 @@ plan_set_operations(PlannerInfo *root, double tuple_fraction,
Assert(parse->distinctClause == NIL);
/*
- * We'll need to build RelOptInfos for each of the leaf subqueries,
- * which are RTE_SUBQUERY rangetable entries in this Query. Prepare the
- * index arrays for that.
+ * We'll need to build RelOptInfos for each of the leaf subqueries, which
+ * are RTE_SUBQUERY rangetable entries in this Query. Prepare the index
+ * arrays for that.
*/
setup_simple_rel_arrays(root);
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 344ebb7989..73f5e11abe 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -113,7 +113,7 @@ static Expr *simplify_function(Oid funcid,
bool process_args, bool allow_non_const,
eval_const_expressions_context *context);
static List *expand_function_arguments(List *args, Oid result_type,
- HeapTuple func_tuple);
+ HeapTuple func_tuple);
static List *reorder_function_arguments(List *args, HeapTuple func_tuple);
static List *add_function_defaults(List *args, HeapTuple func_tuple);
static List *fetch_function_defaults(HeapTuple func_tuple);
@@ -181,7 +181,7 @@ make_opclause(Oid opno, Oid opresulttype, bool opretset,
Node *
get_leftop(const Expr *clause)
{
- const OpExpr *expr = (const OpExpr *) clause;
+ const OpExpr *expr = (const OpExpr *) clause;
if (expr->args != NIL)
return linitial(expr->args);
@@ -198,7 +198,7 @@ get_leftop(const Expr *clause)
Node *
get_rightop(const Expr *clause)
{
- const OpExpr *expr = (const OpExpr *) clause;
+ const OpExpr *expr = (const OpExpr *) clause;
if (list_length(expr->args) >= 2)
return lsecond(expr->args);
@@ -1128,15 +1128,15 @@ contain_nonstrict_functions_walker(Node *node, void *context)
}
/*****************************************************************************
- * Check clauses for non-leakproof functions
+ * Check clauses for non-leakproof functions
*****************************************************************************/
/*
* contain_leaky_functions
- * Recursively search for leaky functions within a clause.
+ * Recursively search for leaky functions within a clause.
*
* Returns true if any function call with side-effect may be present in the
- * clause. Qualifiers from outside the a security_barrier view should not
+ * clause. Qualifiers from outside a security_barrier view should not
* be pushed down into the view, lest the contents of tuples intended to be
* filtered out be revealed via side effects.
*/
@@ -1155,8 +1155,8 @@ contain_leaky_functions_walker(Node *node, void *context)
switch (nodeTag(node))
{
case T_Var:
- case T_Const:
- case T_Param:
+ case T_Const:
+ case T_Param:
case T_ArrayExpr:
case T_NamedArgExpr:
case T_BoolExpr:
@@ -1168,6 +1168,7 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_NullTest:
case T_BooleanTest:
case T_List:
+
/*
* We know these node types don't contain function calls; but
* something further down in the node tree might.
@@ -1176,7 +1177,7 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_FuncExpr:
{
- FuncExpr *expr = (FuncExpr *) node;
+ FuncExpr *expr = (FuncExpr *) node;
if (!get_func_leakproof(expr->funcid))
return true;
@@ -1187,7 +1188,7 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_DistinctExpr: /* struct-equivalent to OpExpr */
case T_NullIfExpr: /* struct-equivalent to OpExpr */
{
- OpExpr *expr = (OpExpr *) node;
+ OpExpr *expr = (OpExpr *) node;
set_opfuncid(expr);
if (!get_func_leakproof(expr->opfuncid))
@@ -1208,11 +1209,11 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_CoerceViaIO:
{
CoerceViaIO *expr = (CoerceViaIO *) node;
- Oid funcid;
- Oid ioparam;
- bool varlena;
+ Oid funcid;
+ Oid ioparam;
+ bool varlena;
- getTypeInputInfo(exprType((Node *)expr->arg),
+ getTypeInputInfo(exprType((Node *) expr->arg),
&funcid, &ioparam);
if (!get_func_leakproof(funcid))
return true;
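
The walker diffed above implements a conservative contract: report "leaky" as soon as any function in the expression tree is not marked leakproof, since such a function could expose tuple contents through errors or other side effects. A self-contained sketch of that contract with an invented two-child Expr type (the flag would come from pg_proc.proleakproof in the backend):

#include <stdbool.h>
#include <stdio.h>

typedef struct Expr
{
	const char *funcname;
	bool		leakproof;
	struct Expr *args[2];
} Expr;

static bool
contains_leaky(const Expr *e)
{
	if (e == NULL)
		return false;
	if (!e->leakproof)
		return true;			/* stop at the first non-leakproof function */
	return contains_leaky(e->args[0]) || contains_leaky(e->args[1]);
}

int
main(void)
{
	Expr		leaky = {"f_leaky", false, {NULL, NULL}};
	Expr		top = {"f_safe", true, {&leaky, NULL}};

	printf("push qual past security barrier: %s\n",
		   contains_leaky(&top) ? "no" : "yes");
	return 0;
}
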
@@ -1226,11 +1227,11 @@ contain_leaky_functions_walker(Node *node, void *context)
case T_ArrayCoerceExpr:
{
ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
- Oid funcid;
- Oid ioparam;
- bool varlena;
+ Oid funcid;
+ Oid ioparam;
+ bool varlena;
- getTypeInputInfo(exprType((Node *)expr->arg),
+ getTypeInputInfo(exprType((Node *) expr->arg),
&funcid, &ioparam);
if (!get_func_leakproof(funcid))
return true;
@@ -1247,7 +1248,7 @@ contain_leaky_functions_walker(Node *node, void *context)
foreach(opid, rcexpr->opnos)
{
- Oid funcid = get_opcode(lfirst_oid(opid));
+ Oid funcid = get_opcode(lfirst_oid(opid));
if (!get_func_leakproof(funcid))
return true;
@@ -1256,6 +1257,7 @@ contain_leaky_functions_walker(Node *node, void *context)
break;
default:
+
/*
* If we don't recognize the node tag, assume it might be leaky.
* This prevents an unexpected security hole if someone adds a new
@@ -2683,7 +2685,7 @@ eval_const_expressions_mutator(Node *node,
-1,
InvalidOid,
sizeof(Oid),
- ObjectIdGetDatum(intypioparam),
+ ObjectIdGetDatum(intypioparam),
false,
true),
makeConst(INT4OID,
@@ -2812,13 +2814,13 @@ eval_const_expressions_mutator(Node *node,
* TRUE: drop all remaining alternatives
* If the first non-FALSE alternative is a constant TRUE,
* we can simplify the entire CASE to that alternative's
- * expression. If there are no non-FALSE alternatives,
+ * expression. If there are no non-FALSE alternatives,
* we simplify the entire CASE to the default result (ELSE).
*
* If we have a simple-form CASE with constant test
* expression, we substitute the constant value for contained
* CaseTestExpr placeholder nodes, so that we have the
- * opportunity to reduce constant test conditions. For
+ * opportunity to reduce constant test conditions. For
* example this allows
* CASE 0 WHEN 0 THEN 1 ELSE 1/0 END
* to reduce to 1 rather than drawing a divide-by-0 error.
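
The CASE 0 WHEN 0 THEN 1 ELSE 1/0 END example above can be modeled directly: with a constant test expression, the first matching WHEN arm is chosen at plan time and later arms are never evaluated. A toy constant-folding sketch, with all values invented:

#include <stdio.h>

int
main(void)
{
	int			testexpr = 0;	/* the constant CASE argument */
	int			when_vals[] = {0, 1};
	int			then_vals[] = {1, -1};

	for (int i = 0; i < 2; i++)
	{
		if (testexpr == when_vals[i])
		{
			printf("folded to %d\n", then_vals[i]);	/* prints 1 */
			return 0;
		}
	}
	/* the ELSE arm (the 1/0 above) is reached only if no WHEN matched */
	printf("would evaluate ELSE arm\n");
	return 0;
}
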
@@ -3581,12 +3583,12 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
* deliver a constant result, use a transform function to generate a
* substitute node tree, or expand in-line the body of the function
* definition (which only works for simple SQL-language functions, but
- * that is a common case). Each case needs access to the function's
+ * that is a common case). Each case needs access to the function's
* pg_proc tuple, so fetch it just once.
*
* Note: the allow_non_const flag suppresses both the second and third
- * strategies; so if !allow_non_const, simplify_function can only return
- * a Const or NULL. Argument-list rewriting happens anyway, though.
+ * strategies; so if !allow_non_const, simplify_function can only return a
+ * Const or NULL. Argument-list rewriting happens anyway, though.
*/
func_tuple = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
if (!HeapTupleIsValid(func_tuple))
@@ -3603,7 +3605,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
{
args = expand_function_arguments(args, result_type, func_tuple);
args = (List *) expression_tree_mutator((Node *) args,
- eval_const_expressions_mutator,
+ eval_const_expressions_mutator,
(void *) context);
/* Argument processing done, give it back to the caller */
*args_p = args;
@@ -3618,7 +3620,7 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
if (!newexpr && allow_non_const && OidIsValid(func_form->protransform))
{
/*
- * Build a dummy FuncExpr node containing the simplified arg list. We
+ * Build a dummy FuncExpr node containing the simplified arg list. We
* use this approach to present a uniform interface to the transform
* function regardless of how the function is actually being invoked.
*/
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 61502aa642..00052f5c84 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -128,11 +128,11 @@ compare_fractional_path_costs(Path *path1, Path *path2,
*
* The fuzz_factor argument must be 1.0 plus delta, where delta is the
* fraction of the smaller cost that is considered to be a significant
- * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
+ * difference. For example, fuzz_factor = 1.01 makes the fuzziness limit
* be 1% of the smaller cost.
*
* The two paths are said to have "equal" costs if both startup and total
- * costs are fuzzily the same. Path1 is said to be better than path2 if
+ * costs are fuzzily the same. Path1 is said to be better than path2 if
* it has fuzzily better startup cost and fuzzily no worse total cost,
* or if it has fuzzily better total cost and fuzzily no worse startup cost.
* Path2 is better than path1 if the reverse holds. Finally, if one path
@@ -190,9 +190,9 @@ compare_path_costs_fuzzily(Path *path1, Path *path2, double fuzz_factor)
* and save them in the rel's cheapest-path fields.
*
* Only unparameterized paths are considered candidates for cheapest_startup
- * and cheapest_total. The cheapest_parameterized_paths list collects paths
+ * and cheapest_total. The cheapest_parameterized_paths list collects paths
* that are cheapest-total for their parameterization (i.e., there is no
- * cheaper path with the same or weaker parameterization). This list always
+ * cheaper path with the same or weaker parameterization). This list always
* includes the unparameterized cheapest-total path, too.
*
* This is normally called only after we've finished constructing the path
@@ -294,8 +294,8 @@ set_cheapest(RelOptInfo *parent_rel)
*
* There is one policy decision embedded in this function, along with its
* sibling add_path_precheck: we treat all parameterized paths as having
- * NIL pathkeys, so that they compete only on cost. This is to reduce
- * the number of parameterized paths that are kept. See discussion in
+ * NIL pathkeys, so that they compete only on cost. This is to reduce
+ * the number of parameterized paths that are kept. See discussion in
* src/backend/optimizer/README.
*
* The pathlist is kept sorted by total_cost, with cheaper paths
@@ -358,7 +358,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
p1_next = lnext(p1);
/*
- * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this
+ * Do a fuzzy cost comparison with 1% fuzziness limit. (XXX does this
* percentage need to be user-configurable?)
*/
costcmp = compare_path_costs_fuzzily(new_path, old_path, 1.01);
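
The fuzzy comparison invoked here follows the rule reworded in the earlier hunk: total costs are compared first, startup cost breaks ties, and a difference only counts when it exceeds the fuzz factor (1.01 = 1% of the smaller cost). A standalone sketch of that decision table; the enum names mirror the ones visible in this diff but the function body is a simplified model:

#include <stdio.h>

typedef enum
{
	COSTS_EQUAL, COSTS_BETTER1, COSTS_BETTER2, COSTS_DIFFERENT
} CostCmp;

static CostCmp
fuzzy_cmp(double s1, double t1, double s2, double t2, double fuzz)
{
	if (t1 > t2 * fuzz)			/* path1 fuzzily worse on total */
		return (s2 > s1 * fuzz) ? COSTS_DIFFERENT : COSTS_BETTER2;
	if (t2 > t1 * fuzz)			/* path2 fuzzily worse on total */
		return (s1 > s2 * fuzz) ? COSTS_DIFFERENT : COSTS_BETTER1;
	if (s1 > s2 * fuzz)			/* totals tie fuzzily; startup decides */
		return COSTS_BETTER2;
	if (s2 > s1 * fuzz)
		return COSTS_BETTER1;
	return COSTS_EQUAL;
}

int
main(void)
{
	/* totals 0.5% apart: within the 1% fuzz, so startup cost decides */
	printf("%d\n", fuzzy_cmp(0.0, 100.0, 10.0, 100.5, 1.01));
	return 0;
}
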
@@ -388,20 +388,20 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
{
case COSTS_EQUAL:
outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
- PATH_REQ_OUTER(old_path));
+ PATH_REQ_OUTER(old_path));
if (keyscmp == PATHKEYS_BETTER1)
{
if ((outercmp == BMS_EQUAL ||
outercmp == BMS_SUBSET1) &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
}
else if (keyscmp == PATHKEYS_BETTER2)
{
if ((outercmp == BMS_EQUAL ||
outercmp == BMS_SUBSET2) &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
}
else /* keyscmp == PATHKEYS_EQUAL */
{
@@ -425,19 +425,20 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
if (new_path->rows < old_path->rows)
remove_old = true; /* new dominates old */
else if (new_path->rows > old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
else if (compare_path_costs_fuzzily(new_path, old_path,
- 1.0000000001) == COSTS_BETTER1)
+ 1.0000000001) == COSTS_BETTER1)
remove_old = true; /* new dominates old */
else
- accept_new = false; /* old equals or dominates new */
+ accept_new = false; /* old equals or
+ * dominates new */
}
else if (outercmp == BMS_SUBSET1 &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
else if (outercmp == BMS_SUBSET2 &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
/* else different parameterizations, keep both */
}
break;
@@ -445,25 +446,26 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
if (keyscmp != PATHKEYS_BETTER2)
{
outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
- PATH_REQ_OUTER(old_path));
+ PATH_REQ_OUTER(old_path));
if ((outercmp == BMS_EQUAL ||
outercmp == BMS_SUBSET1) &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
}
break;
case COSTS_BETTER2:
if (keyscmp != PATHKEYS_BETTER1)
{
outercmp = bms_subset_compare(PATH_REQ_OUTER(new_path),
- PATH_REQ_OUTER(old_path));
+ PATH_REQ_OUTER(old_path));
if ((outercmp == BMS_EQUAL ||
outercmp == BMS_SUBSET2) &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
}
break;
case COSTS_DIFFERENT:
+
/*
* can't get here, but keep this case to keep compiler
* quiet
@@ -529,7 +531,7 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
* and have lower bounds for its costs.
*
* Note that we do not know the path's rowcount, since getting an estimate for
- * that is too expensive to do before prechecking. We assume here that paths
+ * that is too expensive to do before prechecking. We assume here that paths
* of a superset parameterization will generate fewer rows; if that holds,
* then paths with different parameterizations cannot dominate each other
* and so we can simply ignore existing paths of another parameterization.
@@ -561,9 +563,9 @@ add_path_precheck(RelOptInfo *parent_rel,
* pathkeys as well as both cost metrics. If we find one, we can
* reject the new path.
*
- * For speed, we make exact rather than fuzzy cost comparisons.
- * If an old path dominates the new path exactly on both costs, it
- * will surely do so fuzzily.
+ * For speed, we make exact rather than fuzzy cost comparisons. If an
+ * old path dominates the new path exactly on both costs, it will
+ * surely do so fuzzily.
*/
if (total_cost >= old_path->total_cost)
{
@@ -588,9 +590,9 @@ add_path_precheck(RelOptInfo *parent_rel,
else
{
/*
- * Since the pathlist is sorted by total_cost, we can stop
- * looking once we reach a path with a total_cost larger
- * than the new path's.
+ * Since the pathlist is sorted by total_cost, we can stop looking
+ * once we reach a path with a total_cost larger than the new
+ * path's.
*/
break;
}
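
The early exit reworded above relies on the pathlist's total_cost ordering: once an existing entry's total cost exceeds the candidate's lower bound, nothing later can dominate it. A toy sketch of that scan, with pathkey comparison elided and all numbers invented:

#include <stdio.h>

int
main(void)
{
	double		old_totals[] = {10.0, 25.0, 40.0, 90.0};	/* kept sorted */
	double		old_startups[] = {5.0, 2.0, 1.0, 0.5};
	double		new_total = 30.0, new_startup = 3.0;
	int			dominated = 0;

	for (int i = 0; i < 4; i++)
	{
		if (old_totals[i] > new_total)
			break;				/* later entries only cost more in total */
		if (old_startups[i] <= new_startup)
		{
			dominated = 1;		/* old path no worse on both cost metrics */
			break;
		}
	}
	printf("worth building the new path: %s\n", dominated ? "no" : "yes");
	return 0;
}
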
@@ -652,26 +654,26 @@ add_parameterized_path(RelOptInfo *parent_rel, Path *new_path)
{
if (outercmp != BMS_SUBSET2 &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
}
else if (costcmp > 0)
{
if (outercmp != BMS_SUBSET1 &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
}
else if (outercmp == BMS_SUBSET1 &&
new_path->rows <= old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
else if (outercmp == BMS_SUBSET2 &&
new_path->rows >= old_path->rows)
- accept_new = false; /* old dominates new */
+ accept_new = false; /* old dominates new */
else if (new_path->rows < old_path->rows)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
else
{
/* Same cost, rows, and param rels; arbitrarily keep old */
- accept_new = false; /* old equals or dominates new */
+ accept_new = false; /* old equals or dominates new */
}
}
@@ -697,8 +699,8 @@ add_parameterized_path(RelOptInfo *parent_rel, Path *new_path)
/*
* If we found an old path that dominates new_path, we can quit
- * scanning the list; we will not add new_path, and we assume
- * new_path cannot dominate any other elements of the list.
+ * scanning the list; we will not add new_path, and we assume new_path
+ * cannot dominate any other elements of the list.
*/
if (!accept_new)
break;
@@ -940,7 +942,7 @@ create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer)
* Compute rows and costs as sums of subplan rows and costs. We charge
* nothing extra for the Append itself, which perhaps is too optimistic,
* but since it doesn't do any selection or projection, it is a pretty
- * cheap node. If you change this, see also make_append().
+ * cheap node. If you change this, see also make_append().
*/
pathnode->path.rows = 0;
pathnode->path.startup_cost = 0;
@@ -1772,9 +1774,9 @@ create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
Relids
calc_nestloop_required_outer(Path *outer_path, Path *inner_path)
{
- Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
- Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
- Relids required_outer;
+ Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
+ Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
+ Relids required_outer;
/* inner_path can require rels from outer path, but not vice versa */
Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
@@ -1804,9 +1806,9 @@ calc_nestloop_required_outer(Path *outer_path, Path *inner_path)
Relids
calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path)
{
- Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
- Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
- Relids required_outer;
+ Relids outer_paramrels = PATH_REQ_OUTER(outer_path);
+ Relids inner_paramrels = PATH_REQ_OUTER(inner_path);
+ Relids required_outer;
/* neither path can require rels from the other */
Assert(!bms_overlap(outer_paramrels, inner_path->parent->relids));
@@ -1853,9 +1855,9 @@ create_nestloop_path(PlannerInfo *root,
/*
* If the inner path is parameterized by the outer, we must drop any
- * restrict_clauses that are due to be moved into the inner path. We
- * have to do this now, rather than postpone the work till createplan
- * time, because the restrict_clauses list can affect the size and cost
+ * restrict_clauses that are due to be moved into the inner path. We have
+ * to do this now, rather than postpone the work till createplan time,
+ * because the restrict_clauses list can affect the size and cost
* estimates for this path.
*/
if (bms_overlap(inner_req_outer, outer_path->parent->relids))
@@ -2033,7 +2035,7 @@ create_hashjoin_path(PlannerInfo *root,
* same parameterization level, ensuring that they all enforce the same set
* of join quals (and thus that that parameterization can be attributed to
* an append path built from such paths). Currently, only a few path types
- * are supported here, though more could be added at need. We return NULL
+ * are supported here, though more could be added at need. We return NULL
* if we can't reparameterize the given path.
*
* Note: we intentionally do not pass created paths to add_path(); it would
@@ -2058,32 +2060,33 @@ reparameterize_path(PlannerInfo *root, Path *path,
return create_seqscan_path(root, rel, required_outer);
case T_IndexScan:
case T_IndexOnlyScan:
- {
- IndexPath *ipath = (IndexPath *) path;
- IndexPath *newpath = makeNode(IndexPath);
+ {
+ IndexPath *ipath = (IndexPath *) path;
+ IndexPath *newpath = makeNode(IndexPath);
- /*
- * We can't use create_index_path directly, and would not want to
- * because it would re-compute the indexqual conditions which is
- * wasted effort. Instead we hack things a bit: flat-copy the
- * path node, revise its param_info, and redo the cost estimate.
- */
- memcpy(newpath, ipath, sizeof(IndexPath));
- newpath->path.param_info =
- get_baserel_parampathinfo(root, rel, required_outer);
- cost_index(newpath, root, loop_count);
- return (Path *) newpath;
- }
+ /*
+ * We can't use create_index_path directly, and would not want
+ * to because it would re-compute the indexqual conditions
+ * which is wasted effort. Instead we hack things a bit:
+ * flat-copy the path node, revise its param_info, and redo
+ * the cost estimate.
+ */
+ memcpy(newpath, ipath, sizeof(IndexPath));
+ newpath->path.param_info =
+ get_baserel_parampathinfo(root, rel, required_outer);
+ cost_index(newpath, root, loop_count);
+ return (Path *) newpath;
+ }
case T_BitmapHeapScan:
- {
- BitmapHeapPath *bpath = (BitmapHeapPath *) path;
+ {
+ BitmapHeapPath *bpath = (BitmapHeapPath *) path;
- return (Path *) create_bitmap_heap_path(root,
- rel,
- bpath->bitmapqual,
- required_outer,
- loop_count);
- }
+ return (Path *) create_bitmap_heap_path(root,
+ rel,
+ bpath->bitmapqual,
+ required_outer,
+ loop_count);
+ }
case T_SubqueryScan:
return create_subqueryscan_path(root, rel, path->pathkeys,
required_outer);
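
From the caller's side, the contract stated in the header comment looks like this (hypothetical fragment, assuming child_path and required_outer come from the surrounding join-planning code):

Path   *cpath;

/*
 * Ask for a copy of child_path carrying the given parameterization;
 * NULL means this path type cannot be reparameterized, not an error.
 */
cpath = reparameterize_path(root, child_path, required_outer, 1.0);
if (cpath == NULL)
    continue;           /* inside a loop over child paths: skip this one */
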
diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c
index 93f1c2cdfa..e05c8ddef1 100644
--- a/src/backend/optimizer/util/placeholder.c
+++ b/src/backend/optimizer/util/placeholder.c
@@ -61,7 +61,7 @@ make_placeholder_expr(PlannerInfo *root, Expr *expr, Relids phrels)
* We build PlaceHolderInfos only for PHVs that are still present in the
* simplified query passed to query_planner().
*
- * Note: this should only be called after query_planner() has started. Also,
+ * Note: this should only be called after query_planner() has started. Also,
* create_new_ph must not be TRUE after deconstruct_jointree begins, because
* make_outerjoininfo assumes that we already know about all placeholders.
*/
@@ -259,7 +259,7 @@ mark_placeholder_maybe_needed(PlannerInfo *root, PlaceHolderInfo *phinfo,
* but they aren't going to be needed where the outer PHV is referenced.
* Rather, they'll be needed where the outer PHV is evaluated. We can
* estimate that (conservatively) as the syntactic location of the PHV's
- * expression. Recurse to take care of any such PHVs.
+ * expression. Recurse to take care of any such PHVs.
*/
mark_placeholders_in_expr(root, (Node *) phinfo->ph_var->phexpr,
phinfo->ph_var->phrels);
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index aaf288a50e..38b81a05ff 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -341,7 +341,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
}
else
{
- double allvisfrac; /* dummy */
+ double allvisfrac; /* dummy */
estimate_rel_size(indexRelation, NULL,
&info->pages, &info->tuples, &allvisfrac);
@@ -403,12 +403,12 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* minimum size estimate of 10 pages. The idea here is to avoid
* assuming a newly-created table is really small, even if it
* currently is, because that may not be true once some data gets
- * loaded into it. Once a vacuum or analyze cycle has been done
+ * loaded into it. Once a vacuum or analyze cycle has been done
* on it, it's more reasonable to believe the size is somewhat
* stable.
*
* (Note that this is only an issue if the plan gets cached and
- * used again after the table has been filled. What we're trying
+ * used again after the table has been filled. What we're trying
* to avoid is using a nestloop-type plan on a table that has
* grown substantially since the plan was made. Normally,
* autovacuum/autoanalyze will occur once enough inserts have
@@ -965,7 +965,7 @@ build_index_tlist(PlannerInfo *root, IndexOptInfo *index,
if (indexkey < 0)
att_tup = SystemAttributeDefinition(indexkey,
- heapRelation->rd_rel->relhasoids);
+ heapRelation->rd_rel->relhasoids);
else
att_tup = heapRelation->rd_att->attrs[indexkey - 1];
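
The ten-page clamp justified by the estimate_rel_size() comment above looks roughly like this in the function body (lightly simplified sketch, not the verbatim code):

curpages = RelationGetNumberOfBlocks(rel);

/*
 * Don't trust a tiny size for a plain table that has never been
 * vacuumed or analyzed: a cached plan should not bet on emptiness.
 */
if (curpages < 10 &&
    rel->rd_rel->relpages == 0 &&
    !rel->rd_rel->relhassubclass &&
    rel->rd_rel->relkind != RELKIND_INDEX)
    curpages = 10;
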
diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c
index c3161c5293..65d191e5d3 100644
--- a/src/backend/optimizer/util/predtest.c
+++ b/src/backend/optimizer/util/predtest.c
@@ -1624,7 +1624,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
clause_op_infos = get_op_btree_interpretation(clause_op);
if (clause_op_infos)
pred_op_infos = get_op_btree_interpretation(pred_op);
- else /* no point in looking */
+ else /* no point in looking */
pred_op_infos = NIL;
foreach(lcp, pred_op_infos)
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index bfdd9ff222..8d4ab03d20 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -840,12 +840,12 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
outer_and_req = bms_union(outer_path->parent->relids,
PATH_REQ_OUTER(outer_path));
else
- outer_and_req = NULL; /* outer path does not accept parameters */
+ outer_and_req = NULL; /* outer path does not accept parameters */
if (inner_path->param_info)
inner_and_req = bms_union(inner_path->parent->relids,
PATH_REQ_OUTER(inner_path));
else
- inner_and_req = NULL; /* inner path does not accept parameters */
+ inner_and_req = NULL; /* inner path does not accept parameters */
pclauses = NIL;
foreach(lc, joinrel->joininfo)
@@ -909,7 +909,7 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
*restrict_clauses);
/*
- * And now we can build the ParamPathInfo. No point in saving the
+ * And now we can build the ParamPathInfo. No point in saving the
* input-pair-dependent clause list, though.
*
* Note: in GEQO mode, we'll be called in a temporary memory context, but
@@ -929,8 +929,8 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel,
* Get the ParamPathInfo for a parameterized path for an append relation.
*
* For an append relation, the rowcount estimate will just be the sum of
- * the estimates for its children. However, we still need a ParamPathInfo
- * to flag the fact that the path requires parameters. So this just creates
+ * the estimates for its children. However, we still need a ParamPathInfo
+ * to flag the fact that the path requires parameters. So this just creates
* a suitable struct with zero ppi_rows (and no ppi_clauses either, since
* the Append node isn't responsible for checking quals).
*/
diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c
index 2bffb0a651..9bc90c2531 100644
--- a/src/backend/optimizer/util/var.c
+++ b/src/backend/optimizer/util/var.c
@@ -218,7 +218,7 @@ pull_varattnos_walker(Node *node, pull_varattnos_context *context)
if (var->varno == context->varno && var->varlevelsup == 0)
context->varattnos =
bms_add_member(context->varattnos,
- var->varattno - FirstLowInvalidHeapAttributeNumber);
+ var->varattno - FirstLowInvalidHeapAttributeNumber);
return false;
}
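
Background for the offset in the pull_varattnos_walker() hunk above: bitmapset members must be non-negative, while system columns carry negative attnos, so every attno is shifted by FirstLowInvalidHeapAttributeNumber (a negative constant from access/sysattr.h). An illustrative fragment:

Bitmapset  *attrs = NULL;

/* ctid has attno -1; the offset maps it to a small positive member */
attrs = bms_add_member(attrs,
                       SelfItemPointerAttributeNumber
                       - FirstLowInvalidHeapAttributeNumber);
/* an ordinary first user column, attno 1 */
attrs = bms_add_member(attrs, 1 - FirstLowInvalidHeapAttributeNumber);
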
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 15d848ff4f..bfd3ab941a 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -181,7 +181,7 @@ transformTopLevelStmt(ParseState *pstate, Node *parseTree)
/* If it's a set-operation tree, drill down to leftmost SelectStmt */
while (stmt && stmt->op != SETOP_NONE)
stmt = stmt->larg;
- Assert(stmt && IsA(stmt, SelectStmt) && stmt->larg == NULL);
+ Assert(stmt && IsA(stmt, SelectStmt) && stmt->larg == NULL);
if (stmt->intoClause)
{
@@ -950,7 +950,7 @@ transformSelectStmt(ParseState *pstate, SelectStmt *stmt)
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("SELECT ... INTO is not allowed here"),
parser_errposition(pstate,
- exprLocation((Node *) stmt->intoClause))));
+ exprLocation((Node *) stmt->intoClause))));
/* make FOR UPDATE/FOR SHARE info available to addRangeTableEntry */
pstate->p_locking_clause = stmt->lockingClause;
@@ -1364,12 +1364,12 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("SELECT ... INTO is not allowed here"),
parser_errposition(pstate,
- exprLocation((Node *) leftmostSelect->intoClause))));
+ exprLocation((Node *) leftmostSelect->intoClause))));
/*
- * We need to extract ORDER BY and other top-level clauses here and
- * not let transformSetOperationTree() see them --- else it'll just
- * recurse right back here!
+ * We need to extract ORDER BY and other top-level clauses here and not
+ * let transformSetOperationTree() see them --- else it'll just recurse
+ * right back here!
*/
sortClause = stmt->sortClause;
limitOffset = stmt->limitOffset;
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 3102f2089e..a1caf74fc5 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -1302,26 +1302,26 @@ coerce_to_common_type(ParseState *pstate, Node *node,
*
* 1) All arguments declared ANYELEMENT must have the same datatype.
* 2) All arguments declared ANYARRAY must have the same datatype,
- * which must be a varlena array type.
+ * which must be a varlena array type.
* 3) All arguments declared ANYRANGE must have the same datatype,
- * which must be a range type.
+ * which must be a range type.
* 4) If there are arguments of both ANYELEMENT and ANYARRAY, make sure the
- * actual ANYELEMENT datatype is in fact the element type for the actual
- * ANYARRAY datatype.
+ * actual ANYELEMENT datatype is in fact the element type for the actual
+ * ANYARRAY datatype.
* 5) Similarly, if there are arguments of both ANYELEMENT and ANYRANGE,
- * make sure the actual ANYELEMENT datatype is in fact the subtype for
- * the actual ANYRANGE type.
+ * make sure the actual ANYELEMENT datatype is in fact the subtype for
+ * the actual ANYRANGE type.
* 6) ANYENUM is treated the same as ANYELEMENT except that if it is used
- * (alone or in combination with plain ANYELEMENT), we add the extra
- * condition that the ANYELEMENT type must be an enum.
+ * (alone or in combination with plain ANYELEMENT), we add the extra
+ * condition that the ANYELEMENT type must be an enum.
* 7) ANYNONARRAY is treated the same as ANYELEMENT except that if it is used,
- * we add the extra condition that the ANYELEMENT type must not be an array.
- * (This is a no-op if used in combination with ANYARRAY or ANYENUM, but
- * is an extra restriction if not.)
+ * we add the extra condition that the ANYELEMENT type must not be an array.
+ * (This is a no-op if used in combination with ANYARRAY or ANYENUM, but
+ * is an extra restriction if not.)
*
* Domains over arrays match ANYARRAY, and are immediately flattened to their
* base type. (Thus, for example, we will consider it a match if one ANYARRAY
- * argument is a domain over int4[] while another one is just int4[].) Also
+ * argument is a domain over int4[] while another one is just int4[].) Also
* notice that such a domain does *not* match ANYNONARRAY.
*
* Similarly, domains over ranges match ANYRANGE, and are immediately
@@ -1475,7 +1475,7 @@ check_generic_type_consistency(Oid *actual_arg_types,
*
* If any polymorphic pseudotype is used in a function's arguments or
* return type, we make sure the actual data types are consistent with
- * each other. The argument consistency rules are shown above for
+ * each other. The argument consistency rules are shown above for
* check_generic_type_consistency().
*
* If we have UNKNOWN input (ie, an untyped literal) for any polymorphic
@@ -1487,35 +1487,35 @@ check_generic_type_consistency(Oid *actual_arg_types,
* if it is declared as a polymorphic type:
*
* 1) If return type is ANYARRAY, and any argument is ANYARRAY, use the
- * argument's actual type as the function's return type.
+ * argument's actual type as the function's return type.
* 2) Similarly, if return type is ANYRANGE, and any argument is ANYRANGE,
- * use the argument's actual type as the function's return type.
+ * use the argument's actual type as the function's return type.
* 3) If return type is ANYARRAY, no argument is ANYARRAY, but any argument is
- * ANYELEMENT, use the actual type of the argument to determine the
- * function's return type, i.e. the element type's corresponding array
- * type. (Note: similar behavior does not exist for ANYRANGE, because it's
- * impossible to determine the range type from the subtype alone.)
+ * ANYELEMENT, use the actual type of the argument to determine the
+ * function's return type, i.e. the element type's corresponding array
+ * type. (Note: similar behavior does not exist for ANYRANGE, because it's
+ * impossible to determine the range type from the subtype alone.)
* 4) If return type is ANYARRAY, but no argument is ANYARRAY or ANYELEMENT,
- * generate an error. Similarly, if return type is ANYRANGE, but no
- * argument is ANYRANGE, generate an error. (These conditions are
- * prevented by CREATE FUNCTION and therefore are not expected here.)
+ * generate an error. Similarly, if return type is ANYRANGE, but no
+ * argument is ANYRANGE, generate an error. (These conditions are
+ * prevented by CREATE FUNCTION and therefore are not expected here.)
* 5) If return type is ANYELEMENT, and any argument is ANYELEMENT, use the
- * argument's actual type as the function's return type.
+ * argument's actual type as the function's return type.
* 6) If return type is ANYELEMENT, no argument is ANYELEMENT, but any argument
- * is ANYARRAY or ANYRANGE, use the actual type of the argument to determine
- * the function's return type, i.e. the array type's corresponding element
- * type or the range type's corresponding subtype (or both, in which case
- * they must match).
+ * is ANYARRAY or ANYRANGE, use the actual type of the argument to determine
+ * the function's return type, i.e. the array type's corresponding element
+ * type or the range type's corresponding subtype (or both, in which case
+ * they must match).
* 7) If return type is ANYELEMENT, no argument is ANYELEMENT, ANYARRAY, or
- * ANYRANGE, generate an error. (This condition is prevented by CREATE
- * FUNCTION and therefore is not expected here.)
+ * ANYRANGE, generate an error. (This condition is prevented by CREATE
+ * FUNCTION and therefore is not expected here.)
* 8) ANYENUM is treated the same as ANYELEMENT except that if it is used
- * (alone or in combination with plain ANYELEMENT), we add the extra
- * condition that the ANYELEMENT type must be an enum.
+ * (alone or in combination with plain ANYELEMENT), we add the extra
+ * condition that the ANYELEMENT type must be an enum.
* 9) ANYNONARRAY is treated the same as ANYELEMENT except that if it is used,
- * we add the extra condition that the ANYELEMENT type must not be an array.
- * (This is a no-op if used in combination with ANYARRAY or ANYENUM, but
- * is an extra restriction if not.)
+ * we add the extra condition that the ANYELEMENT type must not be an array.
+ * (This is a no-op if used in combination with ANYARRAY or ANYENUM, but
+ * is an extra restriction if not.)
*
* Domains over arrays or ranges match ANYARRAY or ANYRANGE arguments,
* respectively, and are immediately flattened to their base type. (In
@@ -1524,14 +1524,14 @@ check_generic_type_consistency(Oid *actual_arg_types,
*
* When allow_poly is false, we are not expecting any of the actual_arg_types
* to be polymorphic, and we should not return a polymorphic result type
- * either. When allow_poly is true, it is okay to have polymorphic "actual"
+ * either. When allow_poly is true, it is okay to have polymorphic "actual"
* arg types, and we can return ANYARRAY, ANYRANGE, or ANYELEMENT as the
- * result. (This case is currently used only to check compatibility of an
+ * result. (This case is currently used only to check compatibility of an
* aggregate's declaration with the underlying transfn.)
*
* A special case is that we could see ANYARRAY as an actual_arg_type even
* when allow_poly is false (this is possible only because pg_statistic has
- * columns shown as anyarray in the catalogs). We allow this to match a
+ * columns shown as anyarray in the catalogs). We allow this to match a
* declared ANYARRAY argument, but only if there is no ANYELEMENT argument
* or result (since we can't determine a specific element type to match to
* ANYELEMENT). Note this means that functions taking ANYARRAY had better
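
The array/element mapping behind rules 1 and 3 above reduces to the lsyscache helpers; a minimal sketch with a hypothetical helper name (the real logic lives in enforce_generic_type_consistency()):

#include "postgres.h"
#include "utils/lsyscache.h"

/* Rule 3 in miniature: an ANYELEMENT argument pins the element type,
 * and an ANYARRAY result resolves to its corresponding array type. */
static Oid
resolve_anyarray_result(Oid actual_elem_type)
{
    Oid     array_type = get_array_type(actual_elem_type);

    if (!OidIsValid(array_type))
        elog(ERROR, "could not find array type for data type %u",
             actual_elem_type);
    return array_type;      /* e.g. int4 resolves to int4[] */
}
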
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 973265bcb0..bb1ad9af96 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -844,7 +844,7 @@ transformAExprOp(ParseState *pstate, A_Expr *a)
list_length(a->name) == 1 &&
strcmp(strVal(linitial(a->name)), "=") == 0 &&
(exprIsNullConstant(lexpr) || exprIsNullConstant(rexpr)) &&
- (!IsA(lexpr, CaseTestExpr) && !IsA(rexpr, CaseTestExpr)))
+ (!IsA(lexpr, CaseTestExpr) && !IsA(rexpr, CaseTestExpr)))
{
NullTest *n = makeNode(NullTest);
@@ -2066,9 +2066,9 @@ transformWholeRowRef(ParseState *pstate, RangeTblEntry *rte, int location)
vnum = RTERangeTablePosn(pstate, rte, &sublevels_up);
/*
- * Build the appropriate referencing node. Note that if the RTE is a
+ * Build the appropriate referencing node. Note that if the RTE is a
* function returning scalar, we create just a plain reference to the
- * function value, not a composite containing a single column. This is
+ * function value, not a composite containing a single column. This is
* pretty inconsistent at first sight, but it's what we've done
* historically. One argument for it is that "rel" and "rel.*" mean the
* same thing for composite relations, so why not for scalar functions...
@@ -2268,8 +2268,8 @@ make_row_comparison_op(ParseState *pstate, List *opname,
opinfo_lists[i] = get_op_btree_interpretation(opno);
/*
- * convert strategy numbers into a Bitmapset to make the
- * intersection calculation easy.
+ * convert strategy numbers into a Bitmapset to make the intersection
+ * calculation easy.
*/
this_strats = NULL;
foreach(j, opinfo_lists[i])
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index e583fae849..b051707d7e 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -787,9 +787,9 @@ func_select_candidate(int nargs,
* Having completed this examination, remove candidates that accept the
* wrong category at any unknown position. Also, if at least one
* candidate accepted a preferred type at a position, remove candidates
- * that accept non-preferred types. If just one candidate remains,
- * return that one. However, if this rule turns out to reject all
- * candidates, keep them all instead.
+ * that accept non-preferred types. If just one candidate remains, return
+ * that one. However, if this rule turns out to reject all candidates,
+ * keep them all instead.
*/
resolved_unknowns = false;
for (i = 0; i < nargs; i++)
@@ -914,7 +914,7 @@ func_select_candidate(int nargs,
* type, and see if that gives us a unique match. If so, use that match.
*
* NOTE: for a binary operator with one unknown and one non-unknown input,
- * we already tried this heuristic in binary_oper_exact(). However, that
+ * we already tried this heuristic in binary_oper_exact(). However, that
* code only finds exact matches, whereas here we will handle matches that
* involve coercion, polymorphic type resolution, etc.
*/
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 2a26b0af1d..30b307b191 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -274,10 +274,11 @@ searchRangeTable(ParseState *pstate, RangeVar *relation)
* relation.
*
* NB: It's not critical that RangeVarGetRelid return the correct answer
- * here in the face of concurrent DDL. If it doesn't, the worst case
- * scenario is a less-clear error message. Also, the tables involved in
+ * here in the face of concurrent DDL. If it doesn't, the worst case
+ * scenario is a less-clear error message. Also, the tables involved in
* the query are already locked, which reduces the number of cases in
- * which surprising behavior can occur. So we do the name lookup unlocked.
+ * which surprising behavior can occur. So we do the name lookup
+ * unlocked.
*/
if (!relation->schemaname)
cte = scanNameSpaceForCTE(pstate, refname, &ctelevelsup);
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index a512d18d01..3850a3bc64 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -1622,7 +1622,7 @@ FigureColnameInternal(Node *node, char **name)
case EXPR_SUBLINK:
{
/* Get column name of the subquery's single target */
- SubLink *sublink = (SubLink *) node;
+ SubLink *sublink = (SubLink *) node;
Query *query = (Query *) sublink->subselect;
/*
@@ -1644,7 +1644,7 @@ FigureColnameInternal(Node *node, char **name)
}
}
break;
- /* As with other operator-like nodes, these have no names */
+ /* As with other operator-like nodes, these have no names */
case ALL_SUBLINK:
case ANY_SUBLINK:
case ROWCOMPARE_SUBLINK:
diff --git a/src/backend/parser/parse_type.c b/src/backend/parser/parse_type.c
index 531495433d..871a7d1ce3 100644
--- a/src/backend/parser/parse_type.c
+++ b/src/backend/parser/parse_type.c
@@ -111,8 +111,8 @@ LookupTypeName(ParseState *pstate, const TypeName *typeName,
/*
* Look up the field.
*
- * XXX: As no lock is taken here, this might fail in the presence
- * of concurrent DDL. But taking a lock would carry a performance
+ * XXX: As no lock is taken here, this might fail in the presence of
+ * concurrent DDL. But taking a lock would carry a performance
* penalty and would also require a permissions check.
*/
relid = RangeVarGetRelid(rel, NoLock, false);
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 7c315f6c87..8810d0dbad 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -103,7 +103,7 @@ static void transformColumnDefinition(CreateStmtContext *cxt,
static void transformTableConstraint(CreateStmtContext *cxt,
Constraint *constraint);
static void transformTableLikeClause(CreateStmtContext *cxt,
- TableLikeClause *table_like_clause);
+ TableLikeClause *table_like_clause);
static void transformOfType(CreateStmtContext *cxt,
TypeName *ofTypename);
static char *chooseIndexName(const RangeVar *relation, IndexStmt *index_stmt);
@@ -309,7 +309,7 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
column->typeName->typeOid = INT2OID;
}
else if (strcmp(typname, "serial") == 0 ||
- strcmp(typname, "serial4") == 0)
+ strcmp(typname, "serial4") == 0)
{
is_serial = true;
column->typeName->names = NIL;
@@ -554,13 +554,13 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
}
/*
- * Generate ALTER FOREIGN TABLE ALTER COLUMN statement which adds
+ * Generate ALTER FOREIGN TABLE ALTER COLUMN statement which adds
* per-column foreign data wrapper options for this column.
*/
if (column->fdwoptions != NIL)
{
AlterTableStmt *stmt;
- AlterTableCmd *cmd;
+ AlterTableCmd *cmd;
cmd = makeNode(AlterTableCmd);
cmd->subtype = AT_AlterColumnGenericOptions;
@@ -667,7 +667,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
else
{
aclresult = pg_class_aclcheck(RelationGetRelid(relation), GetUserId(),
- ACL_SELECT);
+ ACL_SELECT);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_CLASS,
RelationGetRelationName(relation));
@@ -803,7 +803,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
/* Copy comment on constraint */
if ((table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS) &&
(comment = GetComment(get_relation_constraint_oid(RelationGetRelid(relation),
- n->conname, false),
+ n->conname, false),
ConstraintRelationId,
0)) != NULL)
{
@@ -2305,7 +2305,7 @@ transformAlterTableStmt(AlterTableStmt *stmt, const char *queryString)
/* this message is consistent with relation_openrv */
ereport(NOTICE,
(errmsg("relation \"%s\" does not exist, skipping",
- stmt->relation->relname)));
+ stmt->relation->relname)));
return NIL;
}
diff --git a/src/backend/port/darwin/system.c b/src/backend/port/darwin/system.c
index e9cd136bec..d571f26ef8 100644
--- a/src/backend/port/darwin/system.c
+++ b/src/backend/port/darwin/system.c
@@ -2,7 +2,7 @@
* src/backend/port/darwin/system.c
*
* only needed in OS X 10.1 and possibly early 10.2 releases */
-#include <AvailabilityMacros.h> /* pgrminclude ignore */
+#include <AvailabilityMacros.h> /* pgrminclude ignore */
#if MAC_OS_X_VERSION_MAX_ALLOWED <= MAC_OS_X_VERSION_10_2 || !defined(MAC_OS_X_VERSION_10_2)
/*
diff --git a/src/backend/port/dynloader/aix.h b/src/backend/port/dynloader/aix.h
index e5afaead8e..ba5590f598 100644
--- a/src/backend/port/dynloader/aix.h
+++ b/src/backend/port/dynloader/aix.h
@@ -16,7 +16,7 @@
#define PORT_PROTOS_H
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* In some older systems, the RTLD_NOW flag isn't defined and the mode
diff --git a/src/backend/port/dynloader/cygwin.h b/src/backend/port/dynloader/cygwin.h
index 81ef27f17c..06f75d4c42 100644
--- a/src/backend/port/dynloader/cygwin.h
+++ b/src/backend/port/dynloader/cygwin.h
@@ -13,7 +13,7 @@
#define PORT_PROTOS_H
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* In some older systems, the RTLD_NOW flag isn't defined and the mode
diff --git a/src/backend/port/dynloader/freebsd.h b/src/backend/port/dynloader/freebsd.h
index dbd466d9f0..bfad4deb01 100644
--- a/src/backend/port/dynloader/freebsd.h
+++ b/src/backend/port/dynloader/freebsd.h
@@ -18,7 +18,7 @@
#include <link.h>
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* Dynamic Loader on NetBSD 1.0.
diff --git a/src/backend/port/dynloader/irix.h b/src/backend/port/dynloader/irix.h
index ea8fc7ca15..2db03133a0 100644
--- a/src/backend/port/dynloader/irix.h
+++ b/src/backend/port/dynloader/irix.h
@@ -15,7 +15,7 @@
#define PORT_PROTOS_H
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* Dynamic Loader on Irix.
diff --git a/src/backend/port/dynloader/linux.h b/src/backend/port/dynloader/linux.h
index f66012d37e..a6a42a7c74 100644
--- a/src/backend/port/dynloader/linux.h
+++ b/src/backend/port/dynloader/linux.h
@@ -14,7 +14,7 @@
#ifndef PORT_PROTOS_H
#define PORT_PROTOS_H
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
#ifdef HAVE_DLOPEN
#include <dlfcn.h>
#endif
diff --git a/src/backend/port/dynloader/netbsd.h b/src/backend/port/dynloader/netbsd.h
index fa5bb9a283..eb55194a2f 100644
--- a/src/backend/port/dynloader/netbsd.h
+++ b/src/backend/port/dynloader/netbsd.h
@@ -19,7 +19,7 @@
#include <link.h>
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* Dynamic Loader on NetBSD 1.0.
diff --git a/src/backend/port/dynloader/openbsd.h b/src/backend/port/dynloader/openbsd.h
index 15fbe20fe4..0e475c65e2 100644
--- a/src/backend/port/dynloader/openbsd.h
+++ b/src/backend/port/dynloader/openbsd.h
@@ -18,7 +18,7 @@
#include <link.h>
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* Dynamic Loader on NetBSD 1.0.
diff --git a/src/backend/port/dynloader/osf.h b/src/backend/port/dynloader/osf.h
index 469d751510..87feddf8ad 100644
--- a/src/backend/port/dynloader/osf.h
+++ b/src/backend/port/dynloader/osf.h
@@ -16,7 +16,7 @@
#define PORT_PROTOS_H
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* Dynamic Loader on Alpha OSF/1.x
diff --git a/src/backend/port/dynloader/sco.h b/src/backend/port/dynloader/sco.h
index c15d17c32e..644a0c954c 100644
--- a/src/backend/port/dynloader/sco.h
+++ b/src/backend/port/dynloader/sco.h
@@ -15,7 +15,7 @@
#define PORT_PROTOS_H
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* Dynamic Loader on SCO 3.2v5.0.2
diff --git a/src/backend/port/dynloader/solaris.h b/src/backend/port/dynloader/solaris.h
index 518b6b93ef..8b874f200f 100644
--- a/src/backend/port/dynloader/solaris.h
+++ b/src/backend/port/dynloader/solaris.h
@@ -15,7 +15,7 @@
#define PORT_PROTOS_H
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* In some older systems, the RTLD_NOW flag isn't defined and the mode
diff --git a/src/backend/port/dynloader/unixware.h b/src/backend/port/dynloader/unixware.h
index 19141ca8d0..9d87a7c614 100644
--- a/src/backend/port/dynloader/unixware.h
+++ b/src/backend/port/dynloader/unixware.h
@@ -18,7 +18,7 @@
#define PORT_PROTOS_H
#include <dlfcn.h>
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
/*
* Dynamic Loader on UnixWare.
diff --git a/src/backend/port/dynloader/win32.h b/src/backend/port/dynloader/win32.h
index 850c07bc3c..f689dc8ff9 100644
--- a/src/backend/port/dynloader/win32.h
+++ b/src/backend/port/dynloader/win32.h
@@ -4,7 +4,7 @@
#ifndef PORT_PROTOS_H
#define PORT_PROTOS_H
-#include "utils/dynamic_loader.h" /* pgrminclude ignore */
+#include "utils/dynamic_loader.h" /* pgrminclude ignore */
#define pg_dlopen(f) dlopen((f), 1)
#define pg_dlsym dlsym
diff --git a/src/backend/port/unix_latch.c b/src/backend/port/unix_latch.c
index e64282c210..65b2fc56e0 100644
--- a/src/backend/port/unix_latch.c
+++ b/src/backend/port/unix_latch.c
@@ -183,6 +183,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
{
int result = 0;
int rc;
+
#ifdef HAVE_POLL
struct pollfd pfds[3];
int nfds;
@@ -235,14 +236,15 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
*
* Note: we assume that the kernel calls involved in drainSelfPipe()
* and SetLatch() will provide adequate synchronization on machines
- * with weak memory ordering, so that we cannot miss seeing is_set
- * if the signal byte is already in the pipe when we drain it.
+ * with weak memory ordering, so that we cannot miss seeing is_set if
+ * the signal byte is already in the pipe when we drain it.
*/
drainSelfPipe();
if ((wakeEvents & WL_LATCH_SET) && latch->is_set)
{
result |= WL_LATCH_SET;
+
/*
* Leave loop immediately, avoid blocking again. We don't attempt
* to report any other events that might also be satisfied.
@@ -309,13 +311,14 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
{
result |= WL_SOCKET_WRITEABLE;
}
+
/*
* We expect a POLLHUP when the remote end is closed, but because we
* don't expect the pipe to become readable or to have any errors
* either, treat those as postmaster death, too.
*/
if ((wakeEvents & WL_POSTMASTER_DEATH) &&
- (pfds[nfds - 1].revents & (POLLHUP | POLLIN | POLLERR | POLLNVAL)))
+ (pfds[nfds - 1].revents & (POLLHUP | POLLIN | POLLERR | POLLNVAL)))
{
/*
* According to the select(2) man page on Linux, select(2) may
@@ -329,8 +332,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
if (!PostmasterIsAlive())
result |= WL_POSTMASTER_DEATH;
}
-
-#else /* !HAVE_POLL */
+#else /* !HAVE_POLL */
FD_ZERO(&input_mask);
FD_ZERO(&output_mask);
@@ -387,7 +389,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
result |= WL_SOCKET_WRITEABLE;
}
if ((wakeEvents & WL_POSTMASTER_DEATH) &&
- FD_ISSET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask))
+ FD_ISSET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask))
{
/*
* According to the select(2) man page on Linux, select(2) may
@@ -401,7 +403,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
if (!PostmasterIsAlive())
result |= WL_POSTMASTER_DEATH;
}
-#endif /* HAVE_POLL */
+#endif /* HAVE_POLL */
} while (result == 0);
waiting = false;
@@ -423,9 +425,9 @@ SetLatch(volatile Latch *latch)
pid_t owner_pid;
/*
- * XXX there really ought to be a memory barrier operation right here,
- * to ensure that any flag variables we might have changed get flushed
- * to main memory before we check/set is_set. Without that, we have to
+ * XXX there really ought to be a memory barrier operation right here, to
+ * ensure that any flag variables we might have changed get flushed to
+ * main memory before we check/set is_set. Without that, we have to
* require that callers provide their own synchronization for machines
* with weak memory ordering (see latch.h).
*/
@@ -450,12 +452,12 @@ SetLatch(volatile Latch *latch)
* Postgres; and PG database processes should handle excess SIGUSR1
* interrupts without a problem anyhow.
*
- * Another sort of race condition that's possible here is for a new process
- * to own the latch immediately after we look, so we don't signal it.
- * This is okay so long as all callers of ResetLatch/WaitLatch follow the
- * standard coding convention of waiting at the bottom of their loops,
- * not the top, so that they'll correctly process latch-setting events that
- * happen before they enter the loop.
+ * Another sort of race condition that's possible here is for a new
+ * process to own the latch immediately after we look, so we don't signal
+ * it. This is okay so long as all callers of ResetLatch/WaitLatch follow
+ * the standard coding convention of waiting at the bottom of their loops,
+ * not the top, so that they'll correctly process latch-setting events
+ * that happen before they enter the loop.
*/
owner_pid = latch->owner_pid;
if (owner_pid == 0)
@@ -484,7 +486,7 @@ ResetLatch(volatile Latch *latch)
/*
* XXX there really ought to be a memory barrier operation right here, to
* ensure that the write to is_set gets flushed to main memory before we
- * examine any flag variables. Otherwise a concurrent SetLatch might
+ * examine any flag variables. Otherwise a concurrent SetLatch might
* falsely conclude that it needn't signal us, even though we have missed
* seeing some flag updates that SetLatch was supposed to inform us of.
* For the moment, callers must supply their own synchronization of flag
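
The coding convention these comments rely on is the standard latch loop documented in latch.h: reset at the top, recheck the wakeup condition, wait at the bottom, so a SetLatch() arriving at any point in between is never lost. A minimal sketch (work_pending() and do_work() are hypothetical stand-ins for the caller's real logic):

for (;;)
{
    ResetLatch(&MyProc->procLatch);

    if (work_pending())
        do_work();

    (void) WaitLatch(&MyProc->procLatch,
                     WL_LATCH_SET | WL_POSTMASTER_DEATH,
                     -1L);      /* timeout unused without WL_TIMEOUT */
}
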
diff --git a/src/backend/port/win32/mingwcompat.c b/src/backend/port/win32/mingwcompat.c
index 5d91c62bc1..0978e8cf46 100644
--- a/src/backend/port/win32/mingwcompat.c
+++ b/src/backend/port/win32/mingwcompat.c
@@ -42,8 +42,8 @@ LoadKernel32()
kernel32 = LoadLibraryEx("kernel32.dll", NULL, 0);
if (kernel32 == NULL)
ereport(FATAL,
- (errmsg_internal("could not load kernel32.dll: error code %lu",
- GetLastError())));
+ (errmsg_internal("could not load kernel32.dll: error code %lu",
+ GetLastError())));
}
diff --git a/src/backend/port/win32/socket.c b/src/backend/port/win32/socket.c
index a7215cad6e..d9222231a1 100644
--- a/src/backend/port/win32/socket.c
+++ b/src/backend/port/win32/socket.c
@@ -151,7 +151,7 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout)
(errmsg_internal("could not reset socket waiting event: error code %lu", GetLastError())));
/*
- * Track whether socket is UDP or not. (NB: most likely, this is both
+ * Track whether socket is UDP or not. (NB: most likely, this is both
* useless and wrong; there is no reason to think that the behavior of
* WSAEventSelect is different for TCP and UDP.)
*/
@@ -160,8 +160,9 @@ pgwin32_waitforsinglesocket(SOCKET s, int what, int timeout)
current_socket = s;
/*
- * Attach event to socket. NOTE: we must detach it again before returning,
- * since other bits of code may try to attach other events to the socket.
+ * Attach event to socket. NOTE: we must detach it again before
+ * returning, since other bits of code may try to attach other events to
+ * the socket.
*/
if (WSAEventSelect(s, waitevent, what) != 0)
{
diff --git a/src/backend/port/win32/timer.c b/src/backend/port/win32/timer.c
index 770275acef..232317369d 100644
--- a/src/backend/port/win32/timer.c
+++ b/src/backend/port/win32/timer.c
@@ -97,8 +97,8 @@ setitimer(int which, const struct itimerval * value, struct itimerval * ovalue)
timerCommArea.event = CreateEvent(NULL, TRUE, FALSE, NULL);
if (timerCommArea.event == NULL)
ereport(FATAL,
- (errmsg_internal("could not create timer event: error code %lu",
- GetLastError())));
+ (errmsg_internal("could not create timer event: error code %lu",
+ GetLastError())));
MemSet(&timerCommArea.value, 0, sizeof(struct itimerval));
@@ -107,8 +107,8 @@ setitimer(int which, const struct itimerval * value, struct itimerval * ovalue)
timerThreadHandle = CreateThread(NULL, 0, pg_timer_thread, NULL, 0, NULL);
if (timerThreadHandle == INVALID_HANDLE_VALUE)
ereport(FATAL,
- (errmsg_internal("could not create timer thread: error code %lu",
- GetLastError())));
+ (errmsg_internal("could not create timer thread: error code %lu",
+ GetLastError())));
}
/* Request the timer thread to change settings */
diff --git a/src/backend/port/win32_latch.c b/src/backend/port/win32_latch.c
index 05b34269b5..eb46dcad1b 100644
--- a/src/backend/port/win32_latch.c
+++ b/src/backend/port/win32_latch.c
@@ -173,6 +173,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
if ((wakeEvents & WL_LATCH_SET) && latch->is_set)
{
result |= WL_LATCH_SET;
+
/*
* Leave loop immediately, avoid blocking again. We don't attempt
* to report any other events that might also be satisfied.
@@ -199,7 +200,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
/* Latch is set, we'll handle that on next iteration of loop */
}
else if ((wakeEvents & (WL_SOCKET_READABLE | WL_SOCKET_WRITEABLE)) &&
- rc == WAIT_OBJECT_0 + 2) /* socket is at event slot 2 */
+ rc == WAIT_OBJECT_0 + 2) /* socket is at event slot 2 */
{
WSANETWORKEVENTS resEvents;
@@ -222,7 +223,7 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock,
rc == WAIT_OBJECT_0 + pmdeath_eventno)
{
/*
- * Postmaster apparently died. Since the consequences of falsely
+ * Postmaster apparently died. Since the consequences of falsely
* returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
* take the trouble to positively verify this with
* PostmasterIsAlive(), even though there is no known reason to
diff --git a/src/backend/port/win32_sema.c b/src/backend/port/win32_sema.c
index a093e9f163..ef1a4c31bd 100644
--- a/src/backend/port/win32_sema.c
+++ b/src/backend/port/win32_sema.c
@@ -162,7 +162,7 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
if (errno != 0)
ereport(FATAL,
- (errmsg("could not lock semaphore: error code %lu", GetLastError())));
+ (errmsg("could not lock semaphore: error code %lu", GetLastError())));
}
/*
@@ -204,7 +204,7 @@ PGSemaphoreTryLock(PGSemaphore sema)
/* Otherwise we are in trouble */
ereport(FATAL,
- (errmsg("could not try-lock semaphore: error code %lu", GetLastError())));
+ (errmsg("could not try-lock semaphore: error code %lu", GetLastError())));
/* keep compiler quiet */
return false;
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 9ff19b7a48..1cfac9e80b 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -543,9 +543,9 @@ AutoVacLauncherMain(int argc, char *argv[])
SetConfigOption("statement_timeout", "0", PGC_SUSET, PGC_S_OVERRIDE);
/*
- * Force default_transaction_isolation to READ COMMITTED. We don't
- * want to pay the overhead of serializable mode, nor add any risk
- * of causing deadlocks or delaying other transactions.
+ * Force default_transaction_isolation to READ COMMITTED. We don't want
+ * to pay the overhead of serializable mode, nor add any risk of causing
+ * deadlocks or delaying other transactions.
*/
SetConfigOption("default_transaction_isolation", "read committed",
PGC_SUSET, PGC_S_OVERRIDE);
@@ -1553,9 +1553,9 @@ AutoVacWorkerMain(int argc, char *argv[])
SetConfigOption("statement_timeout", "0", PGC_SUSET, PGC_S_OVERRIDE);
/*
- * Force default_transaction_isolation to READ COMMITTED. We don't
- * want to pay the overhead of serializable mode, nor add any risk
- * of causing deadlocks or delaying other transactions.
+ * Force default_transaction_isolation to READ COMMITTED. We don't want
+ * to pay the overhead of serializable mode, nor add any risk of causing
+ * deadlocks or delaying other transactions.
*/
SetConfigOption("default_transaction_isolation", "read committed",
PGC_SUSET, PGC_S_OVERRIDE);
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
index 32c254c842..5f93fccbfa 100644
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -121,7 +121,7 @@ BackgroundWriterMain(void)
*/
pqsignal(SIGHUP, BgSigHupHandler); /* set flag to read config file */
pqsignal(SIGINT, SIG_IGN);
- pqsignal(SIGTERM, ReqShutdownHandler); /* shutdown */
+ pqsignal(SIGTERM, ReqShutdownHandler); /* shutdown */
pqsignal(SIGQUIT, bg_quickdie); /* hard crash time */
pqsignal(SIGALRM, SIG_IGN);
pqsignal(SIGPIPE, SIG_IGN);
@@ -244,8 +244,8 @@ BackgroundWriterMain(void)
*/
for (;;)
{
- bool can_hibernate;
- int rc;
+ bool can_hibernate;
+ int rc;
/* Clear any already-pending wakeups */
ResetLatch(&MyProc->procLatch);
@@ -297,7 +297,7 @@ BackgroundWriterMain(void)
*/
rc = WaitLatch(&MyProc->procLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
- BgWriterDelay /* ms */);
+ BgWriterDelay /* ms */ );
/*
* If no latch event and BgBufferSync says nothing's happening, extend
@@ -314,7 +314,7 @@ BackgroundWriterMain(void)
* and the time we call StrategyNotifyBgWriter. While it's not
* critical that we not hibernate anyway, we try to reduce the odds of
* that by only hibernating when BgBufferSync says nothing's happening
- * for two consecutive cycles. Also, we mitigate any possible
+ * for two consecutive cycles. Also, we mitigate any possible
* consequences of a missed wakeup by not hibernating forever.
*/
if (rc == WL_TIMEOUT && can_hibernate && prev_hibernate)
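
For context, the remainder of this loop consumes WaitLatch()'s result bits roughly as follows (condensed sketch of the surrounding bgwriter code, not verbatim):

/* Emergency bailout: there is no point in outliving the postmaster */
if (rc & WL_POSTMASTER_DEATH)
    exit(1);

/*
 * A plain timeout with an idle buffer pool counts toward hibernating;
 * remember this cycle's verdict for the two-cycle test above.
 */
prev_hibernate = can_hibernate;
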
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 6aeade92e6..87aa4e1ba0 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -2,7 +2,7 @@
*
* checkpointer.c
*
- * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
+ * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
* Checkpoints are automatically dispatched after a certain amount of time has
* elapsed since the last one, and it can be signaled to perform requested
* checkpoints as well. (The GUC parameter that mandates a checkpoint every
@@ -14,7 +14,7 @@
* subprocess finishes, or as soon as recovery begins if we are doing archive
* recovery. It remains alive until the postmaster commands it to terminate.
* Normal termination is by SIGUSR2, which instructs the checkpointer to
- * execute a shutdown checkpoint and then exit(0). (All backends must be
+ * execute a shutdown checkpoint and then exit(0). (All backends must be
* stopped before SIGUSR2 is issued!) Emergency termination is by SIGQUIT;
* like any backend, the checkpointer will simply abort and exit on SIGQUIT.
*
@@ -113,7 +113,7 @@ typedef struct
typedef struct
{
- pid_t checkpointer_pid; /* PID (0 if not started) */
+ pid_t checkpointer_pid; /* PID (0 if not started) */
slock_t ckpt_lck; /* protects all the ckpt_* fields */
@@ -199,7 +199,7 @@ CheckpointerMain(void)
/*
* If possible, make this process a group leader, so that the postmaster
- * can signal any child processes too. (checkpointer probably never has
+ * can signal any child processes too. (checkpointer probably never has
* any child processes, but for consistency we make all postmaster child
* processes do this.)
*/
@@ -216,10 +216,11 @@ CheckpointerMain(void)
* want to wait for the backends to exit, whereupon the postmaster will
* tell us it's okay to shut down (via SIGUSR2).
*/
- pqsignal(SIGHUP, ChkptSigHupHandler); /* set flag to read config file */
- pqsignal(SIGINT, ReqCheckpointHandler); /* request checkpoint */
- pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
- pqsignal(SIGQUIT, chkpt_quickdie); /* hard crash time */
+ pqsignal(SIGHUP, ChkptSigHupHandler); /* set flag to read config
+ * file */
+ pqsignal(SIGINT, ReqCheckpointHandler); /* request checkpoint */
+ pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
+ pqsignal(SIGQUIT, chkpt_quickdie); /* hard crash time */
pqsignal(SIGALRM, SIG_IGN);
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, chkpt_sigusr1_handler);
@@ -255,10 +256,10 @@ CheckpointerMain(void)
* TopMemoryContext, but resetting that would be a really bad idea.
*/
checkpointer_context = AllocSetContextCreate(TopMemoryContext,
- "Checkpointer",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "Checkpointer",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
MemoryContextSwitchTo(checkpointer_context);
/*
@@ -280,7 +281,8 @@ CheckpointerMain(void)
/*
* These operations are really just a minimal subset of
* AbortTransaction(). We don't have very many resources to worry
- * about in checkpointer, but we do have LWLocks, buffers, and temp files.
+ * about in checkpointer, but we do have LWLocks, buffers, and temp
+ * files.
*/
LWLockReleaseAll();
AbortBufferIO();
@@ -351,9 +353,8 @@ CheckpointerMain(void)
ThisTimeLineID = GetRecoveryTargetTLI();
/*
- * Ensure all shared memory values are set correctly for the config.
- * Doing this here ensures no race conditions from other concurrent
- * updaters.
+ * Ensure all shared memory values are set correctly for the config. Doing
+ * this here ensures no race conditions from other concurrent updaters.
*/
UpdateSharedMemoryConfig();
@@ -389,9 +390,9 @@ CheckpointerMain(void)
ProcessConfigFile(PGC_SIGHUP);
/*
- * Checkpointer is the last process to shut down, so we ask
- * it to hold the keys for a range of other tasks required
- * most of which have nothing to do with checkpointing at all.
+ * Checkpointer is the last process to shut down, so we ask it to
+ * hold the keys for a range of other tasks required, most of which
+ * have nothing to do with checkpointing at all.
*
* For various reasons, some config values can change dynamically
* so the primary copy of them is held in shared memory to make
@@ -490,7 +491,8 @@ CheckpointerMain(void)
errhint("Consider increasing the configuration parameter \"checkpoint_segments\".")));
/*
- * Initialize checkpointer-private variables used during checkpoint
+ * Initialize checkpointer-private variables used during
+ * checkpoint
*/
ckpt_active = true;
if (!do_restartpoint)
@@ -558,8 +560,8 @@ CheckpointerMain(void)
pgstat_send_bgwriter();
/*
- * Sleep until we are signaled or it's time for another checkpoint
- * or xlog file switch.
+ * Sleep until we are signaled or it's time for another checkpoint or
+ * xlog file switch.
*/
now = (pg_time_t) time(NULL);
elapsed_secs = now - last_checkpoint_time;
@@ -576,7 +578,7 @@ CheckpointerMain(void)
rc = WaitLatch(&MyProc->procLatch,
WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
- cur_timeout * 1000L /* convert to ms */);
+ cur_timeout * 1000L /* convert to ms */ );
/*
* Emergency bailout if postmaster has died. This is to avoid the
@@ -687,8 +689,8 @@ CheckpointWriteDelay(int flags, double progress)
return;
/*
- * Perform the usual duties and take a nap, unless we're behind
- * schedule, in which case we just try to catch up as quickly as possible.
+ * Perform the usual duties and take a nap, unless we're behind schedule,
+ * in which case we just try to catch up as quickly as possible.
*/
if (!(flags & CHECKPOINT_IMMEDIATE) &&
!shutdown_requested &&
@@ -716,7 +718,8 @@ CheckpointWriteDelay(int flags, double progress)
/*
* This sleep used to be connected to bgwriter_delay, typically 200ms.
* That resulted in more frequent wakeups if not much work to do.
- * Checkpointer and bgwriter are no longer related so take the Big Sleep.
+ * Checkpointer and bgwriter are no longer related so take the Big
+ * Sleep.
*/
pg_usleep(100000L);
}
@@ -1017,7 +1020,7 @@ RequestCheckpoint(int flags)
if (ntries >= 20) /* max wait 2.0 sec */
{
elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
- "could not request checkpoint because checkpointer not running");
+ "could not request checkpoint because checkpointer not running");
break;
}
}
@@ -1130,9 +1133,9 @@ ForwardFsyncRequest(RelFileNodeBackend rnode, ForkNumber forknum,
CheckpointerShmem->num_backend_writes++;
/*
- * If the checkpointer isn't running or the request queue is full,
- * the backend will have to perform its own fsync request. But before
- * forcing that to happen, we can try to compact the request queue.
+ * If the checkpointer isn't running or the request queue is full, the
+ * backend will have to perform its own fsync request. But before forcing
+ * that to happen, we can try to compact the request queue.
*/
if (CheckpointerShmem->checkpointer_pid == 0 ||
(CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
@@ -1339,8 +1342,8 @@ UpdateSharedMemoryConfig(void)
SyncRepUpdateSyncStandbysDefined();
/*
- * If full_page_writes has been changed by SIGHUP, we update it
- * in shared memory and write an XLOG_FPW_CHANGE record.
+ * If full_page_writes has been changed by SIGHUP, we update it in shared
+ * memory and write an XLOG_FPW_CHANGE record.
*/
UpdateFullPageWrites();
@@ -1356,9 +1359,9 @@ FirstCallSinceLastCheckpoint(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
- static int ckpt_done = 0;
- int new_done;
- bool FirstCall = false;
+ static int ckpt_done = 0;
+ int new_done;
+ bool FirstCall = false;
SpinLockAcquire(&cps->ckpt_lck);
new_done = cps->ckpt_done;
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index 37fc73592c..5c43cdde65 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -361,9 +361,9 @@ pgarch_MainLoop(void)
wakened = true;
/*
- * There shouldn't be anything for the archiver to do except to wait
- * for a signal ... however, the archiver exists to protect our data,
- * so she wakes up occasionally to allow herself to be proactive.
+ * There shouldn't be anything for the archiver to do except to wait for a
+ * signal ... however, the archiver exists to protect our data, so she
+ * wakes up occasionally to allow herself to be proactive.
*/
do
{
@@ -410,18 +410,18 @@ pgarch_MainLoop(void)
* PGARCH_AUTOWAKE_INTERVAL having passed since last_copy_time, or
* until postmaster dies.
*/
- if (!time_to_stop) /* Don't wait during last iteration */
+ if (!time_to_stop) /* Don't wait during last iteration */
{
- pg_time_t curtime = (pg_time_t) time(NULL);
- int timeout;
+ pg_time_t curtime = (pg_time_t) time(NULL);
+ int timeout;
timeout = PGARCH_AUTOWAKE_INTERVAL - (curtime - last_copy_time);
if (timeout > 0)
{
- int rc;
+ int rc;
rc = WaitLatch(&mainloop_latch,
- WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
+ WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
timeout * 1000L);
if (rc & WL_TIMEOUT)
wakened = true;
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 424cfcaf4f..73d5b2e39c 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -1522,7 +1522,7 @@ pgstat_end_function_usage(PgStat_FunctionCallUsage *fcu, bool finalize)
/*
* Compute the new f_total_time as the total elapsed time added to the
- * pre-call value of f_total_time. This is necessary to avoid
+ * pre-call value of f_total_time. This is necessary to avoid
* double-counting any time taken by recursive calls of myself. (We do
* not need any similar kluge for self time, since that already excludes
* any recursive calls.)
@@ -2836,7 +2836,7 @@ pgstat_get_backend_current_activity(int pid, bool checkUser)
* pgstat_get_crashed_backend_activity() -
*
* Return a string representing the current activity of the backend with
- * the specified PID. Like the function above, but reads shared memory with
+ * the specified PID. Like the function above, but reads shared memory with
* the expectation that it may be corrupt. On success, copy the string
* into the "buffer" argument and return that pointer. On failure,
* return NULL.
@@ -2845,7 +2845,7 @@ pgstat_get_backend_current_activity(int pid, bool checkUser)
* query that crashed a backend. In particular, no attempt is made to
* follow the correct concurrency protocol when accessing the
* BackendStatusArray. But that's OK, in the worst case we'll return a
- * corrupted message. We also must take care not to trip on ereport(ERROR).
+ * corrupted message. We also must take care not to trip on ereport(ERROR).
* ----------
*/
const char *
@@ -2890,8 +2890,8 @@ pgstat_get_crashed_backend_activity(int pid, char *buffer, int buflen)
/*
* Copy only ASCII-safe characters so we don't run into encoding
- * problems when reporting the message; and be sure not to run
- * off the end of memory.
+ * problems when reporting the message; and be sure not to run off
+ * the end of memory.
*/
ascii_safe_strlcpy(buffer, activity,
Min(buflen, pgstat_track_activity_query_size));
@@ -3070,7 +3070,7 @@ PgstatCollectorMain(int argc, char *argv[])
* every message; instead, do that only after a recv() fails to obtain a
* message. (This effectively means that if backends are sending us stuff
* like mad, we won't notice postmaster death until things slack off a
- * bit; which seems fine.) To do that, we have an inner loop that
+ * bit; which seems fine.) To do that, we have an inner loop that
* iterates as long as recv() succeeds. We do recognize got_SIGHUP inside
* the inner loop, which means that such interrupts will get serviced but
* the latch won't get cleared until next time there is a break in the
@@ -3234,13 +3234,14 @@ PgstatCollectorMain(int argc, char *argv[])
/* Sleep until there's something to do */
#ifndef WIN32
wr = WaitLatchOrSocket(&pgStatLatch,
- WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE,
+ WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE,
pgStatSock,
-1L);
#else
+
/*
* Windows, at least in its Windows Server 2003 R2 incarnation,
- * sometimes loses FD_READ events. Waking up and retrying the recv()
+ * sometimes loses FD_READ events. Waking up and retrying the recv()
* fixes that, so don't sleep indefinitely. This is a crock of the
* first water, but until somebody wants to debug exactly what's
* happening there, this is the best we can do. The two-second
@@ -3249,9 +3250,9 @@ PgstatCollectorMain(int argc, char *argv[])
* backend_read_statsfile.
*/
wr = WaitLatchOrSocket(&pgStatLatch,
- WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
+ WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
pgStatSock,
- 2 * 1000L /* msec */);
+ 2 * 1000L /* msec */ );
#endif
/*
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 830a83f60e..eeea933b19 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -203,7 +203,7 @@ bool enable_bonjour = false;
char *bonjour_name;
bool restart_after_crash = true;
-char *output_config_variable = NULL;
+char *output_config_variable = NULL;
/* PIDs of special child processes; 0 when not running */
static pid_t StartupPID = 0,
@@ -243,7 +243,7 @@ static bool RecoveryError = false; /* T if WAL recovery failed */
* checkpointer are launched, while the startup process continues applying WAL.
* If Hot Standby is enabled, then, after reaching a consistent point in WAL
* redo, startup process signals us again, and we switch to PM_HOT_STANDBY
- * state and begin accepting connections to perform read-only queries. When
+ * state and begin accepting connections to perform read-only queries. When
* archive recovery is finished, the startup process exits with exit code 0
* and we switch to PM_RUN state.
*
@@ -280,7 +280,8 @@ typedef enum
PM_WAIT_BACKUP, /* waiting for online backup mode to end */
PM_WAIT_READONLY, /* waiting for read only backends to exit */
PM_WAIT_BACKENDS, /* waiting for live backends to exit */
- PM_SHUTDOWN, /* waiting for checkpointer to do shutdown ckpt */
+ PM_SHUTDOWN, /* waiting for checkpointer to do shutdown
+ * ckpt */
PM_SHUTDOWN_2, /* waiting for archiver and walsenders to
* finish */
PM_WAIT_DEAD_END, /* waiting for dead_end children to exit */
@@ -481,7 +482,7 @@ static void ShmemBackendArrayRemove(Backend *bn);
* File descriptors for pipe used to monitor if postmaster is alive.
* First is POSTMASTER_FD_WATCH, second is POSTMASTER_FD_OWN.
*/
-int postmaster_alive_fds[2] = { -1, -1 };
+int postmaster_alive_fds[2] = {-1, -1};
#else
/* Process handle of postmaster used for the same purpose on Windows */
HANDLE PostmasterHandle;
@@ -740,11 +741,14 @@ PostmasterMain(int argc, char *argv[])
if (output_config_variable != NULL)
{
- /* permission is handled because the user is reading inside the data dir */
+ /*
+ * permission is handled because the user is reading inside the data
+ * dir
+ */
puts(GetConfigOption(output_config_variable, false, false));
ExitPostmaster(0);
}
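(For context: this branch implements the postmaster's -C option, so a command along the lines of "postgres -C data_directory -D /path/to/data" prints the named parameter's value and exits; the only access control required is being able to read the data directory itself.)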
-
+
/* Verify that DataDir looks reasonable */
checkDataDir();
@@ -791,8 +795,8 @@ PostmasterMain(int argc, char *argv[])
char **p;
ereport(DEBUG3,
- (errmsg_internal("%s: PostmasterMain: initial environment dump:",
- progname)));
+ (errmsg_internal("%s: PostmasterMain: initial environment dump:",
+ progname)));
ereport(DEBUG3,
(errmsg_internal("-----------------------------------------")));
for (p = environ; *p; ++p)
@@ -981,6 +985,7 @@ PostmasterMain(int argc, char *argv[])
InitPostmasterDeathWatchHandle();
#ifdef WIN32
+
/*
* Initialize I/O completion port used to deliver list of dead children.
*/
@@ -1979,6 +1984,7 @@ ClosePostmasterPorts(bool am_syslogger)
int i;
#ifndef WIN32
+
/*
* Close the write end of postmaster death watch pipe. It's important to
* do this as early as possible, so that if postmaster dies, others won't
@@ -1986,8 +1992,8 @@ ClosePostmasterPorts(bool am_syslogger)
*/
if (close(postmaster_alive_fds[POSTMASTER_FD_OWN]))
ereport(FATAL,
- (errcode_for_file_access(),
- errmsg_internal("could not close postmaster death monitoring pipe in child process: %m")));
+ (errcode_for_file_access(),
+ errmsg_internal("could not close postmaster death monitoring pipe in child process: %m")));
postmaster_alive_fds[POSTMASTER_FD_OWN] = -1;
#endif
@@ -2357,13 +2363,14 @@ reaper(SIGNAL_ARGS)
* disconnection.
*
* XXX should avoid the need for disconnection. When we do,
- * am_cascading_walsender should be replaced with RecoveryInProgress()
+ * am_cascading_walsender should be replaced with
+ * RecoveryInProgress()
*/
if (max_wal_senders > 0 && CountChildren(BACKEND_TYPE_WALSND) > 0)
{
ereport(LOG,
(errmsg("terminating all walsender processes to force cascaded "
- "standby(s) to update timeline and reconnect")));
+ "standby(s) to update timeline and reconnect")));
SignalSomeChildren(SIGUSR2, BACKEND_TYPE_WALSND);
}
@@ -2398,8 +2405,8 @@ reaper(SIGNAL_ARGS)
}
/*
- * Was it the bgwriter? Normal exit can be ignored; we'll start a
- * new one at the next iteration of the postmaster's main loop, if
+ * Was it the bgwriter? Normal exit can be ignored; we'll start a new
+ * one at the next iteration of the postmaster's main loop, if
* necessary. Any other exit condition is treated as a crash.
*/
if (pid == BgWriterPID)
@@ -2420,8 +2427,8 @@ reaper(SIGNAL_ARGS)
if (EXIT_STATUS_0(exitstatus) && pmState == PM_SHUTDOWN)
{
/*
- * OK, we saw normal exit of the checkpointer after it's been told
- * to shut down. We expect that it wrote a shutdown
+ * OK, we saw normal exit of the checkpointer after it's been
+ * told to shut down. We expect that it wrote a shutdown
* checkpoint. (If for some reason it didn't, recovery will
* occur on next postmaster start.)
*
@@ -2457,8 +2464,8 @@ reaper(SIGNAL_ARGS)
else
{
/*
- * Any unexpected exit of the checkpointer (including FATAL exit)
- * is treated as a crash.
+ * Any unexpected exit of the checkpointer (including FATAL
+ * exit) is treated as a crash.
*/
HandleChildCrash(pid, exitstatus,
_("checkpointer process"));
@@ -2847,7 +2854,7 @@ LogChildExit(int lev, const char *procname, int pid, int exitstatus)
if (!EXIT_STATUS_0(exitstatus))
activity = pgstat_get_crashed_backend_activity(pid,
activity_buffer,
- sizeof(activity_buffer));
+ sizeof(activity_buffer));
if (WIFEXITED(exitstatus))
ereport(lev,
@@ -2879,7 +2886,7 @@ LogChildExit(int lev, const char *procname, int pid, int exitstatus)
procname, pid, WTERMSIG(exitstatus),
WTERMSIG(exitstatus) < NSIG ?
sys_siglist[WTERMSIG(exitstatus)] : "(unknown)"),
- activity ? errdetail("Failed process was running: %s", activity) : 0));
+ activity ? errdetail("Failed process was running: %s", activity) : 0));
#else
ereport(lev,
@@ -2947,14 +2954,14 @@ PostmasterStateMachine(void)
{
/*
* PM_WAIT_BACKENDS state ends when we have no regular backends
- * (including autovac workers) and no walwriter, autovac launcher
- * or bgwriter. If we are doing crash recovery then we expect the
- * checkpointer to exit as well, otherwise not.
- * The archiver, stats, and syslogger processes
- * are disregarded since they are not connected to shared memory; we
- * also disregard dead_end children here. Walsenders are also
- * disregarded, they will be terminated later after writing the
- * checkpoint record, like the archiver process.
+ * (including autovac workers) and no walwriter, autovac launcher or
+ * bgwriter. If we are doing crash recovery then we expect the
+ * checkpointer to exit as well, otherwise not. The archiver, stats,
+ * and syslogger processes are disregarded since they are not
+ * connected to shared memory; we also disregard dead_end children
+ * here. Walsenders are also disregarded, they will be terminated
+ * later after writing the checkpoint record, like the archiver
+ * process.
*/
if (CountChildren(BACKEND_TYPE_NORMAL | BACKEND_TYPE_AUTOVAC) == 0 &&
StartupPID == 0 &&
@@ -2997,10 +3004,10 @@ PostmasterStateMachine(void)
else
{
/*
- * If we failed to fork a checkpointer, just shut down. Any
- * required cleanup will happen at next restart. We set
- * FatalError so that an "abnormal shutdown" message gets
- * logged when we exit.
+ * If we failed to fork a checkpointer, just shut down.
+ * Any required cleanup will happen at next restart. We
+ * set FatalError so that an "abnormal shutdown" message
+ * gets logged when we exit.
*/
FatalError = true;
pmState = PM_WAIT_DEAD_END;
@@ -3086,13 +3093,13 @@ PostmasterStateMachine(void)
else
{
/*
- * Terminate exclusive backup mode to avoid recovery after a clean fast
- * shutdown. Since an exclusive backup can only be taken during normal
- * running (and not, for example, while running under Hot Standby)
- * it only makes sense to do this if we reached normal running. If
- * we're still in recovery, the backup file is one we're
- * recovering *from*, and we must keep it around so that recovery
- * restarts from the right place.
+ * Terminate exclusive backup mode to avoid recovery after a clean
+ * fast shutdown. Since an exclusive backup can only be taken
+ * during normal running (and not, for example, while running
+ * under Hot Standby) it only makes sense to do this if we reached
+ * normal running. If we're still in recovery, the backup file is
+ * one we're recovering *from*, and we must keep it around so that
+ * recovery restarts from the right place.
*/
if (ReachedNormalRunning)
CancelBackup();
@@ -3437,7 +3444,7 @@ BackendInitialize(Port *port)
if (pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
remote_host, sizeof(remote_host),
remote_port, sizeof(remote_port),
- (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV) != 0)
+ (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV) != 0)
{
int ret = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
remote_host, sizeof(remote_host),
@@ -3930,8 +3937,8 @@ internal_forkexec(int argc, char *argv[], Port *port)
INFINITE,
WT_EXECUTEONLYONCE | WT_EXECUTEINWAITTHREAD))
ereport(FATAL,
- (errmsg_internal("could not register process for wait: error code %lu",
- GetLastError())));
+ (errmsg_internal("could not register process for wait: error code %lu",
+ GetLastError())));
/* Don't close pi.hProcess here - the wait thread needs access to it */
@@ -4531,7 +4538,7 @@ StartChildProcess(AuxProcType type)
break;
case CheckpointerProcess:
ereport(LOG,
- (errmsg("could not fork checkpointer process: %m")));
+ (errmsg("could not fork checkpointer process: %m")));
break;
case WalWriterProcess:
ereport(LOG,
@@ -5111,7 +5118,6 @@ pgwin32_deadchild_callback(PVOID lpParameter, BOOLEAN TimerOrWaitFired)
/* Queue SIGCHLD signal */
pg_queue_signal(SIGCHLD);
}
-
#endif /* WIN32 */
/*
@@ -5124,10 +5130,11 @@ static void
InitPostmasterDeathWatchHandle(void)
{
#ifndef WIN32
+
/*
* Create a pipe. Postmaster holds the write end of the pipe open
- * (POSTMASTER_FD_OWN), and children hold the read end. Children can
- * pass the read file descriptor to select() to wake up in case postmaster
+ * (POSTMASTER_FD_OWN), and children hold the read end. Children can pass
+ * the read file descriptor to select() to wake up in case postmaster
* dies, or check for postmaster death with read() returning 0. Children must
* close the write end as soon as possible after forking, because EOF
* won't be signaled in the read end until all processes have closed the
@@ -5147,8 +5154,8 @@ InitPostmasterDeathWatchHandle(void)
ereport(FATAL,
(errcode_for_socket_access(),
errmsg_internal("could not set postmaster death monitoring pipe to non-blocking mode: %m")));
-
#else
+
/*
* On Windows, we use a process handle for the same purpose.
*/
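The pipe trick described in this file's comments can be sketched as follows; the helper name is hypothetical, but the logic mirrors PostmasterIsAlive() further down in this commit:

	#include <errno.h>
	#include <stdbool.h>
	#include <unistd.h>

	/* watch_fd is the nonblocking read end (POSTMASTER_FD_WATCH). */
	static bool
	postmaster_alive_sketch(int watch_fd)
	{
		char		c;
		ssize_t		rc = read(watch_fd, &c, 1);

		if (rc < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
			return true;		/* no EOF yet: the write end is still open */
		if (rc == 0)
			return false;		/* EOF: all write ends closed, postmaster gone */
		return false;			/* actual data is unexpected; real code FATALs */
	}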
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index 3528879243..25d095b158 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -401,7 +401,7 @@ SysLoggerMain(int argc, char *argv[])
/*
* Calculate time till next time-based rotation, so that we don't
- * sleep longer than that. We assume the value of "now" obtained
+ * sleep longer than that. We assume the value of "now" obtained
* above is still close enough. Note we can't make this calculation
* until after calling logfile_rotate(), since it will advance
* next_rotation_time.
@@ -409,7 +409,7 @@ SysLoggerMain(int argc, char *argv[])
if (Log_RotationAge > 0 && !rotation_disabled)
{
if (now < next_rotation_time)
- cur_timeout = (next_rotation_time - now) * 1000L; /* msec */
+ cur_timeout = (next_rotation_time - now) * 1000L; /* msec */
else
cur_timeout = 0;
cur_flags = WL_TIMEOUT;
@@ -632,6 +632,7 @@ SysLogger_Start(void)
errmsg("could not redirect stderr: %m")));
close(fd);
_setmode(_fileno(stderr), _O_BINARY);
+
/*
* Now we are done with the write end of the pipe.
* CloseHandle() must not be called because the preceding
diff --git a/src/backend/postmaster/walwriter.c b/src/backend/postmaster/walwriter.c
index 77455db166..b7b8512555 100644
--- a/src/backend/postmaster/walwriter.c
+++ b/src/backend/postmaster/walwriter.c
@@ -244,11 +244,11 @@ WalWriterMain(void)
*/
for (;;)
{
- long cur_timeout;
- int rc;
+ long cur_timeout;
+ int rc;
/*
- * Advertise whether we might hibernate in this cycle. We do this
+ * Advertise whether we might hibernate in this cycle. We do this
* before resetting the latch to ensure that any async commits will
* see the flag set if they might possibly need to wake us up, and
* that we won't miss any signal they send us. (If we discover work
@@ -294,7 +294,7 @@ WalWriterMain(void)
* sleep time so as to reduce the server's idle power consumption.
*/
if (left_till_hibernate > 0)
- cur_timeout = WalWriterDelay; /* in ms */
+ cur_timeout = WalWriterDelay; /* in ms */
else
cur_timeout = WalWriterDelay * HIBERNATE_FACTOR;
diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c
index c0414a2491..da59705344 100644
--- a/src/backend/regex/regc_locale.c
+++ b/src/backend/regex/regc_locale.c
@@ -513,7 +513,7 @@ cclass(struct vars * v, /* context */
{
size_t len;
struct cvec *cv = NULL;
- const char * const *namePtr;
+ const char *const * namePtr;
int i,
index;
@@ -521,7 +521,7 @@ cclass(struct vars * v, /* context */
* The following arrays define the valid character class names.
*/
- static const char * const classNames[] = {
+ static const char *const classNames[] = {
"alnum", "alpha", "ascii", "blank", "cntrl", "digit", "graph",
"lower", "print", "punct", "space", "upper", "xdigit", NULL
};
@@ -562,8 +562,8 @@ cclass(struct vars * v, /* context */
index = (int) CC_ALPHA;
/*
- * Now compute the character class contents. For classes that are
- * based on the behavior of a <wctype.h> or <ctype.h> function, we use
+ * Now compute the character class contents. For classes that are based
+ * on the behavior of a <wctype.h> or <ctype.h> function, we use
* pg_ctype_get_cache so that we can cache the results. Other classes
* have definitions that are hard-wired here, and for those we just
* construct a transient cvec on the fly.
@@ -605,10 +605,11 @@ cclass(struct vars * v, /* context */
cv = pg_ctype_get_cache(pg_wc_ispunct);
break;
case CC_XDIGIT:
+
/*
* It's not clear how to define this in non-western locales, and
- * even less clear that there's any particular use in trying.
- * So just hard-wire the meaning.
+ * even less clear that there's any particular use in trying. So
+ * just hard-wire the meaning.
*/
cv = getcvec(v, 0, 3);
if (cv)
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
index eac951f200..e85c5ddba5 100644
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -680,9 +680,9 @@ typedef int (*pg_wc_probefunc) (pg_wchar c);
typedef struct pg_ctype_cache
{
- pg_wc_probefunc probefunc; /* pg_wc_isalpha or a sibling */
- Oid collation; /* collation this entry is for */
- struct cvec cv; /* cache entry contents */
+ pg_wc_probefunc probefunc; /* pg_wc_isalpha or a sibling */
+ Oid collation; /* collation this entry is for */
+ struct cvec cv; /* cache entry contents */
struct pg_ctype_cache *next; /* chain link */
} pg_ctype_cache;
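To make the cache shape concrete, here is a sketch of the lookup over this chain; the list-head name is an assumption, and building a new entry on a miss is omitted:

	static pg_ctype_cache *pg_ctype_cache_list = NULL;	/* assumed list head */

	static struct cvec *
	ctype_cache_lookup_sketch(pg_wc_probefunc probefunc, Oid collation)
	{
		pg_ctype_cache *pcc;

		for (pcc = pg_ctype_cache_list; pcc != NULL; pcc = pcc->next)
		{
			if (pcc->probefunc == probefunc && pcc->collation == collation)
				return &pcc->cv;	/* shared result: caller must not free it */
		}
		return NULL;			/* miss: the real code builds and links an entry */
	}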
@@ -730,7 +730,7 @@ store_match(pg_ctype_cache *pcc, pg_wchar chr1, int nchrs)
/*
* Given a probe function (e.g., pg_wc_isalpha) get a struct cvec for all
- * chrs satisfying the probe function. The active collation is the one
+ * chrs satisfying the probe function. The active collation is the one
* previously set by pg_set_regex_collation. Return NULL if out of memory.
*
* Note that the result must not be freed or modified by caller.
@@ -777,7 +777,7 @@ pg_ctype_get_cache(pg_wc_probefunc probefunc)
* UTF8 go up to 0x7FF, which is a pretty arbitrary cutoff but we cannot
* extend it as far as we'd like (say, 0xFFFF, the end of the Basic
* Multilingual Plane) without creating significant performance issues due
- * to too many characters being fed through the colormap code. This will
+ * to too many characters being fed through the colormap code. This will
* need redesign to fix reasonably, but at least for the moment we have
* all common European languages covered. Otherwise (not C, not UTF8) go
* up to 255. These limits are interrelated with restrictions discussed
diff --git a/src/backend/regex/regcomp.c b/src/backend/regex/regcomp.c
index 7fd0b07e2c..57055f04ab 100644
--- a/src/backend/regex/regcomp.c
+++ b/src/backend/regex/regcomp.c
@@ -1119,11 +1119,11 @@ parseqatom(struct vars * v,
{
/*
* If there's no backrefs involved, we can turn x{m,n} into
- * x{m-1,n-1}x, with capturing parens in only the second x. This
- * is valid because we only care about capturing matches from the
- * final iteration of the quantifier. It's a win because we can
- * implement the backref-free left side as a plain DFA node, since
- * we don't really care where its submatches are.
+ * x{m-1,n-1}x, with capturing parens in only the second x. This is
+ * valid because we only care about capturing matches from the final
+ * iteration of the quantifier. It's a win because we can implement
+ * the backref-free left side as a plain DFA node, since we don't
+ * really care where its submatches are.
*/
dupnfa(v->nfa, atom->begin, atom->end, s, atom->begin);
assert(m >= 1 && m != INFINITY && n >= 1);
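For instance, with this rewrite a fragment like (ab){2,4} is handled roughly as a non-capturing (ab){1,3} followed by one capturing (ab): only the final iteration can supply the reported capture, so the leading repetitions can be matched by a plain DFA node.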
diff --git a/src/backend/regex/rege_dfa.c b/src/backend/regex/rege_dfa.c
index f4fd41458b..da7a0bf402 100644
--- a/src/backend/regex/rege_dfa.c
+++ b/src/backend/regex/rege_dfa.c
@@ -272,7 +272,7 @@ static struct dfa *
newdfa(struct vars * v,
struct cnfa * cnfa,
struct colormap * cm,
- struct smalldfa * sml) /* preallocated space, may be NULL */
+ struct smalldfa * sml) /* preallocated space, may be NULL */
{
struct dfa *d;
size_t nss = cnfa->nstates * 2;
diff --git a/src/backend/regex/regerror.c b/src/backend/regex/regerror.c
index 9d44eb04ce..f6a3f2667f 100644
--- a/src/backend/regex/regerror.c
+++ b/src/backend/regex/regerror.c
@@ -46,7 +46,7 @@ static struct rerr
{
/* the actual table is built from regex.h */
-#include "regex/regerrs.h" /* pgrminclude ignore */
+#include "regex/regerrs.h" /* pgrminclude ignore */
{
-1, "", "oops"
}, /* explanation special-cased in code */
diff --git a/src/backend/regex/regexec.c b/src/backend/regex/regexec.c
index 5d7415b3c1..3748a9c171 100644
--- a/src/backend/regex/regexec.c
+++ b/src/backend/regex/regexec.c
@@ -531,7 +531,7 @@ zaptreesubs(struct vars * v,
{
if (t->op == '(')
{
- int n = t->subno;
+ int n = t->subno;
assert(n > 0);
if ((size_t) n < v->nmatch)
@@ -948,7 +948,7 @@ citerdissect(struct vars * v,
}
/*
- * We need workspace to track the endpoints of each sub-match. Normally
+ * We need workspace to track the endpoints of each sub-match. Normally
* we consider only nonzero-length sub-matches, so there can be at most
* end-begin of them. However, if min is larger than that, we will also
* consider zero-length sub-matches in order to find enough matches.
@@ -977,8 +977,8 @@ citerdissect(struct vars * v,
/*
* Our strategy is to first find a set of sub-match endpoints that are
* valid according to the child node's DFA, and then recursively dissect
- * each sub-match to confirm validity. If any validity check fails,
- * backtrack the last sub-match and try again. And, when we next try for
+ * each sub-match to confirm validity. If any validity check fails,
+ * backtrack the last sub-match and try again. And, when we next try for
* a validity check, we need not recheck any successfully verified
* sub-matches that we didn't move the endpoints of. nverified remembers
* how many sub-matches are currently known okay.
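A stripped-down sketch of that loop, with expensive_ok() as a stub standing in for the recursive dissection; the DFA pre-check and backing up across earlier endpoints are omitted:

	#include <stdbool.h>

	static bool
	expensive_ok(const char *s, const char *e)
	{
		return e - s <= 3;		/* arbitrary stub rule for the sketch */
	}

	/* endpts[0] is the overall start; endpts[1..k] are tentative endpoints. */
	static bool
	dissect_sketch(const char **endpts, int k)
	{
		int			nverified = 0;	/* leading sub-matches already known good */
		int			i;

		for (;;)
		{
			for (i = nverified; i < k; i++)
			{
				if (!expensive_ok(endpts[i], endpts[i + 1]))
					break;
				nverified = i + 1;	/* never recheck a verified prefix */
			}
			if (nverified == k)
				return true;	/* every sub-match verified */
			if (endpts[i + 1] == endpts[i])
				return false;	/* the real code backs up further left here */
			endpts[i + 1]--;	/* shorten the failing sub-match */
			nverified = i;		/* sub-matches 0..i-1 remain verified */
		}
	}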
@@ -1028,10 +1028,10 @@ citerdissect(struct vars * v,
}
/*
- * We've identified a way to divide the string into k sub-matches
- * that works so far as the child DFA can tell. If k is an allowed
- * number of matches, start the slow part: recurse to verify each
- * sub-match. We always have k <= max_matches, needn't check that.
+ * We've identified a way to divide the string into k sub-matches that
+ * works so far as the child DFA can tell. If k is an allowed number
+ * of matches, start the slow part: recurse to verify each sub-match.
+ * We always have k <= max_matches, needn't check that.
*/
if (k < min_matches)
goto backtrack;
@@ -1065,13 +1065,14 @@ citerdissect(struct vars * v,
/* match failed to verify, so backtrack */
backtrack:
+
/*
* Must consider shorter versions of the current sub-match. However,
* we'll only ask for a zero-length match if necessary.
*/
while (k > 0)
{
- chr *prev_end = endpts[k - 1];
+ chr *prev_end = endpts[k - 1];
if (endpts[k] > prev_end)
{
@@ -1132,7 +1133,7 @@ creviterdissect(struct vars * v,
}
/*
- * We need workspace to track the endpoints of each sub-match. Normally
+ * We need workspace to track the endpoints of each sub-match. Normally
* we consider only nonzero-length sub-matches, so there can be at most
* end-begin of them. However, if min is larger than that, we will also
* consider zero-length sub-matches in order to find enough matches.
@@ -1161,8 +1162,8 @@ creviterdissect(struct vars * v,
/*
* Our strategy is to first find a set of sub-match endpoints that are
* valid according to the child node's DFA, and then recursively dissect
- * each sub-match to confirm validity. If any validity check fails,
- * backtrack the last sub-match and try again. And, when we next try for
+ * each sub-match to confirm validity. If any validity check fails,
+ * backtrack the last sub-match and try again. And, when we next try for
* a validity check, we need not recheck any successfully verified
* sub-matches that we didn't move the endpoints of. nverified remembers
* how many sub-matches are currently known okay.
@@ -1214,10 +1215,10 @@ creviterdissect(struct vars * v,
}
/*
- * We've identified a way to divide the string into k sub-matches
- * that works so far as the child DFA can tell. If k is an allowed
- * number of matches, start the slow part: recurse to verify each
- * sub-match. We always have k <= max_matches, needn't check that.
+ * We've identified a way to divide the string into k sub-matches that
+ * works so far as the child DFA can tell. If k is an allowed number
+ * of matches, start the slow part: recurse to verify each sub-match.
+ * We always have k <= max_matches, needn't check that.
*/
if (k < min_matches)
goto backtrack;
@@ -1251,6 +1252,7 @@ creviterdissect(struct vars * v,
/* match failed to verify, so backtrack */
backtrack:
+
/*
* Must consider longer versions of the current sub-match.
*/
diff --git a/src/backend/replication/basebackup.c b/src/backend/replication/basebackup.c
index 72e79ce045..0bc88a4040 100644
--- a/src/backend/replication/basebackup.c
+++ b/src/backend/replication/basebackup.c
@@ -141,6 +141,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
ti->size = opt->progress ? sendDir(linkpath, strlen(linkpath), true) : -1;
tablespaces = lappend(tablespaces, ti);
#else
+
/*
* If the platform does not have symbolic links, it should not be
* possible to have tablespaces - clearly somebody else created
@@ -148,7 +149,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
*/
ereport(WARNING,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("tablespaces are not supported on this platform")));
+ errmsg("tablespaces are not supported on this platform")));
#endif
}
@@ -661,9 +662,9 @@ sendDir(char *path, int basepathlen, bool sizeonly)
/* Allow symbolic links in pg_tblspc only */
if (strcmp(path, "./pg_tblspc") == 0 &&
#ifndef WIN32
- S_ISLNK(statbuf.st_mode)
+ S_ISLNK(statbuf.st_mode)
#else
- pgwin32_is_junction(pathbuf)
+ pgwin32_is_junction(pathbuf)
#endif
)
{
@@ -687,6 +688,7 @@ sendDir(char *path, int basepathlen, bool sizeonly)
_tarWriteHeader(pathbuf + basepathlen + 1, linkpath, &statbuf);
size += 512; /* Size of the header just added */
#else
+
/*
* If the platform does not have symbolic links, it should not be
* possible to have tablespaces - clearly somebody else created
@@ -694,9 +696,9 @@ sendDir(char *path, int basepathlen, bool sizeonly)
*/
ereport(WARNING,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("tablespaces are not supported on this platform")));
+ errmsg("tablespaces are not supported on this platform")));
continue;
-#endif /* HAVE_READLINK */
+#endif /* HAVE_READLINK */
}
else if (S_ISDIR(statbuf.st_mode))
{
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 8977327c8c..a2ae8700d1 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -172,10 +172,10 @@ SyncRepWaitForLSN(XLogRecPtr XactCommitLSN)
* never update it again, so we can't be seeing a stale value in that
* case.
*
- * Note: on machines with weak memory ordering, the acquisition of
- * the lock is essential to avoid race conditions: we cannot be sure
- * the sender's state update has reached main memory until we acquire
- * the lock. We could get rid of this dance if SetLatch/ResetLatch
+ * Note: on machines with weak memory ordering, the acquisition of the
+ * lock is essential to avoid race conditions: we cannot be sure the
+ * sender's state update has reached main memory until we acquire the
+ * lock. We could get rid of this dance if SetLatch/ResetLatch
* contained memory barriers.
*/
syncRepState = MyProc->syncRepState;
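Condensed, the double-check this comment is defending looks like the following (the shape of SyncRepWaitForLSN, error paths omitted):

		syncRepState = MyProc->syncRepState;
		if (syncRepState == SYNC_REP_WAITING)
		{
			/* the lock acquisition doubles as the needed memory barrier */
			LWLockAcquire(SyncRepLock, LW_SHARED);
			syncRepState = MyProc->syncRepState;	/* now known fresh */
			LWLockRelease(SyncRepLock);
		}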
@@ -241,8 +241,8 @@ SyncRepWaitForLSN(XLogRecPtr XactCommitLSN)
}
/*
- * Wait on latch. Any condition that should wake us up will set
- * the latch, so no need for timeout.
+ * Wait on latch. Any condition that should wake us up will set the
+ * latch, so no need for timeout.
*/
WaitLatch(&MyProc->procLatch, WL_LATCH_SET | WL_POSTMASTER_DEATH, -1);
}
@@ -422,8 +422,8 @@ SyncRepReleaseWaiters(void)
}
/*
- * Set the lsn first so that when we wake backends they will release
- * up to this location.
+ * Set the lsn first so that when we wake backends they will release up to
+ * this location.
*/
if (XLByteLT(walsndctl->lsn[SYNC_REP_WAIT_WRITE], MyWalSnd->write))
{
@@ -477,8 +477,8 @@ SyncRepGetStandbyPriority(void)
bool found = false;
/*
- * Since synchronous cascade replication is not allowed, we always
- * set the priority of cascading walsender to zero.
+ * Since synchronous cascade replication is not allowed, we always set the
+ * priority of a cascading walsender to zero.
*/
if (am_cascading_walsender)
return 0;
@@ -517,7 +517,7 @@ SyncRepGetStandbyPriority(void)
}
/*
- * Walk the specified queue from head. Set the state of any backends that
+ * Walk the specified queue from head. Set the state of any backends that
* need to be woken, remove them from the queue, and then wake them.
* Pass all = true to wake whole queue; otherwise, just wake up to
* the walsender's LSN.
@@ -601,7 +601,7 @@ SyncRepUpdateSyncStandbysDefined(void)
*/
if (!sync_standbys_defined)
{
- int i;
+ int i;
for (i = 0; i < NUM_SYNC_REP_WAIT_MODE; i++)
SyncRepWakeQueue(true, i);
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index d63ff29472..650b74fff7 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -752,8 +752,8 @@ ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime)
if (log_min_messages <= DEBUG2)
elog(DEBUG2, "sendtime %s receipttime %s replication apply delay %d ms transfer latency %d ms",
- timestamptz_to_str(sendTime),
- timestamptz_to_str(lastMsgReceiptTime),
- GetReplicationApplyDelay(),
- GetReplicationTransferLatency());
+ timestamptz_to_str(sendTime),
+ timestamptz_to_str(lastMsgReceiptTime),
+ GetReplicationApplyDelay(),
+ GetReplicationTransferLatency());
}
diff --git a/src/backend/replication/walreceiverfuncs.c b/src/backend/replication/walreceiverfuncs.c
index f8dd523518..876196f9da 100644
--- a/src/backend/replication/walreceiverfuncs.c
+++ b/src/backend/replication/walreceiverfuncs.c
@@ -252,8 +252,8 @@ GetReplicationApplyDelay(void)
XLogRecPtr receivePtr;
XLogRecPtr replayPtr;
- long secs;
- int usecs;
+ long secs;
+ int usecs;
SpinLockAcquire(&walrcv->mutex);
receivePtr = walrcv->receivedUpto;
@@ -284,9 +284,9 @@ GetReplicationTransferLatency(void)
TimestampTz lastMsgSendTime;
TimestampTz lastMsgReceiptTime;
- long secs = 0;
- int usecs = 0;
- int ms;
+ long secs = 0;
+ int usecs = 0;
+ int ms;
SpinLockAcquire(&walrcv->mutex);
lastMsgSendTime = walrcv->lastMsgSendTime;
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 5f938124e7..45a3b2ef29 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -74,7 +74,8 @@ WalSnd *MyWalSnd = NULL;
/* Global state */
bool am_walsender = false; /* Am I a walsender process ? */
-bool am_cascading_walsender = false; /* Am I cascading WAL to another standby ? */
+bool am_cascading_walsender = false; /* Am I cascading WAL to
+ * another standby ? */
/* User-settable parameters for walsender */
int max_wal_senders = 0; /* the maximum number of concurrent walsenders */
@@ -372,31 +373,31 @@ StartReplication(StartReplicationCmd *cmd)
SendPostmasterSignal(PMSIGNAL_ADVANCE_STATE_MACHINE);
/*
- * When promoting a cascading standby, postmaster sends SIGUSR2 to
- * any cascading walsenders to kill them. But there is a corner-case where
- * such walsender fails to receive SIGUSR2 and survives a standby promotion
- * unexpectedly. This happens when postmaster sends SIGUSR2 before
- * the walsender marks itself as a WAL sender, because postmaster sends
- * SIGUSR2 to only the processes marked as a WAL sender.
+ * When promoting a cascading standby, postmaster sends SIGUSR2 to any
+ * cascading walsenders to kill them. But there is a corner-case where
+ * such a walsender fails to receive SIGUSR2 and survives a standby
+ * promotion unexpectedly. This happens when postmaster sends SIGUSR2
+ * before the walsender marks itself as a WAL sender, because postmaster
+ * sends SIGUSR2 to only the processes marked as a WAL sender.
*
* To avoid this corner-case, if recovery is NOT in progress even though
* the walsender is a cascading one, we do the same thing as the SIGUSR2
* signal handler does, i.e., set walsender_ready_to_stop to true, which causes
* the walsender to end later.
*
- * When terminating cascading walsenders, usually postmaster writes
- * the log message announcing the terminations. But there is a race condition
- * here. If there is no walsender except this process before reaching here,
- * postmaster thinks that there is no walsender and suppresses that
+ * When terminating cascading walsenders, usually postmaster writes the
+ * log message announcing the terminations. But there is a race condition
+ * here. If there is no walsender except this process before reaching
+ * here, postmaster thinks that there is no walsender and suppresses that
* log message. To handle this case, we always emit that log message here.
- * This might cause duplicate log messages, but which is less likely to happen,
- * so it's not worth writing some code to suppress them.
+ * This might cause duplicate log messages, but that is unlikely to
+ * happen, so it's not worth writing code to suppress them.
*/
if (am_cascading_walsender && !RecoveryInProgress())
{
ereport(LOG,
- (errmsg("terminating walsender process to force cascaded standby "
- "to update timeline and reconnect")));
+ (errmsg("terminating walsender process to force cascaded standby "
+ "to update timeline and reconnect")));
walsender_ready_to_stop = true;
}
@@ -405,8 +406,8 @@ StartReplication(StartReplicationCmd *cmd)
* log-shipping, since this is checked in PostmasterMain().
*
* NOTE: wal_level can only change at shutdown, so in most cases it is
- * difficult for there to be WAL data that we can still see that was written
- * at wal_level='minimal'.
+ * difficult for there to be WAL data that we can still see that was
+ * written at wal_level='minimal'.
*/
/*
@@ -693,7 +694,7 @@ ProcessStandbyHSFeedbackMessage(void)
* far enough to make reply.xmin wrap around. In that case the xmin we
* set here would be "in the future" and have no effect. No point in
* worrying about this since it's too late to save the desired data
- * anyway. Assuming that the standby sends us an increasing sequence of
+ * anyway. Assuming that the standby sends us an increasing sequence of
* xmins, this could only happen during the first reply cycle, else our
* own xmin would prevent nextXid from advancing so far.
*
@@ -792,8 +793,8 @@ WalSndLoop(void)
if (MyWalSnd->state == WALSNDSTATE_CATCHUP)
{
ereport(DEBUG1,
- (errmsg("standby \"%s\" has now caught up with primary",
- application_name)));
+ (errmsg("standby \"%s\" has now caught up with primary",
+ application_name)));
WalSndSetState(WALSNDSTATE_STREAMING);
}
@@ -810,7 +811,7 @@ WalSndLoop(void)
if (caughtup && !pq_is_send_pending())
{
walsender_shutdown_requested = true;
- continue; /* don't want to wait more */
+ continue; /* don't want to wait more */
}
}
}
@@ -825,7 +826,7 @@ WalSndLoop(void)
if (caughtup || pq_is_send_pending())
{
TimestampTz timeout = 0;
- long sleeptime = 10000; /* 10 s */
+ long sleeptime = 10000; /* 10 s */
int wakeEvents;
wakeEvents = WL_LATCH_SET | WL_POSTMASTER_DEATH |
@@ -845,7 +846,7 @@ WalSndLoop(void)
if (replication_timeout > 0)
{
timeout = TimestampTzPlusMilliseconds(last_reply_timestamp,
- replication_timeout);
+ replication_timeout);
sleeptime = 1 + (replication_timeout / 10);
}
@@ -973,9 +974,9 @@ WalSndKill(int code, Datum arg)
void
XLogRead(char *buf, XLogRecPtr startptr, Size count)
{
- char *p;
+ char *p;
XLogRecPtr recptr;
- Size nbytes;
+ Size nbytes;
uint32 lastRemovedLog;
uint32 lastRemovedSeg;
uint32 log;
@@ -1087,9 +1088,9 @@ retry:
}
/*
- * During recovery, the currently-open WAL file might be replaced with
- * the file of the same name retrieved from archive. So we always need
- * to check what we read was valid after reading into the buffer. If it's
+ * During recovery, the currently-open WAL file might be replaced with the
+ * file of the same name retrieved from archive. So we always need to
+ * check that what we read is valid after reading into the buffer. If it's
* invalid, we try to open and read the file again.
*/
if (am_cascading_walsender)
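Schematically, the retry described here looks like the loop below; read_wal_into() and wal_looks_valid() are hypothetical stand-ins for the surrounding open/seek/read logic:

	for (;;)
	{
		read_wal_into(buf, startptr, count);	/* may open a new segment */
		if (!am_cascading_walsender || wal_looks_valid(buf, count))
			break;				/* the common case: data is good */
		close(sendFile);		/* discard the possibly-replaced file */
		sendFile = -1;			/* forces a fresh open on the next pass */
	}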
@@ -1294,8 +1295,8 @@ WalSndShutdownHandler(SIGNAL_ARGS)
SetLatch(&MyWalSnd->latch);
/*
- * Set the standard (non-walsender) state as well, so that we can
- * abort things like do_pg_stop_backup().
+ * Set the standard (non-walsender) state as well, so that we can abort
+ * things like do_pg_stop_backup().
*/
InterruptPending = true;
ProcDiePending = true;
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 6e5633dcdb..971d277b76 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -200,7 +200,7 @@ DefineRule(RuleStmt *stmt, const char *queryString)
transformRuleStmt(stmt, queryString, &actions, &whereClause);
/*
- * Find and lock the relation. Lock level should match
+ * Find and lock the relation. Lock level should match
* DefineQueryRewrite.
*/
relId = RangeVarGetRelid(stmt->relation, AccessExclusiveLock, false);
diff --git a/src/backend/rewrite/rewriteSupport.c b/src/backend/rewrite/rewriteSupport.c
index 5990159c62..3f9b5e6063 100644
--- a/src/backend/rewrite/rewriteSupport.c
+++ b/src/backend/rewrite/rewriteSupport.c
@@ -168,8 +168,8 @@ get_rewrite_oid_without_relid(const char *rulename,
if (HeapTupleIsValid(htup))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple rules named \"%s\"", rulename),
- errhint("Specify a relation name as well as a rule name.")));
+ errmsg("there are multiple rules named \"%s\"", rulename),
+ errhint("Specify a relation name as well as a rule name.")));
}
heap_endscan(scanDesc);
heap_close(RewriteRelation, AccessShareLock);
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index d46faaf958..78145472e1 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1325,7 +1325,7 @@ BufferSync(int flags)
* This is called periodically by the background writer process.
*
* Returns true if it's appropriate for the bgwriter process to go into
- * low-power hibernation mode. (This happens if the strategy clock sweep
+ * low-power hibernation mode. (This happens if the strategy clock sweep
* has been "lapped" and no buffer allocations have occurred recently,
* or if the bgwriter has been effectively disabled by setting
* bgwriter_lru_maxpages to 0.)
@@ -1510,8 +1510,8 @@ BgBufferSync(void)
/*
* If recent_alloc remains at zero for many cycles, smoothed_alloc will
* eventually underflow to zero, and the underflows produce annoying
- * kernel warnings on some platforms. Once upcoming_alloc_est has gone
- * to zero, there's no point in tracking smaller and smaller values of
+ * kernel warnings on some platforms. Once upcoming_alloc_est has gone to
+ * zero, there's no point in tracking smaller and smaller values of
* smoothed_alloc, so just reset it to exactly zero to avoid this
* syndrome. It will pop back up as soon as recent_alloc increases.
*/
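In code form the guard amounts to roughly the following; the names follow BgBufferSync, though the real function also weights the smoothing by elapsed clock-sweep progress:

	smoothed_alloc += ((float) recent_alloc - smoothed_alloc) /
		smoothing_samples;
	upcoming_alloc_est = (int) (smoothed_alloc * bgwriter_lru_multiplier);

	if (upcoming_alloc_est == 0)
		smoothed_alloc = 0;		/* stop decaying toward denormal values */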
@@ -2006,11 +2006,11 @@ BufferIsPermanent(Buffer buffer)
Assert(BufferIsPinned(buffer));
/*
- * BM_PERMANENT can't be changed while we hold a pin on the buffer, so
- * we need not bother with the buffer header spinlock. Even if someone
- * else changes the buffer header flags while we're doing this, we assume
- * that changing an aligned 2-byte BufFlags value is atomic, so we'll read
- * the old value or the new value, but not random garbage.
+ * BM_PERMANENT can't be changed while we hold a pin on the buffer, so we
+ * need not bother with the buffer header spinlock. Even if someone else
+ * changes the buffer header flags while we're doing this, we assume that
+ * changing an aligned 2-byte BufFlags value is atomic, so we'll read the
+ * old value or the new value, but not random garbage.
*/
bufHdr = &BufferDescriptors[buffer - 1];
return (bufHdr->flags & BM_PERMANENT) != 0;
@@ -2461,10 +2461,10 @@ SetBufferCommitInfoNeedsSave(Buffer buffer)
* tuples. So, be as quick as we can if the buffer is already dirty. We
* do this by not acquiring spinlock if it looks like the status bits are
* already set. Since we make this test unlocked, there's a chance we might
- * fail to notice that the flags have just been cleared, and failed to reset
- * them, due to memory-ordering issues. But since this function is only
- * intended to be used in cases where failing to write out the data would
- * be harmless anyway, it doesn't really matter.
+ * fail to notice that the flags have just been cleared, and fail to
+ * reset them, due to memory-ordering issues. But since this function is
+ * only intended to be used in cases where failing to write out the data
+ * would be harmless anyway, it doesn't really matter.
*/
if ((bufHdr->flags & (BM_DIRTY | BM_JUST_DIRTIED)) !=
(BM_DIRTY | BM_JUST_DIRTIED))
diff --git a/src/backend/storage/buffer/freelist.c b/src/backend/storage/buffer/freelist.c
index 76a4beca69..c92774798c 100644
--- a/src/backend/storage/buffer/freelist.c
+++ b/src/backend/storage/buffer/freelist.c
@@ -294,7 +294,7 @@ StrategySyncStart(uint32 *complete_passes, uint32 *num_buf_alloc)
* StrategyNotifyBgWriter -- set or clear allocation notification latch
*
* If bgwriterLatch isn't NULL, the next invocation of StrategyGetBuffer will
- * set that latch. Pass NULL to clear the pending notification before it
+ * set that latch. Pass NULL to clear the pending notification before it
* happens. This feature is used by the bgwriter process to wake itself up
* from hibernation, and is not meant for anybody else to use.
*/
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index fa376ae4bb..f79f4c6a36 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -164,7 +164,7 @@ static bool have_pending_fd_cleanup = false;
/*
* Tracks the total size of all temporary files. Note: when temp_file_limit
* is being enforced, this cannot overflow since the limit cannot be more
- * than INT_MAX kilobytes. When not enforcing, it could theoretically
+ * than INT_MAX kilobytes. When not enforcing, it could theoretically
* overflow, but we don't care.
*/
static uint64 temporary_files_size = 0;
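(Concretely: with the limit enforced the total is bounded by INT_MAX kilobytes, i.e. about 2^31 kB * 2^10 B/kB = 2^41 bytes, roughly two terabytes, far below the 2^64 range of a uint64; only the unenforced case could wrap, and that is explicitly tolerated.)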
@@ -685,7 +685,7 @@ LruInsert(File file)
/* seek to the right position */
if (vfdP->seekPos != (off_t) 0)
{
- off_t returnValue PG_USED_FOR_ASSERTS_ONLY;
+ off_t returnValue PG_USED_FOR_ASSERTS_ONLY;
returnValue = lseek(vfdP->fd, vfdP->seekPos, SEEK_SET);
Assert(returnValue != (off_t) -1);
@@ -1046,7 +1046,7 @@ OpenTemporaryFileInTablespace(Oid tblspcOid, bool rejectError)
void
FileSetTransient(File file)
{
- Vfd *vfdP;
+ Vfd *vfdP;
Assert(FileIsValid(file));
@@ -1255,7 +1255,7 @@ FileWrite(File file, char *buffer, int amount)
/*
* If enforcing temp_file_limit and it's a temp file, check to see if the
- * write would overrun temp_file_limit, and throw error if so. Note: it's
+ * write would overrun temp_file_limit, and throw error if so. Note: it's
* really a modularity violation to throw error here; we should set errno
* and return -1. However, there's no way to report a suitable error
* message if we do that. All current callers would just throw error
@@ -1263,18 +1263,18 @@ FileWrite(File file, char *buffer, int amount)
*/
if (temp_file_limit >= 0 && (VfdCache[file].fdstate & FD_TEMPORARY))
{
- off_t newPos = VfdCache[file].seekPos + amount;
+ off_t newPos = VfdCache[file].seekPos + amount;
if (newPos > VfdCache[file].fileSize)
{
- uint64 newTotal = temporary_files_size;
+ uint64 newTotal = temporary_files_size;
newTotal += newPos - VfdCache[file].fileSize;
if (newTotal > (uint64) temp_file_limit * (uint64) 1024)
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
- errmsg("temporary file size exceeds temp_file_limit (%dkB)",
- temp_file_limit)));
+ errmsg("temporary file size exceeds temp_file_limit (%dkB)",
+ temp_file_limit)));
}
}
@@ -1293,7 +1293,7 @@ retry:
/* maintain fileSize and temporary_files_size if it's a temp file */
if (VfdCache[file].fdstate & FD_TEMPORARY)
{
- off_t newPos = VfdCache[file].seekPos;
+ off_t newPos = VfdCache[file].seekPos;
if (newPos > VfdCache[file].fileSize)
{
@@ -1915,8 +1915,8 @@ CleanupTempFiles(bool isProcExit)
/*
* If we're in the process of exiting a backend process,
* close all temporary files. Otherwise, only close
- * temporary files local to the current transaction.
- * They should be closed by the ResourceOwner mechanism
+ * temporary files local to the current transaction. They
+ * should be closed by the ResourceOwner mechanism
* already, so this is just a debugging cross-check.
*/
if (isProcExit)
@@ -1924,7 +1924,7 @@ CleanupTempFiles(bool isProcExit)
else if (fdstate & FD_XACT_TEMPORARY)
{
elog(WARNING,
- "temporary file %s not closed at end-of-transaction",
+ "temporary file %s not closed at end-of-transaction",
VfdCache[i].fileName);
FileClose(i);
}
diff --git a/src/backend/storage/ipc/pmsignal.c b/src/backend/storage/ipc/pmsignal.c
index 83842d6494..8ad7a97eeb 100644
--- a/src/backend/storage/ipc/pmsignal.c
+++ b/src/backend/storage/ipc/pmsignal.c
@@ -272,8 +272,8 @@ bool
PostmasterIsAlive(void)
{
#ifndef WIN32
- char c;
- ssize_t rc;
+ char c;
+ ssize_t rc;
rc = read(postmaster_alive_fds[POSTMASTER_FD_WATCH], &c, 1);
if (rc < 0)
@@ -287,7 +287,6 @@ PostmasterIsAlive(void)
elog(FATAL, "unexpected data in postmaster death monitoring pipe");
return false;
-
#else /* WIN32 */
return (WaitForSingleObject(PostmasterHandle, 0) == WAIT_TIMEOUT);
#endif /* WIN32 */
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index d986418a10..a3b0540aea 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -82,10 +82,10 @@ typedef struct ProcArrayStruct
TransactionId lastOverflowedXid;
/*
- * We declare pgprocnos[] as 1 entry because C wants a fixed-size array, but
- * actually it is maxProcs entries long.
+ * We declare pgprocnos[] as 1 entry because C wants a fixed-size array,
+ * but actually it is maxProcs entries long.
*/
- int pgprocnos[1]; /* VARIABLE LENGTH ARRAY */
+ int pgprocnos[1]; /* VARIABLE LENGTH ARRAY */
} ProcArrayStruct;
static ProcArrayStruct *procArray;
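The pgprocnos[1] declaration above is the pre-C99 "struct hack" for a flexible array member: the struct is over-allocated so the array really has maxProcs slots. A sketch with plain heap allocation (the real array lives in shared memory):

	#include <stddef.h>
	#include <stdlib.h>

	static ProcArrayStruct *
	alloc_procarray_sketch(int maxProcs)
	{
		ProcArrayStruct *a = (ProcArrayStruct *)
			malloc(offsetof(ProcArrayStruct, pgprocnos) +
				   maxProcs * sizeof(int));	/* room for maxProcs entries */

		if (a != NULL)
			a->maxProcs = maxProcs;	/* field exists in the real struct */
		return a;
	}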
@@ -282,22 +282,22 @@ ProcArrayAdd(PGPROC *proc)
* locality of references much better. This is useful while traversing the
* ProcArray because there is an increased likelihood of finding the next
* PGPROC structure in the cache.
- *
+ *
* Since adding/removing a proc occurs much less often than accesses to
* the ProcArray itself, the overhead should be marginal.
*/
for (index = 0; index < arrayP->numProcs; index++)
{
/*
- * If we are the first PGPROC or if we have found our right position in
- * the array, break
+ * If we are the first PGPROC or if we have found our right position
+ * in the array, break
*/
if ((arrayP->pgprocnos[index] == -1) || (arrayP->pgprocnos[index] > proc->pgprocno))
break;
}
memmove(&arrayP->pgprocnos[index + 1], &arrayP->pgprocnos[index],
- (arrayP->numProcs - index) * sizeof (int));
+ (arrayP->numProcs - index) * sizeof(int));
arrayP->pgprocnos[index] = proc->pgprocno;
arrayP->numProcs++;
@@ -349,8 +349,8 @@ ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
{
/* Keep the PGPROC array sorted. See notes above */
memmove(&arrayP->pgprocnos[index], &arrayP->pgprocnos[index + 1],
- (arrayP->numProcs - index - 1) * sizeof (int));
- arrayP->pgprocnos[arrayP->numProcs - 1] = -1; /* for debugging */
+ (arrayP->numProcs - index - 1) * sizeof(int));
+ arrayP->pgprocnos[arrayP->numProcs - 1] = -1; /* for debugging */
arrayP->numProcs--;
LWLockRelease(ProcArrayLock);
return;
@@ -380,7 +380,7 @@ ProcArrayRemove(PGPROC *proc, TransactionId latestXid)
void
ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
{
- PGXACT *pgxact = &allPgXact[proc->pgprocno];
+ PGXACT *pgxact = &allPgXact[proc->pgprocno];
if (TransactionIdIsValid(latestXid))
{
@@ -399,7 +399,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- pgxact->inCommit = false; /* be sure this is cleared in abort */
+ pgxact->inCommit = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
/* Clear the subtransaction-XID cache too while holding the lock */
@@ -426,7 +426,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
pgxact->xmin = InvalidTransactionId;
/* must be cleared with xid/xmin: */
pgxact->vacuumFlags &= ~PROC_VACUUM_STATE_MASK;
- pgxact->inCommit = false; /* be sure this is cleared in abort */
+ pgxact->inCommit = false; /* be sure this is cleared in abort */
proc->recoveryConflictPending = false;
Assert(pgxact->nxids == 0);
@@ -446,7 +446,7 @@ ProcArrayEndTransaction(PGPROC *proc, TransactionId latestXid)
void
ProcArrayClearTransaction(PGPROC *proc)
{
- PGXACT *pgxact = &allPgXact[proc->pgprocno];
+ PGXACT *pgxact = &allPgXact[proc->pgprocno];
/*
* We can skip locking ProcArrayLock here, because this action does not
@@ -511,9 +511,9 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
/*
* If our initial RunningTransactionsData had an overflowed snapshot then
* we knew we were missing some subxids from our snapshot. If we continue
- * to see overflowed snapshots then we might never be able to start up,
- * so we make another test to see if our snapshot is now valid. We know
- * that the missing subxids are equal to or earlier than nextXid. After we
+ * to see overflowed snapshots then we might never be able to start up, so
+ * we make another test to see if our snapshot is now valid. We know that
+ * the missing subxids are equal to or earlier than nextXid. After we
* initialise we continue to apply changes during recovery, so once the
* oldestRunningXid is later than the nextXid from the initial snapshot we
* know that we no longer have missing information and can mark the
@@ -522,8 +522,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
if (standbyState == STANDBY_SNAPSHOT_PENDING)
{
/*
- * If the snapshot isn't overflowed or if its empty we can
- * reset our pending state and use this snapshot instead.
+ * If the snapshot isn't overflowed or if it's empty we can reset our
+ * pending state and use this snapshot instead.
*/
if (!running->subxid_overflow || running->xcnt == 0)
{
@@ -545,8 +545,8 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
}
else
elog(trace_recovery(DEBUG1),
- "recovery snapshot waiting for non-overflowed snapshot or "
- "until oldest active xid on standby is at least %u (now %u)",
+ "recovery snapshot waiting for non-overflowed snapshot or "
+ "until oldest active xid on standby is at least %u (now %u)",
standbySnapshotPendingXmin,
running->oldestRunningXid);
return;
@@ -673,7 +673,7 @@ ProcArrayApplyRecoveryInfo(RunningTransactions running)
* ShmemVariableCache->nextXid must be beyond any observed xid.
*
* We don't expect anyone else to modify nextXid, hence we don't need to
- * hold a lock while examining it. We still acquire the lock to modify
+ * hold a lock while examining it. We still acquire the lock to modify
* it, though.
*/
nextXid = latestObservedXid;
@@ -861,10 +861,10 @@ TransactionIdIsInProgress(TransactionId xid)
/* No shortcuts, gotta grovel through the array */
for (i = 0; i < arrayP->numProcs; i++)
{
- int pgprocno = arrayP->pgprocnos[i];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
- TransactionId pxid;
+ int pgprocno = arrayP->pgprocnos[i];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ TransactionId pxid;
/* Ignore my own proc --- dealt with it above */
if (proc == MyProc)
@@ -1017,10 +1017,10 @@ TransactionIdIsActive(TransactionId xid)
for (i = 0; i < arrayP->numProcs; i++)
{
- int pgprocno = arrayP->pgprocnos[i];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
- TransactionId pxid;
+ int pgprocno = arrayP->pgprocnos[i];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ TransactionId pxid;
/* Fetch xid just once - see GetNewTransactionId */
pxid = pgxact->xid;
@@ -1115,9 +1115,9 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
if (ignoreVacuum && (pgxact->vacuumFlags & PROC_IN_VACUUM))
continue;
@@ -1141,7 +1141,7 @@ GetOldestXmin(bool allDbs, bool ignoreVacuum)
* have an Xmin but not (yet) an Xid; conversely, if it has an
* Xid, that could determine some not-yet-set Xmin.
*/
- xid = pgxact->xmin; /* Fetch just once */
+ xid = pgxact->xmin; /* Fetch just once */
if (TransactionIdIsNormal(xid) &&
TransactionIdPrecedes(xid, result))
result = xid;
@@ -1318,7 +1318,7 @@ GetSnapshotData(Snapshot snapshot)
if (!snapshot->takenDuringRecovery)
{
- int *pgprocnos = arrayP->pgprocnos;
+ int *pgprocnos = arrayP->pgprocnos;
int numProcs;
/*
@@ -1329,32 +1329,32 @@ GetSnapshotData(Snapshot snapshot)
numProcs = arrayP->numProcs;
for (index = 0; index < numProcs; index++)
{
- int pgprocno = pgprocnos[index];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
- TransactionId xid;
+ int pgprocno = pgprocnos[index];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ TransactionId xid;
/* Ignore procs running LAZY VACUUM */
if (pgxact->vacuumFlags & PROC_IN_VACUUM)
continue;
/* Update globalxmin to be the smallest valid xmin */
- xid = pgxact->xmin; /* fetch just once */
+ xid = pgxact->xmin; /* fetch just once */
if (TransactionIdIsNormal(xid) &&
NormalTransactionIdPrecedes(xid, globalxmin))
- globalxmin = xid;
+ globalxmin = xid;
/* Fetch xid just once - see GetNewTransactionId */
xid = pgxact->xid;
/*
- * If the transaction has no XID assigned, we can skip it; it won't
- * have sub-XIDs either. If the XID is >= xmax, we can also skip
- * it; such transactions will be treated as running anyway (and any
- * sub-XIDs will also be >= xmax).
+ * If the transaction has no XID assigned, we can skip it; it
+ * won't have sub-XIDs either. If the XID is >= xmax, we can also
+ * skip it; such transactions will be treated as running anyway
+ * (and any sub-XIDs will also be >= xmax).
*/
if (!TransactionIdIsNormal(xid)
|| !NormalTransactionIdPrecedes(xid, xmax))
- continue;
+ continue;
/*
* We don't include our own XIDs (if any) in the snapshot, but we
@@ -1394,6 +1394,7 @@ GetSnapshotData(Snapshot snapshot)
if (nxids > 0)
{
volatile PGPROC *proc = &allProcs[pgprocno];
+
memcpy(snapshot->subxip + subcount,
(void *) proc->subxids.xids,
nxids * sizeof(TransactionId));
@@ -1498,23 +1499,23 @@ ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
- TransactionId xid;
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ TransactionId xid;
/* Ignore procs running LAZY VACUUM */
if (pgxact->vacuumFlags & PROC_IN_VACUUM)
continue;
- xid = pgxact->xid; /* fetch just once */
+ xid = pgxact->xid; /* fetch just once */
if (xid != sourcexid)
continue;
/*
- * We check the transaction's database ID for paranoia's sake: if
- * it's in another DB then its xmin does not cover us. Caller should
- * have detected this already, so we just treat any funny cases as
+ * We check the transaction's database ID for paranoia's sake: if it's
+ * in another DB then its xmin does not cover us. Caller should have
+ * detected this already, so we just treat any funny cases as
* "transaction not found".
*/
if (proc->databaseId != MyDatabaseId)
@@ -1523,7 +1524,7 @@ ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
/*
* Likewise, let's just make real sure its xmin does cover us.
*/
- xid = pgxact->xmin; /* fetch just once */
+ xid = pgxact->xmin; /* fetch just once */
if (!TransactionIdIsNormal(xid) ||
!TransactionIdPrecedesOrEquals(xid, xmin))
continue;
@@ -1531,8 +1532,8 @@ ProcArrayInstallImportedXmin(TransactionId xmin, TransactionId sourcexid)
/*
* We're good. Install the new xmin. As in GetSnapshotData, set
* TransactionXmin too. (Note that because snapmgr.c called
- * GetSnapshotData first, we'll be overwriting a valid xmin here,
- * so we don't check that.)
+ * GetSnapshotData first, we'll be overwriting a valid xmin here, so
+ * we don't check that.)
*/
MyPgXact->xmin = TransactionXmin = xmin;
@@ -1626,7 +1627,7 @@ GetRunningTransactionData(void)
*/
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
+ int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId xid;
@@ -1726,7 +1727,7 @@ GetOldestActiveTransactionId(void)
*/
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
+ int pgprocno = arrayP->pgprocnos[index];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId xid;
@@ -1783,7 +1784,7 @@ GetTransactionsInCommit(TransactionId **xids_p)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
+ int pgprocno = arrayP->pgprocnos[index];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
TransactionId pxid;
@@ -1820,9 +1821,9 @@ HaveTransactionsInCommit(TransactionId *xids, int nxids)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
- TransactionId pxid;
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ TransactionId pxid;
/* Fetch xid just once - see GetNewTransactionId */
pxid = pgxact->xid;
@@ -1911,9 +1912,9 @@ BackendXidGetPid(TransactionId xid)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
if (pgxact->xid == xid)
{
@@ -1981,9 +1982,9 @@ GetCurrentVirtualXIDs(TransactionId limitXmin, bool excludeXmin0,
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
if (proc == MyProc)
continue;
@@ -2078,9 +2079,9 @@ GetConflictingVirtualXIDs(TransactionId limitXmin, Oid dbOid)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
/* Exclude prepared transactions */
if (proc->pid == 0)
@@ -2134,9 +2135,9 @@ CancelVirtualTransaction(VirtualTransactionId vxid, ProcSignalReason sigmode)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGPROC *proc = &allProcs[pgprocno];
- VirtualTransactionId procvxid;
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ VirtualTransactionId procvxid;
GET_VXID_FROM_PGPROC(procvxid, *proc);
@@ -2189,9 +2190,9 @@ MinimumActiveBackends(int min)
*/
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
- volatile PGPROC *proc = &allProcs[pgprocno];
- volatile PGXACT *pgxact = &allPgXact[pgprocno];
+ int pgprocno = arrayP->pgprocnos[index];
+ volatile PGPROC *proc = &allProcs[pgprocno];
+ volatile PGXACT *pgxact = &allPgXact[pgprocno];
/*
* Since we're not holding a lock, need to check that the pointer is
@@ -2237,7 +2238,7 @@ CountDBBackends(Oid databaseid)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
+ int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
if (proc->pid == 0)
@@ -2267,7 +2268,7 @@ CancelDBBackends(Oid databaseid, ProcSignalReason sigmode, bool conflictPending)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
+ int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
if (databaseid == InvalidOid || proc->databaseId == databaseid)
@@ -2306,7 +2307,7 @@ CountUserBackends(Oid roleid)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
+ int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
if (proc->pid == 0)
@@ -2367,7 +2368,7 @@ CountOtherDBBackends(Oid databaseId, int *nbackends, int *nprepared)
for (index = 0; index < arrayP->numProcs; index++)
{
- int pgprocno = arrayP->pgprocnos[index];
+ int pgprocno = arrayP->pgprocnos[index];
volatile PGPROC *proc = &allProcs[pgprocno];
volatile PGXACT *pgxact = &allPgXact[pgprocno];
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index cd15a2e9cd..6d070030b7 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -22,7 +22,7 @@
#include "utils/inval.h"
-uint64 SharedInvalidMessageCounter;
+uint64 SharedInvalidMessageCounter;
/*
diff --git a/src/backend/storage/ipc/sinvaladt.c b/src/backend/storage/ipc/sinvaladt.c
index bb727af8b2..ec0153e115 100644
--- a/src/backend/storage/ipc/sinvaladt.c
+++ b/src/backend/storage/ipc/sinvaladt.c
@@ -467,15 +467,16 @@ SIInsertDataEntries(const SharedInvalidationMessage *data, int n)
}
/*
- * Now that the maxMsgNum change is globally visible, we give
- * everyone a swift kick to make sure they read the newly added
- * messages. Releasing SInvalWriteLock will enforce a full memory
- * barrier, so these (unlocked) changes will be committed to memory
- * before we exit the function.
+ * Now that the maxMsgNum change is globally visible, we give everyone
+ * a swift kick to make sure they read the newly added messages.
+ * Releasing SInvalWriteLock will enforce a full memory barrier, so
+ * these (unlocked) changes will be committed to memory before we exit
+ * the function.
*/
for (i = 0; i < segP->lastBackend; i++)
{
ProcState *stateP = &segP->procState[i];
+
stateP->hasMessages = true;
}
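
The rewrapped comment in this hunk is the heart of sinval's wakeup protocol: the writer advances maxMsgNum while holding SInvalWriteLock, then sets every backend's hasMessages flag without any per-flag locking, counting on the lock release to act as the full memory barrier. A minimal sketch of that write-then-kick shape in C11 atomics, with an explicit fence standing in for the lock release; the ring/reader_state names are hypothetical, not the sinvaladt.c structures:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NREADERS 64                 /* illustrative; not sinval's sizing */

    typedef struct { atomic_bool has_messages; } reader_state;

    typedef struct
    {
        atomic_int   max_msg_num;       /* stands in for segP->maxMsgNum */
        reader_state readers[NREADERS];
        int          nreaders;
    } ring;

    static void
    publish(ring *r, int new_max)
    {
        atomic_store_explicit(&r->max_msg_num, new_max, memory_order_relaxed);

        /* Stands in for releasing SInvalWriteLock: a full barrier that
         * commits the counter update before any reader can see a kick. */
        atomic_thread_fence(memory_order_release);

        for (int i = 0; i < r->nreaders; i++)
            atomic_store_explicit(&r->readers[i].has_messages, true,
                                  memory_order_relaxed);
    }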
@@ -524,12 +525,12 @@ SIGetDataEntries(SharedInvalidationMessage *data, int datasize)
/*
* Before starting to take locks, do a quick, unlocked test to see whether
- * there can possibly be anything to read. On a multiprocessor system,
- * it's possible that this load could migrate backwards and occur before we
- * actually enter this function, so we might miss a sinval message that
- * was just added by some other processor. But they can't migrate
- * backwards over a preceding lock acquisition, so it should be OK. If
- * we haven't acquired a lock preventing against further relevant
+ * there can possibly be anything to read. On a multiprocessor system,
+ * it's possible that this load could migrate backwards and occur before
+ * we actually enter this function, so we might miss a sinval message that
+ * was just added by some other processor. But they can't migrate
+ * backwards over a preceding lock acquisition, so it should be OK. If we
+ * haven't acquired a lock preventing against further relevant
* invalidations, any such occurrence is not much different than if the
* invalidation had arrived slightly later in the first place.
*/
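
The SIGetDataEntries hunk above documents the matching read side: an unlocked peek at hasMessages that may float backwards in time, bounded by the fact that loads cannot migrate back over a prior lock acquisition. Continuing the hypothetical ring sketch from the previous hunk (again, not the real sinvaladt.c code, which rechecks under SInvalReadLock), the reader's cheap pre-check pairs an acquire operation with the writer's release fence:

    static bool
    maybe_read(ring *r, int me, int *seen_up_to)
    {
        /* Cheap unlocked test: no barrier, may be slightly stale, and that
         * is fine -- a message missed here was published "just now" and
         * will be seen on the next call, per the comment above. */
        if (!atomic_load_explicit(&r->readers[me].has_messages,
                                  memory_order_relaxed))
            return false;

        /* Consume the kick.  The acquire exchange pairs with the release
         * fence in publish(): observing a kick guarantees we also observe
         * the counter value it advertised, and a kick landing after the
         * exchange leaves the flag set for next time. */
        if (!atomic_exchange_explicit(&r->readers[me].has_messages, false,
                                      memory_order_acquire))
            return false;

        *seen_up_to = atomic_load_explicit(&r->max_msg_num,
                                           memory_order_relaxed);
        return true;
    }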
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 3a6831cab0..8863a5c9a7 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -467,7 +467,7 @@ SendRecoveryConflictWithBufferPin(ProcSignalReason reason)
* determine whether an actual deadlock condition is present: the lock we
* need to wait for might be unrelated to any held by the Startup process.
* Sooner or later, this mechanism should get ripped out in favor of somehow
- * accounting for buffer locks in DeadLockCheck(). However, errors here
+ * accounting for buffer locks in DeadLockCheck(). However, errors here
* seem to be very low-probability in practice, so for now it's not worth
* the trouble.
*/
@@ -658,7 +658,7 @@ StandbyReleaseOldLocks(int nxids, TransactionId *xids)
for (cell = list_head(RecoveryLockList); cell; cell = next)
{
xl_standby_lock *lock = (xl_standby_lock *) lfirst(cell);
- bool remove = false;
+ bool remove = false;
next = lnext(cell);
@@ -668,8 +668,8 @@ StandbyReleaseOldLocks(int nxids, TransactionId *xids)
remove = false;
else
{
- int i;
- bool found = false;
+ int i;
+ bool found = false;
for (i = 0; i < nxids; i++)
{
@@ -1009,8 +1009,8 @@ LogAccessExclusiveLockPrepare(void)
* RecordTransactionAbort() do not optimise away the transaction
* completion record which recovery relies upon to release locks. It's a
* hack, but for a corner case not worth adding code for into the main
- * commit path. Second, we must assign an xid before the lock is
- * recorded in shared memory, otherwise a concurrently executing
+ * commit path. Second, we must assign an xid before the lock is recorded
+ * in shared memory, otherwise a concurrently executing
* GetRunningTransactionLocks() might see a lock associated with an
* InvalidTransactionId which we later assert cannot happen.
*/
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 9717075354..cfe3954637 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -164,7 +164,7 @@ typedef struct TwoPhaseLockRecord
* our locks to the primary lock table, but it can never be lower than the
* real value, since only we can acquire locks on our own behalf.
*/
-static int FastPathLocalUseCount = 0;
+static int FastPathLocalUseCount = 0;
/* Macros for manipulating proc->fpLockBits */
#define FAST_PATH_BITS_PER_SLOT 3
@@ -186,7 +186,7 @@ static int FastPathLocalUseCount = 0;
/*
* The fast-path lock mechanism is concerned only with relation locks on
- * unshared relations by backends bound to a database. The fast-path
+ * unshared relations by backends bound to a database. The fast-path
* mechanism exists mostly to accelerate acquisition and release of locks
* that rarely conflict. Because ShareUpdateExclusiveLock is
* self-conflicting, it can't use the fast-path mechanism; but it also does
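
The reflowed paragraph defines the fast path's scope precisely: relation locks, on unshared relations, taken by backends bound to a database, in the weak modes that rarely conflict. A compressed sketch of that eligibility test follows; the real gate is the EligibleForRelationFastPath() macro in lock.c, which this approximates rather than reproduces:

    #include "postgres.h"
    #include "miscadmin.h"          /* MyDatabaseId */
    #include "storage/lock.h"       /* LOCKTAG, LOCKMODE, lock mode values */

    /* Hedged approximation of EligibleForRelationFastPath(). */
    static bool
    fast_path_eligible(const LOCKTAG *locktag, LOCKMODE lockmode)
    {
        return locktag->locktag_type == LOCKTAG_RELATION &&  /* relation locks only */
               locktag->locktag_field1 == MyDatabaseId &&    /* unshared: our own DB */
               MyDatabaseId != InvalidOid &&                 /* bound to a database */
               lockmode < ShareUpdateExclusiveLock;          /* weak, rarely-conflicting */
    }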
@@ -207,7 +207,7 @@ static int FastPathLocalUseCount = 0;
static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
static bool FastPathTransferRelationLocks(LockMethod lockMethodTable,
- const LOCKTAG *locktag, uint32 hashcode);
+ const LOCKTAG *locktag, uint32 hashcode);
static PROCLOCK *FastPathGetRelationLockEntry(LOCALLOCK *locallock);
static void VirtualXactLockTableCleanup(void);
@@ -234,8 +234,8 @@ static void VirtualXactLockTableCleanup(void);
typedef struct
{
- slock_t mutex;
- uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
+ slock_t mutex;
+ uint32 count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
} FastPathStrongRelationLockData;
FastPathStrongRelationLockData *FastPathStrongRelationLocks;
@@ -339,7 +339,7 @@ PROCLOCK_PRINT(const char *where, const PROCLOCK *proclockP)
static uint32 proclock_hash(const void *key, Size keysize);
static void RemoveLocalLock(LOCALLOCK *locallock);
static PROCLOCK *SetupLockInTable(LockMethod lockMethodTable, PGPROC *proc,
- const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
+ const LOCKTAG *locktag, uint32 hashcode, LOCKMODE lockmode);
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
static void BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode);
static void FinishStrongLockAcquire(void);
@@ -425,7 +425,7 @@ InitLocks(void)
*/
FastPathStrongRelationLocks =
ShmemInitStruct("Fast Path Strong Relation Lock Data",
- sizeof(FastPathStrongRelationLockData), &found);
+ sizeof(FastPathStrongRelationLockData), &found);
if (!found)
SpinLockInit(&FastPathStrongRelationLocks->mutex);
@@ -713,12 +713,12 @@ LockAcquireExtended(const LOCKTAG *locktag,
if (EligibleForRelationFastPath(locktag, lockmode)
&& FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
{
- uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
- bool acquired;
+ uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
+ bool acquired;
/*
- * LWLockAcquire acts as a memory sequencing point, so it's safe
- * to assume that any strong locker whose increment to
+ * LWLockAcquire acts as a memory sequencing point, so it's safe to
+ * assume that any strong locker whose increment to
* FastPathStrongRelationLocks->counts becomes visible after we test
* it has yet to begin to transfer fast-path locks.
*/
@@ -744,7 +744,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
*/
if (ConflictsWithRelationFastPath(locktag, lockmode))
{
- uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
+ uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
BeginStrongLockAcquire(locallock, fasthashcode);
if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
@@ -762,9 +762,9 @@ LockAcquireExtended(const LOCKTAG *locktag,
}
/*
- * We didn't find the lock in our LOCALLOCK table, and we didn't manage
- * to take it via the fast-path, either, so we've got to mess with the
- * shared lock table.
+ * We didn't find the lock in our LOCALLOCK table, and we didn't manage to
+ * take it via the fast-path, either, so we've got to mess with the shared
+ * lock table.
*/
partitionLock = LockHashPartitionLock(hashcode);
@@ -1102,7 +1102,8 @@ RemoveLocalLock(LOCALLOCK *locallock)
locallock->lockOwners = NULL;
if (locallock->holdsStrongLockCount)
{
- uint32 fasthashcode;
+ uint32 fasthashcode;
+
fasthashcode = FastPathStrongLockHashPartition(locallock->hashcode);
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
@@ -1367,9 +1368,9 @@ BeginStrongLockAcquire(LOCALLOCK *locallock, uint32 fasthashcode)
Assert(locallock->holdsStrongLockCount == FALSE);
/*
- * Adding to a memory location is not atomic, so we take a
- * spinlock to ensure we don't collide with someone else trying
- * to bump the count at the same time.
+ * Adding to a memory location is not atomic, so we take a spinlock to
+ * ensure we don't collide with someone else trying to bump the count at
+ * the same time.
*
* XXX: It might be worth considering using an atomic fetch-and-add
* instruction here, on architectures where that is supported.
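
The XXX in this comment suggests replacing the spinlocked increment with an atomic fetch-and-add. The tree at this point predates PostgreSQL's pg_atomic_* API, but in C11 terms the suggestion is a one-liner; in this sketch the atomic counter array is the assumption (the real FastPathStrongRelationLockData, shown earlier in this diff, pairs an slock_t with plain uint32 counters):

    #include <stdatomic.h>
    #include <stdint.h>

    #define FAST_PATH_STRONG_LOCK_HASH_PARTITIONS 1024   /* illustrative size */

    typedef struct
    {
        atomic_uint count[FAST_PATH_STRONG_LOCK_HASH_PARTITIONS];
    } FastPathStrongLocksAtomic;

    static void
    begin_strong_lock_acquire(FastPathStrongLocksAtomic *d, uint32_t fasthashcode)
    {
        /* One indivisible read-modify-write replaces the
         * SpinLockAcquire / count++ / SpinLockRelease sequence. */
        atomic_fetch_add_explicit(&d->count[fasthashcode], 1,
                                  memory_order_seq_cst);
    }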
@@ -1399,9 +1400,9 @@ FinishStrongLockAcquire(void)
void
AbortStrongLockAcquire(void)
{
- uint32 fasthashcode;
+ uint32 fasthashcode;
LOCALLOCK *locallock = StrongLockInProgress;
-
+
if (locallock == NULL)
return;
@@ -1699,11 +1700,11 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
if (EligibleForRelationFastPath(locktag, lockmode)
&& FastPathLocalUseCount > 0)
{
- bool released;
+ bool released;
/*
- * We might not find the lock here, even if we originally entered
- * it here. Another backend may have moved it to the main table.
+ * We might not find the lock here, even if we originally entered it
+ * here. Another backend may have moved it to the main table.
*/
LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
released = FastPathUnGrantRelationLock(locktag->locktag_field2,
@@ -1816,8 +1817,8 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
#endif
/*
- * Get rid of our fast-path VXID lock, if appropriate. Note that this
- * is the only way that the lock we hold on our own VXID can ever get
+ * Get rid of our fast-path VXID lock, if appropriate. Note that this is
+ * the only way that the lock we hold on our own VXID can ever get
* released: it is always and only released when a toplevel transaction
* ends.
*/
@@ -1898,8 +1899,8 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* If we don't currently hold the LWLock that protects our
- * fast-path data structures, we must acquire it before
- * attempting to release the lock via the fast-path.
+ * fast-path data structures, we must acquire it before attempting
+ * to release the lock via the fast-path.
*/
if (!have_fast_path_lwlock)
{
@@ -1917,7 +1918,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Our lock, originally taken via the fast path, has been
- * transferred to the main lock table. That's going to require
+ * transferred to the main lock table. That's going to require
* some extra work, so release our fast-path lock before starting.
*/
LWLockRelease(MyProc->backendLock);
@@ -1926,7 +1927,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
/*
* Now dump the lock. We haven't got a pointer to the LOCK or
* PROCLOCK in this case, so we have to handle this a bit
- * differently than a normal lock release. Unfortunately, this
+ * differently than a normal lock release. Unfortunately, this
* requires an extra LWLock acquire-and-release cycle on the
* partitionLock, but hopefully it shouldn't happen often.
*/
@@ -2268,16 +2269,16 @@ FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode)
*/
static bool
FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag,
- uint32 hashcode)
+ uint32 hashcode)
{
- LWLockId partitionLock = LockHashPartitionLock(hashcode);
- Oid relid = locktag->locktag_field2;
- uint32 i;
+ LWLockId partitionLock = LockHashPartitionLock(hashcode);
+ Oid relid = locktag->locktag_field2;
+ uint32 i;
/*
- * Every PGPROC that can potentially hold a fast-path lock is present
- * in ProcGlobal->allProcs. Prepared transactions are not, but
- * any outstanding fast-path locks held by prepared transactions are
+ * Every PGPROC that can potentially hold a fast-path lock is present in
+ * ProcGlobal->allProcs. Prepared transactions are not, but any
+ * outstanding fast-path locks held by prepared transactions are
* transferred to the main lock table.
*/
for (i = 0; i < ProcGlobal->allProcCount; i++)
@@ -2288,19 +2289,19 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
LWLockAcquire(proc->backendLock, LW_EXCLUSIVE);
/*
- * If the target backend isn't referencing the same database as we are,
- * then we needn't examine the individual relation IDs at all; none of
- * them can be relevant.
+ * If the target backend isn't referencing the same database as we
+ * are, then we needn't examine the individual relation IDs at all;
+ * none of them can be relevant.
*
* proc->databaseId is set at backend startup time and never changes
* thereafter, so it might be safe to perform this test before
* acquiring proc->backendLock. In particular, it's certainly safe to
- * assume that if the target backend holds any fast-path locks, it must
- * have performed a memory-fencing operation (in particular, an LWLock
- * acquisition) since setting proc->databaseId. However, it's less
- * clear that our backend is certain to have performed a memory fencing
- * operation since the other backend set proc->databaseId. So for now,
- * we test it after acquiring the LWLock just to be safe.
+ * assume that if the target backend holds any fast-path locks, it
+ * must have performed a memory-fencing operation (in particular, an
+ * LWLock acquisition) since setting proc->databaseId. However, it's
+ * less clear that our backend is certain to have performed a memory
+ * fencing operation since the other backend set proc->databaseId. So
+ * for now, we test it after acquiring the LWLock just to be safe.
*/
if (proc->databaseId != MyDatabaseId)
{
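
The rewrapped paragraph above pins down the ordering argument: proc->databaseId is cheap to test, but it is only trusted after proc->backendLock has been acquired, because the LWLock acquisition supplies the memory fence. Condensed, the transfer loop the comment sits in has this shape (real identifiers, control flow abridged from the surrounding hunks):

    for (i = 0; i < ProcGlobal->allProcCount; i++)
    {
        PGPROC *proc = &ProcGlobal->allProcs[i];

        LWLockAcquire(proc->backendLock, LW_EXCLUSIVE);

        /* Tested only under the LWLock, per the comment above: the
         * acquisition is what makes databaseId safe to believe. */
        if (proc->databaseId != MyDatabaseId)
        {
            LWLockRelease(proc->backendLock);
            continue;
        }

        /* ... scan this backend's fast-path slots for relid and
         * transfer any matching locks to the main lock table ... */

        LWLockRelease(proc->backendLock);
    }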
@@ -2319,7 +2320,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
/* Find or create lock object. */
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
for (lockmode = FAST_PATH_LOCKNUMBER_OFFSET;
- lockmode < FAST_PATH_LOCKNUMBER_OFFSET+FAST_PATH_BITS_PER_SLOT;
+ lockmode < FAST_PATH_LOCKNUMBER_OFFSET + FAST_PATH_BITS_PER_SLOT;
++lockmode)
{
PROCLOCK *proclock;
@@ -2346,17 +2347,17 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
/*
* FastPathGetLockEntry
* Return the PROCLOCK for a lock originally taken via the fast-path,
- * transferring it to the primary lock table if necessary.
+ * transferring it to the primary lock table if necessary.
*/
static PROCLOCK *
FastPathGetRelationLockEntry(LOCALLOCK *locallock)
{
- LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
- LOCKTAG *locktag = &locallock->tag.lock;
- PROCLOCK *proclock = NULL;
- LWLockId partitionLock = LockHashPartitionLock(locallock->hashcode);
- Oid relid = locktag->locktag_field2;
- uint32 f;
+ LockMethod lockMethodTable = LockMethods[DEFAULT_LOCKMETHOD];
+ LOCKTAG *locktag = &locallock->tag.lock;
+ PROCLOCK *proclock = NULL;
+ LWLockId partitionLock = LockHashPartitionLock(locallock->hashcode);
+ Oid relid = locktag->locktag_field2;
+ uint32 f;
LWLockAcquire(MyProc->backendLock, LW_EXCLUSIVE);
@@ -2383,7 +2384,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase max_locks_per_transaction.")));
+ errhint("You might need to increase max_locks_per_transaction.")));
}
GrantLock(proclock->tag.myLock, proclock, lockmode);
FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
@@ -2397,7 +2398,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
if (proclock == NULL)
{
LOCK *lock;
- PROCLOCKTAG proclocktag;
+ PROCLOCKTAG proclocktag;
uint32 proclock_hashcode;
LWLockAcquire(partitionLock, LW_SHARED);
@@ -2495,15 +2496,15 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
{
int i;
Oid relid = locktag->locktag_field2;
- VirtualTransactionId vxid;
+ VirtualTransactionId vxid;
/*
* Iterate over relevant PGPROCs. Anything held by a prepared
* transaction will have been transferred to the primary lock table,
- * so we need not worry about those. This is all a bit fuzzy,
- * because new locks could be taken after we've visited a particular
- * partition, but the callers had better be prepared to deal with
- * that anyway, since the locks could equally well be taken between the
+ * so we need not worry about those. This is all a bit fuzzy, because
+ * new locks could be taken after we've visited a particular
+ * partition, but the callers had better be prepared to deal with that
+ * anyway, since the locks could equally well be taken between the
* time we return the value and the time the caller does something
* with it.
*/
@@ -2520,8 +2521,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
/*
* If the target backend isn't referencing the same database as we
- * are, then we needn't examine the individual relation IDs at all;
- * none of them can be relevant.
+ * are, then we needn't examine the individual relation IDs at
+ * all; none of them can be relevant.
*
* See FastPathTransferLocks() for discussion of why we do this
* test after acquiring the lock.
@@ -2545,9 +2546,8 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
lockmask <<= FAST_PATH_LOCKNUMBER_OFFSET;
/*
- * There can only be one entry per relation, so if we found
- * it and it doesn't conflict, we can skip the rest of the
- * slots.
+ * There can only be one entry per relation, so if we found it
+ * and it doesn't conflict, we can skip the rest of the slots.
*/
if ((lockmask & conflictMask) == 0)
break;
@@ -2621,7 +2621,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
*/
if (VirtualTransactionIdIsValid(vxid))
{
- int i;
+ int i;
/* Avoid duplicate entries. */
for (i = 0; i < fast_count; ++i)
@@ -2650,7 +2650,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
* responsibility to verify that this is a sane thing to do. (For example, it
* would be bad to release a lock here if there might still be a LOCALLOCK
* object with pointers to it.)
- *
+ *
* We currently use this in two situations: first, to release locks held by
* prepared transactions on commit (see lock_twophase_postcommit); and second,
* to release locks taken via the fast-path, transferred to the main hash
@@ -2725,13 +2725,14 @@ LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
LWLockRelease(partitionLock);
- /*
+ /*
* Decrement strong lock count. This logic is needed only for 2PC.
*/
if (decrement_strong_lock_count
&& ConflictsWithRelationFastPath(&lock->tag, lockmode))
{
- uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
+ uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
+
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
FastPathStrongRelationLocks->count[fasthashcode]--;
SpinLockRelease(&FastPathStrongRelationLocks->mutex);
@@ -2760,8 +2761,8 @@ AtPrepare_Locks(void)
/*
* For the most part, we don't need to touch shared memory for this ---
* all the necessary state information is in the locallock table.
- * Fast-path locks are an exception, however: we move any such locks
- * to the main table before allowing PREPARE TRANSACTION to succeed.
+ * Fast-path locks are an exception, however: we move any such locks to
+ * the main table before allowing PREPARE TRANSACTION to succeed.
*/
hash_seq_init(&status, LockMethodLocalHash);
@@ -2799,7 +2800,7 @@ AtPrepare_Locks(void)
continue;
/*
- * If we have both session- and transaction-level locks, fail. This
+ * If we have both session- and transaction-level locks, fail. This
* should never happen with regular locks, since we only take those at
* session level in some special operations like VACUUM. It's
* possible to hit this with advisory locks, though.
@@ -2808,7 +2809,7 @@ AtPrepare_Locks(void)
* the transactional hold to the prepared xact. However, that would
* require two PROCLOCK objects, and we cannot be sure that another
* PROCLOCK will be available when it comes time for PostPrepare_Locks
- * to do the deed. So for now, we error out while we can still do so
+ * to do the deed. So for now, we error out while we can still do so
* safely.
*/
if (haveSessionLock)
@@ -2819,7 +2820,8 @@ AtPrepare_Locks(void)
/*
* If the local lock was taken via the fast-path, we need to move it
* to the primary lock table, or just get a pointer to the existing
- * primary lock table entry if by chance it's already been transferred.
+ * primary lock table entry if by chance it's already been
+ * transferred.
*/
if (locallock->proclock == NULL)
{
@@ -2829,8 +2831,8 @@ AtPrepare_Locks(void)
/*
* Arrange to not release any strong lock count held by this lock
- * entry. We must retain the count until the prepared transaction
- * is committed or rolled back.
+ * entry. We must retain the count until the prepared transaction is
+ * committed or rolled back.
*/
locallock->holdsStrongLockCount = FALSE;
@@ -3114,12 +3116,12 @@ GetLockStatusData(void)
/*
* First, we iterate through the per-backend fast-path arrays, locking
- * them one at a time. This might produce an inconsistent picture of the
+ * them one at a time. This might produce an inconsistent picture of the
* system state, but taking all of those LWLocks at the same time seems
* impractical (in particular, note MAX_SIMUL_LWLOCKS). It shouldn't
* matter too much, because none of these locks can be involved in lock
- * conflicts anyway - anything that might must be present in the main
- * lock table.
+ * conflicts anyway - anything that might must be present in the main lock
+ * table.
*/
for (i = 0; i < ProcGlobal->allProcCount; ++i)
{
@@ -3130,7 +3132,7 @@ GetLockStatusData(void)
for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
{
- LockInstanceData *instance;
+ LockInstanceData *instance;
uint32 lockbits = FAST_PATH_GET_BITS(proc, f);
/* Skip unallocated slots. */
@@ -3159,8 +3161,8 @@ GetLockStatusData(void)
if (proc->fpVXIDLock)
{
- VirtualTransactionId vxid;
- LockInstanceData *instance;
+ VirtualTransactionId vxid;
+ LockInstanceData *instance;
if (el >= els)
{
@@ -3219,7 +3221,7 @@ GetLockStatusData(void)
{
PGPROC *proc = proclock->tag.myProc;
LOCK *lock = proclock->tag.myLock;
- LockInstanceData *instance = &data->locks[el];
+ LockInstanceData *instance = &data->locks[el];
memcpy(&instance->locktag, &lock->tag, sizeof(LOCKTAG));
instance->holdMask = proclock->holdMask;
@@ -3304,10 +3306,10 @@ GetRunningTransactionLocks(int *nlocks)
TransactionId xid = pgxact->xid;
/*
- * Don't record locks for transactions if we know they have already
- * issued their WAL record for commit but not yet released lock.
- * It is still possible that we see locks held by already complete
- * transactions, if they haven't yet zeroed their xids.
+ * Don't record locks for transactions if we know they have
+ * already issued their WAL record for commit but not yet released
+ * lock. It is still possible that we see locks held by already
+ * complete transactions, if they haven't yet zeroed their xids.
*/
if (!TransactionIdIsValid(xid))
continue;
@@ -3607,13 +3609,14 @@ lock_twophase_recover(TransactionId xid, uint16 info,
*/
GrantLock(lock, proclock, lockmode);
- /*
+ /*
* Bump strong lock count, to make sure any fast-path lock requests won't
* be granted without consulting the primary lock table.
*/
if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
{
- uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
+ uint32 fasthashcode = FastPathStrongLockHashPartition(hashcode);
+
SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
FastPathStrongRelationLocks->count[fasthashcode]++;
SpinLockRelease(&FastPathStrongRelationLocks->mutex);
@@ -3701,7 +3704,7 @@ lock_twophase_postabort(TransactionId xid, uint16 info,
* as MyProc->lxid, you might wonder if we really need both. The
* difference is that MyProc->lxid is set and cleared unlocked, and
* examined by procarray.c, while fpLocalTransactionId is protected by
- * backendLock and is used only by the locking subsystem. Doing it this
+ * backendLock and is used only by the locking subsystem. Doing it this
* way makes it easier to verify that there are no funny race conditions.
*
* We don't bother recording this lock in the local lock table, since it's
@@ -3734,8 +3737,8 @@ VirtualXactLockTableInsert(VirtualTransactionId vxid)
static void
VirtualXactLockTableCleanup()
{
- bool fastpath;
- LocalTransactionId lxid;
+ bool fastpath;
+ LocalTransactionId lxid;
Assert(MyProc->backendId != InvalidBackendId);
@@ -3757,8 +3760,8 @@ VirtualXactLockTableCleanup()
*/
if (!fastpath && LocalTransactionIdIsValid(lxid))
{
- VirtualTransactionId vxid;
- LOCKTAG locktag;
+ VirtualTransactionId vxid;
+ LOCKTAG locktag;
vxid.backendId = MyBackendId;
vxid.localTransactionId = lxid;
@@ -3766,7 +3769,7 @@ VirtualXactLockTableCleanup()
LockRefindAndRelease(LockMethods[DEFAULT_LOCKMETHOD], MyProc,
&locktag, ExclusiveLock, false);
- }
+ }
}
/*
@@ -3802,8 +3805,8 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
/*
* We must acquire this lock before checking the backendId and lxid
- * against the ones we're waiting for. The target backend will only
- * set or clear lxid while holding this lock.
+ * against the ones we're waiting for. The target backend will only set
+ * or clear lxid while holding this lock.
*/
LWLockAcquire(proc->backendLock, LW_EXCLUSIVE);
@@ -3841,7 +3844,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase max_locks_per_transaction.")));
+ errhint("You might need to increase max_locks_per_transaction.")));
GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
proc->fpVXIDLock = false;
}
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index f1523760e5..95d4b37bef 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -574,7 +574,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
/*
* LWLockAcquireOrWait - Acquire lock, or wait until it's free
*
- * The semantics of this function are a bit funky. If the lock is currently
+ * The semantics of this function are a bit funky. If the lock is currently
* free, it is acquired in the given mode, and the function returns true. If
* the lock isn't immediately free, the function waits until it is released
* and returns false, but does not acquire the lock.
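
Since this contract is easy to misread (a false return means you slept but still hold nothing), the caller's shape matters: the false branch must loop back and re-check state rather than proceed. A hedged sketch of that shape; XLogFlush() is the real caller this function was added for, while work_already_done() and do_the_work() are stand-ins, not real routines:

    for (;;)
    {
        if (work_already_done())        /* e.g. WAL already flushed past us */
            break;

        if (LWLockAcquireOrWait(WALWriteLock, LW_EXCLUSIVE))
        {
            do_the_work();              /* we hold the lock: act for everyone */
            LWLockRelease(WALWriteLock);
            break;
        }

        /* false: the lock was busy and has since been freed -- the holder
         * may have done our work for us, so re-check before retrying. */
    }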
@@ -769,7 +769,7 @@ LWLockRelease(LWLockId lockid)
/*
* Remove the to-be-awakened PGPROCs from the queue.
*/
- bool releaseOK = true;
+ bool releaseOK = true;
proc = head;
@@ -797,6 +797,7 @@ LWLockRelease(LWLockId lockid)
/* proc is now the last PGPROC to be released */
lock->head = proc->lwWaitLink;
proc->lwWaitLink = NULL;
+
/*
* Prevent additional wakeups until retryer gets to run. Backends
* that are just waiting for the lock to become free don't retry
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index 6322a608cb..0c3c7f089b 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -1509,7 +1509,7 @@ GetSafeSnapshot(Snapshot origSnapshot)
* one passed to it, but we avoid assuming that here.
*/
snapshot = GetSerializableTransactionSnapshotInt(origSnapshot,
- InvalidTransactionId);
+ InvalidTransactionId);
if (MySerializableXact == InvalidSerializableXact)
return snapshot; /* no concurrent r/w xacts; it's safe */
@@ -1600,9 +1600,9 @@ SetSerializableTransactionSnapshot(Snapshot snapshot,
/*
* We do not allow SERIALIZABLE READ ONLY DEFERRABLE transactions to
- * import snapshots, since there's no way to wait for a safe snapshot
- * when we're using the snap we're told to. (XXX instead of throwing
- * an error, we could just ignore the XactDeferrable flag?)
+ * import snapshots, since there's no way to wait for a safe snapshot when
+ * we're using the snap we're told to. (XXX instead of throwing an error,
+ * we could just ignore the XactDeferrable flag?)
*/
if (XactReadOnly && XactDeferrable)
ereport(ERROR,
@@ -1646,11 +1646,11 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
*
* We must hold SerializableXactHashLock when taking/checking the snapshot
* to avoid race conditions, for much the same reasons that
- * GetSnapshotData takes the ProcArrayLock. Since we might have to release
- * SerializableXactHashLock to call SummarizeOldestCommittedSxact, this
- * means we have to create the sxact first, which is a bit annoying (in
- * particular, an elog(ERROR) in procarray.c would cause us to leak the
- * sxact). Consider refactoring to avoid this.
+ * GetSnapshotData takes the ProcArrayLock. Since we might have to
+ * release SerializableXactHashLock to call SummarizeOldestCommittedSxact,
+ * this means we have to create the sxact first, which is a bit annoying
+ * (in particular, an elog(ERROR) in procarray.c would cause us to leak
+ * the sxact). Consider refactoring to avoid this.
*/
#ifdef TEST_OLDSERXID
SummarizeOldestCommittedSxact();
@@ -1678,8 +1678,8 @@ GetSerializableTransactionSnapshotInt(Snapshot snapshot,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not import the requested snapshot"),
- errdetail("The source transaction %u is not running anymore.",
- sourcexid)));
+ errdetail("The source transaction %u is not running anymore.",
+ sourcexid)));
}
/*
@@ -2704,8 +2704,8 @@ TransferPredicateLocksToNewTarget(PREDICATELOCKTARGETTAG oldtargettag,
newpredlock = (PREDICATELOCK *)
hash_search_with_hash_value(PredicateLockHash,
&newpredlocktag,
- PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
- newtargettaghash),
+ PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
+ newtargettaghash),
HASH_ENTER_NULL,
&found);
if (!newpredlock)
@@ -2945,8 +2945,8 @@ DropAllPredicateLocksFromTable(Relation relation, bool transfer)
newpredlock = (PREDICATELOCK *)
hash_search_with_hash_value(PredicateLockHash,
&newpredlocktag,
- PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
- heaptargettaghash),
+ PredicateLockHashCodeFromTargetHashCode(&newpredlocktag,
+ heaptargettaghash),
HASH_ENTER,
&found);
if (!found)
@@ -3253,6 +3253,7 @@ ReleasePredicateLocks(bool isCommit)
*/
MySerializableXact->flags |= SXACT_FLAG_DOOMED;
MySerializableXact->flags |= SXACT_FLAG_ROLLED_BACK;
+
/*
* If the transaction was previously prepared, but is now failing due
* to a ROLLBACK PREPARED or (hopefully very rare) error after the
@@ -3544,9 +3545,9 @@ ClearOldPredicateLocks(void)
else
{
/*
- * A read-write transaction can only be partially
- * cleared. We need to keep the SERIALIZABLEXACT but
- * can release the SIREAD locks and conflicts in.
+ * A read-write transaction can only be partially cleared. We
+ * need to keep the SERIALIZABLEXACT but can release the
+ * SIREAD locks and conflicts in.
*/
ReleaseOneSerializableXact(finishedSxact, true, false);
}
@@ -4003,7 +4004,7 @@ CheckForSerializableConflictOut(bool visible, Relation relation,
ereport(ERROR,
(errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
errmsg("could not serialize access due to read/write dependencies among transactions"),
- errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
+ errdetail_internal("Reason code: Canceled on conflict out to old pivot."),
errhint("The transaction might succeed if retried.")));
}
}
@@ -4507,7 +4508,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
&& (!SxactIsCommitted(writer)
|| t2->prepareSeqNo <= writer->commitSeqNo)
&& (!SxactIsReadOnly(reader)
- || t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
+ || t2->prepareSeqNo <= reader->SeqNo.lastCommitBeforeSnapshot))
{
failure = true;
break;
@@ -4552,7 +4553,7 @@ OnConflict_CheckForSerializationFailure(const SERIALIZABLEXACT *reader,
&& (!SxactIsCommitted(t0)
|| t0->commitSeqNo >= writer->prepareSeqNo)
&& (!SxactIsReadOnly(t0)
- || t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
+ || t0->SeqNo.lastCommitBeforeSnapshot >= writer->prepareSeqNo))
{
failure = true;
break;
@@ -4730,10 +4731,10 @@ AtPrepare_PredicateLocks(void)
xactRecord->flags = MySerializableXact->flags;
/*
- * Note that we don't include the list of conflicts in our out in
- * the statefile, because new conflicts can be added even after the
- * transaction prepares. We'll just make a conservative assumption
- * during recovery instead.
+ * Note that we don't include the list of conflicts in our out in the
+ * statefile, because new conflicts can be added even after the
+ * transaction prepares. We'll just make a conservative assumption during
+ * recovery instead.
*/
RegisterTwoPhaseRecord(TWOPHASE_RM_PREDICATELOCK_ID, 0,
@@ -4891,10 +4892,9 @@ predicatelock_twophase_recover(TransactionId xid, uint16 info,
}
/*
- * We don't know whether the transaction had any conflicts or
- * not, so we'll conservatively assume that it had both a
- * conflict in and a conflict out, and represent that with the
- * summary conflict flags.
+ * We don't know whether the transaction had any conflicts or not, so
+ * we'll conservatively assume that it had both a conflict in and a
+ * conflict out, and represent that with the summary conflict flags.
*/
SHMQueueInit(&(sxact->outConflicts));
SHMQueueInit(&(sxact->inConflicts));
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 458cd27a38..327f43bb35 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -70,9 +70,9 @@ PGXACT *MyPgXact = NULL;
NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
/* Pointers to shared-memory structures */
-PROC_HDR *ProcGlobal = NULL;
+PROC_HDR *ProcGlobal = NULL;
NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
-PGPROC *PreparedXactProcs = NULL;
+PGPROC *PreparedXactProcs = NULL;
/* If we are waiting for a lock, this points to the associated LOCALLOCK */
static LOCALLOCK *lockAwaited = NULL;
@@ -222,9 +222,9 @@ InitProcGlobal(void)
/* Common initialization for all PGPROCs, regardless of type. */
/*
- * Set up per-PGPROC semaphore, latch, and backendLock. Prepared
- * xact dummy PGPROCs don't need these though - they're never
- * associated with a real process
+ * Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
+ * dummy PGPROCs don't need these though - they're never associated
+ * with a real process
*/
if (i < MaxBackends + NUM_AUXILIARY_PROCS)
{
@@ -235,12 +235,12 @@ InitProcGlobal(void)
procs[i].pgprocno = i;
/*
- * Newly created PGPROCs for normal backends or for autovacuum must
- * be queued up on the appropriate free list. Because there can only
- * ever be a small, fixed number of auxiliary processes, no free
- * list is used in that case; InitAuxiliaryProcess() instead uses a
- * linear search. PGPROCs for prepared transactions are added to a
- * free list by TwoPhaseShmemInit().
+ * Newly created PGPROCs for normal backends or for autovacuum must be
+ * queued up on the appropriate free list. Because there can only
+ * ever be a small, fixed number of auxiliary processes, no free list
+ * is used in that case; InitAuxiliaryProcess() instead uses a linear
+ * search. PGPROCs for prepared transactions are added to a free list
+ * by TwoPhaseShmemInit().
*/
if (i < MaxConnections)
{
@@ -261,8 +261,8 @@ InitProcGlobal(void)
}
/*
- * Save pointers to the blocks of PGPROC structures reserved for
- * auxiliary processes and prepared transactions.
+ * Save pointers to the blocks of PGPROC structures reserved for auxiliary
+ * processes and prepared transactions.
*/
AuxiliaryProcs = &procs[MaxBackends];
PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
@@ -340,8 +340,8 @@ InitProcess(void)
MarkPostmasterChildActive();
/*
- * Initialize all fields of MyProc, except for those previously initialized
- * by InitProcGlobal.
+ * Initialize all fields of MyProc, except for those previously
+ * initialized by InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
@@ -366,7 +366,7 @@ InitProcess(void)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
- int i;
+ int i;
/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
@@ -500,8 +500,8 @@ InitAuxiliaryProcess(void)
SpinLockRelease(ProcStructLock);
/*
- * Initialize all fields of MyProc, except for those previously initialized
- * by InitProcGlobal.
+ * Initialize all fields of MyProc, except for those previously
+ * initialized by InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
@@ -521,7 +521,7 @@ InitAuxiliaryProcess(void)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
- int i;
+ int i;
/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
@@ -751,7 +751,7 @@ ProcKill(int code, Datum arg)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
- int i;
+ int i;
/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
@@ -1031,8 +1031,8 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
/*
* Also, now that we will successfully clean up after an ereport, it's
* safe to check to see if there's a buffer pin deadlock against the
- * Startup process. Of course, that's only necessary if we're doing
- * Hot Standby and are not the Startup process ourselves.
+ * Startup process. Of course, that's only necessary if we're doing Hot
+ * Standby and are not the Startup process ourselves.
*/
if (RecoveryInProgress() && !InRecovery)
CheckRecoveryConflictDeadlock();
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index aa99f495f6..bc8d89f8c1 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -20,7 +20,7 @@
#include "storage/s_lock.h"
-slock_t dummy_spinlock;
+slock_t dummy_spinlock;
static int spins_per_delay = DEFAULT_SPINS_PER_DELAY;
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
index eeb20fdf63..e5dec9d2a3 100644
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -325,7 +325,7 @@ mdcreate(SMgrRelation reln, ForkNumber forkNum, bool isRedo)
*
* All the above applies only to the relation's main fork; other forks can
* just be removed immediately, since they are not needed to prevent the
- * relfilenode number from being recycled. Also, we do not carefully
+ * relfilenode number from being recycled. Also, we do not carefully
* track whether other forks have been created or not, but just attempt to
* unlink them unconditionally; so we should never complain about ENOENT.
*
@@ -767,9 +767,10 @@ mdnblocks(SMgrRelation reln, ForkNumber forknum)
* NOTE: this assumption could only be wrong if another backend has
* truncated the relation. We rely on higher code levels to handle that
* scenario by closing and re-opening the md fd, which is handled via
- * relcache flush. (Since the checkpointer doesn't participate in relcache
- * flush, it could have segment chain entries for inactive segments;
- * that's OK because the checkpointer never needs to compute relation size.)
+ * relcache flush. (Since the checkpointer doesn't participate in
+ * relcache flush, it could have segment chain entries for inactive
+ * segments; that's OK because the checkpointer never needs to compute
+ * relation size.)
*/
while (v->mdfd_chain != NULL)
{
@@ -1072,12 +1073,13 @@ mdsync(void)
* say "but an unreferenced SMgrRelation is still a leak!" Not
* really, because the only case in which a checkpoint is done
* by a process that isn't about to shut down is in the
- * checkpointer, and it will periodically do smgrcloseall(). This
- * fact justifies our not closing the reln in the success path
- * either, which is a good thing since in non-checkpointer cases
- * we couldn't safely do that.) Furthermore, in many cases
- * the relation will have been dirtied through this same smgr
- * relation, and so we can save a file open/close cycle.
+ * checkpointer, and it will periodically do smgrcloseall().
+ * This fact justifies our not closing the reln in the success
+ * path either, which is a good thing since in
+ * non-checkpointer cases we couldn't safely do that.)
+ * Furthermore, in many cases the relation will have been
+ * dirtied through this same smgr relation, and so we can save
+ * a file open/close cycle.
*/
reln = smgropen(entry->tag.rnode.node,
entry->tag.rnode.backend);
@@ -1470,8 +1472,8 @@ ForgetRelationFsyncRequests(RelFileNodeBackend rnode, ForkNumber forknum)
pg_usleep(10000L); /* 10 msec seems a good number */
/*
- * Note we don't wait for the checkpointer to actually absorb the revoke
- * message; see mdsync() for the implications.
+ * Note we don't wait for the checkpointer to actually absorb the
+ * revoke message; see mdsync() for the implications.
*/
}
}
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 60eb81f774..407942ace4 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -405,8 +405,8 @@ smgrdounlinkfork(SMgrRelation reln, ForkNumber forknum, bool isRedo)
(*(smgrsw[which].smgr_close)) (reln, forknum);
/*
- * Get rid of any remaining buffers for the fork. bufmgr will just
- * drop them without bothering to write the contents.
+ * Get rid of any remaining buffers for the fork. bufmgr will just drop
+ * them without bothering to write the contents.
*/
DropRelFileNodeBuffers(rnode, forknum, 0);
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 89de154bc6..51b6df54f4 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -350,9 +350,9 @@ SocketBackend(StringInfo inBuf)
else
{
/*
- * Can't send DEBUG log messages to client at this point.
- * Since we're disconnecting right away, we don't need to
- * restore whereToSendOutput.
+ * Can't send DEBUG log messages to client at this point. Since
+ * we're disconnecting right away, we don't need to restore
+ * whereToSendOutput.
*/
whereToSendOutput = DestNone;
ereport(DEBUG1,
@@ -393,7 +393,7 @@ SocketBackend(StringInfo inBuf)
whereToSendOutput = DestNone;
ereport(DEBUG1,
(errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
}
return EOF;
}
@@ -999,12 +999,12 @@ exec_simple_query(const char *query_string)
/*
* Start the portal.
- *
- * If we took a snapshot for parsing/planning, the portal may be
- * able to reuse it for the execution phase. Currently, this will only
+ *
+ * If we took a snapshot for parsing/planning, the portal may be able
+ * to reuse it for the execution phase. Currently, this will only
* happen in PORTAL_ONE_SELECT mode. But even if PortalStart doesn't
- * end up being able to do this, keeping the parse/plan snapshot around
- * until after we start the portal doesn't cost much.
+ * end up being able to do this, keeping the parse/plan snapshot
+ * around until after we start the portal doesn't cost much.
*/
PortalStart(portal, NULL, 0, snapshot_set);
@@ -1263,8 +1263,8 @@ exec_parse_message(const char *query_string, /* string to execute */
errdetail_abort()));
/*
- * Create the CachedPlanSource before we do parse analysis, since
- * it needs to see the unmodified raw parse tree.
+ * Create the CachedPlanSource before we do parse analysis, since it
+ * needs to see the unmodified raw parse tree.
*/
psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
@@ -1325,8 +1325,8 @@ exec_parse_message(const char *query_string, /* string to execute */
/*
* CachedPlanSource must be a direct child of MessageContext before we
* reparent unnamed_stmt_context under it, else we have a disconnected
- * circular subgraph. Klugy, but less so than flipping contexts even
- * more above.
+ * circular subgraph. Klugy, but less so than flipping contexts even more
+ * above.
*/
if (unnamed_stmt_context)
MemoryContextSetParent(psrc->context, MessageContext);
@@ -1549,9 +1549,9 @@ exec_bind_message(StringInfo input_message)
/*
* Set a snapshot if we have parameters to fetch (since the input
* functions might need it) or the query isn't a utility command (and
- * hence could require redoing parse analysis and planning). We keep
- * the snapshot active till we're done, so that plancache.c doesn't have
- * to take new ones.
+ * hence could require redoing parse analysis and planning). We keep the
+ * snapshot active till we're done, so that plancache.c doesn't have to
+ * take new ones.
*/
if (numParams > 0 || analyze_requires_snapshot(psrc->raw_parse_tree))
{
@@ -1687,8 +1687,8 @@ exec_bind_message(StringInfo input_message)
params->params[paramno].isnull = isNull;
/*
- * We mark the params as CONST. This ensures that any custom
- * plan makes full use of the parameter values.
+ * We mark the params as CONST. This ensures that any custom plan
+ * makes full use of the parameter values.
*/
params->params[paramno].pflags = PARAM_FLAG_CONST;
params->params[paramno].ptype = ptype;
@@ -1736,9 +1736,9 @@ exec_bind_message(StringInfo input_message)
/*
* And we're ready to start portal execution.
*
- * If we took a snapshot for parsing/planning, we'll try to reuse it
- * for query execution (currently, reuse will only occur if
- * PORTAL_ONE_SELECT mode is chosen).
+ * If we took a snapshot for parsing/planning, we'll try to reuse it for
+ * query execution (currently, reuse will only occur if PORTAL_ONE_SELECT
+ * mode is chosen).
*/
PortalStart(portal, params, 0, snapshot_set);
@@ -2601,7 +2601,7 @@ die(SIGNAL_ARGS)
/* bump holdoff count to make ProcessInterrupts() a no-op */
/* until we are done getting ready for it */
InterruptHoldoffCount++;
- LockErrorCleanup(); /* prevent CheckDeadLock from running */
+ LockErrorCleanup(); /* prevent CheckDeadLock from running */
DisableNotifyInterrupt();
DisableCatchupInterrupt();
InterruptHoldoffCount--;
@@ -2643,7 +2643,7 @@ StatementCancelHandler(SIGNAL_ARGS)
/* bump holdoff count to make ProcessInterrupts() a no-op */
/* until we are done getting ready for it */
InterruptHoldoffCount++;
- LockErrorCleanup(); /* prevent CheckDeadLock from running */
+ LockErrorCleanup(); /* prevent CheckDeadLock from running */
DisableNotifyInterrupt();
DisableCatchupInterrupt();
InterruptHoldoffCount--;
@@ -2802,7 +2802,7 @@ RecoveryConflictInterrupt(ProcSignalReason reason)
/* bump holdoff count to make ProcessInterrupts() a no-op */
/* until we are done getting ready for it */
InterruptHoldoffCount++;
- LockErrorCleanup(); /* prevent CheckDeadLock from running */
+ LockErrorCleanup(); /* prevent CheckDeadLock from running */
DisableNotifyInterrupt();
DisableCatchupInterrupt();
InterruptHoldoffCount--;
@@ -3269,9 +3269,12 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx)
}
#ifdef HAVE_INT_OPTERR
- /* Turn this off because it's either printed to stderr and not the log
- * where we'd want it, or argv[0] is now "--single", which would make for a
- * weird error message. We print our own error message below. */
+
+ /*
+ * Turn this off because it's either printed to stderr and not the log
+ * where we'd want it, or argv[0] is now "--single", which would make for
+ * a weird error message. We print our own error message below.
+ */
opterr = 0;
#endif
@@ -3471,7 +3474,7 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx)
if (IsUnderPostmaster)
ereport(FATAL,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("invalid command-line argument for server process: %s", argv[optind]),
+ errmsg("invalid command-line argument for server process: %s", argv[optind]),
errhint("Try \"%s --help\" for more information.", progname)));
else
ereport(FATAL,
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 89f78f2e82..8b73858300 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -82,8 +82,8 @@ CheckRelationOwnership(RangeVar *rel, bool noCatalogs)
* XXX: This is unsafe in the presence of concurrent DDL, since it is
* called before acquiring any lock on the target relation. However,
* locking the target relation (especially using something like
- * AccessExclusiveLock) before verifying that the user has permissions
- * is not appealing either.
+ * AccessExclusiveLock) before verifying that the user has permissions is
+ * not appealing either.
*/
relOid = RangeVarGetRelid(rel, NoLock, false);
@@ -634,7 +634,7 @@ standard_ProcessUtility(Node *parsetree,
case OBJECT_INDEX:
if (((DropStmt *) parsetree)->concurrent)
PreventTransactionChain(isTopLevel,
- "DROP INDEX CONCURRENTLY");
+ "DROP INDEX CONCURRENTLY");
/* fall through */
case OBJECT_TABLE:
@@ -712,7 +712,7 @@ standard_ProcessUtility(Node *parsetree,
LOCKMODE lockmode;
/*
- * Figure out lock mode, and acquire lock. This also does
+ * Figure out lock mode, and acquire lock. This also does
* basic permissions checks, so that we won't wait for a lock
* on (for example) a relation on which we have no
* permissions.
@@ -753,8 +753,8 @@ standard_ProcessUtility(Node *parsetree,
}
else
ereport(NOTICE,
- (errmsg("relation \"%s\" does not exist, skipping",
- atstmt->relation->relname)));
+ (errmsg("relation \"%s\" does not exist, skipping",
+ atstmt->relation->relname)));
}
break;
diff --git a/src/backend/tsearch/dict_thesaurus.c b/src/backend/tsearch/dict_thesaurus.c
index 37cf575a85..7e641ef64f 100644
--- a/src/backend/tsearch/dict_thesaurus.c
+++ b/src/backend/tsearch/dict_thesaurus.c
@@ -372,8 +372,8 @@ cmpLexemeQ(const void *a, const void *b)
static int
cmpTheLexeme(const void *a, const void *b)
{
- const TheLexeme *la = (const TheLexeme *) a;
- const TheLexeme *lb = (const TheLexeme *) b;
+ const TheLexeme *la = (const TheLexeme *) a;
+ const TheLexeme *lb = (const TheLexeme *) b;
int res;
if ((res = cmpLexeme(la, lb)) != 0)
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
index 2fe40ed973..449aa6a0a5 100644
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -148,12 +148,12 @@ static char *VoidString = "";
static int
cmpspell(const void *s1, const void *s2)
{
- return (strcmp((*(SPELL * const *) s1)->word, (*(SPELL * const *) s2)->word));
+ return (strcmp((*(SPELL *const *) s1)->word, (*(SPELL *const *) s2)->word));
}
static int
cmpspellaffix(const void *s1, const void *s2)
{
- return (strncmp((*(SPELL * const *) s1)->p.flag, (*(SPELL * const *) s2)->p.flag, MAXFLAGLEN));
+ return (strncmp((*(SPELL *const *) s1)->p.flag, (*(SPELL *const *) s2)->p.flag, MAXFLAGLEN));
}
static char *
diff --git a/src/backend/tsearch/to_tsany.c b/src/backend/tsearch/to_tsany.c
index da9ae8d5ba..bb886ee584 100644
--- a/src/backend/tsearch/to_tsany.c
+++ b/src/backend/tsearch/to_tsany.c
@@ -33,8 +33,8 @@ compareWORD(const void *a, const void *b)
int res;
res = tsCompareString(
- ((const ParsedWord *) a)->word, ((const ParsedWord *) a)->len,
- ((const ParsedWord *) b)->word, ((const ParsedWord *) b)->len,
+ ((const ParsedWord *) a)->word, ((const ParsedWord *) a)->len,
+ ((const ParsedWord *) b)->word, ((const ParsedWord *) b)->len,
false);
if (res == 0)
diff --git a/src/backend/tsearch/ts_utils.c b/src/backend/tsearch/ts_utils.c
index 582afde167..6a4888e5f4 100644
--- a/src/backend/tsearch/ts_utils.c
+++ b/src/backend/tsearch/ts_utils.c
@@ -62,7 +62,7 @@ get_tsearch_config_filename(const char *basename,
static int
comparestr(const void *a, const void *b)
{
- return strcmp(*(char * const *) a, *(char * const *) b);
+ return strcmp(*(char *const *) a, *(char *const *) b);
}
/*
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index a8bf2bfffb..77322a115f 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -835,15 +835,15 @@ acldefault(GrantObjectType objtype, Oid ownerId)
/*
- * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
+ * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
* ACL_OBJECT_* values, but it's only used in the information schema, not
* documented for general use.
*/
Datum
acldefault_sql(PG_FUNCTION_ARGS)
{
- char objtypec = PG_GETARG_CHAR(0);
- Oid owner = PG_GETARG_OID(1);
+ char objtypec = PG_GETARG_CHAR(0);
+ Oid owner = PG_GETARG_OID(1);
GrantObjectType objtype = 0;
switch (objtypec)
diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c
index bc4ebd2074..1b7d46f858 100644
--- a/src/backend/utils/adt/array_selfuncs.c
+++ b/src/backend/utils/adt/array_selfuncs.c
@@ -172,7 +172,7 @@ scalararraysel_containment(PlannerInfo *root,
selec = mcelem_array_contain_overlap_selec(values, nvalues,
numbers, nnumbers,
&constval, 1,
- OID_ARRAY_CONTAINS_OP,
+ OID_ARRAY_CONTAINS_OP,
cmpfunc);
else
selec = mcelem_array_contained_selec(values, nvalues,
@@ -193,7 +193,7 @@ scalararraysel_containment(PlannerInfo *root,
selec = mcelem_array_contain_overlap_selec(NULL, 0,
NULL, 0,
&constval, 1,
- OID_ARRAY_CONTAINS_OP,
+ OID_ARRAY_CONTAINS_OP,
cmpfunc);
else
selec = mcelem_array_contained_selec(NULL, 0,
@@ -285,8 +285,8 @@ arraycontsel(PG_FUNCTION_ARGS)
}
/*
- * If var is on the right, commute the operator, so that we can assume
- * the var is on the left in what follows.
+ * If var is on the right, commute the operator, so that we can assume the
+ * var is on the left in what follows.
*/
if (!varonleft)
{
@@ -451,7 +451,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry,
float4 *hist, int nhist,
Oid operator, FmgrInfo *cmpfunc)
{
- Selectivity selec;
+ Selectivity selec;
int num_elems;
Datum *elem_values;
bool *elem_nulls;
@@ -500,7 +500,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry,
if (operator == OID_ARRAY_CONTAINS_OP || operator == OID_ARRAY_OVERLAP_OP)
selec = mcelem_array_contain_overlap_selec(mcelem, nmcelem,
numbers, nnumbers,
- elem_values, nonnull_nitems,
+ elem_values, nonnull_nitems,
operator, cmpfunc);
else if (operator == OID_ARRAY_CONTAINED_OP)
selec = mcelem_array_contained_selec(mcelem, nmcelem,
@@ -626,7 +626,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem,
else
{
if (cmp == 0)
- match = true; /* mcelem is found */
+ match = true; /* mcelem is found */
break;
}
}
@@ -687,7 +687,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem,
* In the "column @> const" and "column && const" cases, we usually have a
* "const" with low number of elements (otherwise we have selectivity close
* to 0 or 1 respectively). That's why the effect of dependence related
- * to distinct element count distribution is negligible there. In the
+ * to distinct element count distribution is negligible there. In the
* "column <@ const" case, number of elements is usually high (otherwise we
* have selectivity close to 0). That's why we should do a correction with
* the array distinct element count distribution here.
@@ -806,7 +806,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
else
{
if (cmp == 0)
- match = true; /* mcelem is found */
+ match = true; /* mcelem is found */
break;
}
}
@@ -854,7 +854,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*----------
* Using the distinct element count histogram requires
* O(unique_nitems * (nmcelem + unique_nitems))
- * operations. Beyond a certain computational cost threshold, it's
+ * operations. Beyond a certain computational cost threshold, it's
* reasonable to sacrifice accuracy for decreased planning time. We limit
* the number of operations to EFFORT * nmcelem; since nmcelem is limited
* by the column's statistics target, the work done is user-controllable.
@@ -866,7 +866,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
* elements to start with, we'd have to remove any discarded elements'
* frequencies from "mult", but since this is only an approximation
* anyway, we don't bother with that. Therefore it's sufficient to qsort
- * elem_selec[] and take the largest elements. (They will no longer match
+ * elem_selec[] and take the largest elements. (They will no longer match
* up with the elements of array_data[], but we don't care.)
*----------
*/
@@ -876,11 +876,11 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
unique_nitems > EFFORT * nmcelem / (nmcelem + unique_nitems))
{
/*
- * Use the quadratic formula to solve for largest allowable N. We
+ * Use the quadratic formula to solve for largest allowable N. We
* have A = 1, B = nmcelem, C = - EFFORT * nmcelem.
*/
- double b = (double) nmcelem;
- int n;
+ double b = (double) nmcelem;
+ int n;
n = (int) ((sqrt(b * b + 4 * EFFORT * b) - b) / 2);
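To spell out the step above: the cap being enforced is N * (nmcelem + N) <= EFFORT * nmcelem. Writing b = nmcelem, the positive root of N^2 + bN - EFFORT*b = 0 is

    N = \frac{-b + \sqrt{b^2 + 4\,\mathrm{EFFORT}\,b}}{2}

which is exactly the expression assigned to n (A = 1, B = b, C = -EFFORT*b in the quadratic formula).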
@@ -891,9 +891,9 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
}
/*
- * Calculate probabilities of each distinct element count for both
- * mcelems and constant elements. At this point, assume independent
- * element occurrence.
+ * Calculate probabilities of each distinct element count for both mcelems
+ * and constant elements. At this point, assume independent element
+ * occurrence.
*/
dist = calc_distr(elem_selec, unique_nitems, unique_nitems, 0.0f);
mcelem_dist = calc_distr(numbers, nmcelem, unique_nitems, rest);
@@ -906,8 +906,8 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
{
/*
* mult * dist[i] / mcelem_dist[i] gives us probability of qual
- * matching from assumption of independent element occurrence with
- * the condition that distinct element count = i.
+ * matching from assumption of independent element occurrence with the
+ * condition that distinct element count = i.
*/
if (mcelem_dist[i] > 0)
selec += hist_part[i] * mult * dist[i] / mcelem_dist[i];
@@ -951,7 +951,7 @@ calc_hist(const float4 *hist, int nhist, int n)
/*
* frac is a probability contribution for each interval between histogram
- * values. We have nhist - 1 intervals, so contribution of each one will
+ * values. We have nhist - 1 intervals, so contribution of each one will
* be 1 / (nhist - 1).
*/
frac = 1.0f / ((float) (nhist - 1));
@@ -1018,7 +1018,7 @@ calc_hist(const float4 *hist, int nhist, int n)
* "rest" is the sum of the probabilities of all low-probability events not
* included in p.
*
- * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
+ * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
* probability that exactly j of first i events occur. Obviously M[0,0] = 1.
* For any constant j, each increment of i increases the probability iff the
* event occurs. So, by the law of total probability:
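The recurrence that sentence is building toward (a standard law-of-total-probability step, with p_i the probability of event i) is

    M[i,j] = M[i-1,\,j]\,(1 - p_i) + M[i-1,\,j-1]\,p_i, \qquad M[0,0] = 1

that is, exactly j of the first i events occur either because j of the first i-1 occurred and event i did not, or because j-1 occurred and event i did.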
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index fa79d9fa6b..604b86ca64 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -42,9 +42,9 @@ typedef struct
char typalign;
/*
- * Lookup data for element type's comparison and hash functions (these
- * are in the type's typcache entry, which we expect to remain valid
- * over the lifespan of the ANALYZE run)
+ * Lookup data for element type's comparison and hash functions (these are
+ * in the type's typcache entry, which we expect to remain valid over the
+ * lifespan of the ANALYZE run)
*/
FmgrInfo *cmp;
FmgrInfo *hash;
@@ -149,8 +149,8 @@ array_typanalyze(PG_FUNCTION_ARGS)
stats->extra_data = extra_data;
/*
- * Note we leave stats->minrows set as std_typanalyze set it. Should
- * it be increased for array analysis purposes?
+ * Note we leave stats->minrows set as std_typanalyze set it. Should it
+ * be increased for array analysis purposes?
*/
PG_RETURN_BOOL(true);
@@ -160,13 +160,13 @@ array_typanalyze(PG_FUNCTION_ARGS)
 * compute_array_stats() -- compute statistics for an array column
*
* This function computes statistics useful for determining selectivity of
- * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
+ * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
* compute_stats hook after sample rows have been collected.
*
* We also invoke the standard compute_stats function, which will compute
* "scalar" statistics relevant to the btree-style array comparison operators.
* However, exact duplicates of an entire array may be rare despite many
- * arrays sharing individual elements. This especially afflicts long arrays,
+ * arrays sharing individual elements. This especially afflicts long arrays,
* which are also liable to lack all scalar statistics due to the low
* WIDTH_THRESHOLD used in analyze.c. So, in addition to the standard stats,
* we find the most common array elements and compute a histogram of distinct
@@ -201,7 +201,7 @@ array_typanalyze(PG_FUNCTION_ARGS)
* In the absence of a principled basis for other particular values, we
* follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
* But we leave out the correction for stopwords, which do not apply to
- * arrays. These parameters give bucket width w = K/0.007 and maximum
+ * arrays. These parameters give bucket width w = K/0.007 and maximum
* expected hashtable size of about 1000 * K.
*
* Elements may repeat within an array. Since duplicates do not change the
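A worked check of the parameter arithmetic above: epsilon = s/10 = 0.007/K, so the bucket width is w = 1/epsilon = K/0.007, and the usual Lossy Counting space estimate of about 7/epsilon entries (the same estimate ts_typanalyze() leans on) gives

    \frac{7}{\epsilon} = \frac{7K}{0.007} = 1000 \cdot K

hence the "about 1000 * K" expected hashtable size.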
@@ -242,8 +242,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* Invoke analyze.c's standard analysis function to create scalar-style
- * stats for the column. It will expect its own extra_data pointer,
- * so temporarily install that.
+ * stats for the column. It will expect its own extra_data pointer, so
+ * temporarily install that.
*/
stats->extra_data = extra_data->std_extra_data;
(*extra_data->std_compute_stats) (stats, fetchfunc, samplerows, totalrows);
@@ -373,8 +373,8 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/* The element value is already on the tracking list */
/*
- * The operators we assist ignore duplicate array elements,
- * so count a given distinct element only once per array.
+ * The operators we assist ignore duplicate array elements, so
+ * count a given distinct element only once per array.
*/
if (item->last_container == array_no)
continue;
@@ -387,11 +387,11 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/* Initialize new tracking list element */
/*
- * If element type is pass-by-reference, we must copy it
- * into palloc'd space, so that we can release the array
- * below. (We do this so that the space needed for element
- * values is limited by the size of the hashtable; if we
- * kept all the array values around, it could be much more.)
+ * If element type is pass-by-reference, we must copy it into
+ * palloc'd space, so that we can release the array below.
+ * (We do this so that the space needed for element values is
+ * limited by the size of the hashtable; if we kept all the
+ * array values around, it could be much more.)
*/
item->key = datumCopy(elem_value,
extra_data->typbyval,
@@ -623,7 +623,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* (compare the histogram-making loop in compute_scalar_stats()).
* But instead of that we have the sorted_count_items[] array,
* which holds unique DEC values with their frequencies (that is,
- * a run-length-compressed version of the full array). So we
+ * a run-length-compressed version of the full array). So we
* control advancing through sorted_count_items[] with the
* variable "frac", which is defined as (x - y) * (num_hist - 1),
* where x is the index in the notional DECs array corresponding
@@ -655,7 +655,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
frac += (int64) sorted_count_items[j]->frequency * (num_hist - 1);
}
hist[i] = sorted_count_items[j]->count;
- frac -= delta; /* update y for upcoming i increment */
+ frac -= delta; /* update y for upcoming i increment */
}
Assert(j == count_items_count - 1);
@@ -775,8 +775,8 @@ trackitem_compare_element(const void *e1, const void *e2)
static int
countitem_compare_count(const void *e1, const void *e2)
{
- const DECountItem * const *t1 = (const DECountItem * const *) e1;
- const DECountItem * const *t2 = (const DECountItem * const *) e2;
+ const DECountItem *const *t1 = (const DECountItem *const *) e1;
+ const DECountItem *const *t2 = (const DECountItem *const *) e2;
if ((*t1)->count < (*t2)->count)
return -1;
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 4a2d413ba2..82551c5f30 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -133,7 +133,7 @@ cash_in(PG_FUNCTION_ARGS)
dsymbol = '.';
if (*lconvert->mon_thousands_sep != '\0')
ssymbol = lconvert->mon_thousands_sep;
- else /* ssymbol should not equal dsymbol */
+ else /* ssymbol should not equal dsymbol */
ssymbol = (dsymbol != ',') ? "," : ".";
csymbol = (*lconvert->currency_symbol != '\0') ? lconvert->currency_symbol : "$";
psymbol = (*lconvert->positive_sign != '\0') ? lconvert->positive_sign : "+";
@@ -301,7 +301,7 @@ cash_out(PG_FUNCTION_ARGS)
dsymbol = '.';
if (*lconvert->mon_thousands_sep != '\0')
ssymbol = lconvert->mon_thousands_sep;
- else /* ssymbol should not equal dsymbol */
+ else /* ssymbol should not equal dsymbol */
ssymbol = (dsymbol != ',') ? "," : ".";
csymbol = (*lconvert->currency_symbol != '\0') ? lconvert->currency_symbol : "$";
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 0fc187e0d6..6e29ebb784 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -337,7 +337,7 @@ date_fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
date_sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = date_fastcmp;
PG_RETURN_VOID();
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index d5d34da552..1c2c39b2e2 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -4170,7 +4170,7 @@ TemporalTransform(int32 max_precis, Node *node)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 old_precis = exprTypmod(source);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index fd19de72cb..2ccdc0cee6 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -555,6 +555,7 @@ static char *
numeric_to_cstring(Numeric n)
{
Datum d = NumericGetDatum(n);
+
return DatumGetCString(DirectFunctionCall1(numeric_out, d));
}
@@ -562,6 +563,7 @@ static Numeric
int64_to_numeric(int64 v)
{
Datum d = Int64GetDatum(v);
+
return DatumGetNumeric(DirectFunctionCall1(int8_numeric, d));
}
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index ca0042a176..b7ce9357f4 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -217,7 +217,7 @@ float4in(PG_FUNCTION_ARGS)
/* did we not see anything that looks like a double? */
if (endptr == num || errno != 0)
{
- int save_errno = errno;
+ int save_errno = errno;
/*
* C99 requires that strtod() accept NaN and [-]Infinity, but not all
@@ -244,9 +244,9 @@ float4in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try
- * to detect whether it's a "real" out-of-range condition by
- * checking to see if the result is zero or huge.
+ * precision). We'd prefer not to throw error for that, so try to
+ * detect whether it's a "real" out-of-range condition by checking
+ * to see if the result is zero or huge.
*/
if (val == 0.0 || val >= HUGE_VAL || val <= -HUGE_VAL)
ereport(ERROR,
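For example, on platforms where strtod("1e-310", ...) sets errno = ERANGE while still returning the nonzero, non-huge denormalized value, the test above deliberately accepts the result; only a value that actually collapsed to 0 or +/-HUGE_VAL is reported as out of range.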
@@ -422,7 +422,7 @@ float8in(PG_FUNCTION_ARGS)
/* did we not see anything that looks like a double? */
if (endptr == num || errno != 0)
{
- int save_errno = errno;
+ int save_errno = errno;
/*
* C99 requires that strtod() accept NaN and [-]Infinity, but not all
@@ -449,15 +449,15 @@ float8in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try
- * to detect whether it's a "real" out-of-range condition by
- * checking to see if the result is zero or huge.
+ * precision). We'd prefer not to throw error for that, so try to
+ * detect whether it's a "real" out-of-range condition by checking
+ * to see if the result is zero or huge.
*/
if (val == 0.0 || val >= HUGE_VAL || val <= -HUGE_VAL)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"%s\" is out of range for type double precision",
- orig_num)));
+ errmsg("\"%s\" is out of range for type double precision",
+ orig_num)));
}
else
ereport(ERROR,
@@ -973,7 +973,7 @@ btfloat4fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btfloat4sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btfloat4fastcmp;
PG_RETURN_VOID();
@@ -1087,7 +1087,7 @@ btfloat8fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
btfloat8sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = btfloat8fastcmp;
PG_RETURN_VOID();
@@ -2750,7 +2750,7 @@ width_bucket_float8(PG_FUNCTION_ARGS)
if (isnan(operand) || isnan(bound1) || isnan(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("operand, lower bound, and upper bound cannot be NaN")));
+ errmsg("operand, lower bound, and upper bound cannot be NaN")));
/* Note that we allow "operand" to be infinite */
if (isinf(bound1) || isinf(bound2))
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index d848739d4a..765c6aa8d5 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1987,8 +1987,8 @@ static int
adjust_partial_year_to_2020(int year)
{
/*
- * Adjust all dates toward 2020; this is effectively what happens
- * when we assume '70' is 1970 and '69' is 2069.
+ * Adjust all dates toward 2020; this is effectively what happens when we
+ * assume '70' is 1970 and '69' is 2069.
*/
/* Force 0-69 into the 2000's */
if (year < 70)
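Concretely: a two-digit 69 maps to 2069 while 70 maps to 1970, so every result lands in the window 1970..2069, which is (roughly) centered on 2020.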
@@ -4485,7 +4485,7 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
*/
if (Np->last_relevant && Np->Num->zero_end > Np->num_pre)
{
- char *last_zero;
+ char *last_zero;
last_zero = Np->number + (Np->Num->zero_end - Np->num_pre);
if (Np->last_relevant < last_zero)
diff --git a/src/backend/utils/adt/inet_net_pton.c b/src/backend/utils/adt/inet_net_pton.c
index 66cdacecb5..9064eaf64b 100644
--- a/src/backend/utils/adt/inet_net_pton.c
+++ b/src/backend/utils/adt/inet_net_pton.c
@@ -30,7 +30,8 @@ static const char rcsid[] = "Id: inet_net_pton.c,v 1.4.2.3 2004/03/17 00:40:11 m
#include <assert.h>
#include <ctype.h>
-#include "utils/builtins.h" /* pgrminclude ignore */ /* needed on some platforms */
+#include "utils/builtins.h" /* pgrminclude ignore */ /* needed on some
+ * platforms */
#include "utils/inet.h"
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 61ae62eb8a..e494630d60 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -42,7 +42,7 @@ typedef struct
char *input;
char *token_start;
char *token_terminator;
- JsonValueType token_type;
+ JsonValueType token_type;
int line_number;
char *line_start;
} JsonLexContext;
@@ -60,7 +60,7 @@ typedef enum
typedef struct JsonParseStack
{
- JsonParseState state;
+ JsonParseState state;
} JsonParseStack;
typedef enum
@@ -80,9 +80,9 @@ static void report_invalid_token(JsonLexContext *lex);
static char *extract_mb_char(char *s);
static void composite_to_json(Datum composite, StringInfo result, bool use_line_feeds);
static void array_dim_to_json(StringInfo result, int dim, int ndims, int *dims,
- Datum *vals, bool *nulls, int *valcount,
- TYPCATEGORY tcategory, Oid typoutputfunc,
- bool use_line_feeds);
+ Datum *vals, bool *nulls, int *valcount,
+ TYPCATEGORY tcategory, Oid typoutputfunc,
+ bool use_line_feeds);
static void array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds);
/* fake type category for JSON so we can distinguish it in datum_to_json */
@@ -95,7 +95,7 @@ static void array_to_json_internal(Datum array, StringInfo result, bool use_line
Datum
json_in(PG_FUNCTION_ARGS)
{
- char *text = PG_GETARG_CSTRING(0);
+ char *text = PG_GETARG_CSTRING(0);
json_validate_cstring(text);
@@ -108,7 +108,7 @@ json_in(PG_FUNCTION_ARGS)
Datum
json_out(PG_FUNCTION_ARGS)
{
- Datum txt = PG_GETARG_DATUM(0);
+ Datum txt = PG_GETARG_DATUM(0);
PG_RETURN_CSTRING(TextDatumGetCString(txt));
}
@@ -120,7 +120,7 @@ Datum
json_send(PG_FUNCTION_ARGS)
{
StringInfoData buf;
- text *t = PG_GETARG_TEXT_PP(0);
+ text *t = PG_GETARG_TEXT_PP(0);
pq_begintypsend(&buf);
pq_sendtext(&buf, VARDATA_ANY(t), VARSIZE_ANY_EXHDR(t));
@@ -163,10 +163,10 @@ json_recv(PG_FUNCTION_ARGS)
static void
json_validate_cstring(char *input)
{
- JsonLexContext lex;
+ JsonLexContext lex;
JsonParseStack *stack,
- *stacktop;
- int stacksize;
+ *stacktop;
+ int stacksize;
/* Set up lexing context. */
lex.input = input;
@@ -183,7 +183,7 @@ json_validate_cstring(char *input)
/* Main parsing loop. */
for (;;)
{
- JsonStackOp op;
+ JsonStackOp op;
/* Fetch next token. */
json_lex(&lex);
@@ -213,7 +213,7 @@ redo:
else if (lex.token_start[0] == ']')
op = JSON_STACKOP_POP;
else if (lex.token_start[0] == '['
- || lex.token_start[0] == '{')
+ || lex.token_start[0] == '{')
{
stack->state = JSON_PARSE_ARRAY_NEXT;
op = JSON_STACKOP_PUSH_WITH_PUSHBACK;
@@ -235,7 +235,7 @@ redo:
if (lex.token_type == JSON_VALUE_STRING)
stack->state = JSON_PARSE_OBJECT_LABEL;
else if (lex.token_type == JSON_VALUE_INVALID
- && lex.token_start[0] == '}')
+ && lex.token_start[0] == '}')
op = JSON_STACKOP_POP;
else
report_parse_error(stack, &lex);
@@ -268,7 +268,7 @@ redo:
break;
default:
elog(ERROR, "unexpected json parse state: %d",
- (int) stack->state);
+ (int) stack->state);
}
/* Push or pop the stack, if needed. */
@@ -279,7 +279,8 @@ redo:
++stack;
if (stack >= &stacktop[stacksize])
{
- int stackoffset = stack - stacktop;
+ int stackoffset = stack - stacktop;
+
stacksize = stacksize + 32;
stacktop = repalloc(stacktop,
sizeof(JsonParseStack) * stacksize);
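Note the parse stack grows arithmetically, 32 slots at a time; the element offset is saved before the repalloc because the allocation may move, so the stack pointer can be re-based afterwards (presumably as stack = stacktop + stackoffset in the lines trimmed from this hunk).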
@@ -362,19 +363,19 @@ json_lex(JsonLexContext *lex)
}
else
{
- char *p;
+ char *p;
/*
- * We're not dealing with a string, number, legal punctuation mark,
- * or end of string. The only legal tokens we might find here are
- * true, false, and null, but for error reporting purposes we scan
- * until we see a non-alphanumeric character. That way, we can report
- * the whole word as an unexpected token, rather than just some
+ * We're not dealing with a string, number, legal punctuation mark, or
+ * end of string. The only legal tokens we might find here are true,
+ * false, and null, but for error reporting purposes we scan until we
+ * see a non-alphanumeric character. That way, we can report the
+ * whole word as an unexpected token, rather than just some
* unintuitive prefix thereof.
*/
- for (p = s; (*p >= 'a' && *p <= 'z') || (*p >= 'A' && *p <= 'Z')
- || (*p >= '0' && *p <= '9') || *p == '_' || IS_HIGHBIT_SET(*p);
- ++p)
+ for (p = s; (*p >= 'a' && *p <= 'z') || (*p >= 'A' && *p <= 'Z')
+ || (*p >= '0' && *p <= '9') || *p == '_' || IS_HIGHBIT_SET(*p);
+ ++p)
;
/*
@@ -431,7 +432,7 @@ json_lex_string(JsonLexContext *lex)
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json"),
errdetail_internal("line %d: Character with value \"0x%02x\" must be escaped.",
- lex->line_number, (unsigned char) *s)));
+ lex->line_number, (unsigned char) *s)));
}
else if (*s == '\\')
{
@@ -444,8 +445,8 @@ json_lex_string(JsonLexContext *lex)
}
else if (*s == 'u')
{
- int i;
- int ch = 0;
+ int i;
+ int ch = 0;
for (i = 1; i <= 4; ++i)
{
@@ -466,7 +467,7 @@ json_lex_string(JsonLexContext *lex)
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json"),
errdetail_internal("line %d: \"\\u\" must be followed by four hexadecimal digits.",
- lex->line_number)));
+ lex->line_number)));
}
}
@@ -479,8 +480,8 @@ json_lex_string(JsonLexContext *lex)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json"),
- errdetail_internal("line %d: Invalid escape \"\\%s\".",
- lex->line_number, extract_mb_char(s))));
+ errdetail_internal("line %d: Invalid escape \"\\%s\".",
+ lex->line_number, extract_mb_char(s))));
}
}
}
@@ -497,17 +498,17 @@ json_lex_string(JsonLexContext *lex)
* (1) An optional minus sign ('-').
*
* (2) Either a single '0', or a string of one or more digits that does not
- * begin with a '0'.
+ * begin with a '0'.
*
* (3) An optional decimal part, consisting of a period ('.') followed by
- * one or more digits. (Note: While this part can be omitted
- * completely, it's not OK to have only the decimal point without
- * any digits afterwards.)
+ * one or more digits. (Note: While this part can be omitted
+ * completely, it's not OK to have only the decimal point without
+ * any digits afterwards.)
*
* (4) An optional exponent part, consisting of 'e' or 'E', optionally
- * followed by '+' or '-', followed by one or more digits. (Note:
- * As with the decimal part, if 'e' or 'E' is present, it must be
- * followed by at least one digit.)
+ * followed by '+' or '-', followed by one or more digits. (Note:
+ * As with the decimal part, if 'e' or 'E' is present, it must be
+ * followed by at least one digit.)
*
* The 's' argument to this function points to the ostensible beginning
* of part 2 - i.e. the character after any optional minus sign, and the
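Parts (1) through (4) amount to the regular expression -?(0|[1-9][0-9]*)(\.[0-9]+)?([eE][+-]?[0-9]+)? (a paraphrase, not text from the patch), bearing in mind that, as just noted, the function is entered only after any part (1) sign has already been consumed.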
@@ -518,8 +519,8 @@ json_lex_string(JsonLexContext *lex)
static void
json_lex_number(JsonLexContext *lex, char *s)
{
- bool error = false;
- char *p;
+ bool error = false;
+ char *p;
/* Part (1): leading sign indicator. */
/* Caller already did this for us; so do nothing. */
@@ -571,7 +572,7 @@ json_lex_number(JsonLexContext *lex, char *s)
/* Check for trailing garbage. */
for (p = s; (*p >= 'a' && *p <= 'z') || (*p >= 'A' && *p <= 'Z')
- || (*p >= '0' && *p <= '9') || *p == '_' || IS_HIGHBIT_SET(*p); ++p)
+ || (*p >= '0' && *p <= '9') || *p == '_' || IS_HIGHBIT_SET(*p); ++p)
;
lex->token_terminator = p;
if (p > s || error)
@@ -584,17 +585,17 @@ json_lex_number(JsonLexContext *lex, char *s)
static void
report_parse_error(JsonParseStack *stack, JsonLexContext *lex)
{
- char *detail = NULL;
- char *token = NULL;
- int toklen;
+ char *detail = NULL;
+ char *token = NULL;
+ int toklen;
/* Handle case where the input ended prematurely. */
if (lex->token_start == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json: \"%s\"",
- lex->input),
- errdetail_internal("The input string ended unexpectedly.")));
+ lex->input),
+ errdetail_internal("The input string ended unexpectedly.")));
/* Work out the offending token. */
toklen = lex->token_terminator - lex->token_start;
@@ -636,8 +637,8 @@ report_parse_error(JsonParseStack *stack, JsonLexContext *lex)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json: \"%s\"",
- lex->input),
- detail ? errdetail_internal(detail, lex->line_number, token) : 0));
+ lex->input),
+ detail ? errdetail_internal(detail, lex->line_number, token) : 0));
}
/*
@@ -646,8 +647,8 @@ report_parse_error(JsonParseStack *stack, JsonLexContext *lex)
static void
report_invalid_token(JsonLexContext *lex)
{
- char *token;
- int toklen;
+ char *token;
+ int toklen;
toklen = lex->token_terminator - lex->token_start;
token = palloc(toklen + 1);
@@ -658,7 +659,7 @@ report_invalid_token(JsonLexContext *lex)
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("invalid input syntax for type json"),
errdetail_internal("line %d: Token \"%s\" is invalid.",
- lex->line_number, token)));
+ lex->line_number, token)));
}
/*
@@ -667,8 +668,8 @@ report_invalid_token(JsonLexContext *lex)
static char *
extract_mb_char(char *s)
{
- char *res;
- int len;
+ char *res;
+ int len;
len = pg_mblen(s);
res = palloc(len + 1);
@@ -687,11 +688,11 @@ datum_to_json(Datum val, bool is_null, StringInfo result, TYPCATEGORY tcategory,
Oid typoutputfunc)
{
- char *outputstr;
+ char *outputstr;
if (is_null)
{
- appendStringInfoString(result,"null");
+ appendStringInfoString(result, "null");
return;
}
@@ -705,19 +706,20 @@ datum_to_json(Datum val, bool is_null, StringInfo result, TYPCATEGORY tcategory,
break;
case TYPCATEGORY_BOOLEAN:
if (DatumGetBool(val))
- appendStringInfoString(result,"true");
+ appendStringInfoString(result, "true");
else
- appendStringInfoString(result,"false");
+ appendStringInfoString(result, "false");
break;
case TYPCATEGORY_NUMERIC:
outputstr = OidOutputFunctionCall(typoutputfunc, val);
+
/*
- * Don't call escape_json here if it's a valid JSON
- * number. Numeric output should usually be a valid
- * JSON number and JSON numbers shouldn't be quoted.
- * Quote cases like "Nan" and "Infinity", however.
+ * Don't call escape_json here if it's a valid JSON number.
+ * Numeric output should usually be a valid JSON number and JSON
+ * numbers shouldn't be quoted. Quote cases like "NaN" and
+ * "Infinity", however.
*/
- if (strpbrk(outputstr,NON_NUMERIC_LETTER) == NULL)
+ if (strpbrk(outputstr, NON_NUMERIC_LETTER) == NULL)
appendStringInfoString(result, outputstr);
else
escape_json(result, outputstr);
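So a numeric printing as 123.45 passes the strpbrk() test and is emitted bare, while output containing any NON_NUMERIC_LETTER character, such as "NaN" or "Infinity", is routed through escape_json() and lands in the JSON text quoted.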
@@ -742,13 +744,13 @@ datum_to_json(Datum val, bool is_null, StringInfo result, TYPCATEGORY tcategory,
* ourselves recursively to process the next dimension.
*/
static void
-array_dim_to_json(StringInfo result, int dim, int ndims,int * dims, Datum *vals,
- bool *nulls, int * valcount, TYPCATEGORY tcategory,
+array_dim_to_json(StringInfo result, int dim, int ndims, int *dims, Datum *vals,
+ bool *nulls, int *valcount, TYPCATEGORY tcategory,
Oid typoutputfunc, bool use_line_feeds)
{
- int i;
- char *sep;
+ int i;
+ char *sep;
Assert(dim < ndims);
@@ -759,7 +761,7 @@ array_dim_to_json(StringInfo result, int dim, int ndims,int * dims, Datum *vals,
for (i = 1; i <= dims[dim]; i++)
{
if (i > 1)
- appendStringInfoString(result,sep);
+ appendStringInfoString(result, sep);
if (dim + 1 == ndims)
{
@@ -770,10 +772,10 @@ array_dim_to_json(StringInfo result, int dim, int ndims,int * dims, Datum *vals,
else
{
/*
- * Do we want line feeds on inner dimensions of arrays?
- * For now we'll say no.
+ * Do we want line feeds on inner dimensions of arrays? For now
+ * we'll say no.
*/
- array_dim_to_json(result, dim+1, ndims, dims, vals, nulls,
+ array_dim_to_json(result, dim + 1, ndims, dims, vals, nulls,
valcount, tcategory, typoutputfunc, false);
}
}
@@ -792,9 +794,9 @@ array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds)
int *dim;
int ndim;
int nitems;
- int count = 0;
+ int count = 0;
Datum *elements;
- bool *nulls;
+ bool *nulls;
int16 typlen;
bool typbyval;
@@ -810,7 +812,7 @@ array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds)
if (nitems <= 0)
{
- appendStringInfoString(result,"[]");
+ appendStringInfoString(result, "[]");
return;
}
@@ -842,52 +844,54 @@ array_to_json_internal(Datum array, StringInfo result, bool use_line_feeds)
static void
composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
{
- HeapTupleHeader td;
- Oid tupType;
- int32 tupTypmod;
- TupleDesc tupdesc;
- HeapTupleData tmptup, *tuple;
- int i;
- bool needsep = false;
- char *sep;
+ HeapTupleHeader td;
+ Oid tupType;
+ int32 tupTypmod;
+ TupleDesc tupdesc;
+ HeapTupleData tmptup,
+ *tuple;
+ int i;
+ bool needsep = false;
+ char *sep;
sep = use_line_feeds ? ",\n " : ",";
- td = DatumGetHeapTupleHeader(composite);
+ td = DatumGetHeapTupleHeader(composite);
- /* Extract rowtype info and find a tupdesc */
- tupType = HeapTupleHeaderGetTypeId(td);
- tupTypmod = HeapTupleHeaderGetTypMod(td);
- tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
+ /* Extract rowtype info and find a tupdesc */
+ tupType = HeapTupleHeaderGetTypeId(td);
+ tupTypmod = HeapTupleHeaderGetTypMod(td);
+ tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
- /* Build a temporary HeapTuple control structure */
- tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
- tmptup.t_data = td;
+ /* Build a temporary HeapTuple control structure */
+ tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
+ tmptup.t_data = td;
tuple = &tmptup;
- appendStringInfoChar(result,'{');
+ appendStringInfoChar(result, '{');
- for (i = 0; i < tupdesc->natts; i++)
- {
- Datum val, origval;
- bool isnull;
- char *attname;
+ for (i = 0; i < tupdesc->natts; i++)
+ {
+ Datum val,
+ origval;
+ bool isnull;
+ char *attname;
TYPCATEGORY tcategory;
Oid typoutput;
bool typisvarlena;
if (tupdesc->attrs[i]->attisdropped)
- continue;
+ continue;
if (needsep)
- appendStringInfoString(result,sep);
+ appendStringInfoString(result, sep);
needsep = true;
- attname = NameStr(tupdesc->attrs[i]->attname);
- escape_json(result,attname);
- appendStringInfoChar(result,':');
+ attname = NameStr(tupdesc->attrs[i]->attname);
+ escape_json(result, attname);
+ appendStringInfoChar(result, ':');
- origval = heap_getattr(tuple, i + 1, tupdesc, &isnull);
+ origval = heap_getattr(tuple, i + 1, tupdesc, &isnull);
if (tupdesc->attrs[i]->atttypid == RECORDARRAYOID)
tcategory = TYPCATEGORY_ARRAY;
@@ -902,10 +906,10 @@ composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
&typoutput, &typisvarlena);
/*
- * If we have a toasted datum, forcibly detoast it here to avoid memory
- * leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
- if (typisvarlena && ! isnull)
+ if (typisvarlena && !isnull)
val = PointerGetDatum(PG_DETOAST_DATUM(origval));
else
val = origval;
@@ -917,8 +921,8 @@ composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
pfree(DatumGetPointer(val));
}
- appendStringInfoChar(result,'}');
- ReleaseTupleDesc(tupdesc);
+ appendStringInfoChar(result, '}');
+ ReleaseTupleDesc(tupdesc);
}
/*
@@ -927,7 +931,7 @@ composite_to_json(Datum composite, StringInfo result, bool use_line_feeds)
extern Datum
array_to_json(PG_FUNCTION_ARGS)
{
- Datum array = PG_GETARG_DATUM(0);
+ Datum array = PG_GETARG_DATUM(0);
StringInfo result;
result = makeStringInfo();
@@ -943,8 +947,8 @@ array_to_json(PG_FUNCTION_ARGS)
extern Datum
array_to_json_pretty(PG_FUNCTION_ARGS)
{
- Datum array = PG_GETARG_DATUM(0);
- bool use_line_feeds = PG_GETARG_BOOL(1);
+ Datum array = PG_GETARG_DATUM(0);
+ bool use_line_feeds = PG_GETARG_BOOL(1);
StringInfo result;
result = makeStringInfo();
@@ -960,7 +964,7 @@ array_to_json_pretty(PG_FUNCTION_ARGS)
extern Datum
row_to_json(PG_FUNCTION_ARGS)
{
- Datum array = PG_GETARG_DATUM(0);
+ Datum array = PG_GETARG_DATUM(0);
StringInfo result;
result = makeStringInfo();
@@ -976,8 +980,8 @@ row_to_json(PG_FUNCTION_ARGS)
extern Datum
row_to_json_pretty(PG_FUNCTION_ARGS)
{
- Datum array = PG_GETARG_DATUM(0);
- bool use_line_feeds = PG_GETARG_BOOL(1);
+ Datum array = PG_GETARG_DATUM(0);
+ bool use_line_feeds = PG_GETARG_BOOL(1);
StringInfo result;
result = makeStringInfo();
@@ -1031,4 +1035,3 @@ escape_json(StringInfo buf, const char *str)
}
appendStringInfoCharMacro(buf, '\"');
}
-
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index ca1b1db18a..33c5b64f50 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -160,7 +160,7 @@ pg_lock_status(PG_FUNCTION_ARGS)
bool nulls[NUM_LOCK_STATUS_COLUMNS];
HeapTuple tuple;
Datum result;
- LockInstanceData *instance;
+ LockInstanceData *instance;
instance = &(lockData->locks[mystatus->currIdx]);
@@ -375,8 +375,8 @@ pg_lock_status(PG_FUNCTION_ARGS)
nulls[11] = true;
/*
- * Lock mode. Currently all predicate locks are SIReadLocks, which
- * are always held (never waiting) and have no fast path
+ * Lock mode. Currently all predicate locks are SIReadLocks, which are
+ * always held (never waiting) and have no fast path
*/
values[12] = CStringGetTextDatum("SIReadLock");
values[13] = BoolGetDatum(true);
diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c
index 958ff54d73..aa9993fa5c 100644
--- a/src/backend/utils/adt/mac.c
+++ b/src/backend/utils/adt/mac.c
@@ -247,8 +247,8 @@ hashmacaddr(PG_FUNCTION_ARGS)
Datum
macaddr_not(PG_FUNCTION_ARGS)
{
- macaddr *addr = PG_GETARG_MACADDR_P(0);
- macaddr *result;
+ macaddr *addr = PG_GETARG_MACADDR_P(0);
+ macaddr *result;
result = (macaddr *) palloc(sizeof(macaddr));
result->a = ~addr->a;
@@ -263,9 +263,9 @@ macaddr_not(PG_FUNCTION_ARGS)
Datum
macaddr_and(PG_FUNCTION_ARGS)
{
- macaddr *addr1 = PG_GETARG_MACADDR_P(0);
- macaddr *addr2 = PG_GETARG_MACADDR_P(1);
- macaddr *result;
+ macaddr *addr1 = PG_GETARG_MACADDR_P(0);
+ macaddr *addr2 = PG_GETARG_MACADDR_P(1);
+ macaddr *result;
result = (macaddr *) palloc(sizeof(macaddr));
result->a = addr1->a & addr2->a;
@@ -280,9 +280,9 @@ macaddr_and(PG_FUNCTION_ARGS)
Datum
macaddr_or(PG_FUNCTION_ARGS)
{
- macaddr *addr1 = PG_GETARG_MACADDR_P(0);
- macaddr *addr2 = PG_GETARG_MACADDR_P(1);
- macaddr *result;
+ macaddr *addr1 = PG_GETARG_MACADDR_P(0);
+ macaddr *addr2 = PG_GETARG_MACADDR_P(1);
+ macaddr *result;
result = (macaddr *) palloc(sizeof(macaddr));
result->a = addr1->a | addr2->a;
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 6bd7d531bb..96e692766b 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -329,14 +329,14 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
Datum
pg_tablespace_location(PG_FUNCTION_ARGS)
{
- Oid tablespaceOid = PG_GETARG_OID(0);
- char sourcepath[MAXPGPATH];
- char targetpath[MAXPGPATH];
- int rllen;
+ Oid tablespaceOid = PG_GETARG_OID(0);
+ char sourcepath[MAXPGPATH];
+ char targetpath[MAXPGPATH];
+ int rllen;
/*
* It's useful to apply this function to pg_class.reltablespace, wherein
- * zero means "the database's default tablespace". So, rather than
+ * zero means "the database's default tablespace". So, rather than
* throwing an error for zero, we choose to assume that's what is meant.
*/
if (tablespaceOid == InvalidOid)
@@ -350,9 +350,10 @@ pg_tablespace_location(PG_FUNCTION_ARGS)
PG_RETURN_TEXT_P(cstring_to_text(""));
#if defined(HAVE_READLINK) || defined(WIN32)
+
/*
- * Find the location of the tablespace by reading the symbolic link that is
- * in pg_tblspc/<oid>.
+ * Find the location of the tablespace by reading the symbolic link that
+ * is in pg_tblspc/<oid>.
*/
snprintf(sourcepath, sizeof(sourcepath), "pg_tblspc/%u", tablespaceOid);
@@ -510,8 +511,8 @@ pg_typeof(PG_FUNCTION_ARGS)
Datum
pg_collation_for(PG_FUNCTION_ARGS)
{
- Oid typeid;
- Oid collid;
+ Oid typeid;
+ Oid collid;
typeid = get_fn_expr_argtype(fcinfo->flinfo, 0);
if (!typeid)
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 14bbdad93b..68c1f1de3b 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -718,7 +718,7 @@ numeric_send(PG_FUNCTION_ARGS)
*
* Flatten calls to numeric's length coercion function that solely represent
* increases in allowable precision. Scale changes mutate every datum, so
- * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
+ * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
* unconstrained numeric, so a change from an unconstrained numeric to any
* constrained numeric is also unoptimizable.
*/
@@ -734,7 +734,7 @@ numeric_transform(PG_FUNCTION_ARGS)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 old_typmod = exprTypmod(source);
@@ -748,7 +748,7 @@ numeric_transform(PG_FUNCTION_ARGS)
* If new_typmod < VARHDRSZ, the destination is unconstrained; that's
* always OK. If old_typmod >= VARHDRSZ, the source is constrained,
* and we're OK if the scale is unchanged and the precision is not
- * decreasing. See further notes in function header comment.
+ * decreasing. See further notes in function header comment.
*/
if (new_typmod < (int32) VARHDRSZ ||
(old_typmod >= (int32) VARHDRSZ &&
@@ -1222,7 +1222,7 @@ width_bucket_numeric(PG_FUNCTION_ARGS)
NUMERIC_IS_NAN(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("operand, lower bound, and upper bound cannot be NaN")));
+ errmsg("operand, lower bound, and upper bound cannot be NaN")));
init_var(&result_var);
init_var(&count_var);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 0920c13cd9..de881bf634 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -224,7 +224,7 @@ pg_perm_setlocale(int category, const char *locale)
* Is the locale name valid for the locale category?
*
* If successful, and canonname isn't NULL, a palloc'd copy of the locale's
- * canonical name is stored there. This is especially useful for figuring out
+ * canonical name is stored there. This is especially useful for figuring out
* what locale name "" means (ie, the server environment value). (Actually,
* it seems that on most implementations that's the only thing it's good for;
* we could wish that setlocale gave back a canonically spelled version of
@@ -578,7 +578,7 @@ strftime_win32(char *dst, size_t dstlen, const wchar_t *format, const struct tm
len = WideCharToMultiByte(CP_UTF8, 0, wbuf, len, dst, dstlen, NULL, NULL);
if (len == 0)
elog(ERROR,
- "could not convert string to UTF-8: error code %lu", GetLastError());
+ "could not convert string to UTF-8: error code %lu", GetLastError());
dst[len] = '\0';
if (encoding != PG_UTF8)
@@ -970,7 +970,7 @@ report_newlocale_failure(const char *localename)
errdetail("The operating system could not find any locale data for the locale name \"%s\".",
localename) : 0)));
}
-#endif /* HAVE_LOCALE_T */
+#endif /* HAVE_LOCALE_T */
/*
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 83d0c22991..7c0705abcc 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -1225,8 +1225,8 @@ pg_stat_get_db_stat_reset_time(PG_FUNCTION_ARGS)
Datum
pg_stat_get_db_temp_files(PG_FUNCTION_ARGS)
{
- Oid dbid = PG_GETARG_OID(0);
- int64 result;
+ Oid dbid = PG_GETARG_OID(0);
+ int64 result;
PgStat_StatDBEntry *dbentry;
if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)
@@ -1241,8 +1241,8 @@ pg_stat_get_db_temp_files(PG_FUNCTION_ARGS)
Datum
pg_stat_get_db_temp_bytes(PG_FUNCTION_ARGS)
{
- Oid dbid = PG_GETARG_OID(0);
- int64 result;
+ Oid dbid = PG_GETARG_OID(0);
+ int64 result;
PgStat_StatDBEntry *dbentry;
if ((dbentry = pgstat_fetch_stat_dbentry(dbid)) == NULL)
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 0994fa77cf..22ceb3c01d 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -54,19 +54,19 @@ typedef struct RangeIOData
static RangeIOData *get_range_io_data(FunctionCallInfo fcinfo, Oid rngtypid,
- IOFuncSelector func);
+ IOFuncSelector func);
static char range_parse_flags(const char *flags_str);
static void range_parse(const char *input_str, char *flags, char **lbound_str,
char **ubound_str);
static const char *range_parse_bound(const char *string, const char *ptr,
char **bound_str, bool *infinite);
static char *range_deparse(char flags, const char *lbound_str,
- const char *ubound_str);
+ const char *ubound_str);
static char *range_bound_escape(const char *value);
static bool range_contains_internal(TypeCacheEntry *typcache,
- RangeType *r1, RangeType *r2);
+ RangeType *r1, RangeType *r2);
static bool range_contains_elem_internal(TypeCacheEntry *typcache,
- RangeType *r, Datum val);
+ RangeType *r, Datum val);
static Size datum_compute_size(Size sz, Datum datum, bool typbyval,
char typalign, int16 typlen, char typstorage);
static Pointer datum_write(Pointer ptr, Datum datum, bool typbyval,
@@ -299,10 +299,10 @@ get_range_io_data(FunctionCallInfo fcinfo, Oid rngtypid, IOFuncSelector func)
if (cache == NULL || cache->typcache->type_id != rngtypid)
{
- int16 typlen;
- bool typbyval;
- char typalign;
- char typdelim;
+ int16 typlen;
+ bool typbyval;
+ char typalign;
+ char typdelim;
cache = (RangeIOData *) MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
sizeof(RangeIOData));
@@ -326,13 +326,13 @@ get_range_io_data(FunctionCallInfo fcinfo, Oid rngtypid, IOFuncSelector func)
if (func == IOFunc_receive)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary input function available for type %s",
- format_type_be(cache->typcache->rngelemtype->type_id))));
+ errmsg("no binary input function available for type %s",
+ format_type_be(cache->typcache->rngelemtype->type_id))));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary output function available for type %s",
- format_type_be(cache->typcache->rngelemtype->type_id))));
+ errmsg("no binary output function available for type %s",
+ format_type_be(cache->typcache->rngelemtype->type_id))));
}
fmgr_info_cxt(cache->typiofunc, &cache->proc,
fcinfo->flinfo->fn_mcxt);
@@ -397,7 +397,7 @@ range_constructor3(PG_FUNCTION_ARGS)
if (PG_ARGISNULL(2))
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("range constructor flags argument must not be NULL")));
+ errmsg("range constructor flags argument must not be NULL")));
flags = range_parse_flags(text_to_cstring(PG_GETARG_TEXT_P(2)));
@@ -716,9 +716,9 @@ range_adjacent(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(false);
/*
- * Given two ranges A..B and C..D, where B < C, the ranges are adjacent
- * if and only if the range B..C is empty, where inclusivity of these two
- * bounds is inverted compared to the original bounds. For discrete
+ * Given two ranges A..B and C..D, where B < C, the ranges are adjacent if
+ * and only if the range B..C is empty, where inclusivity of these two
+ * bounds is inverted compared to the original bounds. For discrete
* ranges, we have to rely on the canonicalization function to normalize
* B..C to empty if it contains no elements of the subtype. (If there is
* no canonicalization function, it's impossible for such a range to
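A worked instance (sketched with int4range): for A..B = [1,3) and C..D = [3,5), inverting the inclusivity of B and C gives [3,3), which is empty, so the ranges are adjacent; for [1,3) and [4,6) the same construction gives [3,4), which is non-empty, so they are not.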
@@ -920,7 +920,7 @@ range_minus(PG_FUNCTION_ARGS)
if (cmp_l1l2 < 0 && cmp_u1u2 > 0)
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("result of range difference would not be contiguous")));
+ errmsg("result of range difference would not be contiguous")));
if (cmp_l1u2 > 0 || cmp_u1l2 < 0)
PG_RETURN_RANGE(r1);
@@ -1180,11 +1180,11 @@ Datum
range_typanalyze(PG_FUNCTION_ARGS)
{
/*
- * For the moment, just punt and don't analyze range columns. If we
- * get close to release without having a better answer, we could
- * consider letting std_typanalyze do what it can ... but those stats
- * are probably next door to useless for most activity with range
- * columns, so it's not clear it's worth gathering them.
+ * For the moment, just punt and don't analyze range columns. If we get
+ * close to release without having a better answer, we could consider
+ * letting std_typanalyze do what it can ... but those stats are probably
+ * next door to useless for most activity with range columns, so it's not
+ * clear it's worth gathering them.
*/
PG_RETURN_BOOL(false);
}
@@ -1392,7 +1392,7 @@ tstzrange_subdiff(PG_FUNCTION_ARGS)
*
* This is for use by range-related functions that follow the convention
* of using the fn_extra field as a pointer to the type cache entry for
- * the range type. Functions that need to cache more information than
+ * the range type. Functions that need to cache more information than
* that must fend for themselves.
*/
TypeCacheEntry *
@@ -1416,7 +1416,7 @@ range_get_typcache(FunctionCallInfo fcinfo, Oid rngtypid)
* range_serialize: construct a range value from bounds and empty-flag
*
* This does not force canonicalization of the range value. In most cases,
- * external callers should only be canonicalization functions. Note that
+ * external callers should only be canonicalization functions. Note that
* we perform some datatype-independent canonicalization checks anyway.
*/
RangeType *
@@ -1753,7 +1753,7 @@ range_cmp_bounds(TypeCacheEntry *typcache, RangeBound *b1, RangeBound *b2)
* Compare two range boundary point values, returning <0, 0, or >0 according
* to whether b1 is less than, equal to, or greater than b2.
*
- * This is similar to but simpler than range_cmp_bounds(). We just compare
+ * This is similar to but simpler than range_cmp_bounds(). We just compare
* the values held in b1 and b2, ignoring inclusive/exclusive flags. The
* lower/upper flags only matter for infinities, where they tell us if the
* infinity is plus or minus.
@@ -1971,7 +1971,7 @@ range_parse(const char *string, char *flags, char **lbound_str,
}
else if (*ptr == ')')
ptr++;
- else /* must be a comma */
+ else /* must be a comma */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("malformed range literal: \"%s\"",
@@ -2224,7 +2224,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, RangeType *r, Datum val)
/*
* datum_compute_size() and datum_write() are used to insert the bound
- * values into a range object. They are modeled after heaptuple.c's
+ * values into a range object. They are modeled after heaptuple.c's
* heap_compute_data_size() and heap_fill_tuple(), but we need not handle
* null values here. TYPE_IS_PACKABLE must test the same conditions as
* heaptuple.c's ATT_IS_PACKABLE macro.
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index 87f71e6812..16103f854b 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -57,7 +57,7 @@
#define LIMIT_RATIO 0.3
/* Constants for fixed penalty values */
-#define INFINITE_BOUND_PENALTY 2.0
+#define INFINITE_BOUND_PENALTY 2.0
#define CONTAIN_EMPTY_PENALTY 1.0
#define DEFAULT_SUBTYPE_DIFF_PENALTY 1.0
@@ -66,8 +66,8 @@
*/
typedef struct
{
- int index;
- RangeBound bound;
+ int index;
+ RangeBound bound;
} SingleBoundSortItem;
/* place on left or right side of split? */
@@ -83,15 +83,15 @@ typedef enum
typedef struct
{
TypeCacheEntry *typcache; /* typcache for range type */
- bool has_subtype_diff; /* does it have subtype_diff? */
+ bool has_subtype_diff; /* does it have subtype_diff? */
int entries_count; /* total number of entries being split */
/* Information about currently selected split follows */
bool first; /* true if no split was selected yet */
- RangeBound *left_upper; /* upper bound of left interval */
- RangeBound *right_lower; /* lower bound of right interval */
+ RangeBound *left_upper; /* upper bound of left interval */
+ RangeBound *right_lower; /* lower bound of right interval */
float4 ratio; /* split ratio */
float4 overlap; /* overlap between left and right predicate */
@@ -146,8 +146,8 @@ typedef struct
((RangeType *) DatumGetPointer(datumCopy(PointerGetDatum(r), \
false, -1)))
-static RangeType *range_super_union(TypeCacheEntry *typcache, RangeType * r1,
- RangeType * r2);
+static RangeType *range_super_union(TypeCacheEntry *typcache, RangeType *r1,
+ RangeType *r2);
static bool range_gist_consistent_int(FmgrInfo *flinfo,
StrategyNumber strategy, RangeType *key,
Datum query);
@@ -155,19 +155,19 @@ static bool range_gist_consistent_leaf(FmgrInfo *flinfo,
StrategyNumber strategy, RangeType *key,
Datum query);
static void range_gist_fallback_split(TypeCacheEntry *typcache,
- GistEntryVector *entryvec,
- GIST_SPLITVEC *v);
+ GistEntryVector *entryvec,
+ GIST_SPLITVEC *v);
static void range_gist_class_split(TypeCacheEntry *typcache,
- GistEntryVector *entryvec,
- GIST_SPLITVEC *v,
- SplitLR *classes_groups);
+ GistEntryVector *entryvec,
+ GIST_SPLITVEC *v,
+ SplitLR *classes_groups);
static void range_gist_single_sorting_split(TypeCacheEntry *typcache,
- GistEntryVector *entryvec,
- GIST_SPLITVEC *v,
- bool use_upper_bound);
+ GistEntryVector *entryvec,
+ GIST_SPLITVEC *v,
+ bool use_upper_bound);
static void range_gist_double_sorting_split(TypeCacheEntry *typcache,
- GistEntryVector *entryvec,
- GIST_SPLITVEC *v);
+ GistEntryVector *entryvec,
+ GIST_SPLITVEC *v);
static void range_gist_consider_split(ConsiderSplitContext *context,
RangeBound *right_lower, int min_left_count,
RangeBound *left_upper, int max_left_count);
@@ -177,7 +177,7 @@ static int interval_cmp_lower(const void *a, const void *b, void *arg);
static int interval_cmp_upper(const void *a, const void *b, void *arg);
static int common_entry_cmp(const void *i1, const void *i2);
static float8 call_subtype_diff(TypeCacheEntry *typcache,
- Datum val1, Datum val2);
+ Datum val1, Datum val2);
/* GiST query consistency check */
@@ -187,6 +187,7 @@ range_gist_consistent(PG_FUNCTION_ARGS)
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
Datum query = PG_GETARG_DATUM(1);
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
/* Oid subtype = PG_GETARG_OID(3); */
bool *recheck = (bool *) PG_GETARG_POINTER(4);
RangeType *key = DatumGetRangeType(entry->key);
@@ -280,9 +281,9 @@ range_gist_penalty(PG_FUNCTION_ARGS)
range_deserialize(typcache, new, &new_lower, &new_upper, &new_empty);
/*
- * Distinct branches for handling distinct classes of ranges. Note
- * that penalty values only need to be commensurate within the same
- * class of new range.
+ * Distinct branches for handling distinct classes of ranges. Note that
+ * penalty values only need to be commensurate within the same class of
+ * new range.
*/
if (new_empty)
{
@@ -290,9 +291,9 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (orig_empty)
{
/*
- * The best case is to insert it to empty original
- * range. Insertion here means no broadening of original range.
- * Also original range is the most narrow.
+ * The best case is to insert it into an empty original range.
+ * Insertion here means no broadening of the original range, and
+ * the original range is also the narrowest.
*/
*penalty = 0.0;
}
@@ -309,7 +310,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else if (orig_lower.infinite && orig_upper.infinite)
{
/*
- * Original range requires broadening. (-inf; +inf) is most far
+ * Original range requires broadening. (-inf; +inf) is most far
* from normal range in this case.
*/
*penalty = 2 * CONTAIN_EMPTY_PENALTY;
@@ -360,8 +361,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (RangeIsOrContainsEmpty(orig))
{
/*
- * Original range is narrower when it doesn't contain empty ranges.
- * Add additional penalty otherwise.
+ * Original range is narrower when it doesn't contain empty
+ * ranges. Add additional penalty otherwise.
*/
*penalty += CONTAIN_EMPTY_PENALTY;
}
@@ -374,11 +375,11 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (orig_upper.infinite)
{
/*
- * (-inf, +inf) range won't be extended by insertion of
- * (-inf, x) range. It's a less desirable case than insertion
- * to (-inf, y) original range without extension, because in
- * that case original range is narrower. But we can't express
- * that in single float value.
+ * (-inf, +inf) range won't be extended by insertion of (-inf,
+ * x) range. It's a less desirable case than insertion to
+ * (-inf, y) original range without extension, because in that
+ * case original range is narrower. But we can't express that
+ * in single float value.
*/
*penalty = 0.0;
}
@@ -387,8 +388,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (range_cmp_bounds(typcache, &new_upper, &orig_upper) > 0)
{
/*
- * Get extension of original range using subtype_diff.
- * Use constant if subtype_diff unavailable.
+ * Get extension of original range using subtype_diff. Use
+ * constant if subtype_diff unavailable.
*/
if (has_subtype_diff)
*penalty = call_subtype_diff(typcache,
@@ -407,8 +408,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else
{
/*
- * If lower bound of original range is not -inf, then extension
- * of it is infinity.
+ * If lower bound of original range is not -inf, then extension of
+ * it is infinity.
*/
*penalty = get_float4_infinity();
}
@@ -421,11 +422,11 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (orig_lower.infinite)
{
/*
- * (-inf, +inf) range won't be extended by insertion of
- * (x, +inf) range. It's a less desirable case than insertion
- * to (y, +inf) original range without extension, because in
- * that case original range is narrower. But we can't express
- * that in single float value.
+ * (-inf, +inf) range won't be extended by insertion of (x,
+ * +inf) range. It's a less desirable case than insertion to
+ * (y, +inf) original range without extension, because in that
+ * case original range is narrower. But we can't express that
+ * in single float value.
*/
*penalty = 0.0;
}
@@ -434,8 +435,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
if (range_cmp_bounds(typcache, &new_lower, &orig_lower) < 0)
{
/*
- * Get extension of original range using subtype_diff.
- * Use constant if subtype_diff unavailable.
+ * Get extension of original range using subtype_diff. Use
+ * constant if subtype_diff unavailable.
*/
if (has_subtype_diff)
*penalty = call_subtype_diff(typcache,
@@ -454,8 +455,8 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else
{
/*
- * If upper bound of original range is not +inf, then extension
- * of it is infinity.
+ * If upper bound of original range is not +inf, then extension of
+ * it is infinity.
*/
*penalty = get_float4_infinity();
}
@@ -506,7 +507,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
/*
* The GiST PickSplit method for ranges
*
- * Primarily, we try to segregate ranges of different classes. If splitting
+ * Primarily, we try to segregate ranges of different classes. If splitting
* ranges of the same class, use the appropriate split method for that class.
*/
Datum
@@ -541,7 +542,7 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
memset(count_in_classes, 0, sizeof(count_in_classes));
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
count_in_classes[get_gist_range_class(range)]++;
}
@@ -597,7 +598,7 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
* To which side of the split should each class go? Initialize them
* all to go to the left side.
*/
- SplitLR classes_groups[CLS_COUNT];
+ SplitLR classes_groups[CLS_COUNT];
memset(classes_groups, 0, sizeof(classes_groups));
@@ -610,16 +611,18 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
{
/*----------
* Try to split classes in one of two ways:
- * 1) containing infinities - not containing infinities
- * 2) containing empty - not containing empty
+ * 1) containing infinities - not containing infinities
+ * 2) containing empty - not containing empty
*
* Select the way which balances the ranges between left and right
* the best. If split in these ways is not possible, there are at
* most 3 classes, so just separate biggest class.
*----------
*/
- int infCount, nonInfCount;
- int emptyCount, nonEmptyCount;
+ int infCount,
+ nonInfCount;
+ int emptyCount,
+ nonEmptyCount;
nonInfCount =
count_in_classes[CLS_NORMAL] +
@@ -628,7 +631,7 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
infCount = total_count - nonInfCount;
nonEmptyCount =
- count_in_classes[CLS_NORMAL] +
+ count_in_classes[CLS_NORMAL] +
count_in_classes[CLS_LOWER_INF] +
count_in_classes[CLS_UPPER_INF] +
count_in_classes[CLS_LOWER_INF | CLS_UPPER_INF];
@@ -638,21 +641,22 @@ range_gist_picksplit(PG_FUNCTION_ARGS)
(Abs(infCount - nonInfCount) <=
Abs(emptyCount - nonEmptyCount)))
{
- classes_groups[CLS_NORMAL] = SPLIT_RIGHT;
+ classes_groups[CLS_NORMAL] = SPLIT_RIGHT;
classes_groups[CLS_CONTAIN_EMPTY] = SPLIT_RIGHT;
- classes_groups[CLS_EMPTY] = SPLIT_RIGHT;
+ classes_groups[CLS_EMPTY] = SPLIT_RIGHT;
}
else if (emptyCount > 0 && nonEmptyCount > 0)
{
- classes_groups[CLS_NORMAL] = SPLIT_RIGHT;
- classes_groups[CLS_LOWER_INF] = SPLIT_RIGHT;
- classes_groups[CLS_UPPER_INF] = SPLIT_RIGHT;
+ classes_groups[CLS_NORMAL] = SPLIT_RIGHT;
+ classes_groups[CLS_LOWER_INF] = SPLIT_RIGHT;
+ classes_groups[CLS_UPPER_INF] = SPLIT_RIGHT;
classes_groups[CLS_LOWER_INF | CLS_UPPER_INF] = SPLIT_RIGHT;
}
else
{
/*
- * Either total_count == emptyCount or total_count == infCount.
+ * Either total_count == emptyCount or total_count ==
+ * infCount.
*/
classes_groups[biggest_class] = SPLIT_RIGHT;
}
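
The balance test in the hunk above fits in a few lines; a sketch with the counts as plain parameters (ABS is spelled out so the fragment is self-contained):

    #define ABS(x) ((x) < 0 ? -(x) : (x))

    typedef enum { SPLIT_BY_INF, SPLIT_BY_EMPTY, SPLIT_BIGGEST_CLASS } split_choice;

    static split_choice
    choose_split(int infCount, int nonInfCount,
                 int emptyCount, int nonEmptyCount)
    {
        /* split by infinities when both sides are populated and that
         * partition is at least as balanced as splitting by emptiness */
        if (infCount > 0 && nonInfCount > 0 &&
            ABS(infCount - nonInfCount) <= ABS(emptyCount - nonEmptyCount))
            return SPLIT_BY_INF;
        if (emptyCount > 0 && nonEmptyCount > 0)
            return SPLIT_BY_EMPTY;
        return SPLIT_BIGGEST_CLASS; /* degenerate: just peel off the biggest class */
    }
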
@@ -673,10 +677,10 @@ range_gist_same(PG_FUNCTION_ARGS)
bool *result = (bool *) PG_GETARG_POINTER(2);
/*
- * range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to
- * check that for ourselves. More generally, if the entries have been
- * properly normalized, then unequal flags bytes must mean unequal ranges
- * ... so let's just test all the flag bits at once.
+ * range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to check
+ * that for ourselves. More generally, if the entries have been properly
+ * normalized, then unequal flags bytes must mean unequal ranges ... so
+ * let's just test all the flag bits at once.
*/
if (range_get_flags(r1) != range_get_flags(r2))
*result = false;
@@ -710,7 +714,7 @@ range_gist_same(PG_FUNCTION_ARGS)
* that *all* unions formed within the GiST index must go through here.
*/
static RangeType *
-range_super_union(TypeCacheEntry *typcache, RangeType * r1, RangeType * r2)
+range_super_union(TypeCacheEntry *typcache, RangeType *r1, RangeType *r2)
{
RangeType *result;
RangeBound lower1,
@@ -862,9 +866,10 @@ range_gist_consistent_int(FmgrInfo *flinfo, StrategyNumber strategy,
proc = range_contains;
break;
case RANGESTRAT_CONTAINED_BY:
+
/*
* Empty ranges are contained by anything, so if key is or
- * contains any empty ranges, we must descend into it. Otherwise,
+ * contains any empty ranges, we must descend into it. Otherwise,
* descend only if key overlaps the query.
*/
if (RangeIsOrContainsEmpty(key))
@@ -875,6 +880,7 @@ range_gist_consistent_int(FmgrInfo *flinfo, StrategyNumber strategy,
proc = range_contains_elem;
break;
case RANGESTRAT_EQ:
+
/*
* If query is empty, descend only if the key is or contains any
* empty ranges. Otherwise, descend if key contains query.
@@ -959,9 +965,11 @@ range_gist_fallback_split(TypeCacheEntry *typcache,
GistEntryVector *entryvec,
GIST_SPLITVEC *v)
{
- RangeType *left_range = NULL;
- RangeType *right_range = NULL;
- OffsetNumber i, maxoff, split_idx;
+ RangeType *left_range = NULL;
+ RangeType *right_range = NULL;
+ OffsetNumber i,
+ maxoff,
+ split_idx;
maxoff = entryvec->n - 1;
/* Split entries before this to left page, after to right: */
@@ -971,7 +979,7 @@ range_gist_fallback_split(TypeCacheEntry *typcache,
v->spl_nright = 0;
for (i = FirstOffsetNumber; i <= maxoff; i++)
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
if (i < split_idx)
PLACE_LEFT(range, i);
@@ -996,9 +1004,10 @@ range_gist_class_split(TypeCacheEntry *typcache,
GIST_SPLITVEC *v,
SplitLR *classes_groups)
{
- RangeType *left_range = NULL;
- RangeType *right_range = NULL;
- OffsetNumber i, maxoff;
+ RangeType *left_range = NULL;
+ RangeType *right_range = NULL;
+ OffsetNumber i,
+ maxoff;
maxoff = entryvec->n - 1;
@@ -1006,8 +1015,8 @@ range_gist_class_split(TypeCacheEntry *typcache,
v->spl_nright = 0;
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
- int class;
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ int class;
/* Get class of range */
class = get_gist_range_class(range);
@@ -1038,10 +1047,12 @@ range_gist_single_sorting_split(TypeCacheEntry *typcache,
GIST_SPLITVEC *v,
bool use_upper_bound)
{
- SingleBoundSortItem *sortItems;
- RangeType *left_range = NULL;
- RangeType *right_range = NULL;
- OffsetNumber i, maxoff, split_idx;
+ SingleBoundSortItem *sortItems;
+ RangeType *left_range = NULL;
+ RangeType *right_range = NULL;
+ OffsetNumber i,
+ maxoff,
+ split_idx;
maxoff = entryvec->n - 1;
@@ -1053,9 +1064,9 @@ range_gist_single_sorting_split(TypeCacheEntry *typcache,
*/
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
- RangeBound bound2;
- bool empty;
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ RangeBound bound2;
+ bool empty;
sortItems[i - 1].index = i;
/* Put appropriate bound into array */
@@ -1078,8 +1089,8 @@ range_gist_single_sorting_split(TypeCacheEntry *typcache,
for (i = 0; i < maxoff; i++)
{
- int idx = sortItems[i].index;
- RangeType *range = DatumGetRangeType(entryvec->vector[idx].key);
+ int idx = sortItems[i].index;
+ RangeType *range = DatumGetRangeType(entryvec->vector[idx].key);
if (i < split_idx)
PLACE_LEFT(range, idx);
@@ -1125,16 +1136,20 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
GIST_SPLITVEC *v)
{
ConsiderSplitContext context;
- OffsetNumber i, maxoff;
- RangeType *range,
- *left_range = NULL,
- *right_range = NULL;
- int common_entries_count;
+ OffsetNumber i,
+ maxoff;
+ RangeType *range,
+ *left_range = NULL,
+ *right_range = NULL;
+ int common_entries_count;
NonEmptyRange *by_lower,
- *by_upper;
+ *by_upper;
CommonEntry *common_entries;
- int nentries, i1, i2;
- RangeBound *right_lower, *left_upper;
+ int nentries,
+ i1,
+ i2;
+ RangeBound *right_lower,
+ *left_upper;
memset(&context, 0, sizeof(ConsiderSplitContext));
context.typcache = typcache;
@@ -1151,8 +1166,8 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
/* Fill arrays of bounds */
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
- bool empty;
+ RangeType *range = DatumGetRangeType(entryvec->vector[i].key);
+ bool empty;
range_deserialize(typcache, range,
&by_lower[i - FirstOffsetNumber].lower,
@@ -1209,7 +1224,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
i1 = 0;
i2 = 0;
right_lower = &by_lower[i1].lower;
- left_upper = &by_upper[i2].lower;
+ left_upper = &by_upper[i2].lower;
while (true)
{
/*
@@ -1229,8 +1244,8 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
right_lower = &by_lower[i1].lower;
/*
- * Find count of ranges which anyway should be placed to the
- * left group.
+ * Find count of ranges which anyway should be placed to the left
+ * group.
*/
while (i2 < nentries &&
range_cmp_bounds(typcache, &by_upper[i2].upper,
@@ -1244,13 +1259,13 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
}
/*
- * Iterate over upper bound of left group finding greatest possible
- * lower bound of right group.
+ * Iterate over upper bound of left group finding greatest possible lower
+ * bound of right group.
*/
i1 = nentries - 1;
i2 = nentries - 1;
right_lower = &by_lower[i1].upper;
- left_upper = &by_upper[i2].upper;
+ left_upper = &by_upper[i2].upper;
while (true)
{
/*
@@ -1270,8 +1285,8 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
left_upper = &by_upper[i2].upper;
/*
- * Find count of intervals which anyway should be placed to the
- * right group.
+ * Find count of intervals which anyway should be placed to the right
+ * group.
*/
while (i1 >= 0 &&
range_cmp_bounds(typcache, &by_lower[i1].lower,
@@ -1295,9 +1310,9 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
}
/*
- * Ok, we have now selected bounds of the groups. Now we have to distribute
- * entries themselves. At first we distribute entries which can be placed
- * unambiguously and collect "common entries" to array.
+ * Ok, we have now selected bounds of the groups. Now we have to
+ * distribute entries themselves. At first we distribute entries which can
+ * be placed unambiguously and collect "common entries" to array.
*/
/* Allocate vectors for results */
@@ -1394,7 +1409,7 @@ range_gist_double_sorting_split(TypeCacheEntry *typcache,
*/
for (i = 0; i < common_entries_count; i++)
{
- int idx = common_entries[i].index;
+ int idx = common_entries[i].index;
range = DatumGetRangeType(entryvec->vector[idx].key);
@@ -1530,8 +1545,8 @@ get_gist_range_class(RangeType *range)
static int
single_bound_cmp(const void *a, const void *b, void *arg)
{
- SingleBoundSortItem *i1 = (SingleBoundSortItem *) a;
- SingleBoundSortItem *i2 = (SingleBoundSortItem *) b;
+ SingleBoundSortItem *i1 = (SingleBoundSortItem *) a;
+ SingleBoundSortItem *i2 = (SingleBoundSortItem *) b;
TypeCacheEntry *typcache = (TypeCacheEntry *) arg;
return range_cmp_bounds(typcache, &i1->bound, &i2->bound);
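
The double-sorting split that the hunks above reindent ends with a distribution pass: entries that fit only one group's bounds are placed immediately, while entries that fit both are collected as "common entries" and placed later. A compact sketch of that classification, with doubles standing in for range bounds (names invented):

    typedef struct { double lower; double upper; } range_sk;
    typedef enum { GO_LEFT, GO_RIGHT, GO_COMMON } placement;

    static placement
    classify_entry(range_sk r, double left_upper, double right_lower)
    {
        int fits_left  = (r.upper <= left_upper);   /* within left group's bounds */
        int fits_right = (r.lower >= right_lower);  /* within right group's bounds */

        if (fits_left && fits_right)
            return GO_COMMON;   /* defer: place wherever balance is best */
        return fits_left ? GO_LEFT : GO_RIGHT;
    }
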
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 7ad99a0ec3..9ca3b9d0c4 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -73,7 +73,7 @@
#define PRETTYFLAG_PAREN 1
#define PRETTYFLAG_INDENT 2
-#define PRETTY_WRAP_DEFAULT 79
+#define PRETTY_WRAP_DEFAULT 79
/* macro to test if pretty action needed */
#define PRETTY_PAREN(context) ((context)->prettyFlags & PRETTYFLAG_PAREN)
@@ -138,7 +138,7 @@ static SPIPlanPtr plan_getrulebyoid = NULL;
static const char *query_getrulebyoid = "SELECT * FROM pg_catalog.pg_rewrite WHERE oid = $1";
static SPIPlanPtr plan_getviewrule = NULL;
static const char *query_getviewrule = "SELECT * FROM pg_catalog.pg_rewrite WHERE ev_class = $1 AND rulename = $2";
-static int pretty_wrap = PRETTY_WRAP_DEFAULT;
+static int pretty_wrap = PRETTY_WRAP_DEFAULT;
/* GUC parameters */
bool quote_all_identifiers = false;
@@ -388,9 +388,9 @@ pg_get_viewdef_wrap(PG_FUNCTION_ARGS)
{
/* By OID */
Oid viewoid = PG_GETARG_OID(0);
- int wrap = PG_GETARG_INT32(1);
+ int wrap = PG_GETARG_INT32(1);
int prettyFlags;
- char *result;
+ char *result;
/* calling this implies we want pretty printing */
prettyFlags = PRETTYFLAG_PAREN | PRETTYFLAG_INDENT;
@@ -1335,10 +1335,10 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
* Now emit the constraint definition, adding NO INHERIT if
* necessary.
*
- * There are cases where
- * the constraint expression will be fully parenthesized and
- * we don't need the outer parens ... but there are other
- * cases where we do need 'em. Be conservative for now.
+ * There are cases where the constraint expression will be
+ * fully parenthesized and we don't need the outer parens ...
+ * but there are other cases where we do need 'em. Be
+ * conservative for now.
*
* Note that simply checking for leading '(' and trailing ')'
* would NOT be good enough, consider "(x > 0) AND (y > 0)".
@@ -1599,7 +1599,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
SysScanDesc scan;
HeapTuple tup;
- /* Look up table name. Can't lock it - we might not have privileges. */
+ /* Look up table name. Can't lock it - we might not have privileges. */
tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename));
tableOid = RangeVarGetRelid(tablerv, NoLock, false);
@@ -3038,7 +3038,7 @@ get_target_list(List *targetList, deparse_context *context,
char *sep;
int colno;
ListCell *l;
- bool last_was_multiline = false;
+ bool last_was_multiline = false;
sep = " ";
colno = 0;
@@ -3048,9 +3048,9 @@ get_target_list(List *targetList, deparse_context *context,
char *colname;
char *attname;
StringInfoData targetbuf;
- int leading_nl_pos = -1;
- char *trailing_nl;
- int pos;
+ int leading_nl_pos = -1;
+ char *trailing_nl;
+ int pos;
if (tle->resjunk)
continue; /* ignore junk entries */
@@ -3060,9 +3060,8 @@ get_target_list(List *targetList, deparse_context *context,
colno++;
/*
- * Put the new field spec into targetbuf so we can
- * decide after we've got it whether or not it needs
- * to go on a new line.
+ * Put the new field spec into targetbuf so we can decide after we've
+ * got it whether or not it needs to go on a new line.
*/
initStringInfo(&targetbuf);
@@ -3112,7 +3111,7 @@ get_target_list(List *targetList, deparse_context *context,
/* Does the new field start with whitespace plus a new line? */
- for (pos=0; pos < targetbuf.len; pos++)
+ for (pos = 0; pos < targetbuf.len; pos++)
{
if (targetbuf.data[pos] == '\n')
{
@@ -3123,30 +3122,29 @@ get_target_list(List *targetList, deparse_context *context,
break;
}
- /* Locate the start of the current line in the buffer */
+ /* Locate the start of the current line in the buffer */
- trailing_nl = (strrchr(buf->data,'\n'));
+ trailing_nl = (strrchr(buf->data, '\n'));
if (trailing_nl == NULL)
trailing_nl = buf->data;
- else
+ else
trailing_nl++;
/*
- * If the field we're adding is the first in the list, or it already
- * has a leading newline, or wrap mode is disabled (pretty_wrap < 0),
- * don't add anything.
- * Otherwise, add a newline, plus some indentation, if either the
- * new field would cause an overflow or the last field used more than
- * one line.
+ * If the field we're adding is the first in the list, or it already
+ * has a leading newline, or wrap mode is disabled (pretty_wrap < 0),
+ * don't add anything. Otherwise, add a newline, plus some
+ * indentation, if either the new field would cause an overflow or the
+ * last field used more than one line.
*/
if (colno > 1 &&
- leading_nl_pos == -1 &&
+ leading_nl_pos == -1 &&
pretty_wrap >= 0 &&
((strlen(trailing_nl) + strlen(targetbuf.data) > pretty_wrap) ||
last_was_multiline))
{
- appendContextKeyword(context, "", -PRETTYINDENT_STD,
+ appendContextKeyword(context, "", -PRETTYINDENT_STD,
PRETTYINDENT_STD, PRETTYINDENT_VAR);
}
@@ -3157,12 +3155,12 @@ get_target_list(List *targetList, deparse_context *context,
/* Keep track of this field's status for next iteration */
- last_was_multiline =
- (strchr(targetbuf.data + leading_nl_pos + 1,'\n') != NULL);
+ last_was_multiline =
+ (strchr(targetbuf.data + leading_nl_pos + 1, '\n') != NULL);
/* cleanup */
- pfree (targetbuf.data);
+ pfree(targetbuf.data);
}
}
@@ -4049,7 +4047,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
- * Get the name of a field of an expression of composite type. The
+ * Get the name of a field of an expression of composite type. The
* expression is usually a Var, but we handle other cases too.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
@@ -4059,7 +4057,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
* could also be RECORD. Since no actual table or view column is allowed to
* have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE
* or to a subquery output. We drill down to find the ultimate defining
- * expression and attempt to infer the field name from it. We ereport if we
+ * expression and attempt to infer the field name from it. We ereport if we
* can't determine the name.
*
* Similarly, a PARAM of type RECORD has to refer to some expression of
@@ -4483,7 +4481,7 @@ find_rte_by_refname(const char *refname, deparse_context *context)
* reference a parameter supplied by an upper NestLoop or SubPlan plan node.
*
* If successful, return the expression and set *dpns_p and *ancestor_cell_p
- * appropriately for calling push_ancestor_plan(). If no referent can be
+ * appropriately for calling push_ancestor_plan(). If no referent can be
* found, return NULL.
*/
static Node *
@@ -4615,7 +4613,7 @@ get_parameter(Param *param, deparse_context *context)
/*
* If it's a PARAM_EXEC parameter, try to locate the expression from which
- * the parameter was computed. Note that failing to find a referent isn't
+ * the parameter was computed. Note that failing to find a referent isn't
* an error, since the Param might well be a subplan output rather than an
* input.
*/
@@ -6567,10 +6565,10 @@ get_from_clause(Query *query, const char *prefix, deparse_context *context)
else
{
StringInfoData targetbuf;
- char *trailing_nl;
+ char *trailing_nl;
appendStringInfoString(buf, ", ");
-
+
initStringInfo(&targetbuf);
context->buf = &targetbuf;
@@ -6578,33 +6576,33 @@ get_from_clause(Query *query, const char *prefix, deparse_context *context)
context->buf = buf;
- /* Locate the start of the current line in the buffer */
+ /* Locate the start of the current line in the buffer */
- trailing_nl = (strrchr(buf->data,'\n'));
+ trailing_nl = (strrchr(buf->data, '\n'));
if (trailing_nl == NULL)
trailing_nl = buf->data;
- else
+ else
trailing_nl++;
-
+
/*
- * Add a newline, plus some indentation, if pretty_wrap is on and the
- * new from-clause item would cause an overflow.
+ * Add a newline, plus some indentation, if pretty_wrap is on and
+ * the new from-clause item would cause an overflow.
*/
-
+
if (pretty_wrap >= 0 &&
(strlen(trailing_nl) + strlen(targetbuf.data) > pretty_wrap))
{
- appendContextKeyword(context, "", -PRETTYINDENT_STD,
+ appendContextKeyword(context, "", -PRETTYINDENT_STD,
PRETTYINDENT_STD, PRETTYINDENT_VAR);
}
/* Add the new item */
appendStringInfoString(buf, targetbuf.data);
-
+
/* cleanup */
- pfree (targetbuf.data);
+ pfree(targetbuf.data);
}
}
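
Both wrapped-output hunks above apply the same test: measure the current output line (everything after the last newline) and wrap before appending an item that would overflow the wrap column. A sketch in plain C (pretty_wrap < 0 disables wrapping, as in the hunks):

    #include <string.h>

    static int
    needs_wrap(const char *buf, const char *next_item, int pretty_wrap)
    {
        const char *line_start = strrchr(buf, '\n');

        line_start = line_start ? line_start + 1 : buf; /* start of current line */
        return pretty_wrap >= 0 &&
            strlen(line_start) + strlen(next_item) > (size_t) pretty_wrap;
    }
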
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 83e43a9997..95e46276f0 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -258,7 +258,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -393,7 +393,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -1743,8 +1743,8 @@ scalararraysel(PlannerInfo *root,
}
/*
- * If it is equality or inequality, we might be able to estimate this as
- * a form of array containment; for instance "const = ANY(column)" can be
+ * If it is equality or inequality, we might be able to estimate this as a
+ * form of array containment; for instance "const = ANY(column)" can be
* treated as "ARRAY[const] <@ column". scalararraysel_containment tries
* that, and returns the selectivity estimate if successful, or -1 if not.
*/
@@ -1819,7 +1819,7 @@ scalararraysel(PlannerInfo *root,
/*
* For generic operators, we assume the probability of success is
- * independent for each array element. But for "= ANY" or "<> ALL",
+ * independent for each array element. But for "= ANY" or "<> ALL",
* if the array elements are distinct (which'd typically be the case)
* then the probabilities are disjoint, and we should just sum them.
*
@@ -2132,6 +2132,7 @@ eqjoinsel(PG_FUNCTION_ARGS)
break;
case JOIN_SEMI:
case JOIN_ANTI:
+
/*
* Look up the join's inner relation. min_righthand is sufficient
* information because neither SEMI nor ANTI joins permit any
@@ -2423,7 +2424,7 @@ eqjoinsel_semi(Oid operator,
/*
* We clamp nd2 to be not more than what we estimate the inner relation's
- * size to be. This is intuitively somewhat reasonable since obviously
+ * size to be. This is intuitively somewhat reasonable since obviously
* there can't be more than that many distinct values coming from the
* inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
* likewise) is that this is the only pathway by which restriction clauses
@@ -3879,7 +3880,7 @@ convert_string_datum(Datum value, Oid typid)
{
char *xfrmstr;
size_t xfrmlen;
- size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
+ size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY;
/*
* Note: originally we guessed at a suitable output buffer size, and
@@ -4475,7 +4476,7 @@ examine_simple_variable(PlannerInfo *root, Var *var,
* Punt if subquery uses set operations or GROUP BY, as these will
* mash underlying columns' stats beyond recognition. (Set ops are
* particularly nasty; if we forged ahead, we would return stats
- * relevant to only the leftmost subselect...) DISTINCT is also
+ * relevant to only the leftmost subselect...) DISTINCT is also
* problematic, but we check that later because there is a possibility
* of learning something even with it.
*/
@@ -4496,12 +4497,12 @@ examine_simple_variable(PlannerInfo *root, Var *var,
Assert(rel->subroot && IsA(rel->subroot, PlannerInfo));
/*
- * Switch our attention to the subquery as mangled by the planner.
- * It was okay to look at the pre-planning version for the tests
- * above, but now we need a Var that will refer to the subroot's
- * live RelOptInfos. For instance, if any subquery pullup happened
- * during planning, Vars in the targetlist might have gotten replaced,
- * and we need to see the replacement expressions.
+ * Switch our attention to the subquery as mangled by the planner. It
+ * was okay to look at the pre-planning version for the tests above,
+ * but now we need a Var that will refer to the subroot's live
+ * RelOptInfos. For instance, if any subquery pullup happened during
+ * planning, Vars in the targetlist might have gotten replaced, and we
+ * need to see the replacement expressions.
*/
subquery = rel->subroot->parse;
Assert(IsA(subquery, Query));
@@ -4530,13 +4531,13 @@ examine_simple_variable(PlannerInfo *root, Var *var,
/*
* If the sub-query originated from a view with the security_barrier
- * attribute, we must not look at the variable's statistics, though
- * it seems all right to notice the existence of a DISTINCT clause.
- * So stop here.
+ * attribute, we must not look at the variable's statistics, though it
+ * seems all right to notice the existence of a DISTINCT clause. So
+ * stop here.
*
* This is probably a harsher restriction than necessary; it's
* certainly OK for the selectivity estimator (which is a C function,
- * and therefore omnipotent anyway) to look at the statistics. But
+ * and therefore omnipotent anyway) to look at the statistics. But
* many selectivity estimators will happily *invoke the operator
* function* to try to work out a good estimate - and that's not OK.
* So for now, don't dig down for stats.
@@ -4563,7 +4564,7 @@ examine_simple_variable(PlannerInfo *root, Var *var,
/*
* Otherwise, the Var comes from a FUNCTION, VALUES, or CTE RTE. (We
* won't see RTE_JOIN here because join alias Vars have already been
- * flattened.) There's not much we can do with function outputs, but
+ * flattened.) There's not much we can do with function outputs, but
* maybe someday try to be smarter about VALUES and/or CTEs.
*/
}
@@ -4679,8 +4680,8 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
/*
* With no data, estimate ndistinct = ntuples if the table is small, else
- * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small"
- * so that the behavior isn't discontinuous.
+ * use default. We use DEFAULT_NUM_DISTINCT as the cutoff for "small" so
+ * that the behavior isn't discontinuous.
*/
if (ntuples < DEFAULT_NUM_DISTINCT)
return ntuples;
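
The continuity argument in that comment is easy to verify: with DEFAULT_NUM_DISTINCT (200 in PostgreSQL) as the cutoff, the estimate is ntuples just below the cutoff and exactly the default at it, so nothing jumps as a table grows past 200 rows. As a sketch:

    #define DEFAULT_NUM_DISTINCT 200

    static double
    ndistinct_fallback(double ntuples)
    {
        /* 199 rows -> 199; 200 rows -> 200; 10000 rows -> still 200 */
        return (ntuples < DEFAULT_NUM_DISTINCT) ? ntuples : DEFAULT_NUM_DISTINCT;
    }
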
@@ -6094,16 +6095,16 @@ string_to_bytea_const(const char *str, size_t str_len)
* ANDing the index predicate with the explicitly given indexquals produces
* a more accurate idea of the index's selectivity. However, we need to be
* careful not to insert redundant clauses, because clauselist_selectivity()
- * is easily fooled into computing a too-low selectivity estimate. Our
+ * is easily fooled into computing a too-low selectivity estimate. Our
* approach is to add only the predicate clause(s) that cannot be proven to
- * be implied by the given indexquals. This successfully handles cases such
+ * be implied by the given indexquals. This successfully handles cases such
* as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
* There are many other cases where we won't detect redundancy, leading to a
* too-low selectivity estimate, which will bias the system in favor of using
- * partial indexes where possible. That is not necessarily bad though.
+ * partial indexes where possible. That is not necessarily bad though.
*
* Note that indexQuals contains RestrictInfo nodes while the indpred
- * does not, so the output list will be mixed. This is OK for both
+ * does not, so the output list will be mixed. This is OK for both
* predicate_implied_by() and clauselist_selectivity(), but might be
* problematic if the result were passed to other things.
*/
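
One way to picture the filtering that comment describes (a loose sketch only: clauses are reduced to strings and "implication" to literal equality, whereas the real code runs a theorem-prover, predicate_implied_by, that also catches cases like "x = 42" implying "x >= 40 AND x < 50"):

    #include <string.h>

    static int
    is_implied_by(const char *pred, const char **quals, int nquals)
    {
        for (int i = 0; i < nquals; i++)
            if (strcmp(pred, quals[i]) == 0)
                return 1;       /* trivially implied: identical clause */
        return 0;
    }

    /* Append only the predicate clauses that cannot be proven redundant. */
    static int
    add_predicate_sketch(const char **pred, int npred,
                         const char **quals, int nquals)
    {
        for (int i = 0; i < npred; i++)
            if (!is_implied_by(pred[i], quals, nquals))
                quals[nquals++] = pred[i];
        return nquals;          /* updated number of quals */
    }
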
@@ -6392,7 +6393,7 @@ btcostestimate(PG_FUNCTION_ARGS)
* the index scan). Additional quals can suppress visits to the heap, so
* it's OK to count them in indexSelectivity, but they should not count
* for estimating numIndexTuples. So we must examine the given indexquals
- * to find out which ones count as boundary quals. We rely on the
+ * to find out which ones count as boundary quals. We rely on the
* knowledge that they are given in index column order.
*
* For a RowCompareExpr, we consider only the first column, just as
@@ -6531,8 +6532,8 @@ btcostestimate(PG_FUNCTION_ARGS)
/*
* If the index is partial, AND the index predicate with the
- * index-bound quals to produce a more accurate idea of the number
- * of rows covered by the bound conditions.
+ * index-bound quals to produce a more accurate idea of the number of
+ * rows covered by the bound conditions.
*/
selectivityQuals = add_predicate_to_quals(index, indexBoundQuals);
@@ -6767,17 +6768,17 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
int32 i;
/*
- * Get the operator's strategy number and declared input data types
- * within the index opfamily. (We don't need the latter, but we use
- * get_op_opfamily_properties because it will throw error if it fails
- * to find a matching pg_amop entry.)
+ * Get the operator's strategy number and declared input data types within
+ * the index opfamily. (We don't need the latter, but we use
+ * get_op_opfamily_properties because it will throw error if it fails to
+ * find a matching pg_amop entry.)
*/
get_op_opfamily_properties(clause_op, index->opfamily[indexcol], false,
&strategy_op, &lefttype, &righttype);
/*
- * GIN always uses the "default" support functions, which are those
- * with lefttype == righttype == the opclass' opcintype (see
+ * GIN always uses the "default" support functions, which are those with
+ * lefttype == righttype == the opclass' opcintype (see
* IndexSupportInitialize in relcache.c).
*/
extractProcOid = get_opfamily_proc(index->opfamily[indexcol],
@@ -6864,7 +6865,7 @@ gincost_opexpr(IndexOptInfo *index, OpExpr *clause, GinQualCounts *counts)
else
{
elog(ERROR, "could not match index to operand");
- operand = NULL; /* keep compiler quiet */
+ operand = NULL; /* keep compiler quiet */
}
if (IsA(operand, RelabelType))
@@ -6872,8 +6873,8 @@ gincost_opexpr(IndexOptInfo *index, OpExpr *clause, GinQualCounts *counts)
/*
* It's impossible to call extractQuery method for unknown operand. So
- * unless operand is a Const we can't do much; just assume there will
- * be one ordinary search entry from the operand at runtime.
+ * unless operand is a Const we can't do much; just assume there will be
+ * one ordinary search entry from the operand at runtime.
*/
if (!IsA(operand, Const))
{
@@ -6901,7 +6902,7 @@ gincost_opexpr(IndexOptInfo *index, OpExpr *clause, GinQualCounts *counts)
* each of which involves one value from the RHS array, plus all the
* non-array quals (if any). To model this, we average the counts across
* the RHS elements, and add the averages to the counts in *counts (which
- * correspond to per-indexscan costs). We also multiply counts->arrayScans
+ * correspond to per-indexscan costs). We also multiply counts->arrayScans
* by N, causing gincostestimate to scale up its estimates accordingly.
*/
static bool
@@ -6935,9 +6936,9 @@ gincost_scalararrayopexpr(IndexOptInfo *index, ScalarArrayOpExpr *clause,
/*
* It's impossible to call extractQuery method for unknown operand. So
- * unless operand is a Const we can't do much; just assume there will
- * be one ordinary search entry from each array entry at runtime, and
- * fall back on a probably-bad estimate of the number of array entries.
+ * unless operand is a Const we can't do much; just assume there will be
+ * one ordinary search entry from each array entry at runtime, and fall
+ * back on a probably-bad estimate of the number of array entries.
*/
if (!IsA(rightop, Const))
{
@@ -7156,7 +7157,7 @@ gincostestimate(PG_FUNCTION_ARGS)
else if (IsA(clause, ScalarArrayOpExpr))
{
matchPossible = gincost_scalararrayopexpr(index,
- (ScalarArrayOpExpr *) clause,
+ (ScalarArrayOpExpr *) clause,
numEntries,
&counts);
if (!matchPossible)
@@ -7194,7 +7195,8 @@ gincostestimate(PG_FUNCTION_ARGS)
outer_scans = loop_count;
/*
- * Compute cost to begin scan, first of all, pay attention to pending list.
+ * Compute cost to begin scan, first of all, pay attention to pending
+ * list.
*/
entryPagesFetched = numPendingPages;
@@ -7247,7 +7249,8 @@ gincostestimate(PG_FUNCTION_ARGS)
*indexStartupCost = (entryPagesFetched + dataPagesFetched) * spc_random_page_cost;
/*
- * Now we compute the number of data pages fetched while the scan proceeds.
+ * Now we compute the number of data pages fetched while the scan
+ * proceeds.
*/
/* data pages scanned for each exact (non-partial) matched entry */
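
The array-clause averaging mentioned a few hunks up ("we average the counts across the RHS elements ... multiply counts->arrayScans by N") can be sketched as follows; the struct and field names are invented stand-ins for GinQualCounts:

    typedef struct
    {
        double  searchEntries;  /* per-scan entry lookups */
        double  arrayScans;     /* how many logical scans will run */
    } gin_counts_sk;

    static void
    add_array_costs(gin_counts_sk *total, const gin_counts_sk *per_elem, int nelems)
    {
        double  sum = 0.0;

        for (int i = 0; i < nelems; i++)
            sum += per_elem[i].searchEntries;
        total->searchEntries += sum / nelems;   /* average across elements, not sum */
        total->arrayScans *= nelems;            /* N scans per outer index scan */
    }
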
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index a3e1e94a2b..8593b6b47f 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -757,7 +757,7 @@ interval_send(PG_FUNCTION_ARGS)
/*
* The interval typmod stores a "range" in its high 16 bits and a "precision"
- * in its low 16 bits. Both contribute to defining the resolution of the
+ * in its low 16 bits. Both contribute to defining the resolution of the
* type. Range addresses resolution granules larger than one second, and
* precision specifies resolution below one second. This representation can
* express all SQL standard resolutions, but we implement them all in terms of
@@ -940,7 +940,7 @@ interval_transform(PG_FUNCTION_ARGS)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) &&!((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 old_typmod = exprTypmod(source);
@@ -965,7 +965,7 @@ interval_transform(PG_FUNCTION_ARGS)
/*
* Temporally-smaller fields occupy higher positions in the range
- * bitmap. Since only the temporally-smallest bit matters for length
+ * bitmap. Since only the temporally-smallest bit matters for length
* coercion purposes, we compare the last-set bits in the ranges.
* Precision, which is to say, sub-second precision, only affects
* ranges that include SECOND.
@@ -974,8 +974,8 @@ interval_transform(PG_FUNCTION_ARGS)
old_range_fls = fls(old_range);
if (new_typmod < 0 ||
((new_range_fls >= SECOND || new_range_fls >= old_range_fls) &&
- (old_range_fls < SECOND || new_precis >= MAX_INTERVAL_PRECISION ||
- new_precis >= old_precis)))
+ (old_range_fls < SECOND || new_precis >= MAX_INTERVAL_PRECISION ||
+ new_precis >= old_precis)))
ret = relabel_to_typmod(source, new_typmod);
}
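
The typmod layout that the first timestamp.c hunk describes — range in the high 16 bits, precision in the low 16 — packs and unpacks with plain shifts. A sketch (the 0x7FFF/0xFFFF masks are illustrative; PostgreSQL's headers define the authoritative INTERVAL_RANGE_MASK and INTERVAL_PRECISION_MASK):

    #include <stdint.h>

    static int32_t
    interval_typmod_pack(int range, int precision)
    {
        return ((int32_t) (range & 0x7FFF) << 16) | (precision & 0xFFFF);
    }

    static int
    interval_typmod_range(int32_t typmod)
    {
        return (typmod >> 16) & 0x7FFF;
    }

    static int
    interval_typmod_precision(int32_t typmod)
    {
        return typmod & 0xFFFF;
    }
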
@@ -1925,7 +1925,7 @@ timestamp_fastcmp(Datum x, Datum y, SortSupport ssup)
Datum
timestamp_sortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
ssup->comparator = timestamp_fastcmp;
PG_RETURN_VOID();
@@ -4067,11 +4067,11 @@ timestamp_part(PG_FUNCTION_ARGS)
{
case DTK_EPOCH:
#ifdef HAVE_INT64_TIMESTAMP
- result = (timestamp - SetEpochTimestamp()) / 1000000.0;
+ result = (timestamp - SetEpochTimestamp()) / 1000000.0;
#else
- result = timestamp - SetEpochTimestamp();
+ result = timestamp - SetEpochTimestamp();
#endif
- break;
+ break;
case DTK_DOW:
case DTK_ISODOW:
diff --git a/src/backend/utils/adt/tsgistidx.c b/src/backend/utils/adt/tsgistidx.c
index b427586d18..674e48c871 100644
--- a/src/backend/utils/adt/tsgistidx.c
+++ b/src/backend/utils/adt/tsgistidx.c
@@ -593,8 +593,8 @@ typedef struct
static int
comparecost(const void *va, const void *vb)
{
- const SPLITCOST *a = (const SPLITCOST *) va;
- const SPLITCOST *b = (const SPLITCOST *) vb;
+ const SPLITCOST *a = (const SPLITCOST *) va;
+ const SPLITCOST *b = (const SPLITCOST *) vb;
if (a->cost == b->cost)
return 0;
diff --git a/src/backend/utils/adt/tsquery_util.c b/src/backend/utils/adt/tsquery_util.c
index ae00f180b5..0724d33c1d 100644
--- a/src/backend/utils/adt/tsquery_util.c
+++ b/src/backend/utils/adt/tsquery_util.c
@@ -134,7 +134,7 @@ QTNodeCompare(QTNode *an, QTNode *bn)
static int
cmpQTN(const void *a, const void *b)
{
- return QTNodeCompare(*(QTNode * const *) a, *(QTNode * const *) b);
+ return QTNodeCompare(*(QTNode *const *) a, *(QTNode *const *) b);
}
void
diff --git a/src/backend/utils/adt/tsrank.c b/src/backend/utils/adt/tsrank.c
index 960233b633..a45caf05af 100644
--- a/src/backend/utils/adt/tsrank.c
+++ b/src/backend/utils/adt/tsrank.c
@@ -134,8 +134,8 @@ static int
compareQueryOperand(const void *a, const void *b, void *arg)
{
char *operand = (char *) arg;
- QueryOperand *qa = (*(QueryOperand * const *) a);
- QueryOperand *qb = (*(QueryOperand * const *) b);
+ QueryOperand *qa = (*(QueryOperand *const *) a);
+ QueryOperand *qb = (*(QueryOperand *const *) b);
return tsCompareString(operand + qa->distance, qa->length,
operand + qb->distance, qb->length,
diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c
index bab6534fea..eb5c45b3d8 100644
--- a/src/backend/utils/adt/tsvector_op.c
+++ b/src/backend/utils/adt/tsvector_op.c
@@ -373,9 +373,9 @@ tsvector_concat(PG_FUNCTION_ARGS)
i2 = in2->size;
/*
- * Conservative estimate of space needed. We might need all the data
- * in both inputs, and conceivably add a pad byte before position data
- * for each item where there was none before.
+ * Conservative estimate of space needed. We might need all the data in
+ * both inputs, and conceivably add a pad byte before position data for
+ * each item where there was none before.
*/
output_bytes = VARSIZE(in1) + VARSIZE(in2) + i1 + i2;
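
A worked example of that bound, with invented figures: if in1 is 1000 bytes holding i1 = 30 lexemes and in2 is 600 bytes holding i2 = 20, the allocation is 1000 + 600 + 30 + 20 = 1650 bytes. The extra i1 + i2 bytes cover the worst case in which every one of the 50 output lexemes needs a pad byte inserted before position data that was unpadded in its input.
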
diff --git a/src/backend/utils/adt/varbit.c b/src/backend/utils/adt/varbit.c
index e74e062338..2bcf5b8aa8 100644
--- a/src/backend/utils/adt/varbit.c
+++ b/src/backend/utils/adt/varbit.c
@@ -664,7 +664,7 @@ varbit_transform(PG_FUNCTION_ARGS)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) &&!((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 new_typmod = DatumGetInt32(((Const *) typmod)->constvalue);
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 199330cef2..4cdb88837b 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -561,7 +561,7 @@ varchar_transform(PG_FUNCTION_ARGS)
typmod = (Node *) lsecond(expr->args);
- if (IsA(typmod, Const) && !((Const *) typmod)->constisnull)
+ if (IsA(typmod, Const) &&!((Const *) typmod)->constisnull)
{
Node *source = (Node *) linitial(expr->args);
int32 old_typmod = exprTypmod(source);
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 53989d1ecb..e1b57ba3fc 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -1353,6 +1353,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
char a2buf[STACKBUFLEN];
char *a1p,
*a2p;
+
#ifdef HAVE_LOCALE_T
pg_locale_t mylocale = 0;
#endif
@@ -1413,8 +1414,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
(LPWSTR) a1p, a1len / 2);
if (!r)
ereport(ERROR,
- (errmsg("could not convert string to UTF-16: error code %lu",
- GetLastError())));
+ (errmsg("could not convert string to UTF-16: error code %lu",
+ GetLastError())));
}
((LPWSTR) a1p)[r] = 0;
@@ -1426,8 +1427,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
(LPWSTR) a2p, a2len / 2);
if (!r)
ereport(ERROR,
- (errmsg("could not convert string to UTF-16: error code %lu",
- GetLastError())));
+ (errmsg("could not convert string to UTF-16: error code %lu",
+ GetLastError())));
}
((LPWSTR) a2p)[r] = 0;
@@ -4001,7 +4002,7 @@ text_format_string_conversion(StringInfo buf, char conversion,
else if (conversion == 'I')
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("null values cannot be formatted as an SQL identifier")));
+ errmsg("null values cannot be formatted as an SQL identifier")));
return;
}
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index c51a9f76ce..44d327d760 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -126,8 +126,8 @@ static bool print_xml_decl(StringInfo buf, const xmlChar *version,
static xmlDocPtr xml_parse(text *data, XmlOptionType xmloption_arg,
bool preserve_whitespace, int encoding);
static text *xml_xmlnodetoxmltype(xmlNodePtr cur);
-static int xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
- ArrayBuildState **astate);
+static int xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
+ ArrayBuildState **astate);
#endif /* USE_LIBXML */
static StringInfo query_to_xml_internal(const char *query, char *tablename,
@@ -913,7 +913,7 @@ pg_xml_init_library(void)
* pg_xml_init --- set up for use of libxml and register an error handler
*
* This should be called by each function that is about to use libxml
- * facilities and requires error handling. It initializes libxml with
+ * facilities and requires error handling. It initializes libxml with
* pg_xml_init_library() and establishes our libxml error handler.
*
* strictness determines which errors are reported and which are ignored.
@@ -943,9 +943,9 @@ pg_xml_init(PgXmlStrictness strictness)
/*
* Save original error handler and install ours. libxml originally didn't
* distinguish between the contexts for generic and for structured error
- * handlers. If we're using an old libxml version, we must thus save
- * the generic error context, even though we're using a structured
- * error handler.
+ * handlers. If we're using an old libxml version, we must thus save the
+ * generic error context, even though we're using a structured error
+ * handler.
*/
errcxt->saved_errfunc = xmlStructuredError;
@@ -959,7 +959,7 @@ pg_xml_init(PgXmlStrictness strictness)
/*
* Verify that xmlSetStructuredErrorFunc set the context variable we
- * expected it to. If not, the error context pointer we just saved is not
+ * expected it to. If not, the error context pointer we just saved is not
* the correct thing to restore, and since that leaves us without a way to
* restore the context in pg_xml_done, we must fail.
*
@@ -1014,9 +1014,9 @@ pg_xml_done(PgXmlErrorContext *errcxt, bool isError)
Assert(!errcxt->err_occurred || isError);
/*
- * Check that libxml's global state is correct, warn if not. This is
- * a real test and not an Assert because it has a higher probability
- * of happening.
+ * Check that libxml's global state is correct, warn if not. This is a
+ * real test and not an Assert because it has a higher probability of
+ * happening.
*/
#ifdef HAVE_XMLSTRUCTUREDERRORCONTEXT
cur_errcxt = xmlStructuredErrorContext;
@@ -1108,7 +1108,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
int utf8len;
/*
- * Only initialize libxml. We don't need error handling here, but we do
+ * Only initialize libxml. We don't need error handling here, but we do
* need to make sure libxml is initialized before calling any of its
* functions. Note that this is safe (and a no-op) if caller has already
* done pg_xml_init().
@@ -1516,9 +1516,9 @@ xml_errorHandler(void *data, xmlErrorPtr error)
PgXmlErrorContext *xmlerrcxt = (PgXmlErrorContext *) data;
xmlParserCtxtPtr ctxt = (xmlParserCtxtPtr) error->ctxt;
xmlParserInputPtr input = (ctxt != NULL) ? ctxt->input : NULL;
- xmlNodePtr node = error->node;
+ xmlNodePtr node = error->node;
const xmlChar *name = (node != NULL &&
- node->type == XML_ELEMENT_NODE) ? node->name : NULL;
+ node->type == XML_ELEMENT_NODE) ? node->name : NULL;
int domain = error->domain;
int level = error->level;
StringInfo errorBuf;
@@ -1599,7 +1599,7 @@ xml_errorHandler(void *data, xmlErrorPtr error)
if (input != NULL)
{
xmlGenericErrorFunc errFuncSaved = xmlGenericError;
- void *errCtxSaved = xmlGenericErrorContext;
+ void *errCtxSaved = xmlGenericErrorContext;
xmlSetGenericErrorFunc((void *) errorBuf,
(xmlGenericErrorFunc) appendStringInfo);
@@ -1617,8 +1617,8 @@ xml_errorHandler(void *data, xmlErrorPtr error)
chopStringInfoNewlines(errorBuf);
/*
- * Legacy error handling mode. err_occurred is never set, we just add the
- * message to err_buf. This mode exists because the xml2 contrib module
+ * Legacy error handling mode. err_occurred is never set, we just add the
+ * message to err_buf. This mode exists because the xml2 contrib module
* uses our error-handling infrastructure, but we don't want to change its
* behaviour since it's deprecated anyway. This is also why we don't
* distinguish between notices, warnings and errors here --- the old-style
@@ -3574,7 +3574,7 @@ xml_xmlnodetoxmltype(xmlNodePtr cur)
PG_TRY();
{
/* Here we rely on XML having the same representation as TEXT */
- char *escaped = escape_xml((char *) str);
+ char *escaped = escape_xml((char *) str);
result = (xmltype *) cstring_to_text(escaped);
pfree(escaped);
@@ -3623,7 +3623,7 @@ xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
result = xpathobj->nodesetval->nodeNr;
if (astate != NULL)
{
- int i;
+ int i;
for (i = 0; i < result; i++)
{
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index ea3daa599c..0307b9652d 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -1637,8 +1637,8 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
/*
* If there are any out-of-line toasted fields in the tuple, expand them
- * in-line. This saves cycles during later use of the catcache entry,
- * and also protects us against the possibility of the toast tuples being
+ * in-line. This saves cycles during later use of the catcache entry, and
+ * also protects us against the possibility of the toast tuples being
* freed before we attempt to fetch them, in case of something using a
* slightly stale catcache entry.
*/
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index d5fe85abbf..9ccfc4f114 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -820,7 +820,7 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
* since they'll not have seen our changed tuples anyway. We can forget
* about CurrentCmdInvalidMsgs too, since those changes haven't touched
* the caches yet.
- *
+ *
* In any case, reset the various lists to empty. We need not physically
* free memory here, since TopTransactionContext is about to be emptied
* anyway.
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 44dab82264..64b413bb6a 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -283,7 +283,7 @@ get_sort_function_for_ordering_op(Oid opno, Oid *sortfunc,
opcintype,
opcintype,
BTORDER_PROC);
- if (!OidIsValid(*sortfunc)) /* should not happen */
+ if (!OidIsValid(*sortfunc)) /* should not happen */
elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
BTORDER_PROC, opcintype, opcintype, opfamily);
*issupport = false;
@@ -1549,7 +1549,7 @@ func_volatile(Oid funcid)
/*
* get_func_leakproof
- * Given procedure id, return the function's leakproof field.
+ * Given procedure id, return the function's leakproof field.
*/
bool
get_func_leakproof(Oid funcid)
@@ -2914,8 +2914,8 @@ get_range_subtype(Oid rangeOid)
tp = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(rangeOid));
if (HeapTupleIsValid(tp))
{
- Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp);
- Oid result;
+ Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp);
+ Oid result;
result = rngtup->rngsubtype;
ReleaseSysCache(tp);
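
The get_range_subtype hunk above shows the standard syscache lookup shape: fetch, test validity, copy the field out, release, return. Written out in full (this mirrors the function in the hunk and uses real backend APIs, so it is only meaningful inside the PostgreSQL backend):

    #include "postgres.h"
    #include "catalog/pg_range.h"
    #include "utils/syscache.h"

    Oid
    get_range_subtype_sketch(Oid rangeOid)
    {
        HeapTuple   tp = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(rangeOid));

        if (HeapTupleIsValid(tp))
        {
            Form_pg_range rngtup = (Form_pg_range) GETSTRUCT(tp);
            Oid         result = rngtup->rngsubtype;

            ReleaseSysCache(tp);    /* always pair with a successful search */
            return result;
        }
        return InvalidOid;          /* not a range type */
    }
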
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 6292f8dc6c..c42765c25a 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -11,7 +11,7 @@
* The logic for choosing generic or custom plans is in choose_custom_plan,
* which see for comments.
*
- * Cache invalidation is driven off sinval events. Any CachedPlanSource
+ * Cache invalidation is driven off sinval events. Any CachedPlanSource
* that matches the event is marked invalid, as is its generic CachedPlan
* if it has one. When (and if) the next demand for a cached plan occurs,
* parse analysis and rewrite is repeated to build a new valid query tree,
@@ -77,9 +77,9 @@ static void ReleaseGenericPlan(CachedPlanSource *plansource);
static List *RevalidateCachedQuery(CachedPlanSource *plansource);
static bool CheckCachedPlan(CachedPlanSource *plansource);
static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
- ParamListInfo boundParams);
+ ParamListInfo boundParams);
static bool choose_custom_plan(CachedPlanSource *plansource,
- ParamListInfo boundParams);
+ ParamListInfo boundParams);
static double cached_plan_cost(CachedPlan *plan);
static void AcquireExecutorLocks(List *stmt_list, bool acquire);
static void AcquirePlannerLocks(List *stmt_list, bool acquire);
@@ -111,7 +111,7 @@ InitPlanCache(void)
* CreateCachedPlan: initially create a plan cache entry.
*
* Creation of a cached plan is divided into two steps, CreateCachedPlan and
- * CompleteCachedPlan. CreateCachedPlan should be called after running the
+ * CompleteCachedPlan. CreateCachedPlan should be called after running the
* query through raw_parser, but before doing parse analysis and rewrite;
* CompleteCachedPlan is called after that. The reason for this arrangement
* is that it can save one round of copying of the raw parse tree, since
@@ -198,13 +198,13 @@ CreateCachedPlan(Node *raw_parse_tree,
* CompleteCachedPlan: second step of creating a plan cache entry.
*
* Pass in the analyzed-and-rewritten form of the query, as well as the
- * required subsidiary data about parameters and such. All passed values will
+ * required subsidiary data about parameters and such. All passed values will
* be copied into the CachedPlanSource's memory, except as specified below.
* After this is called, GetCachedPlan can be called to obtain a plan, and
* optionally the CachedPlanSource can be saved using SaveCachedPlan.
*
* If querytree_context is not NULL, the querytree_list must be stored in that
- * context (but the other parameters need not be). The querytree_list is not
+ * context (but the other parameters need not be). The querytree_list is not
* copied, rather the given context is kept as the initial query_context of
* the CachedPlanSource. (It should have been created as a child of the
* caller's working memory context, but it will now be reparented to belong
@@ -277,8 +277,8 @@ CompleteCachedPlan(CachedPlanSource *plansource,
/*
* Use the planner machinery to extract dependencies. Data is saved in
- * query_context. (We assume that not a lot of extra cruft is created
- * by this call.)
+ * query_context. (We assume that not a lot of extra cruft is created by
+ * this call.)
*/
extract_query_dependencies((Node *) querytree_list,
&plansource->relationOids,
@@ -327,7 +327,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
*
* This is guaranteed not to throw error; callers typically depend on that
* since this is called just before or just after adding a pointer to the
- * CachedPlanSource to some permanent data structure of their own. Up until
+ * CachedPlanSource to some permanent data structure of their own. Up until
* this is done, a CachedPlanSource is just transient data that will go away
* automatically on transaction abort.
*/
@@ -341,16 +341,16 @@ SaveCachedPlan(CachedPlanSource *plansource)
/*
* In typical use, this function would be called before generating any
- * plans from the CachedPlanSource. If there is a generic plan, moving
- * it into CacheMemoryContext would be pretty risky since it's unclear
+ * plans from the CachedPlanSource. If there is a generic plan, moving it
+ * into CacheMemoryContext would be pretty risky since it's unclear
* whether the caller has taken suitable care with making references
- * long-lived. Best thing to do seems to be to discard the plan.
+ * long-lived. Best thing to do seems to be to discard the plan.
*/
ReleaseGenericPlan(plansource);
/*
- * Reparent the source memory context under CacheMemoryContext so that
- * it will live indefinitely. The query_context follows along since it's
+ * Reparent the source memory context under CacheMemoryContext so that it
+ * will live indefinitely. The query_context follows along since it's
* already a child of the other one.
*/
MemoryContextSetParent(plansource->context, CacheMemoryContext);
@@ -474,8 +474,8 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
}
/*
- * Discard the no-longer-useful query tree. (Note: we don't want to
- * do this any earlier, else we'd not have been able to release locks
+ * Discard the no-longer-useful query tree. (Note: we don't want to do
+ * this any earlier, else we'd not have been able to release locks
* correctly in the race condition case.)
*/
plansource->is_valid = false;
@@ -484,14 +484,14 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
plansource->invalItems = NIL;
/*
- * Free the query_context. We don't really expect MemoryContextDelete to
+ * Free the query_context. We don't really expect MemoryContextDelete to
* fail, but just in case, make sure the CachedPlanSource is left in a
- * reasonably sane state. (The generic plan won't get unlinked yet,
- * but that's acceptable.)
+ * reasonably sane state. (The generic plan won't get unlinked yet, but
+ * that's acceptable.)
*/
if (plansource->query_context)
{
- MemoryContext qcxt = plansource->query_context;
+ MemoryContext qcxt = plansource->query_context;
plansource->query_context = NULL;
MemoryContextDelete(qcxt);
@@ -553,7 +553,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
PopOverrideSearchPath();
/*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*
* We assume the parameter types didn't change from the first time, so no
@@ -596,8 +596,8 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
/*
* Use the planner machinery to extract dependencies. Data is saved in
- * query_context. (We assume that not a lot of extra cruft is created
- * by this call.)
+ * query_context. (We assume that not a lot of extra cruft is created by
+ * this call.)
*/
extract_query_dependencies((Node *) qlist,
&plansource->relationOids,
@@ -612,12 +612,12 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
plansource->query_list = qlist;
/*
- * Note: we do not reset generic_cost or total_custom_cost, although
- * we could choose to do so. If the DDL or statistics change that
- * prompted the invalidation meant a significant change in the cost
- * estimates, it would be better to reset those variables and start
- * fresh; but often it doesn't, and we're better retaining our hard-won
- * knowledge about the relative costs.
+ * Note: we do not reset generic_cost or total_custom_cost, although we
+ * could choose to do so. If the DDL or statistics change that prompted
+ * the invalidation meant a significant change in the cost estimates, it
+ * would be better to reset those variables and start fresh; but often it
+ * doesn't, and we're better retaining our hard-won knowledge about the
+ * relative costs.
*/
plansource->is_valid = true;
@@ -728,7 +728,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
* we ought to be holding sufficient locks to prevent any invalidation.
* However, if we're building a custom plan after having built and
* rejected a generic plan, it's possible to reach here with is_valid
- * false due to an invalidation while making the generic plan. In theory
+ * false due to an invalidation while making the generic plan. In theory
* the invalidation must be a false positive, perhaps a consequence of an
* sinval reset event or the CLOBBER_CACHE_ALWAYS debug code. But for
* safety, let's treat it as real and redo the RevalidateCachedQuery call.
@@ -770,10 +770,10 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
}
/*
- * The planner may try to call SPI-using functions, which causes a
- * problem if we're already inside one. Rather than expect all
- * SPI-using code to do SPI_push whenever a replan could happen,
- * it seems best to take care of the case here.
+ * The planner may try to call SPI-using functions, which causes a problem
+ * if we're already inside one. Rather than expect all SPI-using code to
+ * do SPI_push whenever a replan could happen, it seems best to take care
+ * of the case here.
*/
spi_pushed = SPI_push_conditional();
@@ -865,8 +865,8 @@ choose_custom_plan(CachedPlanSource *plansource, ParamListInfo boundParams)
/*
* Prefer generic plan if it's less than 10% more expensive than average
* custom plan. This threshold is a bit arbitrary; it'd be better if we
- * had some means of comparing planning time to the estimated runtime
- * cost differential.
+ * had some means of comparing planning time to the estimated runtime cost
+ * differential.
*
* Note that if generic_cost is -1 (indicating we've not yet determined
* the generic plan cost), we'll always prefer generic at this point.
@@ -966,7 +966,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
{
/* otherwise, it should be a sibling of the plansource */
MemoryContextSetParent(plan->context,
- MemoryContextGetParent(plansource->context));
+ MemoryContextGetParent(plansource->context));
}
/* Update generic_cost whenever we make a new generic plan */
plansource->generic_cost = cached_plan_cost(plan);
@@ -984,7 +984,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
/*
* If we choose to plan again, we need to re-copy the query_list,
- * since the planner probably scribbled on it. We can force
+ * since the planner probably scribbled on it. We can force
* BuildCachedPlan to do that by passing NIL.
*/
qlist = NIL;
@@ -1089,7 +1089,7 @@ CachedPlanSetParentContext(CachedPlanSource *plansource,
*
* This is a convenience routine that does the equivalent of
* CreateCachedPlan + CompleteCachedPlan, using the data stored in the
- * input CachedPlanSource. The result is therefore "unsaved" (regardless
+ * input CachedPlanSource. The result is therefore "unsaved" (regardless
* of the state of the source), and we don't copy any generic plan either.
* The result will be currently valid, or not, the same as the source.
*/
@@ -1233,7 +1233,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
{
/*
* Ignore utility statements, except those (such as EXPLAIN) that
- * contain a parsed-but-not-planned query. Note: it's okay to use
+ * contain a parsed-but-not-planned query. Note: it's okay to use
* ScanQueryForLocks, even though the query hasn't been through
* rule rewriting, because rewriting doesn't change the query
* representation.
@@ -1429,7 +1429,7 @@ plan_list_is_transient(List *stmt_list)
/*
* PlanCacheComputeResultDesc: given a list of analyzed-and-rewritten Queries,
- * determine the result tupledesc it will produce. Returns NULL if the
+ * determine the result tupledesc it will produce. Returns NULL if the
* execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
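
The 10% rule quoted in the choose_custom_plan hunk above is a one-line comparison once the average is in hand. A sketch (generic_cost < 0 encodes "no generic plan costed yet", as the comment notes):

    static int
    want_custom_plan(double generic_cost,
                     double total_custom_cost, int num_custom_plans)
    {
        double  avg_custom_cost;

        if (num_custom_plans == 0 || generic_cost < 0)
            return 0;           /* no basis for comparison: prefer generic */
        avg_custom_cost = total_custom_cost / num_custom_plans;
        return generic_cost > avg_custom_cost * 1.1;    /* nonzero = replan custom */
    }
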
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 7f0e20ec17..4cbf812ed5 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -2540,7 +2540,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* Insert relation physical and logical identifiers (OIDs) into the right
- * places. For a mapped relation, we set relfilenode to zero and rely on
+ * places. For a mapped relation, we set relfilenode to zero and rely on
* RelationInitPhysicalAddr to consult the map.
*/
rel->rd_rel->relisshared = shared_relation;
@@ -3365,9 +3365,9 @@ RelationGetIndexList(Relation relation)
result = insert_ordered_oid(result, index->indexrelid);
/*
- * indclass cannot be referenced directly through the C struct, because
- * it comes after the variable-width indkey field. Must extract the
- * datum the hard way...
+ * indclass cannot be referenced directly through the C struct,
+ * because it comes after the variable-width indkey field. Must
+ * extract the datum the hard way...
*/
indclassDatum = heap_getattr(htup,
Anum_pg_index_indclass,
@@ -4514,8 +4514,8 @@ RelationCacheInitFilePreInvalidate(void)
/*
* The file might not be there if no backend has been started since
* the last removal. But complain about failures other than ENOENT.
- * Fortunately, it's not too late to abort the transaction if we
- * can't get rid of the would-be-obsolete init file.
+ * Fortunately, it's not too late to abort the transaction if we can't
+ * get rid of the would-be-obsolete init file.
*/
if (errno != ENOENT)
ereport(ERROR,
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 4ad5e7fc0b..b408de0730 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -602,10 +602,10 @@ check_TSCurrentConfig(char **newval, void **extra, GucSource source)
cfgId = get_ts_config_oid(stringToQualifiedNameList(*newval), true);
/*
- * When source == PGC_S_TEST, we are checking the argument of an
- * ALTER DATABASE SET or ALTER USER SET command. It could be that
- * the intended use of the setting is for some other database, so
- * we should not error out if the text search configuration is not
+ * When source == PGC_S_TEST, we are checking the argument of an ALTER
+ * DATABASE SET or ALTER USER SET command. It could be that the
+ * intended use of the setting is for some other database, so we
+ * should not error out if the text search configuration is not
* present in the current database. We issue a NOTICE instead.
*/
if (!OidIsValid(cfgId))
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 65c28a7508..a40b343ebc 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -114,7 +114,7 @@ int Log_destination = LOG_DESTINATION_STDERR;
/*
* Max string length to send to syslog(). Note that this doesn't count the
* sequence-number prefix we add, and of course it doesn't count the prefix
- * added by syslog itself. Solaris and sysklogd truncate the final message
+ * added by syslog itself.  Solaris and sysklogd truncate the final message
* at 1024 bytes, so this value leaves 124 bytes for those prefixes. (Most
* other syslog implementations seem to have limits of 2KB or so.)
*/
@@ -1857,8 +1857,8 @@ setup_formatted_log_time(void)
stamp_time = (pg_time_t) tv.tv_sec;
/*
- * Note: we expect that guc.c will ensure that log_timezone is set up
- * (at least with a minimal GMT value) before Log_line_prefix can become
+ * Note: we expect that guc.c will ensure that log_timezone is set up (at
+ * least with a minimal GMT value) before Log_line_prefix can become
* nonempty or CSV mode can be selected.
*/
pg_strftime(formatted_log_time, FORMATTED_TS_LEN,
@@ -1880,8 +1880,8 @@ setup_formatted_start_time(void)
pg_time_t stamp_time = (pg_time_t) MyStartTime;
/*
- * Note: we expect that guc.c will ensure that log_timezone is set up
- * (at least with a minimal GMT value) before Log_line_prefix can become
+ * Note: we expect that guc.c will ensure that log_timezone is set up (at
+ * least with a minimal GMT value) before Log_line_prefix can become
* nonempty or CSV mode can be selected.
*/
pg_strftime(formatted_start_time, FORMATTED_TS_LEN,
@@ -2506,7 +2506,7 @@ send_message_to_server_log(ErrorData *edata)
*
* Note: when there are multiple backends writing into the syslogger pipe,
* it's critical that each write go into the pipe indivisibly, and not
- * get interleaved with data from other processes. Fortunately, the POSIX
+ * get interleaved with data from other processes.  Fortunately, the POSIX
* spec requires that writes to pipes be atomic so long as they are not
* more than PIPE_BUF bytes long. So we divide long messages into chunks
* that are no more than that length, and send one chunk per write() call.
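The send_message_to_server_log comment above leans on POSIX's guarantee that pipe writes of at most PIPE_BUF bytes are atomic. A minimal sketch of that chunking strategy, assuming a bare file descriptor and eliding the per-chunk protocol header the real syslogger pipe adds:

#include <limits.h>
#include <unistd.h>

static void
write_pipe_chunked(int fd, const char *data, size_t len)
{
	while (len > 0)
	{
		size_t		chunk = (len > PIPE_BUF) ? PIPE_BUF : len;

		if (write(fd, data, chunk) < 0)
			return;				/* error handling elided in this sketch */
		data += chunk;
		len -= chunk;
	}
}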
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 788f1801a8..2ec63fae56 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -408,8 +408,8 @@ fmgr_info_other_lang(Oid functionId, FmgrInfo *finfo, HeapTuple procedureTuple)
/*
* Look up the language's call handler function, ignoring any attributes
- * that would normally cause insertion of fmgr_security_definer. We
- * need to get back a bare pointer to the actual C-language function.
+ * that would normally cause insertion of fmgr_security_definer. We need
+ * to get back a bare pointer to the actual C-language function.
*/
fmgr_info_cxt_security(languageStruct->lanplcallfoid, &plfinfo,
CurrentMemoryContext, true);
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index dd914789c0..addf95bca9 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -490,9 +490,9 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
ANYARRAYOID);
if (OidIsValid(anyrange_type))
{
- Oid subtype = resolve_generic_type(ANYELEMENTOID,
- anyrange_type,
- ANYRANGEOID);
+ Oid subtype = resolve_generic_type(ANYELEMENTOID,
+ anyrange_type,
+ ANYRANGEOID);
/* check for inconsistent array and range results */
if (OidIsValid(anyelement_type) && anyelement_type != subtype)
@@ -524,8 +524,8 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
/*
* Identify the collation to use for polymorphic OUT parameters. (It'll
* necessarily be the same for both anyelement and anyarray.) Note that
- * range types are not collatable, so any possible internal collation of
- * a range type is not considered here.
+ * range types are not collatable, so any possible internal collation of a
+ * range type is not considered here.
*/
if (OidIsValid(anyelement_type))
anycollation = get_typcollation(anyelement_type);
@@ -687,9 +687,9 @@ resolve_polymorphic_argtypes(int numargs, Oid *argtypes, char *argmodes,
ANYARRAYOID);
if (OidIsValid(anyrange_type))
{
- Oid subtype = resolve_generic_type(ANYELEMENTOID,
- anyrange_type,
- ANYRANGEOID);
+ Oid subtype = resolve_generic_type(ANYELEMENTOID,
+ anyrange_type,
+ ANYRANGEOID);
/* check for inconsistent array and range results */
if (OidIsValid(anyelement_type) && anyelement_type != subtype)
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 0f734260c1..fb376a0d27 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -631,7 +631,7 @@ GetUserNameFromId(Oid roleid)
* ($DATADIR/postmaster.pid) and a Unix-socket-file lockfile ($SOCKFILE.lock).
* Both kinds of files contain the same info initially, although we can add
* more information to a data-directory lockfile after it's created, using
- * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
+ * AddToDataDirLockFile().  See miscadmin.h for documentation of the contents
* of these lockfiles.
*
* On successful lockfile creation, a proc_exit callback to remove the
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 7de460e0dd..03d68119d1 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1341,7 +1341,7 @@ pg_utf8_islegal(const unsigned char *source, int length)
*
* Not knowing anything about the properties of the encoding in use, we just
* keep incrementing the last byte until we get a validly-encoded result,
- * or we run out of values to try. We don't bother to try incrementing
+ * or we run out of values to try.  We don't bother to try incrementing
* higher-order bytes, so there's no growth in runtime for wider characters.
* (If we did try to do that, we'd need to consider the likelihood that 255
* is not a valid final byte in the encoding.)
@@ -1371,7 +1371,7 @@ pg_generic_charinc(unsigned char *charptr, int len)
* For a one-byte character less than 0x7F, we just increment the byte.
*
* For a multibyte character, every byte but the first must fall between 0x80
- * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
+ * and 0xBF; and the first byte must be between 0xC0 and 0xF4.  We increment
* the last byte that's not already at its maximum value. If we can't find a
* byte that's less than the maximum allowable value, we simply fail. We also
* need some special-case logic to skip regions used for surrogate pair
@@ -1530,7 +1530,7 @@ pg_eucjp_increment(unsigned char *charptr, int length)
return false;
}
else
- { /* ASCII, single byte */
+ { /* ASCII, single byte */
if (c1 > 0x7e)
return false;
(*charptr)++;
@@ -1540,8 +1540,7 @@ pg_eucjp_increment(unsigned char *charptr, int length)
return true;
}
-
-#endif /* !FRONTEND */
+#endif /* !FRONTEND */
/*
@@ -1675,8 +1674,8 @@ mbcharacter_incrementer
pg_database_encoding_character_incrementer(void)
{
/*
- * Eventually it might be best to add a field to pg_wchar_table[],
- * but for now we just use a switch.
+ * Eventually it might be best to add a field to pg_wchar_table[], but for
+ * now we just use a switch.
*/
switch (GetDatabaseEncoding())
{
@@ -1878,10 +1877,10 @@ report_untranslatable_char(int src_encoding, int dest_encoding,
ereport(ERROR,
(errcode(ERRCODE_UNTRANSLATABLE_CHARACTER),
- errmsg("character with byte sequence %s in encoding \"%s\" has no equivalent in encoding \"%s\"",
- buf,
- pg_enc2name_tbl[src_encoding].name,
- pg_enc2name_tbl[dest_encoding].name)));
+ errmsg("character with byte sequence %s in encoding \"%s\" has no equivalent in encoding \"%s\"",
+ buf,
+ pg_enc2name_tbl[src_encoding].name,
+ pg_enc2name_tbl[dest_encoding].name)));
}
-#endif /* !FRONTEND */
+#endif /* !FRONTEND */
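The pg_generic_charinc comment above describes incrementing only the last byte until the result validates. A minimal sketch of that loop, with verifier standing in for an encoding's real validation routine (illustrative, not the wchar.c signature; the real code also restores the original byte on failure):

#include <stdbool.h>

static bool
generic_charinc_sketch(unsigned char *charptr, int len,
					   bool (*verifier) (const unsigned char *, int))
{
	unsigned char *lastbyte = charptr + len - 1;

	while (*lastbyte < 0xFF)
	{
		(*lastbyte)++;
		if (verifier(charptr, len))
			return true;
	}
	return false;				/* ran out of values to try */
}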
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index d75ab43029..b756e58a36 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -3333,7 +3333,7 @@ static void InitializeOneGUCOption(struct config_generic * gconf);
static void push_old_value(struct config_generic * gconf, GucAction action);
static void ReportGUCOption(struct config_generic * record);
static void reapply_stacked_values(struct config_generic * variable,
- struct config_string *pHolder,
+ struct config_string * pHolder,
GucStack *stack,
const char *curvalue,
GucContext curscontext, GucSource cursource);
@@ -4143,8 +4143,8 @@ SelectConfigFiles(const char *userDoption, const char *progname)
/*
* If timezone_abbreviations wasn't set in the configuration file, install
- * the default value. We do it this way because we can't safely install
- * a "real" value until my_exec_path is set, which may not have happened
+ * the default value. We do it this way because we can't safely install a
+ * "real" value until my_exec_path is set, which may not have happened
* when InitializeGUCOptions runs, so the bootstrap default value cannot
* be the real desired default.
*/
@@ -4415,7 +4415,7 @@ NewGUCNestLevel(void)
/*
* Do GUC processing at transaction or subtransaction commit or abort, or
* when exiting a function that has proconfig settings, or when undoing a
- * transient assignment to some GUC variables. (The name is thus a bit of
+ * transient assignment to some GUC variables.  (The name is thus a bit of
* a misnomer; perhaps it should be ExitGUCNestLevel or some such.)
* During abort, we discard all GUC settings that were applied at nesting
* levels >= nestLevel. nestLevel == 1 corresponds to the main transaction.
@@ -5106,7 +5106,7 @@ config_enum_get_options(struct config_enum * record, const char *prefix,
*
* Return value:
* +1: the value is valid and was successfully applied.
- * 0: the name or value is invalid (but see below).
+ * 0: the name or value is invalid (but see below).
* -1: the value was not applied because of context, priority, or changeVal.
*
* If there is an error (non-existing option, invalid value) then an
@@ -6441,7 +6441,7 @@ define_custom_variable(struct config_generic * variable)
* variable. Essentially, we need to duplicate all the active and stacked
* values, but with appropriate validation and datatype adjustment.
*
- * If an assignment fails, we report a WARNING and keep going. We don't
+ * If an assignment fails, we report a WARNING and keep going.  We don't
* want to throw ERROR for bad values, because it'd bollix the add-on
* module that's presumably halfway through getting loaded. In such cases
* the default or previous state will become active instead.
@@ -6469,7 +6469,7 @@ define_custom_variable(struct config_generic * variable)
/*
* Free up as much as we conveniently can of the placeholder structure.
* (This neglects any stack items, so it's possible for some memory to be
- * leaked. Since this can only happen once per session per variable, it
+ * leaked.  Since this can only happen once per session per variable, it
* doesn't seem worth spending much code on.)
*/
set_string_field(pHolder, pHolder->variable, NULL);
@@ -6487,7 +6487,7 @@ define_custom_variable(struct config_generic * variable)
*/
static void
reapply_stacked_values(struct config_generic * variable,
- struct config_string *pHolder,
+ struct config_string * pHolder,
GucStack *stack,
const char *curvalue,
GucContext curscontext, GucSource cursource)
@@ -6526,7 +6526,7 @@ reapply_stacked_values(struct config_generic * variable,
case GUC_SET_LOCAL:
/* first, apply the masked value as SET */
(void) set_config_option(name, stack->masked.val.stringval,
- stack->masked_scontext, PGC_S_SESSION,
+ stack->masked_scontext, PGC_S_SESSION,
GUC_ACTION_SET, true, WARNING);
/* then apply the current value as LOCAL */
(void) set_config_option(name, curvalue,
@@ -6542,7 +6542,7 @@ reapply_stacked_values(struct config_generic * variable,
else
{
/*
- * We are at the end of the stack. If the active/previous value is
+ * We are at the end of the stack.  If the active/previous value is
* different from the reset value, it must represent a previously
* committed session value. Apply it, and then drop the stack entry
* that set_config_option will have created under the impression that
@@ -8028,8 +8028,8 @@ validate_option_array_item(const char *name, const char *value,
*
* name is not known, but exists or can be created as a placeholder (i.e.,
* it has a prefixed name). We allow this case if you're a superuser,
- * otherwise not. Superusers are assumed to know what they're doing.
- * We can't allow it for other users, because when the placeholder is
+ * otherwise not. Superusers are assumed to know what they're doing. We
+ * can't allow it for other users, because when the placeholder is
* resolved it might turn out to be a SUSET variable;
* define_custom_variable assumes we checked that.
*
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index cfb73c1b09..5713bbe12c 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -487,7 +487,7 @@ PortalDrop(Portal portal, bool isTopCommit)
* during transaction abort.
*
* Note: in most paths of control, this will have been done already in
- * MarkPortalDone or MarkPortalFailed. We're just making sure.
+ * MarkPortalDone or MarkPortalFailed.  We're just making sure.
*/
if (PointerIsValid(portal->cleanup))
{
diff --git a/src/backend/utils/sort/sortsupport.c b/src/backend/utils/sort/sortsupport.c
index 7f388fd9bf..b6d916d3e4 100644
--- a/src/backend/utils/sort/sortsupport.c
+++ b/src/backend/utils/sort/sortsupport.c
@@ -24,7 +24,7 @@
typedef struct
{
FunctionCallInfoData fcinfo; /* reusable callinfo structure */
- FmgrInfo flinfo; /* lookup data for comparison function */
+ FmgrInfo flinfo; /* lookup data for comparison function */
} SortShimExtra;
@@ -70,7 +70,6 @@ ApplySortComparator(Datum datum1, bool isNull1,
return compare;
}
-
#endif /* ! USE_INLINE */
/*
@@ -108,7 +107,7 @@ comparison_shim(Datum x, Datum y, SortSupport ssup)
void
PrepareSortSupportComparisonShim(Oid cmpFunc, SortSupport ssup)
{
- SortShimExtra *extra;
+ SortShimExtra *extra;
extra = (SortShimExtra *) MemoryContextAlloc(ssup->ssup_cxt,
sizeof(SortShimExtra));
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 89698181db..d5a2003e5b 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -195,8 +195,8 @@ typedef enum
#define TAPE_BUFFER_OVERHEAD (BLCKSZ * 3)
#define MERGE_BUFFER_SIZE (BLCKSZ * 32)
-typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+typedef int (*SortTupleComparator) (const SortTuple *a, const SortTuple *b,
+ Tuplesortstate *state);
/*
* Private state of a Tuplesort operation.
@@ -226,7 +226,7 @@ struct Tuplesortstate
* <0, 0, >0 according as a<b, a=b, a>b. The API must match
* qsort_arg_comparator.
*/
- SortTupleComparator comparetup;
+ SortTupleComparator comparetup;
/*
* Function to copy a supplied input tuple into palloc'd space and set up
@@ -342,13 +342,13 @@ struct Tuplesortstate
* tuplesort_begin_heap and used only by the MinimalTuple routines.
*/
TupleDesc tupDesc;
- SortSupport sortKeys; /* array of length nKeys */
+ SortSupport sortKeys; /* array of length nKeys */
/*
* This variable is shared by the single-key MinimalTuple case and the
* Datum case (which both use qsort_ssup()). Otherwise it's NULL.
*/
- SortSupport onlyKey;
+ SortSupport onlyKey;
/*
* These variables are specific to the CLUSTER case; they are set by
@@ -634,7 +634,7 @@ tuplesort_begin_heap(TupleDesc tupDesc,
for (i = 0; i < nkeys; i++)
{
- SortSupport sortKey = state->sortKeys + i;
+ SortSupport sortKey = state->sortKeys + i;
AssertArg(attNums[i] != 0);
AssertArg(sortOperators[i] != 0);
@@ -2685,7 +2685,7 @@ inlineApplySortFunction(FmgrInfo *sortFunction, int sk_flags, Oid collation,
static int
comparetup_heap(const SortTuple *a, const SortTuple *b, Tuplesortstate *state)
{
- SortSupport sortKey = state->sortKeys;
+ SortSupport sortKey = state->sortKeys;
HeapTupleData ltup;
HeapTupleData rtup;
TupleDesc tupDesc;
@@ -2806,7 +2806,7 @@ readtup_heap(Tuplesortstate *state, SortTuple *stup,
static void
reversedirection_heap(Tuplesortstate *state)
{
- SortSupport sortKey = state->sortKeys;
+ SortSupport sortKey = state->sortKeys;
int nkey;
for (nkey = 0; nkey < state->nKeys; nkey++, sortKey++)
@@ -3076,9 +3076,10 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
bool isnull[INDEX_MAX_KEYS];
/*
- * Some rather brain-dead implementations of qsort (such as the one in QNX 4)
- * will sometimes call the comparison routine to compare a value to itself,
- * but we always use our own implementation, which does not.
+ * Some rather brain-dead implementations of qsort (such as the one in
+ * QNX 4) will sometimes call the comparison routine to compare a
+ * value to itself, but we always use our own implementation, which
+ * does not.
*/
Assert(tuple1 != tuple2);
@@ -3094,8 +3095,8 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b,
/*
* If key values are equal, we sort on ItemPointer. This does not affect
- * validity of the finished index, but it may be useful to have index scans
- * in physical order.
+ * validity of the finished index, but it may be useful to have index
+ * scans in physical order.
*/
{
BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
@@ -3140,8 +3141,8 @@ comparetup_index_hash(const SortTuple *a, const SortTuple *b,
/*
* If hash values are equal, we sort on ItemPointer. This does not affect
- * validity of the finished index, but it may be useful to have index scans
- * in physical order.
+ * validity of the finished index, but it may be useful to have index
+ * scans in physical order.
*/
tuple1 = (IndexTuple) a->tuple;
tuple2 = (IndexTuple) b->tuple;
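Both comparetup_index_btree and comparetup_index_hash above fall back to the ItemPointer when keys or hash values compare equal. A minimal sketch of that tie-break, with plain integer types standing in for BlockNumber and OffsetNumber:

static int
tid_tiebreak_sketch(unsigned int blk1, unsigned int off1,
					unsigned int blk2, unsigned int off2)
{
	/* compare block numbers first, then offsets within the block */
	if (blk1 != blk2)
		return (blk1 < blk2) ? -1 : 1;
	if (off1 != off2)
		return (off1 < off2) ? -1 : 1;
	return 0;
}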
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 2d30f09ac1..8a7931b856 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -569,7 +569,7 @@ tuplestore_puttuple(Tuplestorestate *state, HeapTuple tuple)
MemoryContext oldcxt = MemoryContextSwitchTo(state->context);
/*
- * Copy the tuple. (Must do this even in WRITEFILE case. Note that
+ * Copy the tuple.  (Must do this even in WRITEFILE case.  Note that
* COPYTUP includes USEMEM, so we needn't do that here.)
*/
tuple = COPYTUP(state, tuple);
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 574099dc9a..7187ca7c98 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -11,7 +11,7 @@
* regd_count and count it in RegisteredSnapshots, but this reference is not
* tracked by a resource owner. We used to use the TopTransactionResourceOwner
* to track this snapshot reference, but that introduces logical circularity
- * and thus makes it impossible to clean up in a sane fashion. It's better to
+ * and thus makes it impossible to clean up in a sane fashion.  It's better to
* handle this reference as an internally-tracked registration, so that this
* module is entirely lower-level than ResourceOwners.
*
@@ -113,7 +113,7 @@ static int RegisteredSnapshots = 0;
bool FirstSnapshotSet = false;
/*
- * Remember the serializable transaction snapshot, if any. We cannot trust
+ * Remember the serializable transaction snapshot, if any.  We cannot trust
* FirstSnapshotSet in combination with IsolationUsesXactSnapshot(), because
* GUC may be reset before us, changing the value of IsolationUsesXactSnapshot.
*/
@@ -269,23 +269,23 @@ SetTransactionSnapshot(Snapshot sourcesnap, TransactionId sourcexid)
* Now we have to fix what GetSnapshotData did with MyPgXact->xmin and
* TransactionXmin. There is a race condition: to make sure we are not
* causing the global xmin to go backwards, we have to test that the
- * source transaction is still running, and that has to be done atomically.
- * So let procarray.c do it.
+ * source transaction is still running, and that has to be done
+ * atomically. So let procarray.c do it.
*
- * Note: in serializable mode, predicate.c will do this a second time.
- * It doesn't seem worth contorting the logic here to avoid two calls,
+ * Note: in serializable mode, predicate.c will do this a second time. It
+ * doesn't seem worth contorting the logic here to avoid two calls,
* especially since it's not clear that predicate.c *must* do this.
*/
if (!ProcArrayInstallImportedXmin(CurrentSnapshot->xmin, sourcexid))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not import the requested snapshot"),
- errdetail("The source transaction %u is not running anymore.",
- sourcexid)));
+ errdetail("The source transaction %u is not running anymore.",
+ sourcexid)));
/*
* In transaction-snapshot mode, the first snapshot must live until end of
- * xact, so we must make a copy of it. Furthermore, if we're running in
+ * xact, so we must make a copy of it.  Furthermore, if we're running in
* serializable mode, predicate.c needs to do its own processing.
*/
if (IsolationUsesXactSnapshot())
@@ -647,8 +647,8 @@ AtEOXact_Snapshot(bool isCommit)
* RegisteredSnapshots to keep the check below happy. But we don't bother
* to do FreeSnapshot, for two reasons: the memory will go away with
* TopTransactionContext anyway, and if someone has left the snapshot
- * stacked as active, we don't want the code below to be chasing through
- * a dangling pointer.
+ * stacked as active, we don't want the code below to be chasing through a
+ * dangling pointer.
*/
if (FirstXactSnapshot != NULL)
{
@@ -668,9 +668,9 @@ AtEOXact_Snapshot(bool isCommit)
char buf[MAXPGPATH];
/*
- * Get rid of the files. Unlink failure is only a WARNING because
- * (1) it's too late to abort the transaction, and (2) leaving a
- * leaked file around has little real consequence anyway.
+ * Get rid of the files. Unlink failure is only a WARNING because (1)
+ * it's too late to abort the transaction, and (2) leaving a leaked
+ * file around has little real consequence anyway.
*/
for (i = 1; i <= list_length(exportedSnapshots); i++)
{
@@ -745,17 +745,17 @@ ExportSnapshot(Snapshot snapshot)
char pathtmp[MAXPGPATH];
/*
- * It's tempting to call RequireTransactionChain here, since it's not
- * very useful to export a snapshot that will disappear immediately
- * afterwards. However, we haven't got enough information to do that,
- * since we don't know if we're at top level or not. For example, we
- * could be inside a plpgsql function that is going to fire off other
- * transactions via dblink. Rather than disallow perfectly legitimate
- * usages, don't make a check.
+ * It's tempting to call RequireTransactionChain here, since it's not very
+ * useful to export a snapshot that will disappear immediately afterwards.
+ * However, we haven't got enough information to do that, since we don't
+ * know if we're at top level or not. For example, we could be inside a
+ * plpgsql function that is going to fire off other transactions via
+ * dblink. Rather than disallow perfectly legitimate usages, don't make a
+ * check.
*
* Also note that we don't make any restriction on the transaction's
- * isolation level; however, importers must check the level if they
- * are serializable.
+ * isolation level; however, importers must check the level if they are
+ * serializable.
*/
/*
@@ -798,8 +798,8 @@ ExportSnapshot(Snapshot snapshot)
/*
* Fill buf with a text serialization of the snapshot, plus identification
- * data about this transaction. The format expected by ImportSnapshot
- * is pretty rigid: each line must be fieldname:value.
+ * data about this transaction. The format expected by ImportSnapshot is
+ * pretty rigid: each line must be fieldname:value.
*/
initStringInfo(&buf);
@@ -830,8 +830,8 @@ ExportSnapshot(Snapshot snapshot)
appendStringInfo(&buf, "xip:%u\n", topXid);
/*
- * Similarly, we add our subcommitted child XIDs to the subxid data.
- * Here, we have to cope with possible overflow.
+ * Similarly, we add our subcommitted child XIDs to the subxid data. Here,
+ * we have to cope with possible overflow.
*/
if (snapshot->suboverflowed ||
snapshot->subxcnt + nchildren > GetMaxSnapshotSubxidCount())
@@ -963,16 +963,16 @@ parseXidFromText(const char *prefix, char **s, const char *filename)
/*
* ImportSnapshot
- * Import a previously exported snapshot. The argument should be a
- * filename in SNAPSHOT_EXPORT_DIR. Load the snapshot from that file.
- * This is called by "SET TRANSACTION SNAPSHOT 'foo'".
+ * Import a previously exported snapshot. The argument should be a
+ * filename in SNAPSHOT_EXPORT_DIR. Load the snapshot from that file.
+ * This is called by "SET TRANSACTION SNAPSHOT 'foo'".
*/
void
ImportSnapshot(const char *idstr)
{
char path[MAXPGPATH];
FILE *f;
- struct stat stat_buf;
+ struct stat stat_buf;
char *filebuf;
int xcnt;
int i;
@@ -985,19 +985,19 @@ ImportSnapshot(const char *idstr)
/*
* Must be at top level of a fresh transaction. Note in particular that
* we check we haven't acquired an XID --- if we have, it's conceivable
- * that the snapshot would show it as not running, making for very
- * screwy behavior.
+ * that the snapshot would show it as not running, making for very screwy
+ * behavior.
*/
if (FirstSnapshotSet ||
GetTopTransactionIdIfAny() != InvalidTransactionId ||
IsSubTransaction())
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("SET TRANSACTION SNAPSHOT must be called before any query")));
+ errmsg("SET TRANSACTION SNAPSHOT must be called before any query")));
/*
- * If we are in read committed mode then the next query would execute
- * with a new snapshot thus making this function call quite useless.
+ * If we are in read committed mode then the next query would execute with
+ * a new snapshot thus making this function call quite useless.
*/
if (!IsolationUsesXactSnapshot())
ereport(ERROR,
@@ -1100,8 +1100,8 @@ ImportSnapshot(const char *idstr)
/*
* If we're serializable, the source transaction must be too, otherwise
- * predicate.c has problems (SxactGlobalXmin could go backwards). Also,
- * a non-read-only transaction can't adopt a snapshot from a read-only
+ * predicate.c has problems (SxactGlobalXmin could go backwards). Also, a
+ * non-read-only transaction can't adopt a snapshot from a read-only
* transaction, as predicate.c handles the cases very differently.
*/
if (IsolationIsSerializable())
@@ -1120,15 +1120,15 @@ ImportSnapshot(const char *idstr)
* We cannot import a snapshot that was taken in a different database,
* because vacuum calculates OldestXmin on a per-database basis; so the
* source transaction's xmin doesn't protect us from data loss. This
- * restriction could be removed if the source transaction were to mark
- * its xmin as being globally applicable. But that would require some
+ * restriction could be removed if the source transaction were to mark its
+ * xmin as being globally applicable. But that would require some
* additional syntax, since that has to be known when the snapshot is
* initially taken. (See pgsql-hackers discussion of 2011-10-21.)
*/
if (src_dbid != MyDatabaseId)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot import a snapshot from a different database")));
+ errmsg("cannot import a snapshot from a different database")));
/* OK, install the snapshot */
SetTransactionSnapshot(&snapshot, src_xid);
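ExportSnapshot's comment above pins the export file format to one fieldname:value pair per line. A minimal sketch of emitting a couple of such lines, with illustrative field choices only (the real file carries many more fields plus identification data about the exporting transaction):

#include <stdio.h>

static void
serialize_snapshot_sketch(FILE *f, unsigned int xmin, unsigned int xmax)
{
	/* rigid format: each line must be fieldname:value */
	fprintf(f, "xmin:%u\n", xmin);
	fprintf(f, "xmax:%u\n", xmax);
}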
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 01f73980af..4caef9ca25 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -1226,7 +1226,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
* in lieu of HeapTupleSatisifesVacuum when the tuple has just been
* tested by HeapTupleSatisfiesMVCC and, therefore, any hint bits that
* can be set should already be set. We assume that if no hint bits
- * either for xmin or xmax, the transaction is still running. This is
+ * either for xmin or xmax, the transaction is still running.  This is
* therefore faster than HeapTupleSatisfiesVacuum, because we don't
* consult CLOG (and also because we don't need to give an exact answer,
* just whether or not the tuple is surely dead).
@@ -1235,10 +1235,10 @@ bool
HeapTupleIsSurelyDead(HeapTupleHeader tuple, TransactionId OldestXmin)
{
/*
- * If the inserting transaction is marked invalid, then it aborted,
- * and the tuple is definitely dead. If it's marked neither committed
- * nor invalid, then we assume it's still alive (since the presumption
- * is that all relevant hint bits were just set moments ago).
+ * If the inserting transaction is marked invalid, then it aborted, and
+ * the tuple is definitely dead. If it's marked neither committed nor
+ * invalid, then we assume it's still alive (since the presumption is that
+ * all relevant hint bits were just set moments ago).
*/
if (!(tuple->t_infomask & HEAP_XMIN_COMMITTED))
return (tuple->t_infomask & HEAP_XMIN_INVALID) != 0 ? true : false;
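HeapTupleIsSurelyDead above trusts hint bits alone. A minimal sketch of the xmin branch its comment describes, with illustrative flag values (the real macros live in htup.h, and the real function goes on to examine xmax as well):

#include <stdbool.h>

#define XMIN_COMMITTED	0x0100	/* illustrative stand-in */
#define XMIN_INVALID	0x0200	/* illustrative stand-in */

static bool
xmin_surely_dead_sketch(unsigned int infomask)
{
	if (!(infomask & XMIN_COMMITTED))
		return (infomask & XMIN_INVALID) != 0;	/* hinted aborted => dead */
	return false;				/* committed xmin: not decided by this branch */
}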
diff --git a/src/bin/initdb/findtimezone.c b/src/bin/initdb/findtimezone.c
index 1ebad182b2..6d6f96add0 100644
--- a/src/bin/initdb/findtimezone.c
+++ b/src/bin/initdb/findtimezone.c
@@ -52,7 +52,7 @@ pg_TZDIR(void)
* This is simpler than the backend function of the same name because
* we assume that the input string has the correct case already, so there
* is no need for case-folding. (This is obviously true if we got the file
- * name from the filesystem to start with. The only other place it can come
+ * name from the filesystem to start with.  The only other place it can come
* from is the environment variable TZ, and there seems no need to allow
* case variation in that; other programs aren't likely to.)
*
@@ -571,7 +571,6 @@ scan_available_timezones(char *tzdir, char *tzdirsub, struct tztry * tt,
pgfnames_cleanup(names);
}
-
#else /* WIN32 */
static const struct
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index 2707334dd5..3789948cdf 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -66,32 +66,32 @@ extern const char *select_default_timezone(const char *share_path);
static const char *auth_methods_host[] = {"trust", "reject", "md5", "password", "ident", "radius",
#ifdef ENABLE_GSS
- "gss",
+ "gss",
#endif
#ifdef ENABLE_SSPI
- "sspi",
+ "sspi",
#endif
#ifdef KRB5
- "krb5",
+ "krb5",
#endif
#ifdef USE_PAM
- "pam", "pam ",
+ "pam", "pam ",
#endif
#ifdef USE_LDAP
- "ldap",
+ "ldap",
#endif
#ifdef USE_SSL
- "cert",
+ "cert",
#endif
- NULL};
+NULL};
static const char *auth_methods_local[] = {"trust", "reject", "md5", "password", "peer", "radius",
#ifdef USE_PAM
- "pam", "pam ",
+ "pam", "pam ",
#endif
#ifdef USE_LDAP
- "ldap",
+ "ldap",
#endif
- NULL};
+NULL};
/*
* these values are passed in by makefile defines
@@ -214,7 +214,7 @@ static void check_ok(void);
static char *escape_quotes(const char *src);
static int locale_date_order(const char *locale);
static bool check_locale_name(int category, const char *locale,
- char **canonname);
+ char **canonname);
static bool check_locale_encoding(const char *locale, int encoding);
static void setlocales(void);
static void usage(const char *progname);
@@ -2244,7 +2244,7 @@ locale_date_order(const char *locale)
* Is the locale name valid for the locale category?
*
* If successful, and canonname isn't NULL, a malloc'd copy of the locale's
- * canonical name is stored there. This is especially useful for figuring out
+ * canonical name is stored there.  This is especially useful for figuring out
* what locale name "" means (ie, the environment value). (Actually,
* it seems that on most implementations that's the only thing it's good for;
* we could wish that setlocale gave back a canonically spelled version of
@@ -2548,7 +2548,7 @@ check_authmethod_unspecified(const char **authmethod)
{
authwarning = _("\nWARNING: enabling \"trust\" authentication for local connections\n"
"You can change this by editing pg_hba.conf or using the option -A, or\n"
- "--auth-local and --auth-host, the next time you run initdb.\n");
+ "--auth-local and --auth-host, the next time you run initdb.\n");
*authmethod = "trust";
}
}
@@ -2676,6 +2676,7 @@ main(int argc, char *argv[])
{
case 'A':
authmethodlocal = authmethodhost = xstrdup(optarg);
+
/*
* When ident is specified, use peer for local connections.
* Mirrored, when peer is specified, use ident for TCP/IP
@@ -2760,9 +2761,9 @@ main(int argc, char *argv[])
}
- /*
- * Non-option argument specifies data directory
- * as long as it wasn't already specified with -D / --pgdata
+ /*
+ * Non-option argument specifies data directory as long as it wasn't
+ * already specified with -D / --pgdata
*/
if (optind < argc && strlen(pg_data) == 0)
{
@@ -3017,18 +3018,18 @@ main(int argc, char *argv[])
*/
#ifdef WIN32
printf(_("Encoding \"%s\" implied by locale is not allowed as a server-side encoding.\n"
- "The default database encoding will be set to \"%s\" instead.\n"),
+ "The default database encoding will be set to \"%s\" instead.\n"),
pg_encoding_to_char(ctype_enc),
pg_encoding_to_char(PG_UTF8));
ctype_enc = PG_UTF8;
encodingid = encodingid_to_string(ctype_enc);
#else
fprintf(stderr,
- _("%s: locale \"%s\" requires unsupported encoding \"%s\"\n"),
+ _("%s: locale \"%s\" requires unsupported encoding \"%s\"\n"),
progname, lc_ctype, pg_encoding_to_char(ctype_enc));
fprintf(stderr,
- _("Encoding \"%s\" is not allowed as a server-side encoding.\n"
- "Rerun %s with a different locale selection.\n"),
+ _("Encoding \"%s\" is not allowed as a server-side encoding.\n"
+ "Rerun %s with a different locale selection.\n"),
pg_encoding_to_char(ctype_enc), progname);
exit(1);
#endif
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index c3a0d89897..66a281477a 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -63,6 +63,7 @@ static pid_t bgchild = -1;
/* End position for xlog streaming, empty string if unknown yet */
static XLogRecPtr xlogendptr;
+
#ifndef WIN32
static int has_xlogendptr = 0;
#else
@@ -223,14 +224,14 @@ typedef struct
char xlogdir[MAXPGPATH];
char *sysidentifier;
int timeline;
-} logstreamer_param;
+} logstreamer_param;
static int
-LogStreamerMain(logstreamer_param * param)
+LogStreamerMain(logstreamer_param *param)
{
if (!ReceiveXlogStream(param->bgconn, param->startptr, param->timeline,
param->sysidentifier, param->xlogdir,
- reached_end_position, standby_message_timeout, true))
+ reached_end_position, standby_message_timeout, true))
/*
* Any errors will already have been reported in the function process,
@@ -1092,7 +1093,7 @@ BaseBackup(void)
int status;
int r;
#else
- DWORD status;
+ DWORD status;
#endif
if (verbose)
diff --git a/src/bin/pg_basebackup/pg_receivexlog.c b/src/bin/pg_basebackup/pg_receivexlog.c
index 67a70bcf71..1acb7ccb56 100644
--- a/src/bin/pg_basebackup/pg_receivexlog.c
+++ b/src/bin/pg_basebackup/pg_receivexlog.c
@@ -92,7 +92,7 @@ stop_streaming(XLogRecPtr segendpos, uint32 timeline, bool segment_finished)
/*
* Determine starting location for streaming, based on:
* 1. If there are existing xlog segments, start at the end of the last one
- * that is complete (size matches XLogSegSize)
+ * that is complete (size matches XLogSegSize)
* 2. If no valid xlog exists, start from the beginning of the current
* WAL segment.
*/
@@ -190,9 +190,10 @@ FindStreamingStart(XLogRecPtr currentpos, uint32 currenttimeline)
if (high_log > 0 || high_seg > 0)
{
XLogRecPtr high_ptr;
+
/*
- * Move the starting pointer to the start of the next segment,
- * since the highest one we've seen was completed.
+ * Move the starting pointer to the start of the next segment, since
+ * the highest one we've seen was completed.
*/
NextLogSeg(high_log, high_seg);
@@ -284,7 +285,6 @@ sigint_handler(int signum)
{
time_to_abort = true;
}
-
#endif
int
@@ -413,9 +413,10 @@ main(int argc, char **argv)
{
StreamLog();
if (time_to_abort)
+
/*
- * We've been Ctrl-C'ed. That's not an error, so exit without
- * an errorcode.
+ * We've been Ctrl-C'ed. That's not an error, so exit without an
+ * errorcode.
*/
exit(0);
else if (noloop)
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index a51a40edfd..9dd94e1140 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -52,7 +52,7 @@ open_walfile(XLogRecPtr startpoint, uint32 timeline, char *basedir, char *namebu
{
int f;
char fn[MAXPGPATH];
- struct stat statbuf;
+ struct stat statbuf;
char *zerobuf;
int bytes;
@@ -80,7 +80,7 @@ open_walfile(XLogRecPtr startpoint, uint32 timeline, char *basedir, char *namebu
return -1;
}
if (statbuf.st_size == XLogSegSize)
- return f; /* File is open and ready to use */
+ return f; /* File is open and ready to use */
if (statbuf.st_size != 0)
{
fprintf(stderr, _("%s: WAL segment %s is %d bytes, should be 0 or %d\n"),
@@ -147,8 +147,8 @@ close_walfile(int walfile, char *basedir, char *walname, bool segment_complete)
}
/*
- * Rename the .partial file only if we've completed writing the
- * whole segment or segment_complete is true.
+ * Rename the .partial file only if we've completed writing the whole
+ * segment or segment_complete is true.
*/
if (currpos == XLOG_SEG_SIZE || segment_complete)
{
@@ -202,7 +202,7 @@ localGetCurrentTimestamp(void)
*/
static void
localTimestampDifference(TimestampTz start_time, TimestampTz stop_time,
- long *secs, int *microsecs)
+ long *secs, int *microsecs)
{
TimestampTz diff = stop_time - start_time;
@@ -229,8 +229,8 @@ localTimestampDifference(TimestampTz start_time, TimestampTz stop_time,
*/
static bool
localTimestampDifferenceExceeds(TimestampTz start_time,
- TimestampTz stop_time,
- int msec)
+ TimestampTz stop_time,
+ int msec)
{
TimestampTz diff = stop_time - start_time;
@@ -392,14 +392,14 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline, char *sysi
FD_SET(PQsocket(conn), &input_mask);
if (standby_message_timeout)
{
- TimestampTz targettime;
+ TimestampTz targettime;
targettime = TimestampTzPlusMilliseconds(last_status,
- standby_message_timeout - 1);
+ standby_message_timeout - 1);
localTimestampDifference(now,
targettime,
&timeout.tv_sec,
- (int *)&timeout.tv_usec);
+ (int *) &timeout.tv_usec);
if (timeout.tv_sec <= 0)
timeout.tv_sec = 1; /* Always sleep at least 1 sec */
timeoutptr = &timeout;
@@ -444,9 +444,8 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline, char *sysi
if (copybuf[0] == 'k')
{
/*
- * keepalive message, sent in 9.2 and newer. We just ignore
- * this message completely, but need to skip past it in the
- * stream.
+ * keepalive message, sent in 9.2 and newer. We just ignore this
+ * message completely, but need to skip past it in the stream.
*/
if (r != STREAMING_KEEPALIVE_SIZE)
{
diff --git a/src/bin/pg_basebackup/receivelog.h b/src/bin/pg_basebackup/receivelog.h
index 0a803ee4ac..7176a68bea 100644
--- a/src/bin/pg_basebackup/receivelog.h
+++ b/src/bin/pg_basebackup/receivelog.h
@@ -4,13 +4,13 @@
* Called before trying to read more data or when a segment is
* finished. Return true to stop streaming.
*/
-typedef bool (*stream_stop_callback)(XLogRecPtr segendpos, uint32 timeline, bool segment_finished);
+typedef bool (*stream_stop_callback) (XLogRecPtr segendpos, uint32 timeline, bool segment_finished);
extern bool ReceiveXlogStream(PGconn *conn,
- XLogRecPtr startpos,
- uint32 timeline,
- char *sysidentifier,
- char *basedir,
- stream_stop_callback stream_stop,
- int standby_message_timeout,
- bool rename_partial);
+ XLogRecPtr startpos,
+ uint32 timeline,
+ char *sysidentifier,
+ char *basedir,
+ stream_stop_callback stream_stop,
+ int standby_message_timeout,
+ bool rename_partial);
diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c
index 1416faa2e3..1b4a9d240b 100644
--- a/src/bin/pg_basebackup/streamutil.c
+++ b/src/bin/pg_basebackup/streamutil.c
@@ -164,8 +164,8 @@ GetConnection(void)
free(keywords);
/*
- * Ensure we have the same value of integer timestamps as the
- * server we are connecting to.
+ * Ensure we have the same value of integer timestamps as the server
+ * we are connecting to.
*/
tmpparam = PQparameterStatus(tmpconn, "integer_datetimes");
if (!tmpparam)
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 766ba95be1..38828ec914 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -1170,7 +1170,7 @@ do_status(void)
}
}
else
- /* must be a postmaster */
+ /* must be a postmaster */
{
if (postmaster_is_alive((pid_t) pid))
{
@@ -1188,9 +1188,12 @@ do_status(void)
}
}
printf(_("%s: no server running\n"), progname);
+
/*
- * The Linux Standard Base Core Specification 3.1 says this should return '3'
- * http://refspecs.freestandards.org/LSB_3.1.1/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
+ * The Linux Standard Base Core Specification 3.1 says this should return
+ * '3'
+ * http://refspecs.freestandards.org/LSB_3.1.1/LSB-Core-generic/LSB-Core-ge
+ * neric/iniscrptact.html
*/
exit(3);
}
@@ -1851,7 +1854,7 @@ set_sig(char *signame)
sig = SIGABRT;
#if 0
/* probably should NOT provide SIGKILL */
- else if (strcmp(signame,"KILL") == 0)
+ else if (strcmp(signame, "KILL") == 0)
sig = SIGKILL;
#endif
else if (strcmp(signame, "TERM") == 0)
@@ -1894,7 +1897,9 @@ set_starttype(char *starttypeopt)
static void
adjust_data_dir(void)
{
- char cmd[MAXPGPATH], filename[MAXPGPATH], *my_exec_path;
+ char cmd[MAXPGPATH],
+ filename[MAXPGPATH],
+ *my_exec_path;
FILE *fd;
/* If there is no postgresql.conf, it can't be a config-only dir */
@@ -2184,7 +2189,7 @@ main(int argc, char **argv)
}
adjust_data_dir();
-
+
if (pg_config == NULL &&
ctl_command != KILL_COMMAND && ctl_command != UNREGISTER_COMMAND)
{
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index 42d3645a6d..b02217e81d 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -597,8 +597,8 @@ buildIndexArray(void *objArray, int numObjs, Size objSize)
static int
DOCatalogIdCompare(const void *p1, const void *p2)
{
- const DumpableObject *obj1 = *(DumpableObject * const *) p1;
- const DumpableObject *obj2 = *(DumpableObject * const *) p2;
+ const DumpableObject *obj1 = *(DumpableObject *const *) p1;
+ const DumpableObject *obj2 = *(DumpableObject *const *) p2;
int cmpval;
/*
diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index f5d7b5d859..8574380b9c 100644
--- a/src/bin/pg_dump/dumputils.c
+++ b/src/bin/pg_dump/dumputils.c
@@ -33,11 +33,11 @@ const char *progname = NULL;
static struct
{
- on_exit_nicely_callback function;
+ on_exit_nicely_callback function;
void *arg;
-} on_exit_nicely_list[MAX_ON_EXIT_NICELY];
+} on_exit_nicely_list[MAX_ON_EXIT_NICELY];
-static int on_exit_nicely_index;
+static int on_exit_nicely_index;
#define supports_grant_options(version) ((version) >= 70400)
@@ -1221,9 +1221,9 @@ emitShSecLabels(PGconn *conn, PGresult *res, PQExpBuffer buffer,
int i;
for (i = 0; i < PQntuples(res); i++)
- {
- char *provider = PQgetvalue(res, i, 0);
- char *label = PQgetvalue(res, i, 1);
+ {
+ char *provider = PQgetvalue(res, i, 0);
+ char *label = PQgetvalue(res, i, 1);
/* must use fmtId result before calling it again */
appendPQExpBuffer(buffer,
@@ -1233,7 +1233,7 @@ emitShSecLabels(PGconn *conn, PGresult *res, PQExpBuffer buffer,
" %s IS ",
fmtId(objname));
appendStringLiteralConn(buffer, label, conn);
- appendPQExpBuffer(buffer, ";\n");
+ appendPQExpBuffer(buffer, ";\n");
}
}
@@ -1252,11 +1252,11 @@ set_dump_section(const char *arg, int *dumpSections)
if (*dumpSections == DUMP_UNSECTIONED)
*dumpSections = 0;
- if (strcmp(arg,"pre-data") == 0)
+ if (strcmp(arg, "pre-data") == 0)
*dumpSections |= DUMP_PRE_DATA;
- else if (strcmp(arg,"data") == 0)
+ else if (strcmp(arg, "data") == 0)
*dumpSections |= DUMP_DATA;
- else if (strcmp(arg,"post-data") == 0)
+ else if (strcmp(arg, "post-data") == 0)
*dumpSections |= DUMP_POST_DATA;
else
{
@@ -1304,7 +1304,7 @@ vwrite_msg(const char *modulename, const char *fmt, va_list ap)
/*
- * Fail and die, with a message to stderr. Parameters as for write_msg.
+ * Fail and die, with a message to stderr.  Parameters as for write_msg.
*/
void
exit_horribly(const char *modulename, const char *fmt,...)
@@ -1336,11 +1336,11 @@ on_exit_nicely(on_exit_nicely_callback function, void *arg)
void
exit_nicely(int code)
{
- int i;
+ int i;
for (i = on_exit_nicely_index - 1; i >= 0; i--)
- (*on_exit_nicely_list[i].function)(code,
- on_exit_nicely_list[i].arg);
+ (*on_exit_nicely_list[i].function) (code,
+ on_exit_nicely_list[i].arg);
#ifdef WIN32
if (parallel_init_done && GetCurrentThreadId() != mainThreadId)
diff --git a/src/bin/pg_dump/dumputils.h b/src/bin/pg_dump/dumputils.h
index 3d1ed9570a..4ef8cb3a49 100644
--- a/src/bin/pg_dump/dumputils.h
+++ b/src/bin/pg_dump/dumputils.h
@@ -63,12 +63,15 @@ extern void buildShSecLabelQuery(PGconn *conn, const char *catalog_name,
extern void emitShSecLabels(PGconn *conn, PGresult *res,
PQExpBuffer buffer, const char *target, const char *objname);
extern void set_dump_section(const char *arg, int *dumpSections);
-extern void write_msg(const char *modulename, const char *fmt,...)
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
-extern void vwrite_msg(const char *modulename, const char *fmt, va_list ap)
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)));
-extern void exit_horribly(const char *modulename, const char *fmt,...)
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3), noreturn));
+extern void
+write_msg(const char *modulename, const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+extern void
+vwrite_msg(const char *modulename, const char *fmt, va_list ap)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)));
+extern void
+exit_horribly(const char *modulename, const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3), noreturn));
extern void on_exit_nicely(on_exit_nicely_callback function, void *arg);
extern void exit_nicely(int code) __attribute__((noreturn));
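The dumputils hunks above reformat pg_dump's exit-callback machinery. A minimal sketch of that pattern, registering handlers in a fixed array and running them last-in-first-out at exit (the names and the MAX_HANDLERS bound are illustrative, not the dumputils API):

#define MAX_HANDLERS 20

typedef void (*exit_cb) (int code, void *arg);

static struct
{
	exit_cb		fn;
	void	   *arg;
}			handlers[MAX_HANDLERS];
static int	n_handlers;

static void
register_handler_sketch(exit_cb fn, void *arg)
{
	if (n_handlers < MAX_HANDLERS)
	{
		handlers[n_handlers].fn = fn;
		handlers[n_handlers].arg = arg;
		n_handlers++;
	}
}

static void
run_handlers_sketch(int code)
{
	int			i;

	/* run in reverse registration order, like an atexit stack */
	for (i = n_handlers - 1; i >= 0; i--)
		handlers[i].fn(code, handlers[i].arg);
}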
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index bf7cc1c1ac..3b49395ecb 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -110,7 +110,7 @@ typedef struct _restoreOptions
const char *filename;
int dataOnly;
int schemaOnly;
- int dumpSections;
+ int dumpSections;
int verbose;
int aclsSkip;
int tocSummary;
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index c049becf12..5826bace68 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -95,7 +95,7 @@ typedef struct _parallel_slot
typedef struct ShutdownInformation
{
ParallelState *pstate;
- Archive *AHX;
+ Archive *AHX;
} ShutdownInformation;
static ShutdownInformation shutdown_info;
@@ -529,8 +529,8 @@ restore_toc_entry(ArchiveHandle *AH, TocEntry *te,
/*
* Ignore DATABASE entry unless we should create it. We must check this
- * here, not in _tocEntryRequired, because the createDB option should
- * not affect emitting a DATABASE entry to an archive file.
+ * here, not in _tocEntryRequired, because the createDB option should not
+ * affect emitting a DATABASE entry to an archive file.
*/
if (!ropt->createDB && strcmp(te->desc, "DATABASE") == 0)
reqs = 0;
@@ -1296,7 +1296,7 @@ RestoreOutput(ArchiveHandle *AH, OutputContext savedContext)
if (res != 0)
exit_horribly(modulename, "could not close output file: %s\n",
- strerror(errno));
+ strerror(errno));
AH->gzOut = savedContext.gzOut;
AH->OF = savedContext.OF;
@@ -1317,8 +1317,8 @@ ahprintf(ArchiveHandle *AH, const char *fmt,...)
/*
* This is paranoid: deal with the possibility that vsnprintf is willing
- * to ignore trailing null or returns > 0 even if string does not fit.
- * It may be the case that it returns cnt = bufsize.
+ * to ignore trailing null or returns > 0 even if string does not fit. It
+ * may be the case that it returns cnt = bufsize.
*/
while (cnt < 0 || cnt >= (bSize - 1))
{
@@ -1456,7 +1456,7 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
res = fwrite(ptr, size, nmemb, AH->OF);
if (res != nmemb)
exit_horribly(modulename, "could not write to output file: %s\n",
- strerror(errno));
+ strerror(errno));
return res;
}
}
@@ -1465,7 +1465,7 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
/* on some error, we may decide to go on... */
void
warn_or_exit_horribly(ArchiveHandle *AH,
- const char *modulename, const char *fmt,...)
+ const char *modulename, const char *fmt,...)
{
va_list ap;
@@ -1549,7 +1549,7 @@ _moveBefore(ArchiveHandle *AH, TocEntry *pos, TocEntry *te)
* items.
*
* The arrays are indexed by dump ID (so entry zero is unused). Note that the
- * array entries run only up to maxDumpId. We might see dependency dump IDs
+ * array entries run only up to maxDumpId.  We might see dependency dump IDs
* beyond that (if the dump was partial); so always check the array bound
* before trying to touch an array entry.
*/
@@ -1573,7 +1573,7 @@ buildTocEntryArrays(ArchiveHandle *AH)
/*
* tableDataId provides the TABLE DATA item's dump ID for each TABLE
- * TOC entry that has a DATA item. We compute this by reversing the
+ * TOC entry that has a DATA item.  We compute this by reversing the
* TABLE DATA item's dependency, knowing that a TABLE DATA item has
* just one dependency and it is the TABLE item.
*/
@@ -1925,8 +1925,8 @@ _discoverArchiveFormat(ArchiveHandle *AH)
else
{
/*
- * *Maybe* we have a tar archive format file or a text dump ...
- * So, read first 512 byte header...
+ * *Maybe* we have a tar archive format file or a text dump ... So,
+ * read first 512 byte header...
*/
cnt = fread(&AH->lookahead[AH->lookaheadLen], 1, 512 - AH->lookaheadLen, fh);
AH->lookaheadLen += cnt;
@@ -1935,7 +1935,10 @@ _discoverArchiveFormat(ArchiveHandle *AH)
(strncmp(AH->lookahead, TEXT_DUMP_HEADER, strlen(TEXT_DUMP_HEADER)) == 0 ||
strncmp(AH->lookahead, TEXT_DUMPALL_HEADER, strlen(TEXT_DUMPALL_HEADER)) == 0))
{
- /* looks like it's probably a text format dump. so suggest they try psql */
+ /*
+ * looks like it's probably a text format dump. so suggest they
+ * try psql
+ */
exit_horribly(modulename, "input file appears to be a text format dump. Please use psql.\n");
}
@@ -2217,7 +2220,7 @@ ReadToc(ArchiveHandle *AH)
/* Sanity check */
if (te->dumpId <= 0)
exit_horribly(modulename,
- "entry ID %d out of range -- perhaps a corrupt TOC\n",
+ "entry ID %d out of range -- perhaps a corrupt TOC\n",
te->dumpId);
te->hadDumper = ReadInt(AH);
@@ -2835,8 +2838,8 @@ _selectTablespace(ArchiveHandle *AH, const char *tablespace)
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
warn_or_exit_horribly(AH, modulename,
- "could not set default_tablespace to %s: %s",
- fmtId(want), PQerrorMessage(AH->connection));
+ "could not set default_tablespace to %s: %s",
+ fmtId(want), PQerrorMessage(AH->connection));
PQclear(res);
}
@@ -3043,7 +3046,7 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isDat
if (te->tablespace && !ropt->noTablespace)
{
- char *sanitized_tablespace;
+ char *sanitized_tablespace;
sanitized_tablespace = replace_line_endings(te->tablespace);
ahprintf(AH, "; Tablespace: %s", sanitized_tablespace);
@@ -3150,8 +3153,8 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isDat
static char *
replace_line_endings(const char *str)
{
- char *result;
- char *s;
+ char *result;
+ char *s;
result = pg_strdup(str);
@@ -3381,7 +3384,7 @@ unsetProcessIdentifier(ParallelStateEntry *pse)
static ParallelStateEntry *
GetMyPSEntry(ParallelState *pstate)
{
- int i;
+ int i;
for (i = 0; i < pstate->numWorkers; i++)
#ifdef WIN32
@@ -3509,8 +3512,8 @@ restore_toc_entries_parallel(ArchiveHandle *AH)
DisconnectDatabase(&AH->public);
/*
- * Set the pstate in the shutdown_info. The exit handler uses pstate if set
- * and falls back to AHX otherwise.
+ * Set the pstate in the shutdown_info. The exit handler uses pstate if
+ * set and falls back to AHX otherwise.
*/
shutdown_info.pstate = pstate;
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index 4361805baa..8859bd9776 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -256,7 +256,7 @@ typedef struct _archiveHandle
DumpId maxDumpId; /* largest DumpId among all TOC entries */
/* arrays created after the TOC list is complete: */
- struct _tocEntry **tocsByDumpId; /* TOCs indexed by dumpId */
+ struct _tocEntry **tocsByDumpId; /* TOCs indexed by dumpId */
DumpId *tableDataId; /* TABLE DATA ids, indexed by table dumpId */
struct _tocEntry *currToc; /* Used when dumping data */
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index 2156d0f619..204309d6ad 100644
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -466,7 +466,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
else if (!ctx->hasSeek)
exit_horribly(modulename, "could not find block ID %d in archive -- "
"possibly due to out-of-order restore request, "
- "which cannot be handled due to non-seekable input file\n",
+ "which cannot be handled due to non-seekable input file\n",
te->dumpId);
else /* huh, the dataPos led us to EOF? */
exit_horribly(modulename, "could not find block ID %d in archive -- "
@@ -572,10 +572,10 @@ _skipData(ArchiveHandle *AH)
{
if (feof(AH->FH))
exit_horribly(modulename,
- "could not read from input file: end of file\n");
+ "could not read from input file: end of file\n");
else
exit_horribly(modulename,
- "could not read from input file: %s\n", strerror(errno));
+ "could not read from input file: %s\n", strerror(errno));
}
ctx->filePos += blkLen;
@@ -646,7 +646,7 @@ _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
if (res != len)
exit_horribly(modulename,
- "could not write to output file: %s\n", strerror(errno));
+ "could not write to output file: %s\n", strerror(errno));
ctx->filePos += res;
return res;
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index b315e68192..d912aaf77f 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -182,7 +182,7 @@ _connectDB(ArchiveHandle *AH, const char *reqdb, const char *requser)
{
if (!PQconnectionNeedsPassword(newConn))
exit_horribly(modulename, "could not reconnect to database: %s",
- PQerrorMessage(newConn));
+ PQerrorMessage(newConn));
PQfinish(newConn);
if (password)
@@ -300,7 +300,7 @@ ConnectDatabase(Archive *AHX,
/* check to see that the backend connection was successfully made */
if (PQstatus(AH->connection) == CONNECTION_BAD)
exit_horribly(modulename, "connection to database \"%s\" failed: %s",
- PQdb(AH->connection), PQerrorMessage(AH->connection));
+ PQdb(AH->connection), PQerrorMessage(AH->connection));
/* check for version mismatch */
_check_database_version(AH);
@@ -313,7 +313,7 @@ DisconnectDatabase(Archive *AHX)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
- PQfinish(AH->connection); /* noop if AH->connection is NULL */
+ PQfinish(AH->connection); /* noop if AH->connection is NULL */
AH->connection = NULL;
}
@@ -343,7 +343,7 @@ die_on_query_failure(ArchiveHandle *AH, const char *modulename, const char *quer
void
ExecuteSqlStatement(Archive *AHX, const char *query)
{
- ArchiveHandle *AH = (ArchiveHandle *) AHX;
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
PGresult *res;
res = PQexec(AH->connection, query);
@@ -355,7 +355,7 @@ ExecuteSqlStatement(Archive *AHX, const char *query)
PGresult *
ExecuteSqlQuery(Archive *AHX, const char *query, ExecStatusType status)
{
- ArchiveHandle *AH = (ArchiveHandle *) AHX;
+ ArchiveHandle *AH = (ArchiveHandle *) AHX;
PGresult *res;
res = PQexec(AH->connection, query);
@@ -436,7 +436,7 @@ ExecuteInsertCommands(ArchiveHandle *AH, const char *buf, size_t bufLen)
for (; qry < eos; qry++)
{
- char ch = *qry;
+ char ch = *qry;
/* For neatness, we skip any newlines between commands */
if (!(ch == '\n' && AH->sqlparse.curCmd->len == 0))
@@ -526,7 +526,7 @@ ExecuteSqlCommandBuf(ArchiveHandle *AH, const char *buf, size_t bufLen)
ExecuteSqlCommand(AH, buf, "could not execute query");
else
{
- char *str = (char *) pg_malloc(bufLen + 1);
+ char *str = (char *) pg_malloc(bufLen + 1);
memcpy(str, buf, bufLen);
str[bufLen] = '\0';
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 629e309abe..32dcb12fdf 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -178,7 +178,7 @@ InitArchiveFmt_Directory(ArchiveHandle *AH)
/* Nothing else in the file, so close it again... */
if (cfclose(tocFH) != 0)
exit_horribly(modulename, "could not close TOC file: %s\n",
- strerror(errno));
+ strerror(errno));
ctx->dataFH = NULL;
}
}
@@ -347,7 +347,7 @@ _PrintFileData(ArchiveHandle *AH, char *filename, RestoreOptions *ropt)
if (!cfp)
exit_horribly(modulename, "could not open input file \"%s\": %s\n",
- filename, strerror(errno));
+ filename, strerror(errno));
buf = pg_malloc(ZLIB_OUT_SIZE);
buflen = ZLIB_OUT_SIZE;
@@ -356,9 +356,9 @@ _PrintFileData(ArchiveHandle *AH, char *filename, RestoreOptions *ropt)
ahwrite(buf, 1, cnt, AH);
free(buf);
- if (cfclose(cfp) != 0)
+ if (cfclose(cfp) != 0)
exit_horribly(modulename, "could not close data file: %s\n",
- strerror(errno));
+ strerror(errno));
}
/*
@@ -417,7 +417,7 @@ _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
}
if (!cfeof(ctx->blobsTocFH))
exit_horribly(modulename, "error reading large object TOC file \"%s\"\n",
- fname);
+ fname);
if (cfclose(ctx->blobsTocFH) != 0)
exit_horribly(modulename, "could not close large object TOC file \"%s\": %s\n",
@@ -478,7 +478,7 @@ _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len)
res = cfwrite(buf, len, ctx->dataFH);
if (res != len)
exit_horribly(modulename, "could not write to output file: %s\n",
- strerror(errno));
+ strerror(errno));
return res;
}
@@ -589,7 +589,7 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
if (ctx->dataFH == NULL)
exit_horribly(modulename, "could not open output file \"%s\": %s\n",
- fname, strerror(errno));
+ fname, strerror(errno));
}
/*
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index 9fe2b14df5..c5e19968b7 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -111,7 +111,7 @@ static void tarClose(ArchiveHandle *AH, TAR_MEMBER *TH);
#ifdef __NOT_USED__
static char *tarGets(char *buf, size_t len, TAR_MEMBER *th);
#endif
-static int tarPrintf(ArchiveHandle *AH, TAR_MEMBER *th, const char *fmt, ...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
+static int tarPrintf(ArchiveHandle *AH, TAR_MEMBER *th, const char *fmt,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
static void _tarAddFile(ArchiveHandle *AH, TAR_MEMBER *th);
static int _tarChecksum(char *th);
@@ -177,7 +177,7 @@ InitArchiveFmt_Tar(ArchiveHandle *AH)
ctx->tarFH = fopen(AH->fSpec, PG_BINARY_W);
if (ctx->tarFH == NULL)
exit_horribly(modulename,
- "could not open TOC file \"%s\" for output: %s\n",
+ "could not open TOC file \"%s\" for output: %s\n",
AH->fSpec, strerror(errno));
}
else
@@ -213,7 +213,7 @@ InitArchiveFmt_Tar(ArchiveHandle *AH)
*/
if (AH->compression != 0)
exit_horribly(modulename,
- "compression is not supported by tar archive format\n");
+ "compression is not supported by tar archive format\n");
}
else
{ /* Read Mode */
@@ -585,7 +585,7 @@ tarWrite(const void *buf, size_t len, TAR_MEMBER *th)
if (res != len)
exit_horribly(modulename,
- "could not write to output file: %s\n", strerror(errno));
+ "could not write to output file: %s\n", strerror(errno));
th->pos += res;
return res;
@@ -1230,7 +1230,7 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
snprintf(buf2, sizeof(buf2), INT64_FORMAT, (int64) ftello(ctx->tarFHpos));
exit_horribly(modulename,
"mismatch in actual vs. predicted file position (%s vs. %s)\n",
- buf1, buf2);
+ buf1, buf2);
}
#endif
@@ -1245,7 +1245,7 @@ _tarGetHeader(ArchiveHandle *AH, TAR_MEMBER *th)
if (len != 512)
exit_horribly(modulename,
ngettext("incomplete tar header found (%lu byte)\n",
- "incomplete tar header found (%lu bytes)\n",
+ "incomplete tar header found (%lu bytes)\n",
len),
(unsigned long) len);
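
The hunk above merely re-wraps an ngettext() call, but the call itself is a useful pattern: ngettext() selects the singular or plural msgid from the count, so the message reads naturally whether len is 1 or not. A minimal self-contained sketch of the same pattern:

    #include <libintl.h>
    #include <stdio.h>

    /* Report a short tar header read; ngettext() picks "(%lu byte)" when
     * len == 1 and "(%lu bytes)" otherwise (a loaded translation catalog
     * may apply different plural rules). */
    static void
    report_short_header(unsigned long len)
    {
        fprintf(stderr,
                ngettext("incomplete tar header found (%lu byte)\n",
                         "incomplete tar header found (%lu bytes)\n",
                         len),
                len);
    }
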
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 3461f3e34c..d9aeee3c8d 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -221,9 +221,9 @@ static char *format_function_arguments_old(Archive *fout,
char **argmodes,
char **argnames);
static char *format_function_signature(Archive *fout,
- FuncInfo *finfo, bool honor_quotes);
+ FuncInfo *finfo, bool honor_quotes);
static const char *convertRegProcReference(Archive *fout,
- const char *proc);
+ const char *proc);
static const char *convertOperatorReference(Archive *fout, const char *opr);
static const char *convertTSFunction(Archive *fout, Oid funcOid);
static Oid findLastBuiltinOid_V71(Archive *fout, const char *);
@@ -232,7 +232,7 @@ static void selectSourceSchema(Archive *fout, const char *schemaName);
static char *getFormattedTypeName(Archive *fout, Oid oid, OidOptions opts);
static char *myFormatType(const char *typname, int32 typmod);
static const char *fmtQualifiedId(Archive *fout,
- const char *schema, const char *id);
+ const char *schema, const char *id);
static void getBlobs(Archive *fout);
static void dumpBlob(Archive *fout, BlobInfo *binfo);
static int dumpBlobs(Archive *fout, void *arg);
@@ -285,7 +285,7 @@ main(int argc, char **argv)
RestoreOptions *ropt;
ArchiveFormat archiveFormat = archUnknown;
ArchiveMode archiveMode;
- Archive *fout; /* the script file */
+ Archive *fout; /* the script file */
static int disable_triggers = 0;
static int outputNoTablespaces = 0;
@@ -495,7 +495,7 @@ main(int argc, char **argv)
use_role = optarg;
break;
- case 4: /* exclude table(s) data */
+ case 4: /* exclude table(s) data */
simple_string_list_append(&tabledata_exclude_patterns, optarg);
break;
@@ -605,7 +605,7 @@ main(int argc, char **argv)
"SERIALIZABLE, READ ONLY, DEFERRABLE");
else
ExecuteSqlStatement(fout,
- "SET TRANSACTION ISOLATION LEVEL "
+ "SET TRANSACTION ISOLATION LEVEL "
"REPEATABLE READ");
}
else
@@ -625,7 +625,7 @@ main(int argc, char **argv)
{
if (fout->remoteVersion >= 70100)
g_last_builtin_oid = findLastBuiltinOid_V71(fout,
- PQdb(GetConnection(fout)));
+ PQdb(GetConnection(fout)));
else
g_last_builtin_oid = findLastBuiltinOid_V70(fout);
if (g_verbose)
@@ -748,7 +748,7 @@ main(int argc, char **argv)
else
ropt->compression = compressLevel;
- ropt->suppressDumpWarnings = true; /* We've already shown them */
+ ropt->suppressDumpWarnings = true; /* We've already shown them */
SetArchiveRestoreOptions(fout, ropt);
@@ -1123,6 +1123,7 @@ selectDumpableType(TypeInfo *tyinfo)
if (tyinfo->isArray)
{
tyinfo->dobj.objType = DO_DUMMY_TYPE;
+
/*
* Fall through to set the dump flag; we assume that the subsequent
* rules will do the same thing as they would for the array's base
@@ -2666,7 +2667,7 @@ findNamespace(Archive *fout, Oid nsoid, Oid objoid)
else
{
/* This code depends on the dummy objects set up by getNamespaces. */
- Oid i;
+ Oid i;
if (objoid > g_last_builtin_oid)
i = 0; /* user object */
@@ -2938,7 +2939,7 @@ getTypes(Archive *fout, int *numTypes)
/*
* If it's a base type, make a DumpableObject representing a shell
* definition of the type. We will need to dump that ahead of the I/O
- * functions for the type. Similarly, range types need a shell
+ * functions for the type. Similarly, range types need a shell
* definition in case they have a canonicalize function.
*
* Note: the shell type doesn't have a catId. You might think it
@@ -3972,7 +3973,7 @@ getTables(Archive *fout, int *numTables)
"SELECT c.tableoid, c.oid, c.relname, "
"c.relacl, c.relkind, c.relnamespace, "
"(%s c.relowner) AS rolname, "
- "c.relchecks, (c.reltriggers <> 0) AS relhastriggers, "
+ "c.relchecks, (c.reltriggers <> 0) AS relhastriggers, "
"c.relhasindex, c.relhasrules, c.relhasoids, "
"c.relfrozenxid, tc.oid AS toid, "
"tc.relfrozenxid AS tfrozenxid, "
@@ -4278,9 +4279,9 @@ getTables(Archive *fout, int *numTables)
resetPQExpBuffer(query);
appendPQExpBuffer(query,
"LOCK TABLE %s IN ACCESS SHARE MODE",
- fmtQualifiedId(fout,
+ fmtQualifiedId(fout,
tblinfo[i].dobj.namespace->dobj.name,
- tblinfo[i].dobj.name));
+ tblinfo[i].dobj.name));
ExecuteSqlStatement(fout, query->data);
}
@@ -4879,7 +4880,7 @@ getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
for (i = 0; i < ntups; i++)
{
- bool validated = PQgetvalue(res, i, 4)[0] == 't';
+ bool validated = PQgetvalue(res, i, 4)[0] == 't';
constrinfo[i].dobj.objType = DO_CONSTRAINT;
constrinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
@@ -4901,7 +4902,7 @@ getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
/*
* Make the domain depend on the constraint, ensuring it won't be
- * output till any constraint dependencies are OK. If the constraint
+ * output till any constraint dependencies are OK. If the constraint
* has not been validated, it's going to be dumped after the domain
* anyway, so this doesn't matter.
*/
@@ -5625,11 +5626,11 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
"pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
"array_to_string(a.attoptions, ', ') AS attoptions, "
"CASE WHEN a.attcollation <> t.typcollation "
- "THEN a.attcollation ELSE 0 END AS attcollation, "
+ "THEN a.attcollation ELSE 0 END AS attcollation, "
"pg_catalog.array_to_string(ARRAY("
"SELECT pg_catalog.quote_ident(option_name) || "
"' ' || pg_catalog.quote_literal(option_value) "
- "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
+ "FROM pg_catalog.pg_options_to_table(attfdwoptions) "
"ORDER BY option_name"
"), E',\n ') AS attfdwoptions "
"FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
@@ -5654,7 +5655,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
"pg_catalog.format_type(t.oid,a.atttypmod) AS atttypname, "
"array_to_string(a.attoptions, ', ') AS attoptions, "
"CASE WHEN a.attcollation <> t.typcollation "
- "THEN a.attcollation ELSE 0 END AS attcollation, "
+ "THEN a.attcollation ELSE 0 END AS attcollation, "
"NULL AS attfdwoptions "
"FROM pg_catalog.pg_attribute a LEFT JOIN pg_catalog.pg_type t "
"ON a.atttypid = t.oid "
@@ -5898,8 +5899,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* Defaults on a VIEW must always be dumped as separate ALTER
* TABLE commands. Defaults on regular tables are dumped as
- * part of the CREATE TABLE if possible, which it won't be
- * if the column is not going to be emitted explicitly.
+ * part of the CREATE TABLE if possible, which it won't be if
+ * the column is not going to be emitted explicitly.
*/
if (tbinfo->relkind == RELKIND_VIEW)
{
@@ -5919,6 +5920,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
else
{
attrdefs[j].separate = false;
+
/*
* Mark the default as needing to appear before the table,
* so that any dependencies it has must be emitted before
@@ -6051,7 +6053,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
for (j = 0; j < numConstrs; j++)
{
- bool validated = PQgetvalue(res, j, 5)[0] == 't';
+ bool validated = PQgetvalue(res, j, 5)[0] == 't';
constrs[j].dobj.objType = DO_CONSTRAINT;
constrs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, 0));
@@ -6068,6 +6070,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
constrs[j].condeferrable = false;
constrs[j].condeferred = false;
constrs[j].conislocal = (PQgetvalue(res, j, 4)[0] == 't');
+
/*
* An unvalidated constraint needs to be dumped separately, so
* that potentially-violating existing data is loaded before
@@ -6081,10 +6084,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
* Mark the constraint as needing to appear before the table
* --- this is so that any other dependencies of the
* constraint will be emitted before we try to create the
- * table. If the constraint is to be dumped separately, it will be
- * dumped after data is loaded anyway, so don't do it. (There's
- * an automatic dependency in the opposite direction anyway, so
- * don't need to add one manually here.)
+ * table. If the constraint is to be dumped separately, it
+ * will be dumped after data is loaded anyway, so don't do it.
+ * (There's an automatic dependency in the opposite direction
+ * anyway, so don't need to add one manually here.)
*/
if (!constrs[j].separate)
addObjectDependency(&tbinfo->dobj,
@@ -6597,7 +6600,7 @@ getForeignServers(Archive *fout, int *numForeignServers)
}
/* Make sure we are in proper schema */
- selectSourceSchema(fout,"pg_catalog");
+ selectSourceSchema(fout, "pg_catalog");
appendPQExpBuffer(query, "SELECT tableoid, oid, srvname, "
"(%s srvowner) AS rolname, "
@@ -7531,7 +7534,7 @@ dumpRangeType(Archive *fout, TypeInfo *tyinfo)
selectSourceSchema(fout, tyinfo->dobj.namespace->dobj.name);
appendPQExpBuffer(query,
- "SELECT pg_catalog.format_type(rngsubtype, NULL) AS rngsubtype, "
+ "SELECT pg_catalog.format_type(rngsubtype, NULL) AS rngsubtype, "
"opc.opcname AS opcname, "
"(SELECT nspname FROM pg_catalog.pg_namespace nsp "
" WHERE nsp.oid = opc.opcnamespace) AS opcnsp, "
@@ -7570,8 +7573,8 @@ dumpRangeType(Archive *fout, TypeInfo *tyinfo)
/* print subtype_opclass only if not default for subtype */
if (PQgetvalue(res, 0, PQfnumber(res, "opcdefault"))[0] != 't')
{
- char *opcname = PQgetvalue(res, 0, PQfnumber(res, "opcname"));
- char *nspname = PQgetvalue(res, 0, PQfnumber(res, "opcnsp"));
+ char *opcname = PQgetvalue(res, 0, PQfnumber(res, "opcname"));
+ char *nspname = PQgetvalue(res, 0, PQfnumber(res, "opcnsp"));
/* always schema-qualify, don't try to be smart */
appendPQExpBuffer(q, ",\n subtype_opclass = %s.",
@@ -9409,12 +9412,12 @@ dumpCast(Archive *fout, CastInfo *cast)
labelq = createPQExpBuffer();
appendPQExpBuffer(delqry, "DROP CAST (%s AS %s);\n",
- getFormattedTypeName(fout, cast->castsource, zeroAsNone),
- getFormattedTypeName(fout, cast->casttarget, zeroAsNone));
+ getFormattedTypeName(fout, cast->castsource, zeroAsNone),
+ getFormattedTypeName(fout, cast->casttarget, zeroAsNone));
appendPQExpBuffer(defqry, "CREATE CAST (%s AS %s) ",
- getFormattedTypeName(fout, cast->castsource, zeroAsNone),
- getFormattedTypeName(fout, cast->casttarget, zeroAsNone));
+ getFormattedTypeName(fout, cast->castsource, zeroAsNone),
+ getFormattedTypeName(fout, cast->casttarget, zeroAsNone));
switch (cast->castmethod)
{
@@ -9427,14 +9430,15 @@ dumpCast(Archive *fout, CastInfo *cast)
case COERCION_METHOD_FUNCTION:
if (funcInfo)
{
- char *fsig = format_function_signature(fout, funcInfo, true);
+ char *fsig = format_function_signature(fout, funcInfo, true);
/*
* Always qualify the function name, in case it is not in
- * pg_catalog schema (format_function_signature won't qualify it).
+ * pg_catalog schema (format_function_signature won't qualify
+ * it).
*/
appendPQExpBuffer(defqry, "WITH FUNCTION %s.%s",
- fmtId(funcInfo->dobj.namespace->dobj.name), fsig);
+ fmtId(funcInfo->dobj.namespace->dobj.name), fsig);
free(fsig);
}
else
@@ -9451,8 +9455,8 @@ dumpCast(Archive *fout, CastInfo *cast)
appendPQExpBuffer(defqry, ";\n");
appendPQExpBuffer(labelq, "CAST (%s AS %s)",
- getFormattedTypeName(fout, cast->castsource, zeroAsNone),
- getFormattedTypeName(fout, cast->casttarget, zeroAsNone));
+ getFormattedTypeName(fout, cast->castsource, zeroAsNone),
+ getFormattedTypeName(fout, cast->casttarget, zeroAsNone));
if (binary_upgrade)
binary_upgrade_extension_member(defqry, &cast->dobj, labelq->data);
@@ -11715,7 +11719,7 @@ dumpACL(Archive *fout, CatalogId objCatId, DumpId objDumpId,
if (!buildACLCommands(name, subname, type, acls, owner,
"", fout->remoteVersion, sql))
exit_horribly(NULL,
- "could not parse ACL list (%s) for object \"%s\" (%s)\n",
+ "could not parse ACL list (%s) for object \"%s\" (%s)\n",
acls, name, type);
if (sql->len > 0)
@@ -12157,10 +12161,10 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
{
if (PQntuples(res) < 1)
exit_horribly(NULL, "query to obtain definition of view \"%s\" returned no data\n",
- tbinfo->dobj.name);
+ tbinfo->dobj.name);
else
exit_horribly(NULL, "query to obtain definition of view \"%s\" returned more than one definition\n",
- tbinfo->dobj.name);
+ tbinfo->dobj.name);
}
viewdef = PQgetvalue(res, 0, 0);
@@ -12207,7 +12211,7 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
"pg_catalog.array_to_string(ARRAY("
"SELECT pg_catalog.quote_ident(option_name) || "
"' ' || pg_catalog.quote_literal(option_value) "
- "FROM pg_catalog.pg_options_to_table(ftoptions) "
+ "FROM pg_catalog.pg_options_to_table(ftoptions) "
"ORDER BY option_name"
"), E',\n ') AS ftoptions "
"FROM pg_catalog.pg_foreign_table ft "
@@ -13152,7 +13156,7 @@ findLastBuiltinOid_V70(Archive *fout)
int last_oid;
res = ExecuteSqlQueryForSingleRow(fout,
- "SELECT oid FROM pg_class WHERE relname = 'pg_indexes'");
+ "SELECT oid FROM pg_class WHERE relname = 'pg_indexes'");
last_oid = atooid(PQgetvalue(res, 0, PQfnumber(res, "oid")));
PQclear(res);
return last_oid;
@@ -13882,8 +13886,8 @@ getExtensionMembership(Archive *fout, ExtensionInfo extinfo[],
continue;
/*
- * Note: config tables are dumped without OIDs regardless
- * of the --oids setting. This is because row filtering
+ * Note: config tables are dumped without OIDs regardless of
+ * the --oids setting. This is because row filtering
* conditions aren't compatible with dumping OIDs.
*/
makeTableDataInfo(configtbl, false);
@@ -14284,7 +14288,7 @@ ExecuteSqlQueryForSingleRow(Archive *fout, char *query)
exit_horribly(NULL,
ngettext("query returned %d row instead of one: %s\n",
"query returned %d rows instead of one: %s\n",
- ntups),
+ ntups),
ntups, query);
return res;
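
The LOCK TABLE hunk above interpolates a schema-qualified, quoted table name built by pg_dump's fmtQualifiedId() helper. A minimal sketch of the same idea using only libpq's PQescapeIdentifier() (the real helper has additional version-dependent behavior):

    #include <libpq-fe.h>
    #include <stdio.h>
    #include <string.h>

    /* Build and run "LOCK TABLE schema.table IN ACCESS SHARE MODE" with
     * both identifiers safely double-quoted. Error checking is omitted;
     * PQescapeIdentifier() can return NULL on encoding errors. */
    static void
    lock_table(PGconn *conn, const char *schema, const char *table)
    {
        char       *qschema = PQescapeIdentifier(conn, schema, strlen(schema));
        char       *qtable = PQescapeIdentifier(conn, table, strlen(table));
        char        sql[1024];

        snprintf(sql, sizeof(sql),
                 "LOCK TABLE %s.%s IN ACCESS SHARE MODE", qschema, qtable);
        PQclear(PQexec(conn, sql));
        PQfreemem(qschema);
        PQfreemem(qtable);
    }
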
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index 2a1b4299ca..9a82e4b6c5 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -111,7 +111,7 @@ static bool TopoSort(DumpableObject **objs,
static void addHeapElement(int val, int *heap, int heapLength);
static int removeHeapElement(int *heap, int heapLength);
static void findDependencyLoops(DumpableObject **objs, int nObjs, int totObjs);
-static int findLoop(DumpableObject *obj,
+static int findLoop(DumpableObject *obj,
DumpId startPoint,
bool *processed,
DumpableObject **workspace,
@@ -139,8 +139,8 @@ sortDumpableObjectsByTypeName(DumpableObject **objs, int numObjs)
static int
DOTypeNameCompare(const void *p1, const void *p2)
{
- DumpableObject *obj1 = *(DumpableObject * const *) p1;
- DumpableObject *obj2 = *(DumpableObject * const *) p2;
+ DumpableObject *obj1 = *(DumpableObject *const *) p1;
+ DumpableObject *obj2 = *(DumpableObject *const *) p2;
int cmpval;
/* Sort by type */
@@ -171,8 +171,8 @@ DOTypeNameCompare(const void *p1, const void *p2)
/* To have a stable sort order, break ties for some object types */
if (obj1->objType == DO_FUNC || obj1->objType == DO_AGG)
{
- FuncInfo *fobj1 = *(FuncInfo * const *) p1;
- FuncInfo *fobj2 = *(FuncInfo * const *) p2;
+ FuncInfo *fobj1 = *(FuncInfo *const *) p1;
+ FuncInfo *fobj2 = *(FuncInfo *const *) p2;
cmpval = fobj1->nargs - fobj2->nargs;
if (cmpval != 0)
@@ -180,8 +180,8 @@ DOTypeNameCompare(const void *p1, const void *p2)
}
else if (obj1->objType == DO_OPERATOR)
{
- OprInfo *oobj1 = *(OprInfo * const *) p1;
- OprInfo *oobj2 = *(OprInfo * const *) p2;
+ OprInfo *oobj1 = *(OprInfo *const *) p1;
+ OprInfo *oobj2 = *(OprInfo *const *) p2;
/* oprkind is 'l', 'r', or 'b'; this sorts prefix, postfix, infix */
cmpval = (oobj2->oprkind - oobj1->oprkind);
@@ -190,8 +190,8 @@ DOTypeNameCompare(const void *p1, const void *p2)
}
else if (obj1->objType == DO_ATTRDEF)
{
- AttrDefInfo *adobj1 = *(AttrDefInfo * const *) p1;
- AttrDefInfo *adobj2 = *(AttrDefInfo * const *) p2;
+ AttrDefInfo *adobj1 = *(AttrDefInfo *const *) p1;
+ AttrDefInfo *adobj2 = *(AttrDefInfo *const *) p2;
cmpval = (adobj1->adnum - adobj2->adnum);
if (cmpval != 0)
@@ -220,8 +220,8 @@ sortDumpableObjectsByTypeOid(DumpableObject **objs, int numObjs)
static int
DOTypeOidCompare(const void *p1, const void *p2)
{
- DumpableObject *obj1 = *(DumpableObject * const *) p1;
- DumpableObject *obj2 = *(DumpableObject * const *) p2;
+ DumpableObject *obj1 = *(DumpableObject *const *) p1;
+ DumpableObject *obj2 = *(DumpableObject *const *) p2;
int cmpval;
cmpval = oldObjectTypePriority[obj1->objType] -
@@ -545,7 +545,7 @@ findDependencyLoops(DumpableObject **objs, int nObjs, int totObjs)
{
/*
* There's no loop starting at this object, but mark it processed
- * anyway. This is not necessary for correctness, but saves later
+ * anyway. This is not necessary for correctness, but saves later
* invocations of findLoop() from uselessly chasing references to
* such an object.
*/
@@ -587,7 +587,7 @@ findLoop(DumpableObject *obj,
int i;
/*
- * Reject if obj is already processed. This test prevents us from finding
+ * Reject if obj is already processed. This test prevents us from finding
* loops that overlap previously-processed loops.
*/
if (processed[obj->dumpId])
@@ -645,7 +645,7 @@ findLoop(DumpableObject *obj,
* A user-defined datatype will have a dependency loop with each of its
* I/O functions (since those have the datatype as input or output).
* Similarly, a range type will have a loop with its canonicalize function,
- * if any. Break the loop by making the function depend on the associated
+ * if any. Break the loop by making the function depend on the associated
* shell type, instead.
*/
static void
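
The casts adjusted throughout this file follow the standard qsort() comparator pattern: the comparator receives pointers to array elements, which are themselves pointers. A minimal sketch with a hypothetical Obj type, including the tie-breaking that keeps the sort order stable:

    #include <stdlib.h>
    #include <string.h>

    typedef struct
    {
        int         type;       /* small enum-like value */
        const char *name;
        unsigned    oid;
    } Obj;

    static int
    obj_compare(const void *p1, const void *p2)
    {
        const Obj  *a = *(Obj *const *) p1; /* each element is an Obj pointer */
        const Obj  *b = *(Obj *const *) p2;
        int         cmp;

        cmp = a->type - b->type;            /* safe: small non-negative values */
        if (cmp != 0)
            return cmp;
        cmp = strcmp(a->name, b->name);     /* then by name */
        if (cmp != 0)
            return cmp;
        /* final tiebreak; (x > y) - (x < y) cannot overflow */
        return (a->oid > b->oid) - (a->oid < b->oid);
    }
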
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 34d6920364..053e5fd36a 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -52,8 +52,8 @@ static void doShellQuoting(PQExpBuffer buf, const char *str);
static int runPgDump(const char *dbname);
static void buildShSecLabels(PGconn *conn, const char *catalog_name,
- uint32 objectId, PQExpBuffer buffer,
- const char *target, const char *objname);
+ uint32 objectId, PQExpBuffer buffer,
+ const char *target, const char *objname);
static PGconn *connectDatabase(const char *dbname, const char *pghost, const char *pgport,
const char *pguser, enum trivalue prompt_password, bool fail_on_error);
static PGresult *executeQuery(PGconn *conn, const char *query);
@@ -1663,7 +1663,7 @@ static void
buildShSecLabels(PGconn *conn, const char *catalog_name, uint32 objectId,
PQExpBuffer buffer, const char *target, const char *objname)
{
- PQExpBuffer sql = createPQExpBuffer();
+ PQExpBuffer sql = createPQExpBuffer();
PGresult *res;
buildShSecLabelQuery(conn, catalog_name, objectId, sql);
diff --git a/src/bin/pgevent/pgevent.c b/src/bin/pgevent/pgevent.c
index 669be05c22..91d35b4daf 100644
--- a/src/bin/pgevent/pgevent.c
+++ b/src/bin/pgevent/pgevent.c
@@ -113,7 +113,7 @@ DllRegisterServer(void)
"TypesSupported",
0,
REG_DWORD,
- (LPBYTE) & data,
+ (LPBYTE) &data,
sizeof(DWORD)))
{
MessageBox(NULL, "Could not set the supported types.", "PostgreSQL error", MB_OK | MB_ICONSTOP);
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 8544d15109..5614120255 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -777,7 +777,7 @@ exec_command(const char *cmd,
/* \i and \ir include files */
else if (strcmp(cmd, "i") == 0 || strcmp(cmd, "include") == 0
- || strcmp(cmd, "ir") == 0 || strcmp(cmd, "include_relative") == 0)
+ || strcmp(cmd, "ir") == 0 || strcmp(cmd, "include_relative") == 0)
{
char *fname = psql_scan_slash_option(scan_state,
OT_NORMAL, NULL, true);
@@ -789,7 +789,7 @@ exec_command(const char *cmd,
}
else
{
- bool include_relative;
+ bool include_relative;
include_relative = (strcmp(cmd, "ir") == 0
|| strcmp(cmd, "include_relative") == 0);
@@ -1103,16 +1103,16 @@ exec_command(const char *cmd,
else if (strcmp(cmd, "setenv") == 0)
{
char *envvar = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, false);
+ OT_NORMAL, NULL, false);
char *envval = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, false);
+ OT_NORMAL, NULL, false);
if (!envvar)
{
psql_error("\\%s: missing required argument\n", cmd);
success = false;
}
- else if (strchr(envvar,'=') != NULL)
+ else if (strchr(envvar, '=') != NULL)
{
psql_error("\\%s: environment variable name must not contain \"=\"\n",
cmd);
@@ -1127,16 +1127,17 @@ exec_command(const char *cmd,
else
{
/* Set variable to the value of the next argument */
- int len = strlen(envvar) + strlen(envval) + 1;
+ int len = strlen(envvar) + strlen(envval) + 1;
char *newval = pg_malloc(len + 1);
- snprintf(newval, len+1, "%s=%s", envvar, envval);
+ snprintf(newval, len + 1, "%s=%s", envvar, envval);
putenv(newval);
success = true;
+
/*
- * Do not free newval here, it will screw up the environment
- * if you do. See putenv man page for details. That means we
- * leak a bit of memory here, but not enough to worry about.
+ * Do not free newval here, it will screw up the environment if
+ * you do. See putenv man page for details. That means we leak a
+ * bit of memory here, but not enough to worry about.
*/
}
free(envvar);
@@ -2046,9 +2047,9 @@ process_file(char *filename, bool single_txn, bool use_relative_path)
/*
* If we were asked to resolve the pathname relative to the location
- * of the currently executing script, and there is one, and this is
- * a relative pathname, then prepend all but the last pathname
- * component of the current script to this pathname.
+ * of the currently executing script, and there is one, and this is a
+ * relative pathname, then prepend all but the last pathname component
+ * of the current script to this pathname.
*/
if (use_relative_path && pset.inputfile && !is_absolute_path(filename)
&& !has_drive_prefix(filename))
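
The reflowed comment in the \setenv hunk above is worth illustrating: the string handed to putenv() becomes part of the environment itself, so freeing it would leave environ pointing at freed memory. A small self-contained sketch:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int
    main(void)
    {
        const char *var = "FOO";
        const char *val = "bar";
        int         len = strlen(var) + strlen(val) + 1;
        char       *newval = malloc(len + 1);   /* error checking omitted */

        snprintf(newval, len + 1, "%s=%s", var, val);
        putenv(newval);         /* environ now references newval itself */
        /* free(newval) here would leave environ with a dangling pointer,
         * so the string is intentionally leaked, as the comment says. */
        printf("FOO=%s\n", getenv("FOO"));
        return 0;
    }
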
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 33dc97e95f..3691b507a4 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -707,7 +707,7 @@ ProcessResult(PGresult **results)
/*
* Call PQgetResult() once more. In the typical case of a
- * single-command string, it will return NULL. Otherwise, we'll
+ * single-command string, it will return NULL. Otherwise, we'll
* have other results to process that may include other COPYs.
*/
PQclear(*results);
@@ -982,11 +982,12 @@ SendQuery(const char *query)
break;
case PQTRANS_INTRANS:
+
/*
* Do nothing if they are messing with savepoints themselves:
- * If the user did RELEASE or ROLLBACK, our savepoint is
- * gone. If they issued a SAVEPOINT, releasing ours would
- * remove theirs.
+ * If the user did RELEASE or ROLLBACK, our savepoint is gone.
+ * If they issued a SAVEPOINT, releasing ours would remove
+ * theirs.
*/
if (results &&
(strcmp(PQcmdStatus(results), "SAVEPOINT") == 0 ||
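
The PQTRANS_INTRANS hunk above keys its savepoint handling off the command tag. A hedged sketch of that test (the savepoint name shown is illustrative, not taken from this patch):

    #include <libpq-fe.h>
    #include <string.h>

    /* Release our hidden savepoint only if the user's own command did not
     * already touch savepoints: after RELEASE or ROLLBACK ours is gone,
     * and after a user SAVEPOINT, releasing ours would drop theirs too. */
    static void
    maybe_release_savepoint(PGconn *conn, PGresult *results)
    {
        if (results &&
            (strcmp(PQcmdStatus(results), "SAVEPOINT") == 0 ||
             strcmp(PQcmdStatus(results), "RELEASE") == 0 ||
             strcmp(PQcmdStatus(results), "ROLLBACK") == 0))
            return;
        PQclear(PQexec(conn, "RELEASE pg_psql_temporary_savepoint"));
    }
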
diff --git a/src/bin/psql/copy.c b/src/bin/psql/copy.c
index a1dea9502c..22fcc5975e 100644
--- a/src/bin/psql/copy.c
+++ b/src/bin/psql/copy.c
@@ -394,7 +394,7 @@ handleCopyOut(PGconn *conn, FILE *copystream)
/*
* Check command status and return to normal libpq state. After a
* client-side error, the server will remain ready to deliver data. The
- * cleanest thing is to fully drain and discard that data. If the
+ * cleanest thing is to fully drain and discard that data. If the
* client-side error happened early in a large file, this takes a long
* time. Instead, take advantage of the fact that PQexec() will silently
* end any ongoing PGRES_COPY_OUT state. This does cause us to lose the
@@ -405,7 +405,7 @@ handleCopyOut(PGconn *conn, FILE *copystream)
* We must not ever return with the status still PGRES_COPY_OUT. Our
* caller is unable to distinguish that situation from reaching the next
* COPY in a command string that happened to contain two consecutive COPY
- * TO STDOUT commands. We trust that no condition can make PQexec() fail
+ * TO STDOUT commands. We trust that no condition can make PQexec() fail
* indefinitely while retaining status PGRES_COPY_OUT.
*/
while (res = PQgetResult(conn), PQresultStatus(res) == PGRES_COPY_OUT)
@@ -584,6 +584,7 @@ handleCopyIn(PGconn *conn, FILE *copystream, bool isbinary)
OK = false;
copyin_cleanup:
+
/*
* Check command status and return to normal libpq state
*
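
The comment above weighs fully draining COPY OUT data against letting PQexec() silently end the COPY state. For reference, the straightforward full-drain approach it mentions looks like this in plain libpq (a sketch, not the code psql uses):

    #include <libpq-fe.h>

    /* Consume and discard the rest of a COPY TO STDOUT transfer so the
     * connection returns to a normal command state. */
    static void
    drain_copy_out(PGconn *conn)
    {
        char       *buf;
        PGresult   *res;

        while (PQgetCopyData(conn, &buf, 0) > 0)
            PQfreemem(buf);     /* throw each row away */
        /* PQgetCopyData returned -1 (done) or -2 (error); collect the
         * terminating result(s) */
        while ((res = PQgetResult(conn)) != NULL)
            PQclear(res);
    }
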
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 2cfacd34e3..9170dc6982 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -142,15 +142,15 @@ describeTablespaces(const char *pattern, bool verbose)
if (pset.sversion >= 90200)
printfPQExpBuffer(&buf,
"SELECT spcname AS \"%s\",\n"
- " pg_catalog.pg_get_userbyid(spcowner) AS \"%s\",\n"
- " pg_catalog.pg_tablespace_location(oid) AS \"%s\"",
+ " pg_catalog.pg_get_userbyid(spcowner) AS \"%s\",\n"
+ " pg_catalog.pg_tablespace_location(oid) AS \"%s\"",
gettext_noop("Name"),
gettext_noop("Owner"),
gettext_noop("Location"));
else
printfPQExpBuffer(&buf,
"SELECT spcname AS \"%s\",\n"
- " pg_catalog.pg_get_userbyid(spcowner) AS \"%s\",\n"
+ " pg_catalog.pg_get_userbyid(spcowner) AS \"%s\",\n"
" spclocation AS \"%s\"",
gettext_noop("Name"),
gettext_noop("Owner"),
@@ -910,7 +910,7 @@ objectDescription(const char *pattern, bool showSystem)
if (!showSystem && !pattern)
appendPQExpBuffer(&buf, " AND n.nspname <> 'pg_catalog'\n"
- " AND n.nspname <> 'information_schema'\n");
+ " AND n.nspname <> 'information_schema'\n");
processSQLNamePattern(pset.db, &buf, pattern, true, false,
"n.nspname", "o.opcname", NULL,
@@ -926,7 +926,7 @@ objectDescription(const char *pattern, bool showSystem)
/* Operator family descriptions */
appendPQExpBuffer(&buf,
"UNION ALL\n"
- " SELECT opf.oid as oid, opf.tableoid as tableoid,\n"
+ " SELECT opf.oid as oid, opf.tableoid as tableoid,\n"
" n.nspname as nspname,\n"
" CAST(opf.opfname AS pg_catalog.text) AS name,\n"
" CAST('%s' AS pg_catalog.text) as object\n"
@@ -939,7 +939,7 @@ objectDescription(const char *pattern, bool showSystem)
if (!showSystem && !pattern)
appendPQExpBuffer(&buf, " AND n.nspname <> 'pg_catalog'\n"
- " AND n.nspname <> 'information_schema'\n");
+ " AND n.nspname <> 'information_schema'\n");
processSQLNamePattern(pset.db, &buf, pattern, true, false,
"n.nspname", "opf.opfname", NULL,
@@ -1294,14 +1294,15 @@ describeOneTableDetails(const char *schemaname,
appendPQExpBuffer(&buf, ",\n NULL AS indexdef");
if (tableinfo.relkind == 'f' && pset.sversion >= 90200)
appendPQExpBuffer(&buf, ",\n CASE WHEN attfdwoptions IS NULL THEN '' ELSE "
- " '(' || array_to_string(ARRAY(SELECT quote_ident(option_name) || ' ' || quote_literal(option_value) FROM "
- " pg_options_to_table(attfdwoptions)), ', ') || ')' END AS attfdwoptions");
+ " '(' || array_to_string(ARRAY(SELECT quote_ident(option_name) || ' ' || quote_literal(option_value) FROM "
+ " pg_options_to_table(attfdwoptions)), ', ') || ')' END AS attfdwoptions");
else
appendPQExpBuffer(&buf, ",\n NULL AS attfdwoptions");
if (verbose)
{
appendPQExpBuffer(&buf, ",\n a.attstorage");
appendPQExpBuffer(&buf, ",\n CASE WHEN a.attstattarget=-1 THEN NULL ELSE a.attstattarget END AS attstattarget");
+
/*
* In 9.0+, we have column comments for: relations, views, composite
* types, and foreign tables (c.f. CommentObject() in comment.c).
@@ -1416,7 +1417,7 @@ describeOneTableDetails(const char *schemaname,
PGresult *result;
printfPQExpBuffer(&buf,
- "SELECT pg_catalog.pg_get_viewdef('%s'::pg_catalog.oid, true);",
+ "SELECT pg_catalog.pg_get_viewdef('%s'::pg_catalog.oid, true);",
oid);
result = PSQLexec(buf.data, false);
if (!result)
@@ -1651,13 +1652,13 @@ describeOneTableDetails(const char *schemaname,
"\n pg_catalog.quote_ident(relname) || '.' ||"
"\n pg_catalog.quote_ident(attname)"
"\nFROM pg_catalog.pg_class c"
- "\nINNER JOIN pg_catalog.pg_depend d ON c.oid=d.refobjid"
- "\nINNER JOIN pg_catalog.pg_namespace n ON n.oid=c.relnamespace"
+ "\nINNER JOIN pg_catalog.pg_depend d ON c.oid=d.refobjid"
+ "\nINNER JOIN pg_catalog.pg_namespace n ON n.oid=c.relnamespace"
"\nINNER JOIN pg_catalog.pg_attribute a ON ("
"\n a.attrelid=c.oid AND"
"\n a.attnum=d.refobjsubid)"
- "\nWHERE d.classid='pg_catalog.pg_class'::pg_catalog.regclass"
- "\n AND d.refclassid='pg_catalog.pg_class'::pg_catalog.regclass"
+ "\nWHERE d.classid='pg_catalog.pg_class'::pg_catalog.regclass"
+ "\n AND d.refclassid='pg_catalog.pg_class'::pg_catalog.regclass"
"\n AND d.objid=%s"
"\n AND d.deptype='a'",
oid);
@@ -1671,10 +1672,11 @@ describeOneTableDetails(const char *schemaname,
PQgetvalue(result, 0, 0));
printTableAddFooter(&cont, buf.data);
}
+
/*
- * If we get no rows back, don't show anything (obviously).
- * We should never get more than one row back, but if we do,
- * just ignore it and don't print anything.
+ * If we get no rows back, don't show anything (obviously). We should
+ * never get more than one row back, but if we do, just ignore it and
+ * don't print anything.
*/
PQclear(result);
}
@@ -1711,7 +1713,7 @@ describeOneTableDetails(const char *schemaname,
" LEFT JOIN pg_catalog.pg_constraint con ON (conrelid = i.indrelid AND conindid = i.indexrelid AND contype IN ('p','u','x'))\n");
appendPQExpBuffer(&buf,
"WHERE c.oid = '%s' AND c.oid = i.indrelid AND i.indexrelid = c2.oid\n"
- "ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname;",
+ "ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname;",
oid);
result = PSQLexec(buf.data, false);
if (!result)
@@ -1823,7 +1825,7 @@ describeOneTableDetails(const char *schemaname,
"SELECT conname,\n"
" pg_catalog.pg_get_constraintdef(r.oid, true) as condef\n"
"FROM pg_catalog.pg_constraint r\n"
- "WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1;",
+ "WHERE r.conrelid = '%s' AND r.contype = 'f' ORDER BY 1;",
oid);
result = PSQLexec(buf.data, false);
if (!result)
@@ -1854,7 +1856,7 @@ describeOneTableDetails(const char *schemaname,
"SELECT conname, conrelid::pg_catalog.regclass,\n"
" pg_catalog.pg_get_constraintdef(c.oid, true) as condef\n"
"FROM pg_catalog.pg_constraint c\n"
- "WHERE c.confrelid = '%s' AND c.contype = 'f' ORDER BY 1;",
+ "WHERE c.confrelid = '%s' AND c.contype = 'f' ORDER BY 1;",
oid);
result = PSQLexec(buf.data, false);
if (!result)
@@ -2105,7 +2107,7 @@ describeOneTableDetails(const char *schemaname,
/* print foreign server name */
if (tableinfo.relkind == 'f')
{
- char *ftoptions;
+ char *ftoptions;
/* Footer information about foreign table */
printfPQExpBuffer(&buf,
@@ -2113,7 +2115,7 @@ describeOneTableDetails(const char *schemaname,
" array_to_string(ARRAY(SELECT "
" quote_ident(option_name) || ' ' || "
" quote_literal(option_value) FROM "
- " pg_options_to_table(ftoptions)), ', ') "
+ " pg_options_to_table(ftoptions)), ', ') "
"FROM pg_catalog.pg_foreign_table f,\n"
" pg_catalog.pg_foreign_server s\n"
"WHERE f.ftrelid = %s AND s.oid = f.ftserver;",
@@ -2841,7 +2843,7 @@ listDomains(const char *pattern, bool verbose, bool showSystem)
appendPQExpBuffer(&buf,
"\nFROM pg_catalog.pg_type t\n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n");
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace\n");
if (verbose)
appendPQExpBuffer(&buf,
@@ -3769,7 +3771,7 @@ listForeignDataWrappers(const char *pattern, bool verbose)
initPQExpBuffer(&buf);
printfPQExpBuffer(&buf,
"SELECT fdw.fdwname AS \"%s\",\n"
- " pg_catalog.pg_get_userbyid(fdw.fdwowner) AS \"%s\",\n",
+ " pg_catalog.pg_get_userbyid(fdw.fdwowner) AS \"%s\",\n",
gettext_noop("Name"),
gettext_noop("Owner"));
if (pset.sversion >= 90100)
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index eff0ea53b6..4a37c3414c 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -124,9 +124,9 @@ usage(void)
printf(_(" -T, --table-attr=TEXT set HTML table tag attributes (e.g., width, border)\n"));
printf(_(" -x, --expanded turn on expanded table output\n"));
printf(_(" -z, --field-separator-zero\n"
- " set field separator to zero byte\n"));
+ " set field separator to zero byte\n"));
printf(_(" -0, --record-separator-zero\n"
- " set record separator to zero byte\n"));
+ " set record separator to zero byte\n"));
printf(_("\nConnection options:\n"));
/* Display default host */
@@ -247,7 +247,7 @@ slashUsage(unsigned short int pager)
ON(pset.popt.topt.tuples_only));
fprintf(output, _(" \\T [STRING] set HTML <table> tag attributes, or unset if none\n"));
fprintf(output, _(" \\x [on|off|auto] toggle expanded output (currently %s)\n"),
- pset.popt.topt.expanded == 2 ? "auto" : ON(pset.popt.topt.expanded));
+ pset.popt.topt.expanded == 2 ? "auto" : ON(pset.popt.topt.expanded));
fprintf(output, "\n");
fprintf(output, _("Connection\n"));
diff --git a/src/bin/psql/input.c b/src/bin/psql/input.c
index 880e7e6511..1a446e2afe 100644
--- a/src/bin/psql/input.c
+++ b/src/bin/psql/input.c
@@ -288,7 +288,8 @@ initializeInput(int flags)
if (histfile == NULL)
{
- char * envhist;
+ char *envhist;
+
envhist = getenv("PSQL_HISTORY");
if (envhist != NULL && strlen(envhist) > 0)
histfile = envhist;
diff --git a/src/bin/psql/print.c b/src/bin/psql/print.c
index c431f6a437..8fa5e37128 100644
--- a/src/bin/psql/print.c
+++ b/src/bin/psql/print.c
@@ -44,8 +44,8 @@ static char *decimal_point;
static char *grouping;
static char *thousands_sep;
-static char default_footer[100];
-static printTableFooter default_footer_cell = { default_footer, NULL };
+static char default_footer[100];
+static printTableFooter default_footer_cell = {default_footer, NULL};
/* Line style control structures */
const printTextFormat pg_asciiformat =
@@ -283,7 +283,7 @@ print_separator(struct separator sep, FILE *fout)
/*
* Return the list of explicitly-requested footers or, when applicable, the
- * default "(xx rows)" footer. Always omit the default footer when given
+ * default "(xx rows)" footer. Always omit the default footer when given
* non-default footers, "\pset footer off", or a specific instruction to that
* effect from a calling backslash command. Vertical formats number each row,
* making the default footer redundant; they do not call this function.
@@ -388,6 +388,7 @@ print_unaligned_text(const printTableContent *cont, FILE *fout)
need_recordsep = true;
}
}
+
/*
* The last record is terminated by a newline, independent of the set
* record separator. But when the record separator is a zero byte, we
diff --git a/src/bin/psql/print.h b/src/bin/psql/print.h
index 25adfc5813..2b2ad0ba4e 100644
--- a/src/bin/psql/print.h
+++ b/src/bin/psql/print.h
@@ -89,7 +89,7 @@ typedef struct printTableOpt
unsigned long prior_records; /* start offset for record counters */
const printTextFormat *line_style; /* line style (NULL for default) */
struct separator fieldSep; /* field separator for unaligned text mode */
- struct separator recordSep; /* record separator for unaligned text mode */
+ struct separator recordSep; /* record separator for unaligned text mode */
bool numericLocale; /* locale-aware numeric units separator and
* decimal marker */
char *tableAttr; /* attributes for HTML <table ...> */
@@ -162,9 +162,9 @@ extern void printTableInit(printTableContent *const content,
const printTableOpt *opt, const char *title,
const int ncolumns, const int nrows);
extern void printTableAddHeader(printTableContent *const content,
- char *header, const bool translate, const char align);
+ char *header, const bool translate, const char align);
extern void printTableAddCell(printTableContent *const content,
- char *cell, const bool translate, const bool mustfree);
+ char *cell, const bool translate, const bool mustfree);
extern void printTableAddFooter(printTableContent *const content,
const char *footer);
extern void printTableSetFooter(printTableContent *const content,
diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c
index 1c2a5b3577..9a6306b8cf 100644
--- a/src/bin/psql/startup.c
+++ b/src/bin/psql/startup.c
@@ -591,7 +591,7 @@ process_psqlrc(char *argv0)
char rc_file[MAXPGPATH];
char my_exec_path[MAXPGPATH];
char etc_path[MAXPGPATH];
- char *envrc;
+ char *envrc;
find_my_exec(argv0, my_exec_path);
get_etc_path(my_exec_path, etc_path);
@@ -600,7 +600,7 @@ process_psqlrc(char *argv0)
process_psqlrc_file(rc_file);
envrc = getenv("PSQLRC");
-
+
if (envrc != NULL && strlen(envrc) > 0)
{
expand_tilde(&envrc);
@@ -618,7 +618,8 @@ process_psqlrc(char *argv0)
static void
process_psqlrc_file(char *filename)
{
- char *psqlrc_minor, *psqlrc_major;
+ char *psqlrc_minor,
+ *psqlrc_major;
#if defined(WIN32) && (!defined(__MINGW32__))
#define R_OK 4
diff --git a/src/bin/psql/stringutils.c b/src/bin/psql/stringutils.c
index 77387dcf3d..b557c5a6ba 100644
--- a/src/bin/psql/stringutils.c
+++ b/src/bin/psql/stringutils.c
@@ -277,7 +277,7 @@ strip_quotes(char *source, char quote, char escape, int encoding)
/*
* quote_if_needed
*
- * Opposite of strip_quotes(). If "source" denotes itself literally without
+ * Opposite of strip_quotes(). If "source" denotes itself literally without
* quoting or escaping, returns NULL. Otherwise, returns a malloc'd copy with
* quoting and escaping applied:
*
@@ -303,7 +303,7 @@ quote_if_needed(const char *source, const char *entails_quote,
psql_assert(quote);
src = source;
- dst = ret = pg_malloc(2 * strlen(src) + 3); /* excess */
+ dst = ret = pg_malloc(2 * strlen(src) + 3); /* excess */
*dst++ = quote;
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index a50e7356f1..061acd13b2 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -132,7 +132,7 @@ static const char *const * completion_charpp; /* to pass a list of strings */
static const char *completion_info_charp; /* to pass a second string */
static const char *completion_info_charp2; /* to pass a third string */
static const SchemaQuery *completion_squery; /* to pass a SchemaQuery */
-static bool completion_case_sensitive; /* completion is case sensitive */
+static bool completion_case_sensitive; /* completion is case sensitive */
/*
* A few macros to ease typing. You can use these to complete the given
@@ -790,9 +790,9 @@ psql_completion(char *text, int start, int end)
completion_info_charp2 = NULL;
/*
- * Scan the input line before our current position for the last few
- * words. According to those we'll make some smart decisions on what the
- * user is probably intending to type.
+ * Scan the input line before our current position for the last few words.
+ * According to those we'll make some smart decisions on what the user is
+ * probably intending to type.
*/
get_previous_words(start, previous_words, lengthof(previous_words));
@@ -1041,7 +1041,7 @@ psql_completion(char *text, int start, int end)
"ENCRYPTED", "INHERIT", "LOGIN", "NOCREATEDB", "NOCREATEROLE",
"NOCREATEUSER", "NOINHERIT", "NOLOGIN", "NOREPLICATION",
"NOSUPERUSER", "RENAME TO", "REPLICATION", "RESET", "SET",
- "SUPERUSER", "UNENCRYPTED", "VALID UNTIL", NULL};
+ "SUPERUSER", "UNENCRYPTED", "VALID UNTIL", NULL};
COMPLETE_WITH_LIST(list_ALTERUSER_WITH);
}
@@ -2017,7 +2017,7 @@ psql_completion(char *text, int start, int end)
"ENCRYPTED", "IN", "INHERIT", "LOGIN", "NOCREATEDB",
"NOCREATEROLE", "NOCREATEUSER", "NOINHERIT", "NOLOGIN",
"NOREPLICATION", "NOSUPERUSER", "REPLICATION", "ROLE",
- "SUPERUSER", "SYSID", "UNENCRYPTED", "VALID UNTIL", NULL};
+ "SUPERUSER", "SYSID", "UNENCRYPTED", "VALID UNTIL", NULL};
COMPLETE_WITH_LIST(list_CREATEROLE_WITH);
}
@@ -2317,7 +2317,11 @@ psql_completion(char *text, int start, int end)
" UNION SELECT 'USAGE'"
" UNION SELECT 'ALL'");
}
- /* Complete GRANT/REVOKE <privilege> with "ON", GRANT/REVOKE <role> with TO/FROM */
+
+ /*
+ * Complete GRANT/REVOKE <privilege> with "ON", GRANT/REVOKE <role> with
+ * TO/FROM
+ */
else if (pg_strcasecmp(prev2_wd, "GRANT") == 0 ||
pg_strcasecmp(prev2_wd, "REVOKE") == 0)
{
@@ -2901,8 +2905,11 @@ psql_completion(char *text, int start, int end)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
/* WITH [RECURSIVE] */
- /* Only match when WITH is the first word, as WITH may appear in many other
- contexts. */
+
+ /*
+ * Only match when WITH is the first word, as WITH may appear in many
+ * other contexts.
+ */
else if (pg_strcasecmp(prev_wd, "WITH") == 0 &&
prev2_wd[0] == '\0')
COMPLETE_WITH_CONST("RECURSIVE");
@@ -3029,7 +3036,7 @@ psql_completion(char *text, int start, int end)
strcmp(prev_wd, "\\e") == 0 || strcmp(prev_wd, "\\edit") == 0 ||
strcmp(prev_wd, "\\g") == 0 ||
strcmp(prev_wd, "\\i") == 0 || strcmp(prev_wd, "\\include") == 0 ||
- strcmp(prev_wd, "\\ir") == 0 || strcmp(prev_wd, "\\include_relative") == 0 ||
+ strcmp(prev_wd, "\\ir") == 0 || strcmp(prev_wd, "\\include_relative") == 0 ||
strcmp(prev_wd, "\\o") == 0 || strcmp(prev_wd, "\\out") == 0 ||
strcmp(prev_wd, "\\s") == 0 ||
strcmp(prev_wd, "\\w") == 0 || strcmp(prev_wd, "\\write") == 0
@@ -3412,8 +3419,11 @@ complete_from_list(const char *text, int state)
if (completion_case_sensitive)
return pg_strdup(item);
else
- /* If case insensitive matching was requested initially, adjust
- * the case according to setting. */
+
+ /*
+ * If case insensitive matching was requested initially,
+ * adjust the case according to setting.
+ */
return pg_strdup_keyword_case(item, text);
}
}
@@ -3451,8 +3461,11 @@ complete_from_const(const char *text, int state)
if (completion_case_sensitive)
return pg_strdup(completion_charp);
else
- /* If case insensitive matching was requested initially, adjust the
- * case according to setting. */
+
+ /*
+ * If case insensitive matching was requested initially, adjust
+ * the case according to setting.
+ */
return pg_strdup_keyword_case(completion_charp, text);
}
else
@@ -3500,7 +3513,7 @@ complete_from_variables(char *text, const char *prefix, const char *suffix)
}
varnames[nvars] = NULL;
- COMPLETE_WITH_LIST_CS((const char * const *) varnames);
+ COMPLETE_WITH_LIST_CS((const char *const *) varnames);
for (i = 0; i < nvars; i++)
free(varnames[i]);
@@ -3567,9 +3580,10 @@ complete_from_files(const char *text, int state)
static char *
pg_strdup_keyword_case(const char *s, const char *ref)
{
- char *ret, *p;
+ char *ret,
+ *p;
unsigned char first = ref[0];
- int tocase;
+ int tocase;
const char *varval;
varval = GetVariable(pset.vars, "COMP_KEYWORD_CASE");
@@ -3635,7 +3649,7 @@ exec_query(const char *query)
/*
- * Return the nwords word(s) before point. Words are returned right to left,
+ * Return the nwords word(s) before point. Words are returned right to left,
* that is, previous_words[0] gets the last word before point.
* If we run out of words, remaining array elements are set to empty strings.
* Each array element is filled with a malloc'd string.
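
pg_strdup_keyword_case(), reformatted above, folds a completion keyword to match what the user has typed. A simplified sketch of the core idea (the real function also honors the COMP_KEYWORD_CASE setting):

    #include <ctype.h>
    #include <stdlib.h>
    #include <string.h>

    /* Return a malloc'd copy of keyword "s" folded to match the case of
     * the user's typed prefix "ref", judged by its first character. */
    static char *
    dup_keyword_case(const char *s, const char *ref)
    {
        char       *ret = strdup(s);
        char       *p;
        int         upper = isupper((unsigned char) ref[0]);

        for (p = ret; *p; p++)
            *p = upper ? toupper((unsigned char) *p)
                       : tolower((unsigned char) *p);
        return ret;
    }
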
diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c
index 33d08176d0..5e41efc5bd 100644
--- a/src/bin/psql/variables.c
+++ b/src/bin/psql/variables.c
@@ -15,7 +15,7 @@
* Check whether a variable's name is allowed.
*
* We allow any non-ASCII character, as well as ASCII letters, digits, and
- * underscore. Keep this in sync with the definition of variable_char in
+ * underscore. Keep this in sync with the definition of variable_char in
* psqlscan.l.
*/
static bool
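
The comment above pins down which characters a psql variable name may contain. A minimal sketch of such a check, assuming empty names are rejected as well:

    #include <ctype.h>
    #include <stdbool.h>

    /* Accept any non-ASCII byte, plus ASCII letters, digits, and '_'. */
    static bool
    valid_variable_name(const char *name)
    {
        const unsigned char *p = (const unsigned char *) name;

        if (*p == '\0')
            return false;       /* empty names are not allowed */
        for (; *p; p++)
        {
            if (*p >= 0x80)
                continue;       /* any non-ASCII byte is accepted */
            if (!isalnum(*p) && *p != '_')
                return false;
        }
        return true;
    }
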
diff --git a/src/bin/scripts/clusterdb.c b/src/bin/scripts/clusterdb.c
index 0f711e870b..76de70b6ef 100644
--- a/src/bin/scripts/clusterdb.c
+++ b/src/bin/scripts/clusterdb.c
@@ -112,9 +112,9 @@ main(int argc, char *argv[])
}
}
- /*
- * Non-option argument specifies database name
- * as long as it wasn't already specified with -d / --dbname
+ /*
+ * Non-option argument specifies database name as long as it wasn't
+ * already specified with -d / --dbname
*/
if (optind < argc && dbname == NULL)
{
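
The reflowed comment describes a convention shared by all of these scripts: a bare positional argument names the database unless -d/--dbname already did. A runnable sketch of that logic with plain getopt():

    #include <stdio.h>
    #include <unistd.h>

    int
    main(int argc, char *argv[])
    {
        char       *dbname = NULL;
        int         c;

        while ((c = getopt(argc, argv, "d:")) != -1)
            if (c == 'd')
                dbname = optarg;
        /* a non-option argument supplies the name only if -d did not */
        if (optind < argc && dbname == NULL)
            dbname = argv[optind++];
        printf("database: %s\n", dbname ? dbname : "(default)");
        return 0;
    }
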
diff --git a/src/bin/scripts/common.c b/src/bin/scripts/common.c
index 5406a98c83..0ae708b21e 100644
--- a/src/bin/scripts/common.c
+++ b/src/bin/scripts/common.c
@@ -185,7 +185,7 @@ connectMaintenanceDatabase(const char *maintenance_db, const char *pghost,
enum trivalue prompt_password,
const char *progname)
{
- PGconn *conn;
+ PGconn *conn;
/* If a maintenance database name was specified, just connect to it. */
if (maintenance_db)
diff --git a/src/bin/scripts/common.h b/src/bin/scripts/common.h
index 229b8dc596..854bc2f03a 100644
--- a/src/bin/scripts/common.h
+++ b/src/bin/scripts/common.h
@@ -10,8 +10,8 @@
#define COMMON_H
#include "libpq-fe.h"
-#include "getopt_long.h" /* pgrminclude ignore */
-#include "pqexpbuffer.h" /* pgrminclude ignore */
+#include "getopt_long.h" /* pgrminclude ignore */
+#include "pqexpbuffer.h" /* pgrminclude ignore */
enum trivalue
{
@@ -34,8 +34,8 @@ extern PGconn *connectDatabase(const char *dbname, const char *pghost,
bool fail_ok);
extern PGconn *connectMaintenanceDatabase(const char *maintenance_db,
- const char *pghost, const char *pgport, const char *pguser,
- enum trivalue prompt_password, const char *progname);
+ const char *pghost, const char *pgport, const char *pguser,
+ enum trivalue prompt_password, const char *progname);
extern PGresult *executeQuery(PGconn *conn, const char *query,
const char *progname, bool echo);
diff --git a/src/bin/scripts/createlang.c b/src/bin/scripts/createlang.c
index cc671a4976..8268eda6ed 100644
--- a/src/bin/scripts/createlang.c
+++ b/src/bin/scripts/createlang.c
@@ -92,10 +92,9 @@ main(int argc, char *argv[])
}
/*
- * We set dbname from positional arguments if it is not
- * already set by option arguments -d. If not doing
- * listlangs, positional dbname must follow positional
- * langname.
+ * We set dbname from positional arguments if it is not already set by
+ * option arguments -d. If not doing listlangs, positional dbname must
+ * follow positional langname.
*/
if (argc - optind > 0)
diff --git a/src/bin/scripts/dropdb.c b/src/bin/scripts/dropdb.c
index 444dce500e..10ed67df8b 100644
--- a/src/bin/scripts/dropdb.c
+++ b/src/bin/scripts/dropdb.c
@@ -21,7 +21,7 @@ static void help(const char *progname);
int
main(int argc, char *argv[])
{
- static int if_exists = 0;
+ static int if_exists = 0;
static struct option long_options[] = {
{"host", required_argument, NULL, 'h'},
@@ -129,7 +129,7 @@ main(int argc, char *argv[])
maintenance_db = "template1";
conn = connectMaintenanceDatabase(maintenance_db,
- host, port, username, prompt_password, progname);
+ host, port, username, prompt_password, progname);
if (echo)
printf("%s", sql.data);
diff --git a/src/bin/scripts/droplang.c b/src/bin/scripts/droplang.c
index 47ec37f5e9..74553c19fd 100644
--- a/src/bin/scripts/droplang.c
+++ b/src/bin/scripts/droplang.c
@@ -91,10 +91,9 @@ main(int argc, char *argv[])
}
/*
- * We set dbname from positional arguments if it is not
- * already set by option arguments -d. If not doing
- * listlangs, positional dbname must follow positional
- * langname.
+ * We set dbname from positional arguments if it is not already set by
+ * option arguments -d. If not doing listlangs, positional dbname must
+ * follow positional langname.
*/
if (argc - optind > 0)
diff --git a/src/bin/scripts/reindexdb.c b/src/bin/scripts/reindexdb.c
index 614a6885a0..35254f20cb 100644
--- a/src/bin/scripts/reindexdb.c
+++ b/src/bin/scripts/reindexdb.c
@@ -122,9 +122,9 @@ main(int argc, char *argv[])
}
}
- /*
- * Non-option argument specifies database name
- * as long as it wasn't already specified with -d / --dbname
+ /*
+ * Non-option argument specifies database name as long as it wasn't
+ * already specified with -d / --dbname
*/
if (optind < argc && dbname == NULL)
{
diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c
index fe303ad885..6221bdc30d 100644
--- a/src/bin/scripts/vacuumdb.c
+++ b/src/bin/scripts/vacuumdb.c
@@ -135,10 +135,10 @@ main(int argc, char *argv[])
}
}
-
- /*
- * Non-option argument specifies database name
- * as long as it wasn't already specified with -d / --dbname
+
+ /*
+ * Non-option argument specifies database name as long as it wasn't
+ * already specified with -d / --dbname
*/
if (optind < argc && dbname == NULL)
{
@@ -312,7 +312,7 @@ vacuum_all_databases(bool full, bool verbose, bool and_analyze, bool analyze_onl
int i;
conn = connectMaintenanceDatabase(maintenance_db, host, port,
- username, prompt_password, progname);
+ username, prompt_password, progname);
result = executeQuery(conn, "SELECT datname FROM pg_database WHERE datallowconn ORDER BY 1;", progname, echo);
PQfinish(conn);
diff --git a/src/include/access/gist_private.h b/src/include/access/gist_private.h
index 5ad9858c22..9af9a0cf8c 100644
--- a/src/include/access/gist_private.h
+++ b/src/include/access/gist_private.h
@@ -408,7 +408,7 @@ typedef struct GiSTOptions
int32 vl_len_; /* varlena header (do not touch directly!) */
int fillfactor; /* page fill factor in percent (0..100) */
int bufferingModeOffset; /* use buffering build? */
-} GiSTOptions;
+} GiSTOptions;
/* gist.c */
extern Datum gistbuildempty(PG_FUNCTION_ARGS);
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index d554392e5a..026a19fa74 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -113,7 +113,7 @@ extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
extern void heap_inplace_update(Relation relation, HeapTuple tuple);
extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid);
extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
- Buffer buf);
+ Buffer buf);
extern Oid simple_heap_insert(Relation relation, HeapTuple tup);
extern void simple_heap_delete(Relation relation, ItemPointer tid);
diff --git a/src/include/access/htup.h b/src/include/access/htup.h
index 39213ff849..b289e14926 100644
--- a/src/include/access/htup.h
+++ b/src/include/access/htup.h
@@ -608,7 +608,7 @@ typedef HeapTupleData *HeapTuple;
/* 0x20 is free, was XLOG_HEAP2_CLEAN_MOVE */
#define XLOG_HEAP2_CLEANUP_INFO 0x30
#define XLOG_HEAP2_VISIBLE 0x40
-#define XLOG_HEAP2_MULTI_INSERT 0x50
+#define XLOG_HEAP2_MULTI_INSERT 0x50
/*
* All what we need to find changed tuple
@@ -671,7 +671,7 @@ typedef struct xl_heap_insert
typedef struct xl_heap_multi_insert
{
RelFileNode node;
- BlockNumber blkno;
+ BlockNumber blkno;
bool all_visible_cleared;
uint16 ntuples;
OffsetNumber offsets[1];
@@ -683,7 +683,7 @@ typedef struct xl_heap_multi_insert
typedef struct xl_multi_insert_tuple
{
- uint16 datalen; /* size of tuple data that follows */
+ uint16 datalen; /* size of tuple data that follows */
uint16 t_infomask2;
uint16 t_infomask;
uint8 t_hoff;
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index cae51a384d..f23ac3559a 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -418,12 +418,12 @@ typedef struct xl_btree_newroot
/*
* When a new operator class is declared, we require that the user
* supply us with an amproc procedure (BTORDER_PROC) for determining
- * whether, for two keys a and b, a < b, a = b, or a > b. This routine
+ * whether, for two keys a and b, a < b, a = b, or a > b. This routine
* must return < 0, 0, > 0, respectively, in these three cases. (It must
* not return INT_MIN, since we may negate the result before using it.)
*
* To facilitate accelerated sorting, an operator class may choose to
- * offer a second procedure (BTSORTSUPPORT_PROC). For full details, see
+ * offer a second procedure (BTSORTSUPPORT_PROC). For full details, see
* src/include/utils/sortsupport.h.
*/
@@ -551,7 +551,7 @@ typedef struct BTScanOpaqueData
int numArrayKeys; /* number of equality-type array keys (-1 if
* there are any unsatisfiable array keys) */
BTArrayKeyInfo *arrayKeys; /* info about each equality-type array key */
- MemoryContext arrayContext; /* scan-lifespan context for array data */
+ MemoryContext arrayContext; /* scan-lifespan context for array data */
/* info about killed items if any (killedItems is NULL if never used) */
int *killedItems; /* currPos.items indexes of killed items */
@@ -559,8 +559,8 @@ typedef struct BTScanOpaqueData
/*
* If we are doing an index-only scan, these are the tuple storage
- * workspaces for the currPos and markPos respectively. Each is of
- * size BLCKSZ, so it can hold as much as a full page's worth of tuples.
+ * workspaces for the currPos and markPos respectively. Each is of size
+ * BLCKSZ, so it can hold as much as a full page's worth of tuples.
*/
char *currTuples; /* tuple storage for currPos */
char *markTuples; /* tuple storage for markPos */
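
The BTORDER_PROC comment reflowed above carries a real gotcha: the comparison result may be negated for a reverse-order scan, and negating INT_MIN overflows. A subtraction like "a - b" can produce INT_MIN (or wrap for wide types), so explicit comparisons are the safe pattern:

    /* Three-way comparison that can only return -1, 0, or 1, so negating
     * the result is always well-defined. */
    static int
    cmp_int64(long long a, long long b)
    {
        if (a < b)
            return -1;
        if (a > b)
            return 1;
        return 0;
    }
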
diff --git a/src/include/access/slru.h b/src/include/access/slru.h
index 41cd484d80..711601ae62 100644
--- a/src/include/access/slru.h
+++ b/src/include/access/slru.h
@@ -147,7 +147,7 @@ extern void SimpleLruFlush(SlruCtl ctl, bool checkpoint);
extern void SimpleLruTruncate(SlruCtl ctl, int cutoffPage);
typedef bool (*SlruScanCallback) (SlruCtl ctl, char *filename, int segpage,
- void *data);
+ void *data);
extern bool SlruScanDirectory(SlruCtl ctl, SlruScanCallback callback, void *data);
/* SlruScanDirectory public callbacks */
diff --git a/src/include/access/spgist.h b/src/include/access/spgist.h
index 8d0205e691..50cac280a5 100644
--- a/src/include/access/spgist.h
+++ b/src/include/access/spgist.h
@@ -78,25 +78,25 @@ typedef struct spgChooseOut
{
struct /* results for spgMatchNode */
{
- int nodeN; /* descend to this node (index from 0) */
- int levelAdd; /* increment level by this much */
- Datum restDatum; /* new leaf datum */
+ int nodeN; /* descend to this node (index from 0) */
+ int levelAdd; /* increment level by this much */
+ Datum restDatum; /* new leaf datum */
} matchNode;
struct /* results for spgAddNode */
{
- Datum nodeLabel; /* new node's label */
- int nodeN; /* where to insert it (index from 0) */
+ Datum nodeLabel; /* new node's label */
+ int nodeN; /* where to insert it (index from 0) */
} addNode;
struct /* results for spgSplitTuple */
{
/* Info to form new inner tuple with one node */
- bool prefixHasPrefix; /* tuple should have a prefix? */
- Datum prefixPrefixDatum; /* if so, its value */
- Datum nodeLabel; /* node's label */
+ bool prefixHasPrefix; /* tuple should have a prefix? */
+ Datum prefixPrefixDatum; /* if so, its value */
+ Datum nodeLabel; /* node's label */
/* Info to form new lower-level inner tuple with all old nodes */
- bool postfixHasPrefix; /* tuple should have a prefix? */
- Datum postfixPrefixDatum; /* if so, its value */
+ bool postfixHasPrefix; /* tuple should have a prefix? */
+ Datum postfixPrefixDatum; /* if so, its value */
} splitTuple;
} result;
} spgChooseOut;
@@ -119,7 +119,7 @@ typedef struct spgPickSplitOut
int nNodes; /* number of nodes for new inner tuple */
Datum *nodeLabels; /* their labels (or NULL for no labels) */
- int *mapTuplesToNodes; /* node index for each leaf tuple */
+ int *mapTuplesToNodes; /* node index for each leaf tuple */
Datum *leafTupleDatums; /* datum to store in each new leaf tuple */
} spgPickSplitOut;
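
For orientation, here is a hedged sketch of how an opclass "choose" method could fill the matchNode arm of the spgChooseOut union shown above. The opclass and its always-descend-into-node-0 policy are hypothetical, and this compiles only inside the backend source tree; real methods (e.g. the text opclass) pick the node by examining the input datum.

#include "access/spgist.h"

static void
demo_choose(spgChooseIn *in, spgChooseOut *out)
{
    out->resultType = spgMatchNode;     /* descend rather than add/split */
    out->result.matchNode.nodeN = 0;    /* first node (index from 0) */
    out->result.matchNode.levelAdd = 1; /* one level deeper */
    out->result.matchNode.restDatum = in->leafDatum;    /* store as-is */
}
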
diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h
index aa5a602418..74267a4390 100644
--- a/src/include/access/spgist_private.h
+++ b/src/include/access/spgist_private.h
@@ -24,7 +24,7 @@
#define SPGIST_METAPAGE_BLKNO (0) /* metapage */
#define SPGIST_ROOT_BLKNO (1) /* root for normal entries */
#define SPGIST_NULL_BLKNO (2) /* root for null-value entries */
-#define SPGIST_LAST_FIXED_BLKNO SPGIST_NULL_BLKNO
+#define SPGIST_LAST_FIXED_BLKNO SPGIST_NULL_BLKNO
#define SpGistBlockIsRoot(blkno) \
((blkno) == SPGIST_ROOT_BLKNO || (blkno) == SPGIST_NULL_BLKNO)
@@ -40,7 +40,7 @@ typedef struct SpGistPageOpaqueData
uint16 nRedirection; /* number of redirection tuples on page */
uint16 nPlaceholder; /* number of placeholder tuples on page */
/* note there's no count of either LIVE or DEAD tuples ... */
- uint16 spgist_page_id; /* for identification of SP-GiST indexes */
+ uint16 spgist_page_id; /* for identification of SP-GiST indexes */
} SpGistPageOpaqueData;
typedef SpGistPageOpaqueData *SpGistPageOpaque;
@@ -91,7 +91,7 @@ typedef struct SpGistLUPCache
typedef struct SpGistMetaPageData
{
uint32 magicNumber; /* for identity cross-check */
- SpGistLUPCache lastUsedPages; /* shared storage of last-used info */
+ SpGistLUPCache lastUsedPages; /* shared storage of last-used info */
} SpGistMetaPageData;
#define SPGIST_MAGIC_NUMBER (0xBA0BABEE)
@@ -116,11 +116,11 @@ typedef struct SpGistState
{
spgConfigOut config; /* filled in by opclass config method */
- SpGistTypeDesc attType; /* type of input data and leaf values */
- SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */
+ SpGistTypeDesc attType; /* type of input data and leaf values */
+ SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */
SpGistTypeDesc attLabelType; /* type of node label values */
- char *deadTupleStorage; /* workspace for spgFormDeadTuple */
+ char *deadTupleStorage; /* workspace for spgFormDeadTuple */
TransactionId myXid; /* XID to use when creating a redirect tuple */
bool isBuild; /* true if doing index build */
@@ -136,7 +136,7 @@ typedef struct SpGistScanOpaqueData
/* Control flags showing whether to search nulls and/or non-nulls */
bool searchNulls; /* scan matches (all) null entries */
- bool searchNonNulls; /* scan matches (some) non-null entries */
+ bool searchNonNulls; /* scan matches (some) non-null entries */
/* Index quals to be passed to opclass (null-related quals removed) */
int numberOfKeys; /* number of index qualifier conditions */
@@ -154,14 +154,14 @@ typedef struct SpGistScanOpaqueData
TupleDesc indexTupDesc; /* if so, tuple descriptor for them */
int nPtrs; /* number of TIDs found on current page */
int iPtr; /* index for scanning through same */
- ItemPointerData heapPtrs[MaxIndexTuplesPerPage]; /* TIDs from cur page */
- bool recheck[MaxIndexTuplesPerPage]; /* their recheck flags */
- IndexTuple indexTups[MaxIndexTuplesPerPage]; /* reconstructed tuples */
+ ItemPointerData heapPtrs[MaxIndexTuplesPerPage]; /* TIDs from cur page */
+ bool recheck[MaxIndexTuplesPerPage]; /* their recheck flags */
+ IndexTuple indexTups[MaxIndexTuplesPerPage]; /* reconstructed tuples */
/*
* Note: using MaxIndexTuplesPerPage above is a bit hokey since
- * SpGistLeafTuples aren't exactly IndexTuples; however, they are
- * larger, so this is safe.
+ * SpGistLeafTuples aren't exactly IndexTuples; however, they are larger,
+ * so this is safe.
*/
} SpGistScanOpaqueData;
@@ -175,17 +175,17 @@ typedef struct SpGistCache
{
spgConfigOut config; /* filled in by opclass config method */
- SpGistTypeDesc attType; /* type of input data and leaf values */
- SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */
+ SpGistTypeDesc attType; /* type of input data and leaf values */
+ SpGistTypeDesc attPrefixType; /* type of inner-tuple prefix values */
SpGistTypeDesc attLabelType; /* type of node label values */
- SpGistLUPCache lastUsedPages; /* local storage of last-used info */
+ SpGistLUPCache lastUsedPages; /* local storage of last-used info */
} SpGistCache;
/*
- * SPGiST tuple types. Note: inner, leaf, and dead tuple structs
- * must have the same tupstate field in the same position! Real inner and
+ * SPGiST tuple types. Note: inner, leaf, and dead tuple structs
+ * must have the same tupstate field in the same position! Real inner and
* leaf tuples always have tupstate = LIVE; if the state is something else,
* use the SpGistDeadTuple struct to inspect the tuple.
*/
@@ -353,7 +353,7 @@ typedef SpGistDeadTupleData *SpGistDeadTuple;
* ACCEPT_RDATA_* can only use fixed-length rdata arrays, because of lengthof
*/
-#define ACCEPT_RDATA_DATA(p, s, i) \
+#define ACCEPT_RDATA_DATA(p, s, i) \
do { \
Assert((i) < lengthof(rdata)); \
rdata[i].data = (char *) (p); \
@@ -387,7 +387,7 @@ typedef SpGistDeadTupleData *SpGistDeadTuple;
#define XLOG_SPGIST_PICKSPLIT 0x50
#define XLOG_SPGIST_VACUUM_LEAF 0x60
#define XLOG_SPGIST_VACUUM_ROOT 0x70
-#define XLOG_SPGIST_VACUUM_REDIRECT 0x80
+#define XLOG_SPGIST_VACUUM_REDIRECT 0x80
/*
* Some redo functions need an SpGistState, although only a few of its fields
@@ -415,7 +415,7 @@ typedef struct spgxlogAddLeaf
bool newPage; /* init dest page? */
bool storesNulls; /* page is in the nulls tree? */
OffsetNumber offnumLeaf; /* offset where leaf tuple gets placed */
- OffsetNumber offnumHeadLeaf; /* offset of head tuple in chain, if any */
+ OffsetNumber offnumHeadLeaf; /* offset of head tuple in chain, if any */
BlockNumber blknoParent; /* where the parent downlink is, if any */
OffsetNumber offnumParent;
@@ -589,7 +589,7 @@ typedef struct spgxlogVacuumRedirect
RelFileNode node;
BlockNumber blkno; /* block number to clean */
- uint16 nToPlaceholder; /* number of redirects to make placeholders */
+ uint16 nToPlaceholder; /* number of redirects to make placeholders */
OffsetNumber firstPlaceholder; /* first placeholder tuple to remove */
/* offsets of redirect tuples to make placeholders follow */
@@ -620,24 +620,24 @@ extern void initSpGistState(SpGistState *state, Relation index);
extern Buffer SpGistNewBuffer(Relation index);
extern void SpGistUpdateMetaPage(Relation index);
extern Buffer SpGistGetBuffer(Relation index, int flags,
- int needSpace, bool *isNew);
+ int needSpace, bool *isNew);
extern void SpGistSetLastUsedPage(Relation index, Buffer buffer);
extern void SpGistInitPage(Page page, uint16 f);
extern void SpGistInitBuffer(Buffer b, uint16 f);
extern void SpGistInitMetapage(Page page);
extern unsigned int SpGistGetTypeSize(SpGistTypeDesc *att, Datum datum);
extern SpGistLeafTuple spgFormLeafTuple(SpGistState *state,
- ItemPointer heapPtr,
- Datum datum, bool isnull);
+ ItemPointer heapPtr,
+ Datum datum, bool isnull);
extern SpGistNodeTuple spgFormNodeTuple(SpGistState *state,
- Datum label, bool isnull);
+ Datum label, bool isnull);
extern SpGistInnerTuple spgFormInnerTuple(SpGistState *state,
- bool hasPrefix, Datum prefix,
- int nNodes, SpGistNodeTuple *nodes);
+ bool hasPrefix, Datum prefix,
+ int nNodes, SpGistNodeTuple *nodes);
extern SpGistDeadTuple spgFormDeadTuple(SpGistState *state, int tupstate,
BlockNumber blkno, OffsetNumber offnum);
extern Datum *spgExtractNodeLabels(SpGistState *state,
- SpGistInnerTuple innerTuple);
+ SpGistInnerTuple innerTuple);
extern OffsetNumber SpGistPageAddNewItem(SpGistState *state, Page page,
Item item, Size size,
OffsetNumber *startOffset,
@@ -645,12 +645,12 @@ extern OffsetNumber SpGistPageAddNewItem(SpGistState *state, Page page,
/* spgdoinsert.c */
extern void spgUpdateNodeLink(SpGistInnerTuple tup, int nodeN,
- BlockNumber blkno, OffsetNumber offset);
+ BlockNumber blkno, OffsetNumber offset);
extern void spgPageIndexMultiDelete(SpGistState *state, Page page,
OffsetNumber *itemnos, int nitems,
int firststate, int reststate,
BlockNumber blkno, OffsetNumber offnum);
extern void spgdoinsert(Relation index, SpGistState *state,
- ItemPointer heapPtr, Datum datum, bool isnull);
+ ItemPointer heapPtr, Datum datum, bool isnull);
#endif /* SPGIST_PRIVATE_H */
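
The fixed-block constants reindented near the top of this header are easy to restate standalone: block 0 is the metapage, block 1 the root for normal entries, block 2 the root for null entries. The demo below reproduces the SpGistBlockIsRoot test and checks the first few block numbers.

#include <stdio.h>

/* Standalone restatement of the constants above. */
#define SPGIST_METAPAGE_BLKNO   0       /* metapage */
#define SPGIST_ROOT_BLKNO       1       /* root for normal entries */
#define SPGIST_NULL_BLKNO       2       /* root for null-value entries */
#define SpGistBlockIsRoot(blkno) \
    ((blkno) == SPGIST_ROOT_BLKNO || (blkno) == SPGIST_NULL_BLKNO)

int
main(void)
{
    unsigned    blkno;

    for (blkno = 0; blkno <= 3; blkno++)
        printf("block %u: %s\n", blkno,
               SpGistBlockIsRoot(blkno) ? "root page" : "not a root page");
    return 0;
}
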
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index 50f181307f..b12d2a0068 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -55,7 +55,8 @@ typedef enum
{
SYNCHRONOUS_COMMIT_OFF, /* asynchronous commit */
SYNCHRONOUS_COMMIT_LOCAL_FLUSH, /* wait for local flush only */
- SYNCHRONOUS_COMMIT_REMOTE_WRITE, /* wait for local flush and remote write */
+ SYNCHRONOUS_COMMIT_REMOTE_WRITE, /* wait for local flush and remote
+ * write */
SYNCHRONOUS_COMMIT_REMOTE_FLUSH /* wait for local and remote flush */
} SyncCommitLevel;
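
The reflowed SyncCommitLevel enum tracks the synchronous_commit GUC, with each successive level waiting for strictly more durability. The standalone restatement below annotates each level with the GUC spelling I take it to correspond to (off, local, remote_write, on); that mapping is my reading, not part of the patch.

#include <stdio.h>

typedef enum
{
    DEMO_COMMIT_OFF,            /* synchronous_commit = off */
    DEMO_COMMIT_LOCAL_FLUSH,    /* synchronous_commit = local */
    DEMO_COMMIT_REMOTE_WRITE,   /* synchronous_commit = remote_write */
    DEMO_COMMIT_REMOTE_FLUSH    /* synchronous_commit = on */
} DemoSyncCommitLevel;

int
main(void)
{
    DemoSyncCommitLevel lvl = DEMO_COMMIT_REMOTE_WRITE;

    /* Ordering is meaningful: higher levels imply the lower waits too. */
    printf("level %d waits for local flush: %s\n", (int) lvl,
           lvl >= DEMO_COMMIT_LOCAL_FLUSH ? "yes" : "no");
    return 0;
}
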
diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h
index 2020a3b41f..3328a50fab 100644
--- a/src/include/access/xlog_internal.h
+++ b/src/include/access/xlog_internal.h
@@ -157,8 +157,8 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader;
#define NextLogPage(recptr) \
do { \
if ((recptr).xrecoff % XLOG_BLCKSZ != 0) \
- (recptr).xrecoff += \
- (XLOG_BLCKSZ - (recptr).xrecoff % XLOG_BLCKSZ); \
+ (recptr).xrecoff += \
+ (XLOG_BLCKSZ - (recptr).xrecoff % XLOG_BLCKSZ); \
if ((recptr).xrecoff >= XLogFileSize) \
{ \
((recptr).xlogid)++; \
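
The NextLogPage macro reindented above rounds xrecoff up to the next XLOG_BLCKSZ boundary and, when that reaches XLogFileSize, rolls over into the next logical xlog file. Below is a standalone model of the arithmetic; the sizes are illustrative rather than this build's, and the reset of xrecoff after the rollover follows the macro's evident intent, since the hunk ends mid-macro.

#include <stdint.h>
#include <stdio.h>

#define XLOG_BLCKSZ     8192u
#define XLogFileSize    (16u * 1024u * 1024u)   /* illustrative */

typedef struct
{
    uint32_t    xlogid;
    uint32_t    xrecoff;
} DemoRecPtr;

static void
next_log_page(DemoRecPtr *recptr)
{
    if (recptr->xrecoff % XLOG_BLCKSZ != 0)
        recptr->xrecoff += XLOG_BLCKSZ - recptr->xrecoff % XLOG_BLCKSZ;
    if (recptr->xrecoff >= XLogFileSize)
    {
        recptr->xlogid++;
        recptr->xrecoff = 0;    /* assumed reset; hunk is truncated */
    }
}

int
main(void)
{
    DemoRecPtr  p = {0, 100};

    next_log_page(&p);
    printf("%u/%u\n", p.xlogid, p.xrecoff);     /* prints 0/8192 */
    return 0;
}
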
diff --git a/src/include/catalog/catalog.h b/src/include/catalog/catalog.h
index 97c79841a6..678a945271 100644
--- a/src/include/catalog/catalog.h
+++ b/src/include/catalog/catalog.h
@@ -18,7 +18,7 @@
* 'pgrminclude ignore' needed here because CppAsString2() does not throw
* an error if the symbol is not defined.
*/
-#include "catalog/catversion.h" /* pgrminclude ignore */
+#include "catalog/catversion.h" /* pgrminclude ignore */
#include "catalog/pg_class.h"
#include "storage/relfilenode.h"
#include "utils/relcache.h"
diff --git a/src/include/catalog/genbki.h b/src/include/catalog/genbki.h
index bcf31e6c6b..f973580e5f 100644
--- a/src/include/catalog/genbki.h
+++ b/src/include/catalog/genbki.h
@@ -27,7 +27,7 @@
*
* Variable-length catalog fields (except possibly the first not nullable one)
* should not be visible in C structures, so they are made invisible by #ifdefs
- * of an undefined symbol. See also MARKNOTNULL in bootstrap.c for how this is
+ * of an undefined symbol. See also MARKNOTNULL in bootstrap.c for how this is
* handled.
*/
#undef CATALOG_VARLEN
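
The "#undef CATALOG_VARLEN" above is the whole trick: each catalog struct's variable-length tail is wrapped in an #ifdef of a symbol that is deliberately never defined, so C code only ever sees the fixed-size prefix, and sizeof()/offsetof() stay honest. A standalone illustration:

#include <stdio.h>

#undef CATALOG_VARLEN           /* deliberately never defined */

typedef struct DemoCatalogRow
{
    unsigned    oid_field;      /* fixed-width, directly accessible */
    short       num_field;      /* fixed-width, directly accessible */
#ifdef CATALOG_VARLEN           /* variable-length fields start here */
    char        payload[1];     /* invisible to C code */
#endif
} DemoCatalogRow;

int
main(void)
{
    /* Only the fixed prefix contributes to the struct's size. */
    printf("C sees only the fixed prefix: sizeof = %zu\n",
           sizeof(DemoCatalogRow));
    return 0;
}
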
diff --git a/src/include/catalog/index.h b/src/include/catalog/index.h
index 3f73a6c58c..7c8198f31e 100644
--- a/src/include/catalog/index.h
+++ b/src/include/catalog/index.h
@@ -99,6 +99,6 @@ extern bool reindex_relation(Oid relid, int flags);
extern bool ReindexIsProcessingHeap(Oid heapOid);
extern bool ReindexIsProcessingIndex(Oid indexOid);
-extern Oid IndexGetRelation(Oid indexId, bool missing_ok);
+extern Oid IndexGetRelation(Oid indexId, bool missing_ok);
#endif /* INDEX_H */
diff --git a/src/include/catalog/namespace.h b/src/include/catalog/namespace.h
index fa3ba5bd10..76215dc8a1 100644
--- a/src/include/catalog/namespace.h
+++ b/src/include/catalog/namespace.h
@@ -47,18 +47,18 @@ typedef struct OverrideSearchPath
bool addTemp; /* implicitly prepend temp schema? */
} OverrideSearchPath;
-typedef void (*RangeVarGetRelidCallback)(const RangeVar *relation, Oid relId,
- Oid oldRelId, void *callback_arg);
+typedef void (*RangeVarGetRelidCallback) (const RangeVar *relation, Oid relId,
+ Oid oldRelId, void *callback_arg);
#define RangeVarGetRelid(relation, lockmode, missing_ok) \
RangeVarGetRelidExtended(relation, lockmode, missing_ok, false, NULL, NULL)
-extern Oid RangeVarGetRelidExtended(const RangeVar *relation,
+extern Oid RangeVarGetRelidExtended(const RangeVar *relation,
LOCKMODE lockmode, bool missing_ok, bool nowait,
RangeVarGetRelidCallback callback,
void *callback_arg);
extern Oid RangeVarGetCreationNamespace(const RangeVar *newRelation);
-extern Oid RangeVarGetAndCheckCreationNamespace(RangeVar *newRelation,
+extern Oid RangeVarGetAndCheckCreationNamespace(RangeVar *newRelation,
LOCKMODE lockmode,
Oid *existing_relation_id);
extern void RangeVarAdjustRelationPersistence(RangeVar *newRelation, Oid nspid);
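
A callback matching the reformatted RangeVarGetRelidCallback typedef might look like the hedged, backend-only sketch below; demo_callback and its commented check are hypothetical. The point of the callback is to let a caller revalidate the name lookup (permissions, or a rename that made the name resolve to a different relation than oldRelId) once the lock has actually been obtained.

#include "postgres.h"
#include "catalog/namespace.h"

static void
demo_callback(const RangeVar *relation, Oid relId, Oid oldRelId,
              void *callback_arg)
{
    /* e.g., check ownership of relId here, or compare relId against
     * oldRelId to detect that the lookup changed under us */
}

/* Usage sketch, matching the declaration above:
 *     Oid relid = RangeVarGetRelidExtended(rv, AccessShareLock,
 *                                          false, false,
 *                                          demo_callback, NULL);
 */
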
diff --git a/src/include/catalog/objectaccess.h b/src/include/catalog/objectaccess.h
index a5158e2bc9..3b40dbc492 100644
--- a/src/include/catalog/objectaccess.h
+++ b/src/include/catalog/objectaccess.h
@@ -36,10 +36,10 @@ typedef enum ObjectAccessType
typedef struct
{
/*
- * Flags to inform extensions the context of this deletion.
- * Also see PERFORM_DELETION_* in dependency.h
+ * Flags to inform extensions the context of this deletion. Also see
+ * PERFORM_DELETION_* in dependency.h
*/
- int dropflags;
+ int dropflags;
} ObjectAccessDrop;
/*
diff --git a/src/include/catalog/objectaddress.h b/src/include/catalog/objectaddress.h
index 9c03996885..0af09c616d 100644
--- a/src/include/catalog/objectaddress.h
+++ b/src/include/catalog/objectaddress.h
@@ -28,8 +28,8 @@ typedef struct ObjectAddress
} ObjectAddress;
extern ObjectAddress get_object_address(ObjectType objtype, List *objname,
- List *objargs, Relation *relp,
- LOCKMODE lockmode, bool missing_ok);
+ List *objargs, Relation *relp,
+ LOCKMODE lockmode, bool missing_ok);
extern void check_object_ownership(Oid roleid,
ObjectType objtype, ObjectAddress address,
diff --git a/src/include/catalog/pg_aggregate.h b/src/include/catalog/pg_aggregate.h
index 461772c27b..4f44c46f4f 100644
--- a/src/include/catalog/pg_aggregate.h
+++ b/src/include/catalog/pg_aggregate.h
@@ -44,6 +44,7 @@ CATALOG(pg_aggregate,2600) BKI_WITHOUT_OIDS
regproc aggfinalfn;
Oid aggsortop;
Oid aggtranstype;
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text agginitval;
#endif
diff --git a/src/include/catalog/pg_attrdef.h b/src/include/catalog/pg_attrdef.h
index ad770e410f..b92fd1593f 100644
--- a/src/include/catalog/pg_attrdef.h
+++ b/src/include/catalog/pg_attrdef.h
@@ -32,6 +32,7 @@ CATALOG(pg_attrdef,2604)
{
Oid adrelid; /* OID of table containing attribute */
int2 adnum; /* attnum of attribute */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
pg_node_tree adbin; /* nodeToString representation of default */
text adsrc; /* human-readable representation of default */
diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h
index 45e38e4dfc..4ee1d90ce3 100644
--- a/src/include/catalog/pg_attribute.h
+++ b/src/include/catalog/pg_attribute.h
@@ -201,7 +201,7 @@ typedef FormData_pg_attribute *Form_pg_attribute;
#define Anum_pg_attribute_attcollation 18
#define Anum_pg_attribute_attacl 19
#define Anum_pg_attribute_attoptions 20
-#define Anum_pg_attribute_attfdwoptions 21
+#define Anum_pg_attribute_attfdwoptions 21
/* ----------------
diff --git a/src/include/catalog/pg_constraint.h b/src/include/catalog/pg_constraint.h
index 3a77124b00..b9e4bf41f9 100644
--- a/src/include/catalog/pg_constraint.h
+++ b/src/include/catalog/pg_constraint.h
@@ -92,6 +92,7 @@ CATALOG(pg_constraint,2606)
bool connoinherit;
#ifdef CATALOG_VARLEN /* variable-length fields start here */
+
/*
* Columns of conrelid that the constraint applies to, if known (this is
* NULL for trigger constraints)
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index 1031e56512..5cff39608b 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -33,7 +33,7 @@ typedef struct CheckPoint
XLogRecPtr redo; /* next RecPtr available when we began to
* create CheckPoint (i.e. REDO start point) */
TimeLineID ThisTimeLineID; /* current TLI */
- bool fullPageWrites; /* current full_page_writes */
+ bool fullPageWrites; /* current full_page_writes */
uint32 nextXidEpoch; /* higher-order bits of nextXid */
TransactionId nextXid; /* next free XID */
Oid nextOid; /* next free OID */
@@ -140,11 +140,11 @@ typedef struct ControlFileData
* record, to make sure the end-of-backup record corresponds the base
* backup we're recovering from.
*
- * backupEndPoint is the backup end location, if we are recovering from
- * an online backup which was taken from the standby and haven't reached
- * the end of backup yet. It is initialized to the minimum recovery point
- * in pg_control which was backed up last. It is reset to zero when
- * the end of backup is reached, and we mustn't start up before that.
+ * backupEndPoint is the backup end location, if we are recovering from an
+ * online backup which was taken from the standby and haven't reached the
+ * end of backup yet. It is initialized to the minimum recovery point in
+ * pg_control which was backed up last. It is reset to zero when the end
+ * of backup is reached, and we mustn't start up before that.
*
* If backupEndRequired is true, we know for sure that we're restoring
* from a backup, and must see a backup-end record before we can safely
diff --git a/src/include/catalog/pg_database.h b/src/include/catalog/pg_database.h
index e8509f59bb..af803bba89 100644
--- a/src/include/catalog/pg_database.h
+++ b/src/include/catalog/pg_database.h
@@ -42,6 +42,7 @@ CATALOG(pg_database,1262) BKI_SHARED_RELATION BKI_ROWTYPE_OID(1248) BKI_SCHEMA_M
Oid datlastsysoid; /* highest OID to consider a system OID */
TransactionId datfrozenxid; /* all Xids < this are frozen in this DB */
Oid dattablespace; /* default table space for this DB */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
aclitem datacl[1]; /* access permissions */
#endif
diff --git a/src/include/catalog/pg_db_role_setting.h b/src/include/catalog/pg_db_role_setting.h
index c6e2f3b4dc..c6a69c5a6d 100644
--- a/src/include/catalog/pg_db_role_setting.h
+++ b/src/include/catalog/pg_db_role_setting.h
@@ -35,6 +35,7 @@ CATALOG(pg_db_role_setting,2964) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
{
Oid setdatabase; /* database */
Oid setrole; /* role */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text setconfig[1]; /* GUC settings to apply at login */
#endif
diff --git a/src/include/catalog/pg_default_acl.h b/src/include/catalog/pg_default_acl.h
index f36ce52519..d7421007af 100644
--- a/src/include/catalog/pg_default_acl.h
+++ b/src/include/catalog/pg_default_acl.h
@@ -32,6 +32,7 @@ CATALOG(pg_default_acl,826)
Oid defaclrole; /* OID of role owning this ACL */
Oid defaclnamespace; /* OID of namespace, or 0 for all */
char defaclobjtype; /* see DEFACLOBJ_xxx constants below */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
aclitem defaclacl[1]; /* permissions to add at CREATE time */
#endif
diff --git a/src/include/catalog/pg_description.h b/src/include/catalog/pg_description.h
index 352c517369..a454194893 100644
--- a/src/include/catalog/pg_description.h
+++ b/src/include/catalog/pg_description.h
@@ -50,6 +50,7 @@ CATALOG(pg_description,2609) BKI_WITHOUT_OIDS
Oid objoid; /* OID of object itself */
Oid classoid; /* OID of table containing object */
int4 objsubid; /* column number, or 0 if not used */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text description; /* description of object */
#endif
diff --git a/src/include/catalog/pg_extension.h b/src/include/catalog/pg_extension.h
index b07b43cf7e..4807c6a733 100644
--- a/src/include/catalog/pg_extension.h
+++ b/src/include/catalog/pg_extension.h
@@ -34,7 +34,8 @@ CATALOG(pg_extension,3079)
Oid extowner; /* extension owner */
Oid extnamespace; /* namespace of contained objects */
bool extrelocatable; /* if true, allow ALTER EXTENSION SET SCHEMA */
-#ifdef CATALOG_VARLEN /* variable-length fields start here */
+
+#ifdef CATALOG_VARLEN /* variable-length fields start here */
/* extversion should never be null, but the others can be. */
text extversion; /* extension version name */
Oid extconfig[1]; /* dumpable configuration tables */
diff --git a/src/include/catalog/pg_foreign_data_wrapper.h b/src/include/catalog/pg_foreign_data_wrapper.h
index b6dd8eb7cc..18c538f49c 100644
--- a/src/include/catalog/pg_foreign_data_wrapper.h
+++ b/src/include/catalog/pg_foreign_data_wrapper.h
@@ -34,6 +34,7 @@ CATALOG(pg_foreign_data_wrapper,2328)
Oid fdwowner; /* FDW owner */
Oid fdwhandler; /* handler function, or 0 if none */
Oid fdwvalidator; /* option validation function, or 0 if none */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
aclitem fdwacl[1]; /* access permissions */
text fdwoptions[1]; /* FDW options */
diff --git a/src/include/catalog/pg_foreign_server.h b/src/include/catalog/pg_foreign_server.h
index dd1e65e02a..38830af29e 100644
--- a/src/include/catalog/pg_foreign_server.h
+++ b/src/include/catalog/pg_foreign_server.h
@@ -31,6 +31,7 @@ CATALOG(pg_foreign_server,1417)
NameData srvname; /* foreign server name */
Oid srvowner; /* server owner */
Oid srvfdw; /* server FDW */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text srvtype;
text srvversion;
diff --git a/src/include/catalog/pg_foreign_table.h b/src/include/catalog/pg_foreign_table.h
index 9af983eb93..186c49d425 100644
--- a/src/include/catalog/pg_foreign_table.h
+++ b/src/include/catalog/pg_foreign_table.h
@@ -30,6 +30,7 @@ CATALOG(pg_foreign_table,3118) BKI_WITHOUT_OIDS
{
Oid ftrelid; /* OID of foreign table */
Oid ftserver; /* OID of foreign server */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text ftoptions[1]; /* FDW-specific options */
#endif
diff --git a/src/include/catalog/pg_index.h b/src/include/catalog/pg_index.h
index 92ca22c686..9a86121cf9 100644
--- a/src/include/catalog/pg_index.h
+++ b/src/include/catalog/pg_index.h
@@ -44,6 +44,7 @@ CATALOG(pg_index,2610) BKI_WITHOUT_OIDS BKI_SCHEMA_MACRO
/* variable-length fields start here, but we allow direct access to indkey */
int2vector indkey; /* column numbers of indexed cols, or 0 */
+
#ifdef CATALOG_VARLEN
oidvector indcollation; /* collation identifiers */
oidvector indclass; /* opclass identifiers */
diff --git a/src/include/catalog/pg_language.h b/src/include/catalog/pg_language.h
index eb4ae5ab2d..79cfa09d02 100644
--- a/src/include/catalog/pg_language.h
+++ b/src/include/catalog/pg_language.h
@@ -37,6 +37,7 @@ CATALOG(pg_language,2612)
Oid lanplcallfoid; /* Call handler for PL */
Oid laninline; /* Optional anonymous-block handler function */
Oid lanvalidator; /* Optional validation function */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
aclitem lanacl[1]; /* Access privileges */
#endif
diff --git a/src/include/catalog/pg_largeobject.h b/src/include/catalog/pg_largeobject.h
index b89d4ec1a9..d442ec4e4a 100644
--- a/src/include/catalog/pg_largeobject.h
+++ b/src/include/catalog/pg_largeobject.h
@@ -32,6 +32,7 @@ CATALOG(pg_largeobject,2613) BKI_WITHOUT_OIDS
{
Oid loid; /* Identifier of large object */
int4 pageno; /* Page number (starting from 0) */
+
/* data has variable length, but we allow direct access; see inv_api.c */
bytea data; /* Data for page (may be zero-length) */
} FormData_pg_largeobject;
diff --git a/src/include/catalog/pg_largeobject_metadata.h b/src/include/catalog/pg_largeobject_metadata.h
index c280176491..768497eb52 100644
--- a/src/include/catalog/pg_largeobject_metadata.h
+++ b/src/include/catalog/pg_largeobject_metadata.h
@@ -31,6 +31,7 @@
CATALOG(pg_largeobject_metadata,2995)
{
Oid lomowner; /* OID of the largeobject owner */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
aclitem lomacl[1]; /* access permissions */
#endif
diff --git a/src/include/catalog/pg_namespace.h b/src/include/catalog/pg_namespace.h
index 1daba477b4..e253921278 100644
--- a/src/include/catalog/pg_namespace.h
+++ b/src/include/catalog/pg_namespace.h
@@ -37,6 +37,7 @@ CATALOG(pg_namespace,2615)
{
NameData nspname;
Oid nspowner;
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
aclitem nspacl[1];
#endif
diff --git a/src/include/catalog/pg_opclass.h b/src/include/catalog/pg_opclass.h
index 96eaa600b3..638f8088c7 100644
--- a/src/include/catalog/pg_opclass.h
+++ b/src/include/catalog/pg_opclass.h
@@ -134,7 +134,7 @@ DATA(insert ( 405 macaddr_ops PGNSP PGUID 1985 829 t 0 ));
*/
DATA(insert ( 403 name_ops PGNSP PGUID 1986 19 t 2275 ));
DATA(insert ( 405 name_ops PGNSP PGUID 1987 19 t 0 ));
-DATA(insert OID = 3125 ( 403 numeric_ops PGNSP PGUID 1988 1700 t 0 ));
+DATA(insert OID = 3125 ( 403 numeric_ops PGNSP PGUID 1988 1700 t 0 ));
#define NUMERIC_BTREE_OPS_OID 3125
DATA(insert ( 405 numeric_ops PGNSP PGUID 1998 1700 t 0 ));
DATA(insert OID = 1981 ( 403 oid_ops PGNSP PGUID 1989 26 t 0 ));
@@ -148,7 +148,7 @@ DATA(insert OID = 3126 ( 403 text_ops PGNSP PGUID 1994 25 t 0 ));
DATA(insert ( 405 text_ops PGNSP PGUID 1995 25 t 0 ));
DATA(insert ( 403 time_ops PGNSP PGUID 1996 1083 t 0 ));
DATA(insert ( 405 time_ops PGNSP PGUID 1997 1083 t 0 ));
-DATA(insert OID = 3127 ( 403 timestamptz_ops PGNSP PGUID 434 1184 t 0 ));
+DATA(insert OID = 3127 ( 403 timestamptz_ops PGNSP PGUID 434 1184 t 0 ));
#define TIMESTAMPTZ_BTREE_OPS_OID 3127
DATA(insert ( 405 timestamptz_ops PGNSP PGUID 1999 1184 t 0 ));
DATA(insert ( 403 timetz_ops PGNSP PGUID 2000 1266 t 0 ));
diff --git a/src/include/catalog/pg_operator.h b/src/include/catalog/pg_operator.h
index 48ddd16a94..94702541f5 100644
--- a/src/include/catalog/pg_operator.h
+++ b/src/include/catalog/pg_operator.h
@@ -1702,7 +1702,7 @@ DATA(insert OID = 3895 ( "&<" PGNSP PGUID b f f 3831 3831 16 0 0 range_overl
DESCR("overlaps or is left of");
DATA(insert OID = 3896 ( "&>" PGNSP PGUID b f f 3831 3831 16 0 0 range_overright scalargtsel scalargtjoinsel ));
DESCR("overlaps or is right of");
-DATA(insert OID = 3897 ( "-|-" PGNSP PGUID b f f 3831 3831 16 3897 0 range_adjacent contsel contjoinsel ));
+DATA(insert OID = 3897 ( "-|-" PGNSP PGUID b f f 3831 3831 16 3897 0 range_adjacent contsel contjoinsel ));
DESCR("is adjacent to");
DATA(insert OID = 3898 ( "+" PGNSP PGUID b f f 3831 3831 3831 3898 0 range_union - - ));
DESCR("range union");
diff --git a/src/include/catalog/pg_pltemplate.h b/src/include/catalog/pg_pltemplate.h
index 00abd53370..d8927adcbe 100644
--- a/src/include/catalog/pg_pltemplate.h
+++ b/src/include/catalog/pg_pltemplate.h
@@ -33,6 +33,7 @@ CATALOG(pg_pltemplate,1136) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
NameData tmplname; /* name of PL */
bool tmpltrusted; /* PL is trusted? */
bool tmpldbacreate; /* PL is installable by db owner? */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text tmplhandler; /* name of call handler function */
text tmplinline; /* name of anonymous-block handler, or NULL */
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 34b77f0191..1e097ddbe6 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -54,8 +54,12 @@ CATALOG(pg_proc,1255) BKI_BOOTSTRAP BKI_ROWTYPE_OID(81) BKI_SCHEMA_MACRO
int2 pronargdefaults; /* number of arguments with defaults */
Oid prorettype; /* OID of result type */
- /* variable-length fields start here, but we allow direct access to proargtypes */
+ /*
+ * variable-length fields start here, but we allow direct access to
+ * proargtypes
+ */
oidvector proargtypes; /* parameter types (excludes OUT params) */
+
#ifdef CATALOG_VARLEN
Oid proallargtypes[1]; /* all param types (NULL if IN only) */
char proargmodes[1]; /* parameter modes (NULL if IN only) */
@@ -2664,7 +2668,7 @@ DATA(insert OID = 3151 ( pg_stat_get_db_temp_bytes PGNSP PGUID 12 1 0 0 0 f f f
DESCR("statistics: number of bytes in temporary files written");
DATA(insert OID = 2844 ( pg_stat_get_db_blk_read_time PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 701 "26" _null_ _null_ _null_ _null_ pg_stat_get_db_blk_read_time _null_ _null_ _null_ ));
DESCR("statistics: block read time, in msec");
-DATA(insert OID = 2845 ( pg_stat_get_db_blk_write_time PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 701 "26" _null_ _null_ _null_ _null_ pg_stat_get_db_blk_write_time _null_ _null_ _null_ ));
+DATA(insert OID = 2845 ( pg_stat_get_db_blk_write_time PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 701 "26" _null_ _null_ _null_ _null_ pg_stat_get_db_blk_write_time _null_ _null_ _null_ ));
DESCR("statistics: block write time, in msec");
DATA(insert OID = 2769 ( pg_stat_get_bgwriter_timed_checkpoints PGNSP PGUID 12 1 0 0 0 f f f f t f s 0 0 20 "" _null_ _null_ _null_ _null_ pg_stat_get_bgwriter_timed_checkpoints _null_ _null_ _null_ ));
DESCR("statistics: number of timed checkpoints started by the bgwriter");
@@ -2904,7 +2908,7 @@ DATA(insert OID = 2082 ( pg_operator_is_visible PGNSP PGUID 12 1 0 0 0 f f f f
DESCR("is operator visible in search path?");
DATA(insert OID = 2083 ( pg_opclass_is_visible PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 16 "26" _null_ _null_ _null_ _null_ pg_opclass_is_visible _null_ _null_ _null_ ));
DESCR("is opclass visible in search path?");
-DATA(insert OID = 3829 ( pg_opfamily_is_visible PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 16 "26" _null_ _null_ _null_ _null_ pg_opfamily_is_visible _null_ _null_ _null_ ));
+DATA(insert OID = 3829 ( pg_opfamily_is_visible PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 16 "26" _null_ _null_ _null_ _null_ pg_opfamily_is_visible _null_ _null_ _null_ ));
DESCR("is opfamily visible in search path?");
DATA(insert OID = 2093 ( pg_conversion_is_visible PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 16 "26" _null_ _null_ _null_ _null_ pg_conversion_is_visible _null_ _null_ _null_ ));
DESCR("is conversion visible in search path?");
@@ -4067,9 +4071,9 @@ DATA(insert OID = 323 ( json_recv PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0
DESCR("I/O");
DATA(insert OID = 324 ( json_send PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 17 "114" _null_ _null_ _null_ _null_ json_send _null_ _null_ _null_ ));
DESCR("I/O");
-DATA(insert OID = 3153 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2277" _null_ _null_ _null_ _null_ array_to_json _null_ _null_ _null_ ));
+DATA(insert OID = 3153 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2277" _null_ _null_ _null_ _null_ array_to_json _null_ _null_ _null_ ));
DESCR("map array to json");
-DATA(insert OID = 3154 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "2277 16" _null_ _null_ _null_ _null_ array_to_json_pretty _null_ _null_ _null_ ));
+DATA(insert OID = 3154 ( array_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 2 0 114 "2277 16" _null_ _null_ _null_ _null_ array_to_json_pretty _null_ _null_ _null_ ));
DESCR("map array to json with optional pretty printing");
DATA(insert OID = 3155 ( row_to_json PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 114 "2249" _null_ _null_ _null_ _null_ row_to_json _null_ _null_ _null_ ));
DESCR("map row to json");
@@ -4466,13 +4470,13 @@ DATA(insert OID = 3849 ( upper PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2283 "
DESCR("upper bound of range");
DATA(insert OID = 3850 ( isempty PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 16 "3831" _null_ _null_ _null_ _null_ range_empty _null_ _null_ _null_ ));
DESCR("is the range empty?");
-DATA(insert OID = 3851 ( lower_inc PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 16 "3831" _null_ _null_ _null_ _null_ range_lower_inc _null_ _null_ _null_ ));
+DATA(insert OID = 3851 ( lower_inc PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 16 "3831" _null_ _null_ _null_ _null_ range_lower_inc _null_ _null_ _null_ ));
DESCR("is the range's lower bound inclusive?");
-DATA(insert OID = 3852 ( upper_inc PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 16 "3831" _null_ _null_ _null_ _null_ range_upper_inc _null_ _null_ _null_ ));
+DATA(insert OID = 3852 ( upper_inc PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 16 "3831" _null_ _null_ _null_ _null_ range_upper_inc _null_ _null_ _null_ ));
DESCR("is the range's upper bound inclusive?");
-DATA(insert OID = 3853 ( lower_inf PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 16 "3831" _null_ _null_ _null_ _null_ range_lower_inf _null_ _null_ _null_ ));
+DATA(insert OID = 3853 ( lower_inf PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 16 "3831" _null_ _null_ _null_ _null_ range_lower_inf _null_ _null_ _null_ ));
DESCR("is the range's lower bound infinite?");
-DATA(insert OID = 3854 ( upper_inf PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 16 "3831" _null_ _null_ _null_ _null_ range_upper_inf _null_ _null_ _null_ ));
+DATA(insert OID = 3854 ( upper_inf PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 16 "3831" _null_ _null_ _null_ _null_ range_upper_inf _null_ _null_ _null_ ));
DESCR("is the range's upper bound infinite?");
DATA(insert OID = 3855 ( range_eq PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ range_eq _null_ _null_ _null_ ));
DESCR("implementation of = operator");
@@ -4504,19 +4508,19 @@ DATA(insert OID = 3868 ( range_intersect PGNSP PGUID 12 1 0 0 0 f f f f t f i 2
DESCR("implementation of * operator");
DATA(insert OID = 3869 ( range_minus PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 3831 "3831 3831" _null_ _null_ _null_ _null_ range_minus _null_ _null_ _null_ ));
DESCR("implementation of - operator");
-DATA(insert OID = 3870 ( range_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 23 "3831 3831" _null_ _null_ _null_ _null_ range_cmp _null_ _null_ _null_ ));
+DATA(insert OID = 3870 ( range_cmp PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 23 "3831 3831" _null_ _null_ _null_ _null_ range_cmp _null_ _null_ _null_ ));
DESCR("less-equal-greater");
DATA(insert OID = 3871 ( range_lt PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ range_lt _null_ _null_ _null_ ));
DATA(insert OID = 3872 ( range_le PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ range_le _null_ _null_ _null_ ));
DATA(insert OID = 3873 ( range_ge PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ range_ge _null_ _null_ _null_ ));
DATA(insert OID = 3874 ( range_gt PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "3831 3831" _null_ _null_ _null_ _null_ range_gt _null_ _null_ _null_ ));
-DATA(insert OID = 3875 ( range_gist_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 5 0 16 "2281 3831 23 26 2281" _null_ _null_ _null_ _null_ range_gist_consistent _null_ _null_ _null_ ));
+DATA(insert OID = 3875 ( range_gist_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 5 0 16 "2281 3831 23 26 2281" _null_ _null_ _null_ _null_ range_gist_consistent _null_ _null_ _null_ ));
DESCR("GiST support");
DATA(insert OID = 3876 ( range_gist_union PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ range_gist_union _null_ _null_ _null_ ));
DESCR("GiST support");
DATA(insert OID = 3877 ( range_gist_compress PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2281 "2281" _null_ _null_ _null_ _null_ range_gist_compress _null_ _null_ _null_ ));
DESCR("GiST support");
-DATA(insert OID = 3878 ( range_gist_decompress PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2281 "2281" _null_ _null_ _null_ _null_ range_gist_decompress _null_ _null_ _null_ ));
+DATA(insert OID = 3878 ( range_gist_decompress PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2281 "2281" _null_ _null_ _null_ _null_ range_gist_decompress _null_ _null_ _null_ ));
DESCR("GiST support");
DATA(insert OID = 3879 ( range_gist_penalty PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 2281 "2281 2281 2281" _null_ _null_ _null_ _null_ range_gist_penalty _null_ _null_ _null_ ));
DESCR("GiST support");
@@ -4524,7 +4528,7 @@ DATA(insert OID = 3880 ( range_gist_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t
DESCR("GiST support");
DATA(insert OID = 3881 ( range_gist_same PGNSP PGUID 12 1 0 0 0 f f f f t f i 3 0 2281 "3831 3831 2281" _null_ _null_ _null_ _null_ range_gist_same _null_ _null_ _null_ ));
DESCR("GiST support");
-DATA(insert OID = 3902 ( hash_range PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "3831" _null_ _null_ _null_ _null_ hash_range _null_ _null_ _null_ ));
+DATA(insert OID = 3902 ( hash_range PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 23 "3831" _null_ _null_ _null_ _null_ hash_range _null_ _null_ _null_ ));
DESCR("hash a range");
DATA(insert OID = 3916 ( range_typanalyze PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 16 "2281" _null_ _null_ _null_ _null_ range_typanalyze _null_ _null_ _null_ ));
DESCR("range typanalyze");
@@ -4548,9 +4552,9 @@ DESCR("float8 difference of two timestamp values");
DATA(insert OID = 3930 ( tstzrange_subdiff PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 701 "1184 1184" _null_ _null_ _null_ _null_ tstzrange_subdiff _null_ _null_ _null_ ));
DESCR("float8 difference of two timestamp with time zone values");
-DATA(insert OID = 3840 ( int4range PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3904 "23 23" _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ ));
+DATA(insert OID = 3840 ( int4range PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3904 "23 23" _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ ));
DESCR("int4range constructor");
-DATA(insert OID = 3841 ( int4range PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 3904 "23 23 25" _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ ));
+DATA(insert OID = 3841 ( int4range PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 3904 "23 23 25" _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ ));
DESCR("int4range constructor");
DATA(insert OID = 3844 ( numrange PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3906 "1700 1700" _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ ));
DESCR("numrange constructor");
@@ -4560,17 +4564,17 @@ DATA(insert OID = 3933 ( tsrange PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3908
DESCR("tsrange constructor");
DATA(insert OID = 3934 ( tsrange PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 3908 "1114 1114 25" _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ ));
DESCR("tsrange constructor");
-DATA(insert OID = 3937 ( tstzrange PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3910 "1184 1184" _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ ));
+DATA(insert OID = 3937 ( tstzrange PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3910 "1184 1184" _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ ));
DESCR("tstzrange constructor");
-DATA(insert OID = 3938 ( tstzrange PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 3910 "1184 1184 25" _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ ));
+DATA(insert OID = 3938 ( tstzrange PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 3910 "1184 1184 25" _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ ));
DESCR("tstzrange constructor");
-DATA(insert OID = 3941 ( daterange PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3912 "1082 1082" _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ ));
+DATA(insert OID = 3941 ( daterange PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3912 "1082 1082" _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ ));
DESCR("daterange constructor");
-DATA(insert OID = 3942 ( daterange PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 3912 "1082 1082 25" _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ ));
+DATA(insert OID = 3942 ( daterange PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 3912 "1082 1082 25" _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ ));
DESCR("daterange constructor");
-DATA(insert OID = 3945 ( int8range PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3926 "20 20" _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ ));
+DATA(insert OID = 3945 ( int8range PGNSP PGUID 12 1 0 0 0 f f f f f f i 2 0 3926 "20 20" _null_ _null_ _null_ _null_ range_constructor2 _null_ _null_ _null_ ));
DESCR("int8range constructor");
-DATA(insert OID = 3946 ( int8range PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 3926 "20 20 25" _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ ));
+DATA(insert OID = 3946 ( int8range PGNSP PGUID 12 1 0 0 0 f f f f f f i 3 0 3926 "20 20 25" _null_ _null_ _null_ _null_ range_constructor3 _null_ _null_ _null_ ));
DESCR("int8range constructor");
/* spgist support functions */
@@ -4596,7 +4600,7 @@ DATA(insert OID = 4010 ( spgbuildempty PGNSP PGUID 12 1 0 0 0 f f f f t f v
DESCR("spgist(internal)");
DATA(insert OID = 4011 ( spgbulkdelete PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 2281 "2281 2281 2281 2281" _null_ _null_ _null_ _null_ spgbulkdelete _null_ _null_ _null_ ));
DESCR("spgist(internal)");
-DATA(insert OID = 4012 ( spgvacuumcleanup PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ spgvacuumcleanup _null_ _null_ _null_ ));
+DATA(insert OID = 4012 ( spgvacuumcleanup PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2281 "2281 2281" _null_ _null_ _null_ _null_ spgvacuumcleanup _null_ _null_ _null_ ));
DESCR("spgist(internal)");
DATA(insert OID = 4032 ( spgcanreturn PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 16 "2281" _null_ _null_ _null_ _null_ spgcanreturn _null_ _null_ _null_ ));
DESCR("spgist(internal)");
@@ -4612,14 +4616,14 @@ DATA(insert OID = 4019 ( spg_quad_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i 2
DESCR("SP-GiST support for quad tree over point");
DATA(insert OID = 4020 ( spg_quad_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_quad_picksplit _null_ _null_ _null_ ));
DESCR("SP-GiST support for quad tree over point");
-DATA(insert OID = 4021 ( spg_quad_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_quad_inner_consistent _null_ _null_ _null_ ));
+DATA(insert OID = 4021 ( spg_quad_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_quad_inner_consistent _null_ _null_ _null_ ));
DESCR("SP-GiST support for quad tree over point");
DATA(insert OID = 4022 ( spg_quad_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "2281 2281" _null_ _null_ _null_ _null_ spg_quad_leaf_consistent _null_ _null_ _null_ ));
DESCR("SP-GiST support for quad tree and k-d tree over point");
-DATA(insert OID = 4023 ( spg_kd_config PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_kd_config _null_ _null_ _null_ ));
+DATA(insert OID = 4023 ( spg_kd_config PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_kd_config _null_ _null_ _null_ ));
DESCR("SP-GiST support for k-d tree over point");
-DATA(insert OID = 4024 ( spg_kd_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_kd_choose _null_ _null_ _null_ ));
+DATA(insert OID = 4024 ( spg_kd_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_kd_choose _null_ _null_ _null_ ));
DESCR("SP-GiST support for k-d tree over point");
DATA(insert OID = 4025 ( spg_kd_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_kd_picksplit _null_ _null_ _null_ ));
DESCR("SP-GiST support for k-d tree over point");
@@ -4632,7 +4636,7 @@ DATA(insert OID = 4028 ( spg_text_choose PGNSP PGUID 12 1 0 0 0 f f f f t f i 2
DESCR("SP-GiST support for suffix tree over text");
DATA(insert OID = 4029 ( spg_text_picksplit PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_text_picksplit _null_ _null_ _null_ ));
DESCR("SP-GiST support for suffix tree over text");
-DATA(insert OID = 4030 ( spg_text_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_text_inner_consistent _null_ _null_ _null_ ));
+DATA(insert OID = 4030 ( spg_text_inner_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 2278 "2281 2281" _null_ _null_ _null_ _null_ spg_text_inner_consistent _null_ _null_ _null_ ));
DESCR("SP-GiST support for suffix tree over text");
DATA(insert OID = 4031 ( spg_text_leaf_consistent PGNSP PGUID 12 1 0 0 0 f f f f t f i 2 0 16 "2281 2281" _null_ _null_ _null_ _null_ spg_text_leaf_consistent _null_ _null_ _null_ ));
DESCR("SP-GiST support for suffix tree over text");
@@ -4662,4 +4666,3 @@ DESCR("SP-GiST support for suffix tree over text");
#define PROARGMODE_TABLE 't'
#endif /* PG_PROC_H */
-
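
Several hunks above insert a blank line after the last field that C code may touch directly. In pg_proc.h that field is proargtypes: it sits before CATALOG_VARLEN, so, per the reflowed comment in the hunk, it can be read through the struct even though variable-length fields follow it in the tuple. A hedged, backend-only sketch of that direct access (demo_first_argtype is a hypothetical helper):

#include "postgres.h"
#include "access/htup.h"
#include "catalog/pg_proc.h"

static Oid
demo_first_argtype(HeapTuple proctup)
{
    Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(proctup);

    /* oidvector has a fixed layout, so direct struct access is allowed */
    if (procform->pronargs > 0)
        return procform->proargtypes.values[0];
    return InvalidOid;
}
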
diff --git a/src/include/catalog/pg_range.h b/src/include/catalog/pg_range.h
index d66e83a017..9b2be92fc9 100644
--- a/src/include/catalog/pg_range.h
+++ b/src/include/catalog/pg_range.h
@@ -29,7 +29,7 @@
* typedef struct FormData_pg_range
* ----------------
*/
-#define RangeRelationId 3541
+#define RangeRelationId 3541
CATALOG(pg_range,3541) BKI_WITHOUT_OIDS
{
@@ -65,12 +65,12 @@ typedef FormData_pg_range *Form_pg_range;
* initial contents of pg_range
* ----------------
*/
-DATA(insert ( 3904 23 0 1978 int4range_canonical int4range_subdiff));
+DATA(insert ( 3904 23 0 1978 int4range_canonical int4range_subdiff));
DATA(insert ( 3906 1700 0 3125 - numrange_subdiff));
DATA(insert ( 3908 1114 0 3128 - tsrange_subdiff));
DATA(insert ( 3910 1184 0 3127 - tstzrange_subdiff));
DATA(insert ( 3912 1082 0 3122 daterange_canonical daterange_subdiff));
-DATA(insert ( 3926 20 0 3124 int8range_canonical int8range_subdiff));
+DATA(insert ( 3926 20 0 3124 int8range_canonical int8range_subdiff));
/*
diff --git a/src/include/catalog/pg_rewrite.h b/src/include/catalog/pg_rewrite.h
index e04ba81b3c..5171522da0 100644
--- a/src/include/catalog/pg_rewrite.h
+++ b/src/include/catalog/pg_rewrite.h
@@ -39,6 +39,7 @@ CATALOG(pg_rewrite,2618)
char ev_type;
char ev_enabled;
bool is_instead;
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
pg_node_tree ev_qual;
pg_node_tree ev_action;
diff --git a/src/include/catalog/pg_seclabel.h b/src/include/catalog/pg_seclabel.h
index 101ec3c111..917efcf8ca 100644
--- a/src/include/catalog/pg_seclabel.h
+++ b/src/include/catalog/pg_seclabel.h
@@ -25,6 +25,7 @@ CATALOG(pg_seclabel,3596) BKI_WITHOUT_OIDS
Oid objoid; /* OID of the object itself */
Oid classoid; /* OID of table containing the object */
int4 objsubid; /* column number, or 0 if not used */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text provider; /* name of label provider */
text label; /* security label of the object */
diff --git a/src/include/catalog/pg_shdescription.h b/src/include/catalog/pg_shdescription.h
index 377f4337d9..acd529b875 100644
--- a/src/include/catalog/pg_shdescription.h
+++ b/src/include/catalog/pg_shdescription.h
@@ -42,6 +42,7 @@ CATALOG(pg_shdescription,2396) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
{
Oid objoid; /* OID of object itself */
Oid classoid; /* OID of table containing object */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text description; /* description of object */
#endif
diff --git a/src/include/catalog/pg_shseclabel.h b/src/include/catalog/pg_shseclabel.h
index d7c49e78bb..3d7a013e8c 100644
--- a/src/include/catalog/pg_shseclabel.h
+++ b/src/include/catalog/pg_shseclabel.h
@@ -1,7 +1,7 @@
/* -------------------------------------------------------------------------
*
* pg_shseclabel.h
- * definition of the system "security label" relation (pg_shseclabel)
+ * definition of the system "security label" relation (pg_shseclabel)
*
* Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -22,11 +22,12 @@
CATALOG(pg_shseclabel,3592) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
{
- Oid objoid; /* OID of the shared object itself */
- Oid classoid; /* OID of table containing the shared object */
-#ifdef CATALOG_VARLEN /* variable-length fields start here */
- text provider; /* name of label provider */
- text label; /* security label of the object */
+ Oid objoid; /* OID of the shared object itself */
+ Oid classoid; /* OID of table containing the shared object */
+
+#ifdef CATALOG_VARLEN /* variable-length fields start here */
+ text provider; /* name of label provider */
+ text label; /* security label of the object */
#endif
} FormData_pg_shseclabel;
@@ -40,4 +41,4 @@ CATALOG(pg_shseclabel,3592) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
#define Anum_pg_shseclabel_provider 3
#define Anum_pg_shseclabel_label 4
-#endif /* PG_SHSECLABEL_H */
+#endif /* PG_SHSECLABEL_H */
diff --git a/src/include/catalog/pg_statistic.h b/src/include/catalog/pg_statistic.h
index 383cc01415..3ad0c28110 100644
--- a/src/include/catalog/pg_statistic.h
+++ b/src/include/catalog/pg_statistic.h
@@ -105,7 +105,7 @@ CATALOG(pg_statistic,2619) BKI_WITHOUT_OIDS
/*
* Values in these arrays are values of the column's data type, or of some
- * related type such as an array element type. We presently have to cheat
+ * related type such as an array element type. We presently have to cheat
* quite a bit to allow polymorphic arrays of this kind, but perhaps
* someday it'll be a less bogus facility.
*/
@@ -258,7 +258,7 @@ typedef FormData_pg_statistic *Form_pg_statistic;
/*
* A "distinct elements count histogram" slot describes the distribution of
* the number of distinct element values present in each row of an array-type
- * column. Only non-null rows are considered, and only non-null elements.
+ * column. Only non-null rows are considered, and only non-null elements.
* staop contains the equality operator appropriate to the element type.
* stavalues is not used and should be NULL. The last member of stanumbers is
* the average count of distinct element values over all non-null rows. The
@@ -266,6 +266,6 @@ typedef FormData_pg_statistic *Form_pg_statistic;
* distinct-elements counts into M-1 bins of approximately equal population.
* The first of these is the minimum observed count, and the last the maximum.
*/
-#define STATISTIC_KIND_DECHIST 5
+#define STATISTIC_KIND_DECHIST 5
#endif /* PG_STATISTIC_H */
diff --git a/src/include/catalog/pg_tablespace.h b/src/include/catalog/pg_tablespace.h
index 0650a5fbae..777a8a1778 100644
--- a/src/include/catalog/pg_tablespace.h
+++ b/src/include/catalog/pg_tablespace.h
@@ -32,6 +32,7 @@ CATALOG(pg_tablespace,1213) BKI_SHARED_RELATION
{
NameData spcname; /* tablespace name */
Oid spcowner; /* owner of tablespace */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
aclitem spcacl[1]; /* access permissions */
text spcoptions[1]; /* per-tablespace options */
diff --git a/src/include/catalog/pg_trigger.h b/src/include/catalog/pg_trigger.h
index 0ee5b8af8d..71afab58e5 100644
--- a/src/include/catalog/pg_trigger.h
+++ b/src/include/catalog/pg_trigger.h
@@ -50,9 +50,12 @@ CATALOG(pg_trigger,2620)
bool tginitdeferred; /* constraint trigger is deferred initially */
int2 tgnargs; /* # of extra arguments in tgargs */
- /* Variable-length fields start here, but we allow direct access to tgattr.
- * Note: tgattr and tgargs must not be null. */
+ /*
+ * Variable-length fields start here, but we allow direct access to
+ * tgattr. Note: tgattr and tgargs must not be null.
+ */
int2vector tgattr; /* column numbers, if trigger is on columns */
+
#ifdef CATALOG_VARLEN
bytea tgargs; /* first\000second\000tgnargs\000 */
pg_node_tree tgqual; /* WHEN expression, or NULL if none */
diff --git a/src/include/catalog/pg_ts_dict.h b/src/include/catalog/pg_ts_dict.h
index 31fcdd8896..677a870b77 100644
--- a/src/include/catalog/pg_ts_dict.h
+++ b/src/include/catalog/pg_ts_dict.h
@@ -36,6 +36,7 @@ CATALOG(pg_ts_dict,3600)
Oid dictnamespace; /* name space */
Oid dictowner; /* owner */
Oid dicttemplate; /* dictionary's template */
+
#ifdef CATALOG_VARLEN /* variable-length fields start here */
text dictinitoption; /* options passed to dict_init() */
#endif
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index 507819da80..25c664b7c9 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -61,9 +61,8 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
/*
* typtype is 'b' for a base type, 'c' for a composite type (e.g., a
- * table's rowtype), 'd' for a domain, 'e' for an enum type,
- * 'p' for a pseudo-type, or 'r' for a range type.
- * (Use the TYPTYPE macros below.)
+ * table's rowtype), 'd' for a domain, 'e' for an enum type, 'p' for a
+ * pseudo-type, or 'r' for a range type. (Use the TYPTYPE macros below.)
*
* If typtype is 'c', typrelid is the OID of the class' entry in pg_class.
*/
@@ -201,6 +200,7 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
Oid typcollation;
#ifdef CATALOG_VARLEN /* variable-length fields start here */
+
/*
* If typdefaultbin is not NULL, it is the nodeToString representation of
* a default expression for the type. Currently this is only used for
diff --git a/src/include/commands/createas.h b/src/include/commands/createas.h
index ed65ccd8ee..946c7e2cf1 100644
--- a/src/include/commands/createas.h
+++ b/src/include/commands/createas.h
@@ -20,7 +20,7 @@
extern void ExecCreateTableAs(CreateTableAsStmt *stmt, const char *queryString,
- ParamListInfo params, char *completionTag);
+ ParamListInfo params, char *completionTag);
extern int GetIntoRelEFlags(IntoClause *intoClause);
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index 163b2ea002..8f3d2c358d 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -149,7 +149,7 @@ extern List *deserialize_deflist(Datum txt);
extern void RenameForeignServer(const char *oldname, const char *newname);
extern void RenameForeignDataWrapper(const char *oldname, const char *newname);
extern void AlterForeignServerOwner(const char *name, Oid newOwnerId);
-extern void AlterForeignServerOwner_oid(Oid , Oid newOwnerId);
+extern void AlterForeignServerOwner_oid(Oid, Oid newOwnerId);
extern void AlterForeignDataWrapperOwner(const char *name, Oid newOwnerId);
extern void AlterForeignDataWrapperOwner_oid(Oid fwdId, Oid newOwnerId);
extern void CreateForeignDataWrapper(CreateFdwStmt *stmt);
diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h
index e4e98bfb04..cd1d56d6e8 100644
--- a/src/include/commands/explain.h
+++ b/src/include/commands/explain.h
@@ -42,7 +42,7 @@ typedef struct ExplainState
/* Hook for plugins to get control in ExplainOneQuery() */
typedef void (*ExplainOneQuery_hook_type) (Query *query,
- IntoClause *into,
+ IntoClause *into,
ExplainState *es,
const char *queryString,
ParamListInfo params);
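
The hook typedef whose parameter indentation changed above is installed by assigning to ExplainOneQuery_hook. Below is a hedged sketch of the usual save-and-chain pattern for a loadable module; the demo_ names are hypothetical, and a plugin that does not chain to a previous hook must plan and emit the EXPLAIN output itself.

#include "postgres.h"
#include "fmgr.h"
#include "commands/explain.h"

PG_MODULE_MAGIC;

static ExplainOneQuery_hook_type prev_ExplainOneQuery_hook = NULL;

static void
demo_ExplainOneQuery(Query *query, IntoClause *into, ExplainState *es,
                     const char *queryString, ParamListInfo params)
{
    /* Chain to any previously installed hook; a real plugin taking over
     * would produce the plan and the EXPLAIN text here instead. */
    if (prev_ExplainOneQuery_hook)
        (*prev_ExplainOneQuery_hook) (query, into, es, queryString, params);
}

void
_PG_init(void)
{
    prev_ExplainOneQuery_hook = ExplainOneQuery_hook;
    ExplainOneQuery_hook = demo_ExplainOneQuery;
}
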
diff --git a/src/include/commands/tablecmds.h b/src/include/commands/tablecmds.h
index 47b0cddc9b..9ceb086f68 100644
--- a/src/include/commands/tablecmds.h
+++ b/src/include/commands/tablecmds.h
@@ -75,6 +75,6 @@ extern void AtEOSubXact_on_commit_actions(bool isCommit,
SubTransactionId parentSubid);
extern void RangeVarCallbackOwnsTable(const RangeVar *relation,
- Oid relId, Oid oldRelId, void *arg);
+ Oid relId, Oid oldRelId, void *arg);
#endif /* TABLECMDS_H */
diff --git a/src/include/commands/typecmds.h b/src/include/commands/typecmds.h
index bb4a7c32bc..b72cfc4fd9 100644
--- a/src/include/commands/typecmds.h
+++ b/src/include/commands/typecmds.h
@@ -34,7 +34,7 @@ extern void AlterDomainNotNull(List *names, bool notNull);
extern void AlterDomainAddConstraint(List *names, Node *constr);
extern void AlterDomainValidateConstraint(List *names, char *constrName);
extern void AlterDomainDropConstraint(List *names, const char *constrName,
- DropBehavior behavior, bool missing_ok);
+ DropBehavior behavior, bool missing_ok);
extern void checkDomainOwner(HeapTuple tup);
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 3c95dec3bc..7a50d2fcb3 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -62,9 +62,9 @@ typedef Datum (*AnalyzeAttrFetchFunc) (VacAttrStatsP stats, int rownum,
bool *isNull);
typedef void (*AnalyzeAttrComputeStatsFunc) (VacAttrStatsP stats,
- AnalyzeAttrFetchFunc fetchfunc,
- int samplerows,
- double totalrows);
+ AnalyzeAttrFetchFunc fetchfunc,
+ int samplerows,
+ double totalrows);
typedef struct VacAttrStats
{
diff --git a/src/include/datatype/timestamp.h b/src/include/datatype/timestamp.h
index 0583d45fad..706b4480b3 100644
--- a/src/include/datatype/timestamp.h
+++ b/src/include/datatype/timestamp.h
@@ -109,10 +109,10 @@ typedef struct
* We allow numeric timezone offsets up to 15:59:59 either way from Greenwich.
* Currently, the record holders for wackiest offsets in actual use are zones
* Asia/Manila, at -15:56:00 until 1844, and America/Metlakatla, at +15:13:42
- * until 1867. If we were to reject such values we would fail to dump and
+ * until 1867. If we were to reject such values we would fail to dump and
* restore old timestamptz values with these zone settings.
*/
-#define MAX_TZDISP_HOUR 15 /* maximum allowed hour part */
+#define MAX_TZDISP_HOUR 15 /* maximum allowed hour part */
#define TZDISP_LIMIT ((MAX_TZDISP_HOUR + 1) * SECS_PER_HOUR)
/*
@@ -121,7 +121,7 @@ typedef struct
#ifdef HAVE_INT64_TIMESTAMP
#define DT_NOBEGIN (-INT64CONST(0x7fffffffffffffff) - 1)
#define DT_NOEND (INT64CONST(0x7fffffffffffffff))
-#else /* !HAVE_INT64_TIMESTAMP */
+#else /* !HAVE_INT64_TIMESTAMP */
#ifdef HUGE_VAL
#define DT_NOBEGIN (-HUGE_VAL)
#define DT_NOEND (HUGE_VAL)
@@ -164,7 +164,7 @@ typedef struct
|| ((m) == JULIAN_MINMONTH && (d) >= JULIAN_MINDAY)))) \
&& (y) < JULIAN_MAXYEAR)
-#define JULIAN_MAX (2147483494) /* == date2j(JULIAN_MAXYEAR, 1, 1) */
+#define JULIAN_MAX (2147483494) /* == date2j(JULIAN_MAXYEAR, 1, 1) */
/* Julian-date equivalents of Day 0 in Unix and Postgres reckoning */
#define UNIX_EPOCH_JDATE 2440588 /* == date2j(1970, 1, 1) */
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index f5503a5663..075bbe8b57 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -51,7 +51,7 @@
* is responsible for there being a trigger context for them to be queued in.
*
* WITH/WITHOUT_OIDS tell the executor to emit tuples with or without space
- * for OIDs, respectively. These are currently used only for CREATE TABLE AS.
+ * for OIDs, respectively. These are currently used only for CREATE TABLE AS.
* If neither is set, the plan may or may not produce tuples including OIDs.
*/
#define EXEC_FLAG_EXPLAIN_ONLY 0x0001 /* EXPLAIN, no ANALYZE */
diff --git a/src/include/executor/instrument.h b/src/include/executor/instrument.h
index fe64369527..e6dd03c2d7 100644
--- a/src/include/executor/instrument.h
+++ b/src/include/executor/instrument.h
@@ -18,18 +18,18 @@
typedef struct BufferUsage
{
- long shared_blks_hit; /* # of shared buffer hits */
+ long shared_blks_hit; /* # of shared buffer hits */
long shared_blks_read; /* # of shared disk blocks read */
long shared_blks_dirtied; /* # of shared blocks dirtied */
long shared_blks_written; /* # of shared disk blocks written */
- long local_blks_hit; /* # of local buffer hits */
- long local_blks_read; /* # of local disk blocks read */
+ long local_blks_hit; /* # of local buffer hits */
+ long local_blks_read; /* # of local disk blocks read */
long local_blks_dirtied; /* # of shared blocks dirtied */
long local_blks_written; /* # of local disk blocks written */
- long temp_blks_read; /* # of temp blocks read */
+ long temp_blks_read; /* # of temp blocks read */
long temp_blks_written; /* # of temp blocks written */
- instr_time blk_read_time; /* time spent reading */
- instr_time blk_write_time; /* time spent writing */
+ instr_time blk_read_time; /* time spent reading */
+ instr_time blk_write_time; /* time spent writing */
} BufferUsage;
/* Flag bits included in InstrAlloc's instrument_options bitmask */
@@ -44,7 +44,7 @@ typedef enum InstrumentOption
typedef struct Instrumentation
{
/* Parameters set at node creation: */
- bool need_timer; /* TRUE if we need timer data */
+ bool need_timer; /* TRUE if we need timer data */
bool need_bufusage; /* TRUE if we need buffer usage data */
/* Info about current plan cycle: */
bool running; /* TRUE if we've completed first tuple */
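The BufferUsage counters reindented above are meant to be consumed by snapshot-and-diff instrumentation. A minimal sketch of that pattern (the editor's illustration, not part of this commit), assuming the pgBufferUsage global that instrument.h also exports but which is not shown in this hunk:

#include "postgres.h"
#include "executor/instrument.h"

/*
 * Measure buffer I/O across a section of code by diffing snapshots of
 * pgBufferUsage, in the style of pg_stat_statements.
 */
static void
demo_bufusage(void (*work) (void))
{
	BufferUsage before = pgBufferUsage;

	work();

	elog(LOG, "shared hit %ld read %ld, temp read %ld written %ld",
		 pgBufferUsage.shared_blks_hit - before.shared_blks_hit,
		 pgBufferUsage.shared_blks_read - before.shared_blks_read,
		 pgBufferUsage.temp_blks_read - before.temp_blks_read,
		 pgBufferUsage.temp_blks_written - before.temp_blks_written);
}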
diff --git a/src/include/executor/spi_priv.h b/src/include/executor/spi_priv.h
index 900cebb000..4fbb548af4 100644
--- a/src/include/executor/spi_priv.h
+++ b/src/include/executor/spi_priv.h
@@ -48,7 +48,7 @@ typedef struct
* adequate locks to prevent other backends from messing with the tables.
*
* For a saved plan, the plancxt is made a child of CacheMemoryContext
- * since it should persist until explicitly destroyed. Likewise, the
+ * since it should persist until explicitly destroyed. Likewise, the
* plancache entries will be under CacheMemoryContext since we tell
* plancache.c to save them. We rely on plancache.c to keep the cache
* entries up-to-date as needed in the face of invalidation events.
diff --git a/src/include/foreign/fdwapi.h b/src/include/foreign/fdwapi.h
index 0a09c94932..721cd25436 100644
--- a/src/include/foreign/fdwapi.h
+++ b/src/include/foreign/fdwapi.h
@@ -24,19 +24,19 @@ struct ExplainState;
*/
typedef void (*GetForeignRelSize_function) (PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid);
+ RelOptInfo *baserel,
+ Oid foreigntableid);
typedef void (*GetForeignPaths_function) (PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid);
+ RelOptInfo *baserel,
+ Oid foreigntableid);
typedef ForeignScan *(*GetForeignPlan_function) (PlannerInfo *root,
- RelOptInfo *baserel,
- Oid foreigntableid,
- ForeignPath *best_path,
- List *tlist,
- List *scan_clauses);
+ RelOptInfo *baserel,
+ Oid foreigntableid,
+ ForeignPath *best_path,
+ List *tlist,
+ List *scan_clauses);
typedef void (*ExplainForeignScan_function) (ForeignScanState *node,
struct ExplainState *es);
@@ -51,13 +51,13 @@ typedef void (*ReScanForeignScan_function) (ForeignScanState *node);
typedef void (*EndForeignScan_function) (ForeignScanState *node);
typedef int (*AcquireSampleRowsFunc) (Relation relation, int elevel,
- HeapTuple *rows, int targrows,
- double *totalrows,
- double *totaldeadrows);
+ HeapTuple *rows, int targrows,
+ double *totalrows,
+ double *totaldeadrows);
typedef bool (*AnalyzeForeignTable_function) (Relation relation,
- AcquireSampleRowsFunc *func,
- BlockNumber *totalpages);
+ AcquireSampleRowsFunc *func,
+ BlockNumber *totalpages);
/*
* FdwRoutine is the struct returned by a foreign-data wrapper's handler
@@ -86,8 +86,8 @@ typedef struct FdwRoutine
EndForeignScan_function EndForeignScan;
/*
- * These functions are optional. Set the pointer to NULL for any
- * that are not provided.
+ * These functions are optional. Set the pointer to NULL for any that are
+ * not provided.
*/
AnalyzeForeignTable_function AnalyzeForeignTable;
} FdwRoutine;
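The reflowed comment above spells out the FdwRoutine contract: required callbacks must be assigned, optional ones may be left NULL. A hedged sketch of a handler honoring that contract; all demo* names are hypothetical, and the executor callbacks not visible in this hunk are elided:

#include "postgres.h"
#include "fmgr.h"
#include "foreign/fdwapi.h"

PG_MODULE_MAGIC;

static void
demoGetForeignRelSize(PlannerInfo *root, RelOptInfo *baserel, Oid ftid)
{
	baserel->rows = 1000;		/* placeholder row estimate */
}

static void
demoGetForeignPaths(PlannerInfo *root, RelOptInfo *baserel, Oid ftid)
{
	/* a real FDW would add_path() a ForeignPath here */
}

static ForeignScan *
demoGetForeignPlan(PlannerInfo *root, RelOptInfo *baserel, Oid ftid,
				   ForeignPath *best_path, List *tlist, List *scan_clauses)
{
	return NULL;				/* a real FDW builds a ForeignScan here */
}

PG_FUNCTION_INFO_V1(demo_fdw_handler);

Datum
demo_fdw_handler(PG_FUNCTION_ARGS)
{
	FdwRoutine *routine = makeNode(FdwRoutine);

	routine->GetForeignRelSize = demoGetForeignRelSize;
	routine->GetForeignPaths = demoGetForeignPaths;
	routine->GetForeignPlan = demoGetForeignPlan;
	/* ... the scan-execution callbacks would be assigned here too ... */

	/* optional: leave NULL when not provided, per the comment above */
	routine->AnalyzeForeignTable = NULL;

	PG_RETURN_POINTER(routine);
}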
diff --git a/src/include/lib/stringinfo.h b/src/include/lib/stringinfo.h
index 8e3a7afb47..dbf9277abe 100644
--- a/src/include/lib/stringinfo.h
+++ b/src/include/lib/stringinfo.h
@@ -105,7 +105,8 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
* without modifying str. Typically the caller would enlarge str and retry
* on false return --- see appendStringInfo for standard usage pattern.
*/
-extern bool appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
+extern bool
+appendStringInfoVA(StringInfo str, const char *fmt, va_list args)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)));
/*------------------------
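The comment above documents appendStringInfoVA's enlarge-and-retry contract. For reference, a sketch of the standard usage pattern it points at, mirroring appendStringInfo()'s own loop:

#include "postgres.h"
#include "lib/stringinfo.h"

static void
demo_append(StringInfo str, const char *fmt,...)
{
	for (;;)
	{
		va_list		args;
		bool		success;

		/* Try to format the data. */
		va_start(args, fmt);
		success = appendStringInfoVA(str, fmt, args);
		va_end(args);

		if (success)
			break;

		/* Double the buffer size and try again. */
		enlargeStringInfo(str, str->maxlen);
	}
}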
diff --git a/src/include/libpq/hba.h b/src/include/libpq/hba.h
index c5a77c2a5a..f3b8be6a0c 100644
--- a/src/include/libpq/hba.h
+++ b/src/include/libpq/hba.h
@@ -11,7 +11,7 @@
#ifndef HBA_H
#define HBA_H
-#include "libpq/pqcomm.h" /* pgrminclude ignore */ /* needed for NetBSD */
+#include "libpq/pqcomm.h" /* pgrminclude ignore */ /* needed for NetBSD */
#include "nodes/pg_list.h"
diff --git a/src/include/libpq/ip.h b/src/include/libpq/ip.h
index a81234df45..0ea57461fc 100644
--- a/src/include/libpq/ip.h
+++ b/src/include/libpq/ip.h
@@ -15,8 +15,8 @@
#ifndef IP_H
#define IP_H
-#include "getaddrinfo.h" /* pgrminclude ignore */
-#include "libpq/pqcomm.h" /* pgrminclude ignore */
+#include "getaddrinfo.h" /* pgrminclude ignore */
+#include "libpq/pqcomm.h" /* pgrminclude ignore */
#ifdef HAVE_UNIX_SOCKETS
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index b48a03b4b2..6fe8c2303a 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -1100,7 +1100,7 @@ typedef struct MergeAppendState
PlanState **mergeplans; /* array of PlanStates for my inputs */
int ms_nplans;
int ms_nkeys;
- SortSupport ms_sortkeys; /* array of length ms_nkeys */
+ SortSupport ms_sortkeys; /* array of length ms_nkeys */
TupleTableSlot **ms_slots; /* array of length ms_nplans */
int *ms_heap; /* array of length ms_nplans */
int ms_heap_size; /* current active length of ms_heap[] */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 13b95e11aa..deff1a374c 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -706,7 +706,7 @@ typedef struct RangeTblEntry
* Fields valid for a subquery RTE (else NULL):
*/
Query *subquery; /* the sub-query */
- bool security_barrier; /* subquery from security_barrier view */
+ bool security_barrier; /* subquery from security_barrier view */
/*
* Fields valid for a join RTE (else NULL/zero):
@@ -1171,7 +1171,7 @@ typedef struct AlterTableStmt
RangeVar *relation; /* table to work on */
List *cmds; /* list of subcommands */
ObjectType relkind; /* type of object */
- bool missing_ok; /* skip error if table missing */
+ bool missing_ok; /* skip error if table missing */
} AlterTableStmt;
typedef enum AlterTableType
@@ -1193,14 +1193,14 @@ typedef enum AlterTableType
AT_AddConstraint, /* add constraint */
AT_AddConstraintRecurse, /* internal to commands/tablecmds.c */
AT_ValidateConstraint, /* validate constraint */
- AT_ValidateConstraintRecurse, /* internal to commands/tablecmds.c */
+ AT_ValidateConstraintRecurse, /* internal to commands/tablecmds.c */
AT_ProcessedConstraint, /* pre-processed add constraint (local in
* parser/parse_utilcmd.c) */
AT_AddIndexConstraint, /* add constraint using existing index */
AT_DropConstraint, /* drop constraint */
AT_DropConstraintRecurse, /* internal to commands/tablecmds.c */
AT_AlterColumnType, /* alter column type */
- AT_AlterColumnGenericOptions, /* alter column OPTIONS (...) */
+ AT_AlterColumnGenericOptions, /* alter column OPTIONS (...) */
AT_ChangeOwner, /* change owner */
AT_ClusterOn, /* CLUSTER ON */
AT_DropCluster, /* SET WITHOUT CLUSTER */
@@ -1477,7 +1477,7 @@ typedef struct CreateStmt
*
* If skip_validation is true then we skip checking that the existing rows
* in the table satisfy the constraint, and just install the catalog entries
- * for the constraint. A new FK constraint is marked as valid iff
+ * for the constraint. A new FK constraint is marked as valid iff
* initially_valid is true. (Usually skip_validation and initially_valid
* are inverses, but we can set both true if the table is known empty.)
*
@@ -1967,7 +1967,7 @@ typedef struct SecLabelStmt
#define CURSOR_OPT_HOLD 0x0010 /* WITH HOLD */
/* these planner-control flags do not correspond to any SQL grammar: */
#define CURSOR_OPT_FAST_PLAN 0x0020 /* prefer fast-start plan */
-#define CURSOR_OPT_GENERIC_PLAN 0x0040 /* force use of generic plan */
+#define CURSOR_OPT_GENERIC_PLAN 0x0040 /* force use of generic plan */
#define CURSOR_OPT_CUSTOM_PLAN 0x0080 /* force use of custom plan */
typedef struct DeclareCursorStmt
@@ -2122,7 +2122,7 @@ typedef struct RenameStmt
* trigger, etc) */
char *newname; /* the new name */
DropBehavior behavior; /* RESTRICT or CASCADE behavior */
- bool missing_ok; /* skip error if missing? */
+ bool missing_ok; /* skip error if missing? */
} RenameStmt;
/* ----------------------
@@ -2138,7 +2138,7 @@ typedef struct AlterObjectSchemaStmt
List *objarg; /* argument types, if applicable */
char *addname; /* additional name if needed */
char *newschema; /* the new schema */
- bool missing_ok; /* skip error if missing? */
+ bool missing_ok; /* skip error if missing? */
} AlterObjectSchemaStmt;
/* ----------------------
@@ -2413,7 +2413,7 @@ typedef struct CreateTableAsStmt
NodeTag type;
Node *query; /* the query (see comments above) */
IntoClause *into; /* destination table */
- bool is_select_into; /* it was written as SELECT INTO */
+ bool is_select_into; /* it was written as SELECT INTO */
} CreateTableAsStmt;
/* ----------------------
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 50831eebf8..cd4561dcf4 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -125,9 +125,9 @@ typedef struct Expr
* The code doesn't really need varnoold/varoattno, but they are very useful
* for debugging and interpreting completed plans, so we keep them around.
*/
-#define INNER_VAR 65000 /* reference to inner subplan */
-#define OUTER_VAR 65001 /* reference to outer subplan */
-#define INDEX_VAR 65002 /* reference to index column */
+#define INNER_VAR 65000 /* reference to inner subplan */
+#define OUTER_VAR 65001 /* reference to outer subplan */
+#define INDEX_VAR 65002 /* reference to index column */
#define IS_SPECIAL_VARNO(varno) ((varno) >= INNER_VAR)
@@ -847,13 +847,13 @@ typedef struct ArrayExpr
* the same as the number of columns logically present in the rowtype.
*
* colnames provides field names in cases where the names can't easily be
- * obtained otherwise. Names *must* be provided if row_typeid is RECORDOID.
+ * obtained otherwise. Names *must* be provided if row_typeid is RECORDOID.
* If row_typeid identifies a known composite type, colnames can be NIL to
* indicate the type's cataloged field names apply. Note that colnames can
* be non-NIL even for a composite type, and typically is when the RowExpr
* was created by expanding a whole-row Var. This is so that we can retain
* the column alias names of the RTE that the Var referenced (which would
- * otherwise be very difficult to extract from the parsetree). Like the
+ * otherwise be very difficult to extract from the parsetree). Like the
* args list, colnames is one-for-one with physical fields of the rowtype.
*/
typedef struct RowExpr
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index e1d5fc0319..cf0bbd9f15 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -147,8 +147,8 @@ typedef struct PlannerInfo
/*
* all_baserels is a Relids set of all base relids (but not "other"
- * relids) in the query; that is, the Relids identifier of the final
- * join we need to form.
+ * relids) in the query; that is, the Relids identifier of the final join
+ * we need to form.
*/
Relids all_baserels;
@@ -423,7 +423,7 @@ typedef struct RelOptInfo
struct Plan *subplan; /* if subquery */
PlannerInfo *subroot; /* if subquery */
/* use "struct FdwRoutine" to avoid including fdwapi.h here */
- struct FdwRoutine *fdwroutine; /* if foreign table */
+ struct FdwRoutine *fdwroutine; /* if foreign table */
void *fdw_private; /* if foreign table */
/* used by various scans and joins: */
@@ -575,7 +575,7 @@ typedef struct EquivalenceClass
*
* em_is_child signifies that this element was built by transposing a member
* for an appendrel parent relation to represent the corresponding expression
- * for an appendrel child. These members are used for determining the
+ * for an appendrel child. These members are used for determining the
* pathkeys of scans on the child relation and for explicitly sorting the
* child when necessary to build a MergeAppend path for the whole appendrel
* tree. An em_is_child member has no impact on the properties of the EC as a
@@ -668,7 +668,7 @@ typedef struct ParamPathInfo
* "param_info", if not NULL, links to a ParamPathInfo that identifies outer
* relation(s) that provide parameter values to each scan of this path.
* That means this path can only be joined to those rels by means of nestloop
- * joins with this path on the inside. Also note that a parameterized path
+ * joins with this path on the inside. Also note that a parameterized path
* is responsible for testing all "movable" joinclauses involving this rel
* and the specified outer rel(s).
*
@@ -832,7 +832,7 @@ typedef struct TidPath
/*
* ForeignPath represents a potential scan of a foreign table
*
- * fdw_private stores FDW private data about the scan. While fdw_private is
+ * fdw_private stores FDW private data about the scan. While fdw_private is
* not actually touched by the core code during normal operations, it's
* generally a good idea to use a representation that can be dumped by
* nodeToString(), so that you can examine the structure during debugging
@@ -1483,7 +1483,7 @@ typedef struct MinMaxAggInfo
* value in the Var will always be zero.
*
* A PlaceHolderVar: this works much like the Var case, except that the
- * entry is a PlaceHolderVar node with a contained expression. The PHV
+ * entry is a PlaceHolderVar node with a contained expression. The PHV
* will have phlevelsup = 0, and the contained expression is adjusted
* to match in level.
*
diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h
index c197e7c0c1..b2cdb3d62e 100644
--- a/src/include/optimizer/cost.h
+++ b/src/include/optimizer/cost.h
@@ -67,9 +67,9 @@ extern double clamp_row_est(double nrows);
extern double index_pages_fetched(double tuples_fetched, BlockNumber pages,
double index_pages, PlannerInfo *root);
extern void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
- ParamPathInfo *param_info);
+ ParamPathInfo *param_info);
extern void cost_index(IndexPath *path, PlannerInfo *root,
- double loop_count);
+ double loop_count);
extern void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
ParamPathInfo *param_info,
Path *bitmapqual, double loop_count);
@@ -79,7 +79,7 @@ extern void cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec);
extern void cost_tidscan(Path *path, PlannerInfo *root,
RelOptInfo *baserel, List *tidquals);
extern void cost_subqueryscan(Path *path, PlannerInfo *root,
- RelOptInfo *baserel, ParamPathInfo *param_info);
+ RelOptInfo *baserel, ParamPathInfo *param_info);
extern void cost_functionscan(Path *path, PlannerInfo *root,
RelOptInfo *baserel);
extern void cost_valuesscan(Path *path, PlannerInfo *root,
@@ -153,14 +153,14 @@ extern void compute_semi_anti_join_factors(PlannerInfo *root,
SemiAntiJoinFactors *semifactors);
extern void set_baserel_size_estimates(PlannerInfo *root, RelOptInfo *rel);
extern double get_parameterized_baserel_size(PlannerInfo *root,
- RelOptInfo *rel,
- List *param_clauses);
+ RelOptInfo *rel,
+ List *param_clauses);
extern double get_parameterized_joinrel_size(PlannerInfo *root,
- RelOptInfo *rel,
- double outer_rows,
- double inner_rows,
- SpecialJoinInfo *sjinfo,
- List *restrict_clauses);
+ RelOptInfo *rel,
+ double outer_rows,
+ double inner_rows,
+ SpecialJoinInfo *sjinfo,
+ List *restrict_clauses);
extern void set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
RelOptInfo *outer_rel,
RelOptInfo *inner_rel,
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 4b2483be60..385bae6eb8 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -31,7 +31,7 @@ extern bool add_path_precheck(RelOptInfo *parent_rel,
List *pathkeys, Relids required_outer);
extern Path *create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
- Relids required_outer);
+ Relids required_outer);
extern IndexPath *create_index_path(PlannerInfo *root,
IndexOptInfo *index,
List *indexclauses,
@@ -57,7 +57,7 @@ extern BitmapOrPath *create_bitmap_or_path(PlannerInfo *root,
extern TidPath *create_tidscan_path(PlannerInfo *root, RelOptInfo *rel,
List *tidquals);
extern AppendPath *create_append_path(RelOptInfo *rel, List *subpaths,
- Relids required_outer);
+ Relids required_outer);
extern MergeAppendPath *create_merge_append_path(PlannerInfo *root,
RelOptInfo *rel,
List *subpaths,
@@ -68,7 +68,7 @@ extern MaterialPath *create_material_path(RelOptInfo *rel, Path *subpath);
extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel,
Path *subpath, SpecialJoinInfo *sjinfo);
extern Path *create_subqueryscan_path(PlannerInfo *root, RelOptInfo *rel,
- List *pathkeys, Relids required_outer);
+ List *pathkeys, Relids required_outer);
extern Path *create_functionscan_path(PlannerInfo *root, RelOptInfo *rel);
extern Path *create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel);
extern Path *create_ctescan_path(PlannerInfo *root, RelOptInfo *rel);
@@ -139,18 +139,18 @@ extern RelOptInfo *build_join_rel(PlannerInfo *root,
SpecialJoinInfo *sjinfo,
List **restrictlist_ptr);
extern AppendRelInfo *find_childrel_appendrelinfo(PlannerInfo *root,
- RelOptInfo *rel);
+ RelOptInfo *rel);
extern ParamPathInfo *get_baserel_parampathinfo(PlannerInfo *root,
- RelOptInfo *baserel,
- Relids required_outer);
+ RelOptInfo *baserel,
+ Relids required_outer);
extern ParamPathInfo *get_joinrel_parampathinfo(PlannerInfo *root,
- RelOptInfo *joinrel,
- Path *outer_path,
- Path *inner_path,
- SpecialJoinInfo *sjinfo,
- Relids required_outer,
- List **restrict_clauses);
+ RelOptInfo *joinrel,
+ Path *outer_path,
+ Path *inner_path,
+ SpecialJoinInfo *sjinfo,
+ Relids required_outer,
+ List **restrict_clauses);
extern ParamPathInfo *get_appendrel_parampathinfo(RelOptInfo *appendrel,
- Relids required_outer);
+ Relids required_outer);
#endif /* PATHNODE_H */
diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h
index b3a2dc1d2d..b6fb8ee5ce 100644
--- a/src/include/optimizer/paths.h
+++ b/src/include/optimizer/paths.h
@@ -50,8 +50,8 @@ extern bool relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel,
List *restrictlist,
List *exprlist, List *oprlist);
extern bool eclass_member_matches_indexcol(EquivalenceClass *ec,
- EquivalenceMember *em,
- IndexOptInfo *index, int indexcol);
+ EquivalenceMember *em,
+ IndexOptInfo *index, int indexcol);
extern bool match_index_to_operand(Node *operand, int indexcol,
IndexOptInfo *index);
extern void expand_indexqual_conditions(IndexOptInfo *index,
diff --git a/src/include/optimizer/prep.h b/src/include/optimizer/prep.h
index fb03acc2b4..47a27b66e8 100644
--- a/src/include/optimizer/prep.h
+++ b/src/include/optimizer/prep.h
@@ -53,6 +53,6 @@ extern Plan *plan_set_operations(PlannerInfo *root, double tuple_fraction,
extern void expand_inherited_tables(PlannerInfo *root);
extern Node *adjust_appendrel_attrs(PlannerInfo *root, Node *node,
- AppendRelInfo *appinfo);
+ AppendRelInfo *appinfo);
#endif /* PREP_H */
diff --git a/src/include/optimizer/subselect.h b/src/include/optimizer/subselect.h
index 06b50624a1..90fe8fc9c0 100644
--- a/src/include/optimizer/subselect.h
+++ b/src/include/optimizer/subselect.h
@@ -31,7 +31,7 @@ extern Param *SS_make_initplan_from_plan(PlannerInfo *root, Plan *plan,
Oid resulttype, int32 resulttypmod, Oid resultcollation);
extern Param *assign_nestloop_param_var(PlannerInfo *root, Var *var);
extern Param *assign_nestloop_param_placeholdervar(PlannerInfo *root,
- PlaceHolderVar *phv);
+ PlaceHolderVar *phv);
extern int SS_assign_special_param(PlannerInfo *root);
#endif /* SUBSELECT_H */
diff --git a/src/include/parser/analyze.h b/src/include/parser/analyze.h
index fe7f80a5aa..5fbf520992 100644
--- a/src/include/parser/analyze.h
+++ b/src/include/parser/analyze.h
@@ -18,7 +18,7 @@
/* Hook for plugins to get control at end of parse analysis */
typedef void (*post_parse_analyze_hook_type) (ParseState *pstate,
- Query *query);
+ Query *query);
extern PGDLLIMPORT post_parse_analyze_hook_type post_parse_analyze_hook;
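The hook declaration rewrapped above is installed with the usual save-and-chain idiom (as pg_stat_statements does). A hedged sketch; the demo names are hypothetical:

#include "postgres.h"
#include "fmgr.h"
#include "parser/analyze.h"

PG_MODULE_MAGIC;

static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL;

static void
demo_post_parse_analyze(ParseState *pstate, Query *query)
{
	/* always call the previous hook, if any, so plugins can stack */
	if (prev_post_parse_analyze_hook)
		prev_post_parse_analyze_hook(pstate, query);

	elog(DEBUG1, "analyzed a query with commandType %d",
		 (int) query->commandType);
}

void
_PG_init(void)
{
	prev_post_parse_analyze_hook = post_parse_analyze_hook;
	post_parse_analyze_hook = demo_post_parse_analyze;
}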
diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index ac45ee6426..f29f9e64e3 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -182,7 +182,7 @@
* which should be safe in nearly all cases. You might want to override
* this if you are building 32-bit code for a known-recent PPC machine.
*/
-#ifdef HAVE_PPC_LWARX_MUTEX_HINT /* must have assembler support in any case */
+#ifdef HAVE_PPC_LWARX_MUTEX_HINT /* must have assembler support in any case */
#if defined(__ppc64__) || defined(__powerpc64__)
#define USE_PPC_LWARX_MUTEX_HINT
#endif
@@ -190,7 +190,7 @@
/*
* On PPC machines, decide whether to use LWSYNC instructions in place of
- * ISYNC and SYNC. This provides slightly better performance, but will
+ * ISYNC and SYNC. This provides slightly better performance, but will
* result in illegal-instruction failures on some pre-POWER4 machines.
* By default we use LWSYNC when building for 64-bit PPC, which should be
* safe in nearly all cases.
diff --git a/src/include/pg_trace.h b/src/include/pg_trace.h
index 049f1b29d8..45d495fc0d 100644
--- a/src/include/pg_trace.h
+++ b/src/include/pg_trace.h
@@ -12,6 +12,6 @@
#ifndef PG_TRACE_H
#define PG_TRACE_H
-#include "utils/probes.h" /* pgrminclude ignore */
+#include "utils/probes.h" /* pgrminclude ignore */
#endif /* PG_TRACE_H */
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index 3583bbe301..dd978d79c3 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -233,8 +233,8 @@ typedef struct PgStat_MsgTabstat
int m_nentries;
int m_xact_commit;
int m_xact_rollback;
- PgStat_Counter m_block_read_time; /* times in microseconds */
- PgStat_Counter m_block_write_time;
+ PgStat_Counter m_block_read_time; /* times in microseconds */
+ PgStat_Counter m_block_write_time;
PgStat_TableEntry m_entry[PGSTAT_NUM_TABENTRIES];
} PgStat_MsgTabstat;
@@ -429,7 +429,7 @@ typedef struct PgStat_FunctionEntry
{
Oid f_id;
PgStat_Counter f_numcalls;
- PgStat_Counter f_total_time; /* times in microseconds */
+ PgStat_Counter f_total_time; /* times in microseconds */
PgStat_Counter f_self_time;
} PgStat_FunctionEntry;
@@ -540,7 +540,7 @@ typedef struct PgStat_StatDBEntry
PgStat_Counter n_temp_files;
PgStat_Counter n_temp_bytes;
PgStat_Counter n_deadlocks;
- PgStat_Counter n_block_read_time; /* times in microseconds */
+ PgStat_Counter n_block_read_time; /* times in microseconds */
PgStat_Counter n_block_write_time;
TimestampTz stat_reset_timestamp;
@@ -600,7 +600,7 @@ typedef struct PgStat_StatFuncEntry
PgStat_Counter f_numcalls;
- PgStat_Counter f_total_time; /* times in microseconds */
+ PgStat_Counter f_total_time; /* times in microseconds */
PgStat_Counter f_self_time;
} PgStat_StatFuncEntry;
@@ -629,7 +629,8 @@ typedef struct PgStat_GlobalStats
* Backend states
* ----------
*/
-typedef enum BackendState {
+typedef enum BackendState
+{
STATE_UNDEFINED,
STATE_IDLE,
STATE_RUNNING,
@@ -674,7 +675,7 @@ typedef struct PgBackendStatus
TimestampTz st_proc_start_timestamp;
TimestampTz st_xact_start_timestamp;
TimestampTz st_activity_start_timestamp;
- TimestampTz st_state_start_timestamp;
+ TimestampTz st_state_start_timestamp;
/* Database OID, owning user's OID, connection client address */
Oid st_databaseid;
@@ -685,8 +686,8 @@ typedef struct PgBackendStatus
/* Is backend currently waiting on an lmgr lock? */
bool st_waiting;
- /* current state */
- BackendState st_state;
+ /* current state */
+ BackendState st_state;
/* application name; MUST be null-terminated */
char *st_appname;
diff --git a/src/include/port.h b/src/include/port.h
index 9f06f1a34d..25c4e9883d 100644
--- a/src/include/port.h
+++ b/src/include/port.h
@@ -244,7 +244,6 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern char *pgwin32_setlocale(int category, const char *locale);
#define setlocale(a,b) pgwin32_setlocale(a,b)
-
#endif /* WIN32 */
/* Portable prompt handling */
@@ -378,7 +377,7 @@ extern long pg_lrand48(void);
extern void pg_srand48(long seed);
#ifndef HAVE_FLS
-extern int fls(int mask);
+extern int fls(int mask);
#endif
#ifndef HAVE_FSEEKO
diff --git a/src/include/port/win32.h b/src/include/port/win32.h
index 287da98ecf..a00ec897d2 100644
--- a/src/include/port/win32.h
+++ b/src/include/port/win32.h
@@ -319,7 +319,7 @@ typedef int pid_t;
#define ECONNREFUSED WSAECONNREFUSED
#define EOPNOTSUPP WSAEOPNOTSUPP
#pragma warning(default:4005)
-#endif
+#endif
/*
* Extended locale functions with gratuitous underscore prefixes.
diff --git a/src/include/postgres.h b/src/include/postgres.h
index 94c0218cd1..63203bec9a 100644
--- a/src/include/postgres.h
+++ b/src/include/postgres.h
@@ -685,6 +685,6 @@ extern PGDLLIMPORT bool assert_enabled;
extern void ExceptionalCondition(const char *conditionName,
const char *errorType,
- const char *fileName, int lineNumber) __attribute__((noreturn));
+ const char *fileName, int lineNumber) __attribute__((noreturn));
#endif /* POSTGRES_H */
diff --git a/src/include/postmaster/postmaster.h b/src/include/postmaster/postmaster.h
index dded0e623e..683ce3c407 100644
--- a/src/include/postmaster/postmaster.h
+++ b/src/include/postmaster/postmaster.h
@@ -33,13 +33,15 @@ extern bool restart_after_crash;
#ifdef WIN32
extern HANDLE PostmasterHandle;
#else
-extern int postmaster_alive_fds[2];
+extern int postmaster_alive_fds[2];
+
/*
* Constants that represent which of postmaster_alive_fds is held by
* postmaster, and which is used in children to check for postmaster death.
*/
-#define POSTMASTER_FD_WATCH 0 /* used in children to check for postmaster death */
-#define POSTMASTER_FD_OWN 1 /* kept open by postmaster only */
+#define POSTMASTER_FD_WATCH 0 /* used in children to check for
+ * postmaster death */
+#define POSTMASTER_FD_OWN 1 /* kept open by postmaster only */
#endif
extern const char *progname;
diff --git a/src/include/regex/regguts.h b/src/include/regex/regguts.h
index 65b8d178da..e8415799ec 100644
--- a/src/include/regex/regguts.h
+++ b/src/include/regex/regguts.h
@@ -186,12 +186,12 @@ union tree
*
* If "sub" is not NOSUB then it is the number of the color's current
* subcolor, i.e. we are in process of dividing this color (character
- * equivalence class) into two colors. See src/backend/regex/README for
+ * equivalence class) into two colors. See src/backend/regex/README for
* discussion of subcolors.
*
* Currently-unused colors have the FREECOL bit set and are linked into a
* freelist using their "sub" fields, but only if their color numbers are
- * less than colormap.max. Any array entries beyond "max" are just garbage.
+ * less than colormap.max. Any array entries beyond "max" are just garbage.
*/
struct colordesc
{
diff --git a/src/include/replication/walprotocol.h b/src/include/replication/walprotocol.h
index 01cb208d6f..0305fb7e59 100644
--- a/src/include/replication/walprotocol.h
+++ b/src/include/replication/walprotocol.h
@@ -59,7 +59,7 @@ typedef struct
*
* Note that the data length is not specified here.
*/
-typedef WalSndrMessage PrimaryKeepaliveMessage;
+typedef WalSndrMessage PrimaryKeepaliveMessage;
/*
* Reply message from standby (message type 'r'). This is wrapped within
diff --git a/src/include/replication/walreceiver.h b/src/include/replication/walreceiver.h
index 68c864751e..d21ec94a45 100644
--- a/src/include/replication/walreceiver.h
+++ b/src/include/replication/walreceiver.h
@@ -118,7 +118,7 @@ extern void ShutdownWalRcv(void);
extern bool WalRcvInProgress(void);
extern void RequestXLogStreaming(XLogRecPtr recptr, const char *conninfo);
extern XLogRecPtr GetWalRcvWriteRecPtr(XLogRecPtr *latestChunkStart);
-extern int GetReplicationApplyDelay(void);
-extern int GetReplicationTransferLatency(void);
+extern int GetReplicationApplyDelay(void);
+extern int GetReplicationTransferLatency(void);
#endif /* _WALRECEIVER_H */
diff --git a/src/include/replication/walsender_private.h b/src/include/replication/walsender_private.h
index 183bf19f6d..66234cd8b5 100644
--- a/src/include/replication/walsender_private.h
+++ b/src/include/replication/walsender_private.h
@@ -35,7 +35,8 @@ typedef struct WalSnd
pid_t pid; /* this walsender's process id, or 0 */
WalSndState state; /* this walsender's state */
XLogRecPtr sentPtr; /* WAL has been sent up to this point */
- bool needreload; /* does currently-open file need to be reloaded? */
+ bool needreload; /* does currently-open file need to be
+ * reloaded? */
/*
* The xlog locations that have been written, flushed, and applied by
diff --git a/src/include/rewrite/rewriteSupport.h b/src/include/rewrite/rewriteSupport.h
index 653bc444ca..e61fc0582d 100644
--- a/src/include/rewrite/rewriteSupport.h
+++ b/src/include/rewrite/rewriteSupport.h
@@ -23,7 +23,7 @@ extern void SetRelationRuleStatus(Oid relationId, bool relHasRules,
bool relIsBecomingView);
extern Oid get_rewrite_oid(Oid relid, const char *rulename, bool missing_ok);
-extern Oid get_rewrite_oid_without_relid(const char *rulename,
- Oid *relid, bool missing_ok);
+extern Oid get_rewrite_oid_without_relid(const char *rulename,
+ Oid *relid, bool missing_ok);
#endif /* REWRITESUPPORT_H */
diff --git a/src/include/snowball/header.h b/src/include/snowball/header.h
index 0c280a076c..159aa4d8e8 100644
--- a/src/include/snowball/header.h
+++ b/src/include/snowball/header.h
@@ -33,7 +33,7 @@
#endif
/* Now we can include the original Snowball header.h */
-#include "snowball/libstemmer/header.h" /* pgrminclude ignore */
+#include "snowball/libstemmer/header.h" /* pgrminclude ignore */
/*
* Redefine standard memory allocation interface to pgsql's one.
diff --git a/src/include/storage/barrier.h b/src/include/storage/barrier.h
index 57f03ecf2a..5037870991 100644
--- a/src/include/storage/barrier.h
+++ b/src/include/storage/barrier.h
@@ -15,7 +15,7 @@
#include "storage/s_lock.h"
-extern slock_t dummy_spinlock;
+extern slock_t dummy_spinlock;
/*
* A compiler barrier need not (and preferably should not) emit any actual
@@ -30,10 +30,10 @@ extern slock_t dummy_spinlock;
* loads and stores are totally ordered (which is not the case on most
* architectures) this requires issuing some sort of memory fencing
* instruction.
- *
+ *
* A read barrier must act as a compiler barrier, and in addition must
* guarantee that any loads issued prior to the barrier are completed before
- * any loads issued after the barrier. Similarly, a write barrier acts
+ * any loads issued after the barrier. Similarly, a write barrier acts
* as a compiler barrier, and also orders stores. Read and write barriers
* are thus weaker than a full memory barrier, but stronger than a compiler
* barrier. In practice, on machines with strong memory ordering, read and
@@ -48,7 +48,6 @@ extern slock_t dummy_spinlock;
/*
* Fall through to the spinlock-based implementation.
*/
-
#elif defined(__INTEL_COMPILER)
/*
@@ -56,7 +55,6 @@ extern slock_t dummy_spinlock;
*/
#define pg_memory_barrier() _mm_mfence()
#define pg_compiler_barrier() __memory_barrier()
-
#elif defined(__GNUC__)
/* This works on any architecture, since it's only talking to GCC itself. */
@@ -75,7 +73,6 @@ extern slock_t dummy_spinlock;
__asm__ __volatile__ ("lock; addl $0,0(%%esp)" : : : "memory")
#define pg_read_barrier() pg_compiler_barrier()
#define pg_write_barrier() pg_compiler_barrier()
-
#elif defined(__x86_64__) /* 64 bit x86 */
/*
@@ -90,7 +87,6 @@ extern slock_t dummy_spinlock;
__asm__ __volatile__ ("lock; addl $0,0(%%rsp)" : : : "memory")
#define pg_read_barrier() pg_compiler_barrier()
#define pg_write_barrier() pg_compiler_barrier()
-
#elif defined(__ia64__) || defined(__ia64)
/*
@@ -98,7 +94,6 @@ extern slock_t dummy_spinlock;
* fence.
*/
#define pg_memory_barrier() __asm__ __volatile__ ("mf" : : : "memory")
-
#elif defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
/*
@@ -109,8 +104,7 @@ extern slock_t dummy_spinlock;
#define pg_memory_barrier() __asm__ __volatile__ ("sync" : : : "memory")
#define pg_read_barrier() __asm__ __volatile__ ("lwsync" : : : "memory")
#define pg_write_barrier() __asm__ __volatile__ ("lwsync" : : : "memory")
-
-#elif defined(__alpha) || defined(__alpha__) /* Alpha */
+#elif defined(__alpha) || defined(__alpha__) /* Alpha */
/*
* Unlike all other known architectures, Alpha allows dependent reads to be
@@ -120,7 +114,6 @@ extern slock_t dummy_spinlock;
#define pg_memory_barrier() __asm__ __volatile__ ("mb" : : : "memory")
#define pg_read_barrier() __asm__ __volatile__ ("rmb" : : : "memory")
#define pg_write_barrier() __asm__ __volatile__ ("wmb" : : : "memory")
-
#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
/*
@@ -129,14 +122,11 @@ extern slock_t dummy_spinlock;
* own definitions where possible, and use this only as a fallback.
*/
#define pg_memory_barrier() __sync_synchronize()
-
#endif
-
#elif defined(__ia64__) || defined(__ia64)
#define pg_compiler_barrier() _Asm_sched_fence()
#define pg_memory_barrier() _Asm_mf()
-
#elif defined(WIN32_ONLY_COMPILER)
/* Should work on both MSVC and Borland. */
@@ -144,7 +134,6 @@ extern slock_t dummy_spinlock;
#pragma intrinsic(_ReadWriteBarrier)
#define pg_compiler_barrier() _ReadWriteBarrier()
#define pg_memory_barrier() MemoryBarrier()
-
#endif
/*
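The barrier semantics rewrapped above (a write barrier orders stores, a read barrier orders loads) pair up in the classic message-passing pattern. A minimal editorial sketch using the macros this header defines:

#include "postgres.h"
#include "storage/barrier.h"

static int	msg;
static volatile bool ready = false;

static void
producer(void)
{
	msg = 42;
	pg_write_barrier();			/* order the store to msg before ready */
	ready = true;
}

static void
consumer(void)
{
	while (!ready)
		;						/* spin until the flag is visible */
	pg_read_barrier();			/* order the load of ready before msg */
	Assert(msg == 42);
}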
diff --git a/src/include/storage/latch.h b/src/include/storage/latch.h
index 6a7df38d1a..71fb4868a0 100644
--- a/src/include/storage/latch.h
+++ b/src/include/storage/latch.h
@@ -68,7 +68,7 @@
* than an ad-hoc shared latch for signaling auxiliary processes. This is
* because generic signal handlers will call SetLatch on the process latch
* only, so using any latch other than the process latch effectively precludes
- * ever registering a generic handler. Since signals have the potential to
+ * ever registering a generic handler. Since signals have the potential to
* invalidate the latch timeout on some platforms, resulting in a
* denial-of-service, it is important to verify that all signal handlers
* within all WaitLatch-calling processes call SetLatch.
@@ -102,10 +102,10 @@ typedef struct
} Latch;
/* Bitmasks for events that may wake-up WaitLatch() clients */
-#define WL_LATCH_SET (1 << 0)
-#define WL_SOCKET_READABLE (1 << 1)
+#define WL_LATCH_SET (1 << 0)
+#define WL_SOCKET_READABLE (1 << 1)
#define WL_SOCKET_WRITEABLE (1 << 2)
-#define WL_TIMEOUT (1 << 3)
+#define WL_TIMEOUT (1 << 3)
#define WL_POSTMASTER_DEATH (1 << 4)
/*
@@ -115,7 +115,7 @@ extern void InitLatch(volatile Latch *latch);
extern void InitSharedLatch(volatile Latch *latch);
extern void OwnLatch(volatile Latch *latch);
extern void DisownLatch(volatile Latch *latch);
-extern int WaitLatch(volatile Latch *latch, int wakeEvents, long timeout);
+extern int WaitLatch(volatile Latch *latch, int wakeEvents, long timeout);
extern int WaitLatchOrSocket(volatile Latch *latch, int wakeEvents,
pgsocket sock, long timeout);
extern void SetLatch(volatile Latch *latch);
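The comments above insist that SetLatch wakeups must never be lost; the standard wait-loop protocol achieves that by resetting the latch before rechecking for work. A sketch against the 9.2 declarations shown in this hunk (ResetLatch is also declared by latch.h, though not visible here):

#include "postgres.h"
#include "storage/latch.h"

static void
demo_wait_loop(volatile Latch *latch, void (*do_work) (void))
{
	for (;;)
	{
		ResetLatch(latch);		/* reset before rechecking, else a
								 * SetLatch() arriving in between could
								 * be missed */
		do_work();				/* process whatever is pending */

		(void) WaitLatch(latch, WL_LATCH_SET | WL_TIMEOUT, 10000L);
	}
}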
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index 17b894285b..d629ac2ad2 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -428,7 +428,7 @@ typedef struct LockInstanceData
LOCKMASK holdMask; /* locks held by this PGPROC */
LOCKMODE waitLockMode; /* lock awaited by this PGPROC, if any */
BackendId backend; /* backend ID of this PGPROC */
- LocalTransactionId lxid; /* local transaction ID of this PGPROC */
+ LocalTransactionId lxid; /* local transaction ID of this PGPROC */
int pid; /* pid of this PGPROC */
bool fastpath; /* taken via fastpath? */
} LockInstanceData;
@@ -436,7 +436,7 @@ typedef struct LockInstanceData
typedef struct LockData
{
int nelements; /* The length of the array */
- LockInstanceData *locks;
+ LockInstanceData *locks;
} LockData;
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index 6b59efcbb1..82d8ec4edc 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -95,9 +95,9 @@ typedef enum LWLockMode
{
LW_EXCLUSIVE,
LW_SHARED,
- LW_WAIT_UNTIL_FREE /* A special mode used in PGPROC->lwlockMode, when
- * waiting for lock to become free. Not to be used
- * as LWLockAcquire argument */
+ LW_WAIT_UNTIL_FREE /* A special mode used in PGPROC->lwlockMode,
+ * when waiting for lock to become free. Not
+ * to be used as LWLockAcquire argument */
} LWLockMode;
diff --git a/src/include/storage/predicate.h b/src/include/storage/predicate.h
index 6ea70ea845..7ec79e077d 100644
--- a/src/include/storage/predicate.h
+++ b/src/include/storage/predicate.h
@@ -44,7 +44,7 @@ extern bool PageIsPredicateLocked(Relation relation, BlockNumber blkno);
/* predicate lock maintenance */
extern Snapshot GetSerializableTransactionSnapshot(Snapshot snapshot);
extern void SetSerializableTransactionSnapshot(Snapshot snapshot,
- TransactionId sourcexid);
+ TransactionId sourcexid);
extern void RegisterPredicateLockingXid(TransactionId xid);
extern void PredicateLockRelation(Relation relation, Snapshot snapshot);
extern void PredicateLockPage(Relation relation, BlockNumber blkno, Snapshot snapshot);
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 618a02f42b..71413aaf59 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -131,14 +131,15 @@ struct PGPROC
struct XidCache subxids; /* cache for subtransaction XIDs */
- /* Per-backend LWLock. Protects fields below. */
+ /* Per-backend LWLock. Protects fields below. */
LWLockId backendLock; /* protects the fields below */
/* Lock manager data, recording fast-path locks taken by this backend. */
uint64 fpLockBits; /* lock modes held for each fast-path slot */
- Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */
+ Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */
bool fpVXIDLock; /* are we holding a fast-path VXID lock? */
- LocalTransactionId fpLocalTransactionId; /* lxid for fast-path VXID lock */
+ LocalTransactionId fpLocalTransactionId; /* lxid for fast-path VXID
+ * lock */
};
/* NOTE: "typedef struct PGPROC PGPROC" appears in storage/lock.h. */
@@ -149,7 +150,7 @@ extern PGDLLIMPORT struct PGXACT *MyPgXact;
/*
* Prior to PostgreSQL 9.2, the fields below were stored as part of the
- * PGPROC. However, benchmarking revealed that packing these particular
+ * PGPROC. However, benchmarking revealed that packing these particular
* members into a separate array as tightly as possible sped up GetSnapshotData
* considerably on systems with many CPU cores, by reducing the number of
* cache lines needing to be fetched. Thus, think very carefully before adding
diff --git a/src/include/storage/procarray.h b/src/include/storage/procarray.h
index 0b0aa35ee1..5b4cab926f 100644
--- a/src/include/storage/procarray.h
+++ b/src/include/storage/procarray.h
@@ -43,7 +43,7 @@ extern int GetMaxSnapshotSubxidCount(void);
extern Snapshot GetSnapshotData(Snapshot snapshot);
extern bool ProcArrayInstallImportedXmin(TransactionId xmin,
- TransactionId sourcexid);
+ TransactionId sourcexid);
extern RunningTransactions GetRunningTransactionData(void);
diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h
index 7fdfdbe7c4..bcf2c8111d 100644
--- a/src/include/storage/sinval.h
+++ b/src/include/storage/sinval.h
@@ -33,8 +33,8 @@
* updates and deletions in system catalogs (see CacheInvalidateHeapTuple).
* An update can generate two inval events, one for the old tuple and one for
* the new, but this is reduced to one event if the tuple's hash key doesn't
- * change. Note that the inval events themselves don't actually say whether
- * the tuple is being inserted or deleted. Also, since we transmit only a
+ * change. Note that the inval events themselves don't actually say whether
+ * the tuple is being inserted or deleted. Also, since we transmit only a
* hash key, there is a small risk of unnecessary invalidations due to chance
* matches of hash keys.
*
diff --git a/src/include/storage/smgr.h b/src/include/storage/smgr.h
index f1e1b8cdef..f8fc2b2d6e 100644
--- a/src/include/storage/smgr.h
+++ b/src/include/storage/smgr.h
@@ -60,7 +60,7 @@ typedef struct SMgrRelationData
* submodules. Do not touch them from elsewhere.
*/
int smgr_which; /* storage manager selector */
- bool smgr_transient; /* T if files are to be closed at EOXact */
+ bool smgr_transient; /* T if files are to be closed at EOXact */
/* for md.c; NULL for forks that are not open */
struct _MdfdVec *md_fd[MAX_FORKNUM + 1];
diff --git a/src/include/tsearch/ts_public.h b/src/include/tsearch/ts_public.h
index 380df6ca5f..d5c18f2cf0 100644
--- a/src/include/tsearch/ts_public.h
+++ b/src/include/tsearch/ts_public.h
@@ -91,11 +91,11 @@ typedef struct
* and ( fot, ball, klubb ). So, dictionary should return:
*
* nvariant lexeme
- * 1 fotball
- * 1 klubb
- * 2 fot
- * 2 ball
- * 2 klubb
+ * 1 fotball
+ * 1 klubb
+ * 2 fot
+ * 2 ball
+ * 2 klubb
*
* In general, a TSLexeme will be considered to belong to the same split
* variant as the previous one if they have the same nvariant value.
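The nvariant table reindented above maps directly onto the TSLexeme array a dictionary returns. A hedged sketch of the fotballklubb example, assuming the usual NULL-lexeme terminator convention:

#include "postgres.h"
#include "tsearch/ts_public.h"

static TSLexeme *
demo_variants(void)
{
	TSLexeme   *res = palloc0(sizeof(TSLexeme) * 6);

	res[0].nvariant = 1;
	res[0].lexeme = pstrdup("fotball");
	res[1].nvariant = 1;
	res[1].lexeme = pstrdup("klubb");
	res[2].nvariant = 2;
	res[2].lexeme = pstrdup("fot");
	res[3].nvariant = 2;
	res[3].lexeme = pstrdup("ball");
	res[4].nvariant = 2;
	res[4].lexeme = pstrdup("klubb");
	/* res[5] stays zeroed: lexeme == NULL marks the end of the array */
	return res;
}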
diff --git a/src/include/utils/acl.h b/src/include/utils/acl.h
index ff3c6aa1f6..6de39b21cf 100644
--- a/src/include/utils/acl.h
+++ b/src/include/utils/acl.h
@@ -278,7 +278,7 @@ extern AclMode pg_foreign_data_wrapper_aclmask(Oid fdw_oid, Oid roleid,
extern AclMode pg_foreign_server_aclmask(Oid srv_oid, Oid roleid,
AclMode mask, AclMaskHow how);
extern AclMode pg_type_aclmask(Oid type_oid, Oid roleid,
- AclMode mask, AclMaskHow how);
+ AclMode mask, AclMaskHow how);
extern AclResult pg_attribute_aclcheck(Oid table_oid, AttrNumber attnum,
Oid roleid, AclMode mode);
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index f246f117ba..d1e8370760 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -311,7 +311,7 @@ extern Datum btnamecmp(PG_FUNCTION_ARGS);
extern Datum bttextcmp(PG_FUNCTION_ARGS);
/*
- * Per-opclass sort support functions for new btrees. Like the
+ * Per-opclass sort support functions for new btrees. Like the
* functions above, these are stored in pg_amproc; most are defined in
* access/nbtree/nbtcompare.c
*/
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index 38ed54891a..6810387755 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -313,7 +313,7 @@ extern void ParseLongOption(const char *string, char **name, char **value);
extern bool parse_int(const char *value, int *result, int flags,
const char **hintmsg);
extern bool parse_real(const char *value, double *result);
-extern int set_config_option(const char *name, const char *value,
+extern int set_config_option(const char *name, const char *value,
GucContext context, GucSource source,
GucAction action, bool changeVal, int elevel);
extern char *GetConfigOptionByName(const char *name, const char **varname);
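The set_config_option() declaration rewrapped above is the programmatic way to apply a GUC. A hedged sketch; the GucContext/GucSource/GucAction values are the usual guc.h constants, and the return-value reading (positive means the new value was applied) is the editor's assumption about the 9.2 API:

#include "postgres.h"
#include "utils/guc.h"

static void
demo_set_work_mem(void)
{
	if (set_config_option("work_mem", "64MB",
						  PGC_USERSET, PGC_S_SESSION,
						  GUC_ACTION_SET, true, 0) > 0)
		elog(DEBUG1, "work_mem updated for this session");
}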
diff --git a/src/include/utils/guc_tables.h b/src/include/utils/guc_tables.h
index eaea4862c2..5d1ca06b2a 100644
--- a/src/include/utils/guc_tables.h
+++ b/src/include/utils/guc_tables.h
@@ -120,7 +120,7 @@ typedef struct guc_stack
GucSource source; /* source of the prior value */
/* masked value's source must be PGC_S_SESSION, so no need to store it */
GucContext scontext; /* context that set the prior value */
- GucContext masked_scontext; /* context that set the masked value */
+ GucContext masked_scontext; /* context that set the masked value */
config_var_value prior; /* previous value of variable */
config_var_value masked; /* SET value in a GUC_SET_LOCAL entry */
} GucStack;
@@ -152,7 +152,7 @@ struct config_generic
GucSource source; /* source of the current actual value */
GucSource reset_source; /* source of the reset_value */
GucContext scontext; /* context that set the current value */
- GucContext reset_scontext; /* context that set the reset value */
+ GucContext reset_scontext; /* context that set the reset value */
GucStack *stack; /* stacked prior values */
void *extra; /* "extra" pointer for current actual value */
char *sourcefile; /* file current setting is from (NULL if not
diff --git a/src/include/utils/json.h b/src/include/utils/json.h
index c355e0f73e..0f38147acb 100644
--- a/src/include/utils/json.h
+++ b/src/include/utils/json.h
@@ -25,6 +25,6 @@ extern Datum array_to_json(PG_FUNCTION_ARGS);
extern Datum array_to_json_pretty(PG_FUNCTION_ARGS);
extern Datum row_to_json(PG_FUNCTION_ARGS);
extern Datum row_to_json_pretty(PG_FUNCTION_ARGS);
-extern void escape_json(StringInfo buf, const char *str);
+extern void escape_json(StringInfo buf, const char *str);
#endif /* JSON_H */
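For reference, escape_json() as declared above appends its argument to the buffer as a quoted, escaped JSON string literal. A minimal sketch:

#include "postgres.h"
#include "lib/stringinfo.h"
#include "utils/json.h"

static char *
demo_json_string(const char *raw)
{
	StringInfoData buf;

	initStringInfo(&buf);
	escape_json(&buf, raw);		/* e.g. tab becomes \t, quote becomes \" */
	return buf.data;			/* palloc'd; includes the double quotes */
}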
diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h
index 696ca77307..4bf1029e10 100644
--- a/src/include/utils/lsyscache.h
+++ b/src/include/utils/lsyscache.h
@@ -18,10 +18,10 @@
/* Result list element for get_op_btree_interpretation */
typedef struct OpBtreeInterpretation
{
- Oid opfamily_id; /* btree opfamily containing operator */
- int strategy; /* its strategy number */
- Oid oplefttype; /* declared left input datatype */
- Oid oprighttype; /* declared right input datatype */
+ Oid opfamily_id; /* btree opfamily containing operator */
+ int strategy; /* its strategy number */
+ Oid oplefttype; /* declared left input datatype */
+ Oid oprighttype; /* declared right input datatype */
} OpBtreeInterpretation;
/* I/O function selector for get_type_io_data */
@@ -149,7 +149,7 @@ extern void free_attstatsslot(Oid atttype,
Datum *values, int nvalues,
float4 *numbers, int nnumbers);
extern char *get_namespace_name(Oid nspid);
-extern Oid get_range_subtype(Oid rangeOid);
+extern Oid get_range_subtype(Oid rangeOid);
#define type_is_array(typid) (get_element_type(typid) != InvalidOid)
/* type_is_array_domain accepts both plain arrays and domains over arrays */
diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h
index a2bb3d9c72..06c8afdd0a 100644
--- a/src/include/utils/memutils.h
+++ b/src/include/utils/memutils.h
@@ -91,7 +91,7 @@ extern void MemoryContextResetChildren(MemoryContext context);
extern void MemoryContextDeleteChildren(MemoryContext context);
extern void MemoryContextResetAndDeleteChildren(MemoryContext context);
extern void MemoryContextSetParent(MemoryContext context,
- MemoryContext new_parent);
+ MemoryContext new_parent);
extern Size GetMemoryChunkSpace(void *pointer);
extern MemoryContext GetMemoryChunkContext(void *pointer);
extern MemoryContext MemoryContextGetParent(MemoryContext context);
diff --git a/src/include/utils/pg_crc_tables.h b/src/include/utils/pg_crc_tables.h
index 524410fffd..43052aa898 100644
--- a/src/include/utils/pg_crc_tables.h
+++ b/src/include/utils/pg_crc_tables.h
@@ -511,7 +511,6 @@ const uint64 pg_crc64_table[256] = {
UINT64CONST(0xD80C07CD676F8394), UINT64CONST(0x9AFCE626CE85B507)
};
#endif /* SIZEOF_VOID_P < 8 */
-
#endif /* PROVIDE_64BIT_CRC */
#endif /* PG_CRC_TABLES_H */
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index 9b1f7e46ec..413e8462a6 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -30,7 +30,7 @@
* the analyzed-and-rewritten query tree, and rebuild it when next needed.
*
* An actual execution plan, represented by CachedPlan, is derived from the
- * CachedPlanSource when we need to execute the query. The plan could be
+ * CachedPlanSource when we need to execute the query. The plan could be
* either generic (usable with any set of plan parameters) or custom (for a
* specific set of parameters). plancache.c contains the logic that decides
* which way to do it for any particular execution. If we are using a generic
@@ -84,7 +84,7 @@ typedef struct CachedPlanSource
List *query_list; /* list of Query nodes, or NIL if not valid */
List *relationOids; /* OIDs of relations the queries depend on */
List *invalItems; /* other dependencies, as PlanInvalItems */
- MemoryContext query_context; /* context holding the above, or NULL */
+ MemoryContext query_context; /* context holding the above, or NULL */
/* If we have a generic plan, this is a reference-counted link to it: */
struct CachedPlan *gplan; /* generic plan, or NULL if not valid */
/* Some state flags: */
@@ -93,26 +93,26 @@ typedef struct CachedPlanSource
bool is_valid; /* is the query_list currently valid? */
int generation; /* increments each time we create a plan */
/* If CachedPlanSource has been saved, it is a member of a global list */
- struct CachedPlanSource *next_saved; /* list link, if so */
+ struct CachedPlanSource *next_saved; /* list link, if so */
/* State kept to help decide whether to use custom or generic plans: */
double generic_cost; /* cost of generic plan, or -1 if not known */
- double total_custom_cost; /* total cost of custom plans so far */
- int num_custom_plans; /* number of plans included in total */
+ double total_custom_cost; /* total cost of custom plans so far */
+ int num_custom_plans; /* number of plans included in total */
} CachedPlanSource;
/*
* CachedPlan represents an execution plan derived from a CachedPlanSource.
* The reference count includes both the link from the parent CachedPlanSource
* (if any), and any active plan executions, so the plan can be discarded
- * exactly when refcount goes to zero. Both the struct itself and the
+ * exactly when refcount goes to zero. Both the struct itself and the
* subsidiary data live in the context denoted by the context field.
* This makes it easy to free a no-longer-needed cached plan.
*/
typedef struct CachedPlan
{
int magic; /* should equal CACHEDPLAN_MAGIC */
- List *stmt_list; /* list of statement nodes (PlannedStmts
- * and bare utility statements) */
+ List *stmt_list; /* list of statement nodes (PlannedStmts and
+ * bare utility statements) */
bool is_saved; /* is CachedPlan in a long-lived context? */
bool is_valid; /* is the stmt_list currently valid? */
TransactionId saved_xmin; /* if valid, replan when TransactionXmin
@@ -130,20 +130,20 @@ extern CachedPlanSource *CreateCachedPlan(Node *raw_parse_tree,
const char *query_string,
const char *commandTag);
extern void CompleteCachedPlan(CachedPlanSource *plansource,
- List *querytree_list,
- MemoryContext querytree_context,
- Oid *param_types,
- int num_params,
- ParserSetupHook parserSetup,
- void *parserSetupArg,
- int cursor_options,
- bool fixed_result);
+ List *querytree_list,
+ MemoryContext querytree_context,
+ Oid *param_types,
+ int num_params,
+ ParserSetupHook parserSetup,
+ void *parserSetupArg,
+ int cursor_options,
+ bool fixed_result);
extern void SaveCachedPlan(CachedPlanSource *plansource);
extern void DropCachedPlan(CachedPlanSource *plansource);
extern void CachedPlanSetParentContext(CachedPlanSource *plansource,
- MemoryContext newcontext);
+ MemoryContext newcontext);
extern CachedPlanSource *CopyCachedPlan(CachedPlanSource *plansource);
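The lifecycle the comments above describe (build a CachedPlanSource, save it under CacheMemoryContext, then fetch generic or custom CachedPlans from it) looks roughly like the sketch below. The Create/Complete/Save declarations are the ones shown in this hunk; GetCachedPlan and ReleaseCachedPlan are the editor's assumptions about the surrounding 9.2 API, so treat this as illustrative only:

#include "postgres.h"
#include "utils/plancache.h"

static void
demo_cache_query(Node *raw_parse_tree, const char *query_string,
				 const char *commandTag, List *querytree_list,
				 Oid *param_types, int num_params, ParamListInfo params)
{
	CachedPlanSource *psrc;
	CachedPlan *cplan;

	psrc = CreateCachedPlan(raw_parse_tree, query_string, commandTag);
	CompleteCachedPlan(psrc, querytree_list,
					   NULL,	/* let plancache copy trees to own context */
					   param_types, num_params,
					   NULL, NULL,	/* no parserSetup hook */
					   0,		/* cursor_options */
					   true);	/* fixed_result */
	SaveCachedPlan(psrc);		/* now lives under CacheMemoryContext */

	cplan = GetCachedPlan(psrc, params, true);	/* generic or custom */
	/* ... execute cplan->stmt_list via the executor ... */
	ReleaseCachedPlan(cplan, true);
}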
diff --git a/src/include/utils/rangetypes.h b/src/include/utils/rangetypes.h
index 7b6ff198d4..ad72df57dd 100644
--- a/src/include/utils/rangetypes.h
+++ b/src/include/utils/rangetypes.h
@@ -30,7 +30,7 @@ typedef struct
} RangeType;
/* Use this macro in preference to fetching rangetypid field directly */
-#define RangeTypeGetOid(r) ((r)->rangetypid)
+#define RangeTypeGetOid(r) ((r)->rangetypid)
/* A range's flags byte contains these bits: */
#define RANGE_EMPTY 0x01 /* range is empty */
@@ -40,8 +40,8 @@ typedef struct
#define RANGE_UB_INF 0x10 /* upper bound is +infinity */
#define RANGE_LB_NULL 0x20 /* lower bound is null (NOT USED) */
#define RANGE_UB_NULL 0x40 /* upper bound is null (NOT USED) */
-#define RANGE_CONTAIN_EMPTY 0x80 /* marks a GiST internal-page entry whose
- * subtree contains some empty ranges */
+#define RANGE_CONTAIN_EMPTY 0x80/* marks a GiST internal-page entry whose
+ * subtree contains some empty ranges */
#define RANGE_HAS_LBOUND(flags) (!((flags) & (RANGE_EMPTY | \
RANGE_LB_NULL | \
@@ -149,18 +149,18 @@ extern Datum tstzrange_subdiff(PG_FUNCTION_ARGS);
/* assorted support functions */
extern TypeCacheEntry *range_get_typcache(FunctionCallInfo fcinfo,
- Oid rngtypid);
+ Oid rngtypid);
extern RangeType *range_serialize(TypeCacheEntry *typcache, RangeBound *lower,
- RangeBound *upper, bool empty);
+ RangeBound *upper, bool empty);
extern void range_deserialize(TypeCacheEntry *typcache, RangeType *range,
- RangeBound *lower, RangeBound *upper,
- bool *empty);
+ RangeBound *lower, RangeBound *upper,
+ bool *empty);
extern char range_get_flags(RangeType *range);
extern void range_set_contain_empty(RangeType *range);
extern RangeType *make_range(TypeCacheEntry *typcache, RangeBound *lower,
- RangeBound *upper, bool empty);
+ RangeBound *upper, bool empty);
extern int range_cmp_bounds(TypeCacheEntry *typcache, RangeBound *b1,
- RangeBound *b2);
+ RangeBound *b2);
extern int range_cmp_bound_values(TypeCacheEntry *typcache, RangeBound *b1,
RangeBound *b2);
extern RangeType *make_empty_range(TypeCacheEntry *typcache);
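A hedged sketch of make_range() and range_get_typcache() as declared above, building the int4range [lo, hi). The RangeBound field layout (val/infinite/inclusive/lower) and INT4RANGEOID are the editor's assumptions about the surrounding 9.2 headers:

#include "postgres.h"
#include "fmgr.h"
#include "catalog/pg_type.h"
#include "utils/rangetypes.h"

static RangeType *
demo_int4range(FunctionCallInfo fcinfo, int32 lo, int32 hi)
{
	TypeCacheEntry *typcache = range_get_typcache(fcinfo, INT4RANGEOID);
	RangeBound	lower;
	RangeBound	upper;

	lower.val = Int32GetDatum(lo);
	lower.infinite = false;
	lower.inclusive = true;		/* '[' */
	lower.lower = true;

	upper.val = Int32GetDatum(hi);
	upper.infinite = false;
	upper.inclusive = false;	/* ')' */
	upper.lower = false;

	return make_range(typcache, &lower, &upper, false);
}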
diff --git a/src/include/utils/rel.h b/src/include/utils/rel.h
index d404c2adb5..4669d8a67e 100644
--- a/src/include/utils/rel.h
+++ b/src/include/utils/rel.h
@@ -196,7 +196,7 @@ typedef struct StdRdOptions
int32 vl_len_; /* varlena header (do not touch directly!) */
int fillfactor; /* page fill factor in percent (0..100) */
AutoVacOpts autovacuum; /* autovacuum-related options */
- bool security_barrier; /* for views */
+ bool security_barrier; /* for views */
} StdRdOptions;
#define HEAP_MIN_FILLFACTOR 10
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index 4529f27683..87c6554b32 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -121,7 +121,7 @@ extern void get_join_variables(PlannerInfo *root, List *args,
VariableStatData *vardata2,
bool *join_is_reversed);
extern double get_variable_numdistinct(VariableStatData *vardata,
- bool *isdefault);
+ bool *isdefault);
extern double mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
Datum constval, bool varonleft,
double *sumcommonp);
diff --git a/src/include/utils/sortsupport.h b/src/include/utils/sortsupport.h
index ef8d853493..720a54c0d7 100644
--- a/src/include/utils/sortsupport.h
+++ b/src/include/utils/sortsupport.h
@@ -33,7 +33,7 @@
*
* Note: since pg_amproc functions are indexed by (lefttype, righttype)
* it is possible to associate a BTSORTSUPPORT function with a cross-type
- * comparison. This could sensibly be used to provide a fast comparator
+ * comparison. This could sensibly be used to provide a fast comparator
* function for such cases, but probably not any other acceleration method.
*
*
@@ -57,28 +57,28 @@ typedef struct SortSupportData
* These fields are initialized before calling the BTSORTSUPPORT function
* and should not be changed later.
*/
- MemoryContext ssup_cxt; /* Context containing sort info */
- Oid ssup_collation; /* Collation to use, or InvalidOid */
+ MemoryContext ssup_cxt; /* Context containing sort info */
+ Oid ssup_collation; /* Collation to use, or InvalidOid */
/*
- * Additional sorting parameters; but unlike ssup_collation, these can
- * be changed after BTSORTSUPPORT is called, so don't use them in
- * selecting sort support functions.
+ * Additional sorting parameters; but unlike ssup_collation, these can be
+ * changed after BTSORTSUPPORT is called, so don't use them in selecting
+ * sort support functions.
*/
- bool ssup_reverse; /* descending-order sort? */
+ bool ssup_reverse; /* descending-order sort? */
bool ssup_nulls_first; /* sort nulls first? */
/*
* These fields are workspace for callers, and should not be touched by
* opclass-specific functions.
*/
- AttrNumber ssup_attno; /* column number to sort */
+ AttrNumber ssup_attno; /* column number to sort */
/*
- * ssup_extra is zeroed before calling the BTSORTSUPPORT function, and
- * is not touched subsequently by callers.
+ * ssup_extra is zeroed before calling the BTSORTSUPPORT function, and is
+ * not touched subsequently by callers.
*/
- void *ssup_extra; /* Workspace for opclass functions */
+ void *ssup_extra; /* Workspace for opclass functions */
/*
* Function pointers are zeroed before calling the BTSORTSUPPORT function,
@@ -140,13 +140,11 @@ ApplySortComparator(Datum datum1, bool isNull1,
return compare;
}
-
#else
-extern int ApplySortComparator(Datum datum1, bool isNull1,
+extern int ApplySortComparator(Datum datum1, bool isNull1,
Datum datum2, bool isNull2,
SortSupport ssup);
-
#endif /* USE_INLINE */
/* Other functions in utils/sort/sortsupport.c */
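
A hedged usage sketch for the struct fields and comparator above. PrepareSortSupportFromOrderingOp() is assumed to be among the "other functions in utils/sort/sortsupport.c" just referenced; the memset matters because, per the comments, ssup_extra and the function pointers are expected to start out zeroed.

/* sketch: set up sort support for an ordering operator, then compare */
static int
compare_datums(Oid ordering_opr, Datum d1, bool null1, Datum d2, bool null2)
{
    SortSupportData ssup;

    memset(&ssup, 0, sizeof(ssup));     /* zero ssup_extra and function pointers */
    ssup.ssup_cxt = CurrentMemoryContext;
    ssup.ssup_collation = InvalidOid;
    ssup.ssup_nulls_first = false;      /* NULLs sort after non-NULLs */
    PrepareSortSupportFromOrderingOp(ordering_opr, &ssup);  /* assumed setup call */
    return ApplySortComparator(d1, null1, d2, null2, &ssup);
}
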
diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h
index 5669924766..665e969498 100644
--- a/src/include/utils/timestamp.h
+++ b/src/include/utils/timestamp.h
@@ -42,7 +42,6 @@
#define PG_RETURN_TIMESTAMP(x) return TimestampGetDatum(x)
#define PG_RETURN_TIMESTAMPTZ(x) return TimestampTzGetDatum(x)
#define PG_RETURN_INTERVAL_P(x) return IntervalPGetDatum(x)
-
#else /* !HAVE_INT64_TIMESTAMP */
#define DatumGetTimestamp(X) ((Timestamp) DatumGetFloat8(X))
@@ -60,7 +59,6 @@
#define PG_RETURN_TIMESTAMP(x) return TimestampGetDatum(x)
#define PG_RETURN_TIMESTAMPTZ(x) return TimestampTzGetDatum(x)
#define PG_RETURN_INTERVAL_P(x) return IntervalPGetDatum(x)
-
#endif /* HAVE_INT64_TIMESTAMP */
diff --git a/src/include/utils/tqual.h b/src/include/utils/tqual.h
index 3d8a480d81..ff74f868a6 100644
--- a/src/include/utils/tqual.h
+++ b/src/include/utils/tqual.h
@@ -84,7 +84,7 @@ extern HTSU_Result HeapTupleSatisfiesUpdate(HeapTupleHeader tuple,
extern HTSV_Result HeapTupleSatisfiesVacuum(HeapTupleHeader tuple,
TransactionId OldestXmin, Buffer buffer);
extern bool HeapTupleIsSurelyDead(HeapTupleHeader tuple,
- TransactionId OldestXmin);
+ TransactionId OldestXmin);
extern void HeapTupleSetHintBits(HeapTupleHeader tuple, Buffer buffer,
uint16 infomask, TransactionId xid);
diff --git a/src/include/utils/typcache.h b/src/include/utils/typcache.h
index 935fb864a3..12fb4f8310 100644
--- a/src/include/utils/typcache.h
+++ b/src/include/utils/typcache.h
@@ -73,13 +73,13 @@ typedef struct TypeCacheEntry
TupleDesc tupDesc;
/*
- * Fields computed when TYPECACHE_RANGE_INFO is requested. Zeroes if
- * not a range type or information hasn't yet been requested. Note that
+ * Fields computed when TYPECACHE_RANGE_INFO is requested. Zeroes if not
+ * a range type or information hasn't yet been requested. Note that
* rng_cmp_proc_finfo could be different from the element type's default
* btree comparison function.
*/
- struct TypeCacheEntry *rngelemtype; /* range's element type */
- Oid rng_collation; /* collation for comparisons, if any */
+ struct TypeCacheEntry *rngelemtype; /* range's element type */
+ Oid rng_collation; /* collation for comparisons, if any */
FmgrInfo rng_cmp_proc_finfo; /* comparison function */
FmgrInfo rng_canonical_finfo; /* canonicalization function, if any */
FmgrInfo rng_subdiff_finfo; /* difference function, if any */
diff --git a/src/include/utils/xml.h b/src/include/utils/xml.h
index a36e26f2ed..a645af918c 100644
--- a/src/include/utils/xml.h
+++ b/src/include/utils/xml.h
@@ -37,11 +37,11 @@ typedef enum
typedef enum
{
- PG_XML_STRICTNESS_LEGACY, /* ignore errors unless function result
- * indicates error condition */
- PG_XML_STRICTNESS_WELLFORMED, /* ignore non-parser messages */
- PG_XML_STRICTNESS_ALL /* report all notices/warnings/errors */
-} PgXmlStrictness;
+ PG_XML_STRICTNESS_LEGACY, /* ignore errors unless function result
+ * indicates error condition */
+ PG_XML_STRICTNESS_WELLFORMED, /* ignore non-parser messages */
+ PG_XML_STRICTNESS_ALL /* report all notices/warnings/errors */
+} PgXmlStrictness;
/* struct PgXmlErrorContext is private to xml.c */
typedef struct PgXmlErrorContext PgXmlErrorContext;
@@ -90,7 +90,7 @@ extern PgXmlErrorContext *pg_xml_init(PgXmlStrictness strictness);
extern void pg_xml_done(PgXmlErrorContext *errcxt, bool isError);
extern bool pg_xml_error_occurred(PgXmlErrorContext *errcxt);
extern void xml_ereport(PgXmlErrorContext *errcxt, int level, int sqlcode,
- const char *msg);
+ const char *msg);
extern xmltype *xmlconcat(List *args);
extern xmltype *xmlelement(XmlExprState *xmlExpr, ExprContext *econtext);
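
The strictness enum and error-context declarations above imply a fixed bracket around any libxml2 work; a minimal sketch, with the actual libxml2 call elided and ERRCODE_INVALID_XML_DOCUMENT assumed as a plausible sqlcode:

/* sketch: the pg_xml_init()/pg_xml_done() bracket around libxml2 calls */
static void
parse_some_xml(const char *data)
{
    PgXmlErrorContext *xmlerrcxt = pg_xml_init(PG_XML_STRICTNESS_WELLFORMED);

    PG_TRY();
    {
        /* ... hand "data" to libxml2 here ... */
        if (pg_xml_error_occurred(xmlerrcxt))
            xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_XML_DOCUMENT,
                        "could not parse XML document");
    }
    PG_CATCH();
    {
        pg_xml_done(xmlerrcxt, true);   /* error exit */
        PG_RE_THROW();
    }
    PG_END_TRY();

    pg_xml_done(xmlerrcxt, false);      /* normal exit */
}
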
diff --git a/src/interfaces/ecpg/ecpglib/connect.c b/src/interfaces/ecpg/ecpglib/connect.c
index b54b1f5503..49f2d546bb 100644
--- a/src/interfaces/ecpg/ecpglib/connect.c
+++ b/src/interfaces/ecpg/ecpglib/connect.c
@@ -267,7 +267,8 @@ ECPGconnect(int lineno, int c, const char *name, const char *user, const char *p
struct sqlca_t *sqlca = ECPGget_sqlca();
enum COMPAT_MODE compat = c;
struct connection *this;
- int i, connect_params = 0;
+ int i,
+ connect_params = 0;
char *dbname = name ? ecpg_strdup(name, lineno) : NULL,
*host = NULL,
*tmp,
@@ -505,10 +506,10 @@ ECPGconnect(int lineno, int c, const char *name, const char *user, const char *p
connect_params++;
/* allocate enough space for all connection parameters */
- conn_keywords = (const char **) ecpg_alloc((connect_params + 1) * sizeof (char *), lineno);
- conn_values = (const char **) ecpg_alloc(connect_params * sizeof (char *), lineno);
+ conn_keywords = (const char **) ecpg_alloc((connect_params + 1) * sizeof(char *), lineno);
+ conn_values = (const char **) ecpg_alloc(connect_params * sizeof(char *), lineno);
if (conn_keywords == NULL || conn_values == NULL)
- {
+ {
if (host)
ecpg_free(host);
if (port)
@@ -560,23 +561,25 @@ ECPGconnect(int lineno, int c, const char *name, const char *user, const char *p
}
if (options)
{
- char *str;
+ char *str;
/* options look like this "option1 = value1 option2 = value2 ... */
/* we have to break up the string into single options */
for (str = options; *str;)
{
- int e, a;
- char *token1, *token2;
+ int e,
+ a;
+ char *token1,
+ *token2;
- for (token1 = str; *token1 && *token1 == ' '; token1++);
+ for (token1 = str; *token1 && *token1 == ' '; token1++);
for (e = 0; token1[e] && token1[e] != '='; e++);
- if (token1[e]) /* found "=" */
+ if (token1[e]) /* found "=" */
{
token1[e] = '\0';
for (token2 = token1 + e + 1; *token2 && *token2 == ' '; token2++);
for (a = 0; token2[a] && token2[a] != '&'; a++);
- if (token2[a]) /* found "&" => another option follows */
+ if (token2[a]) /* found "&" => another option follows */
{
token2[a] = '\0';
str = token2 + a + 1;
@@ -587,10 +590,10 @@ ECPGconnect(int lineno, int c, const char *name, const char *user, const char *p
conn_keywords[i] = token1;
conn_values[i] = token2;
i++;
- }
+ }
else
/* the parser should not be able to create this invalid option */
- str = token1 + e;
+ str = token1 + e;
}
}
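
Stripped of the connection bookkeeping, the option scan above is a destructive split of "k1=v1&k2=v2" on '=' and '&'. A standalone sketch under that reading (split_options() and its printf are illustrative only, and the original's leading-space skipping is omitted):

#include <stdio.h>
#include <string.h>

/* sketch: destructively split "k1=v1&k2=v2" into keyword/value pairs */
static void
split_options(char *str)
{
    while (*str)
    {
        char *key = str;
        char *eq = strchr(key, '=');
        char *val;
        char *amp;

        if (eq == NULL)
            break;              /* no "=": the parser normally prevents this */
        *eq = '\0';
        val = eq + 1;
        amp = strchr(val, '&');
        if (amp)
        {
            *amp = '\0';
            str = amp + 1;      /* another option follows */
        }
        else
            str = val + strlen(val);
        printf("keyword=%s value=%s\n", key, val);
    }
}
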
diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c
index 50a2d95347..1a7876ecf2 100644
--- a/src/interfaces/ecpg/ecpglib/execute.c
+++ b/src/interfaces/ecpg/ecpglib/execute.c
@@ -1441,7 +1441,7 @@ ecpg_execute(struct statement * stmt)
ecpg_log("ecpg_execute on line %d: query: %s; with %d parameter(s) on connection %s\n", stmt->lineno, stmt->command, nParams, stmt->connection->name);
if (stmt->statement_type == ECPGst_execute)
{
- results = PQexecPrepared(stmt->connection->connection, stmt->name, nParams, (const char *const*) paramValues, NULL, NULL, 0);
+ results = PQexecPrepared(stmt->connection->connection, stmt->name, nParams, (const char *const *) paramValues, NULL, NULL, 0);
ecpg_log("ecpg_execute on line %d: using PQexecPrepared for \"%s\"\n", stmt->lineno, stmt->command);
}
else
@@ -1453,7 +1453,7 @@ ecpg_execute(struct statement * stmt)
}
else
{
- results = PQexecParams(stmt->connection->connection, stmt->command, nParams, NULL, (const char *const*) paramValues, NULL, NULL, 0);
+ results = PQexecParams(stmt->connection->connection, stmt->command, nParams, NULL, (const char *const *) paramValues, NULL, NULL, 0);
ecpg_log("ecpg_execute on line %d: using PQexecParams\n", stmt->lineno);
}
}
diff --git a/src/interfaces/ecpg/ecpglib/extern.h b/src/interfaces/ecpg/ecpglib/extern.h
index bd1ffb096c..835e70c38f 100644
--- a/src/interfaces/ecpg/ecpglib/extern.h
+++ b/src/interfaces/ecpg/ecpglib/extern.h
@@ -123,8 +123,8 @@ struct variable
struct var_list
{
- int number;
- void *pointer;
+ int number;
+ void *pointer;
struct var_list *next;
};
@@ -170,7 +170,7 @@ void ecpg_raise(int line, int code, const char *sqlstate, const char *str);
void ecpg_raise_backend(int line, PGresult *result, PGconn *conn, int compat);
char *ecpg_prepared(const char *, struct connection *);
bool ecpg_deallocate_all_conn(int lineno, enum COMPAT_MODE c, struct connection * conn);
-void ecpg_log(const char *format, ...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+void ecpg_log(const char *format,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
bool ecpg_auto_prepare(int, const char *, const int, char **, const char *);
void ecpg_init_sqlca(struct sqlca_t * sqlca);
diff --git a/src/interfaces/ecpg/pgtypeslib/dt.h b/src/interfaces/ecpg/pgtypeslib/dt.h
index 269af7822b..dfe6f9e687 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt.h
+++ b/src/interfaces/ecpg/pgtypeslib/dt.h
@@ -334,12 +334,12 @@ do { \
int DecodeInterval(char **, int *, int, int *, struct tm *, fsec_t *);
int DecodeTime(char *, int *, struct tm *, fsec_t *);
-int EncodeDateTime(struct tm *tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates);
-int EncodeInterval(struct tm *tm, fsec_t fsec, int style, char *str);
+int EncodeDateTime(struct tm * tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates);
+int EncodeInterval(struct tm * tm, fsec_t fsec, int style, char *str);
int tm2timestamp(struct tm *, fsec_t, int *, timestamp *);
int DecodeUnits(int field, char *lowtoken, int *val);
bool CheckDateTokenTables(void);
-int EncodeDateOnly(struct tm *tm, int style, char *str, bool EuroDates);
+int EncodeDateOnly(struct tm * tm, int style, char *str, bool EuroDates);
int GetEpochTime(struct tm *);
int ParseDateTime(char *, char *, char **, int *, int *, char **);
int DecodeDateTime(char **, int *, int, int *, struct tm *, fsec_t *, bool);
diff --git a/src/interfaces/ecpg/preproc/type.c b/src/interfaces/ecpg/preproc/type.c
index ff1224754e..c743616a6c 100644
--- a/src/interfaces/ecpg/preproc/type.c
+++ b/src/interfaces/ecpg/preproc/type.c
@@ -396,7 +396,10 @@ ECPGdump_a_simple(FILE *o, const char *name, enum ECPGttype type,
else
sprintf(variable, "&(%s%s)", prefix ? prefix : "", name);
- /* If we created a varchar structure atomatically, counter is greater than 0. */
+ /*
+ * If we created a varchar structure automatically, counter is
+ * greater than 0.
+ */
if (counter)
sprintf(offset, "sizeof(struct varchar_%d)", counter);
else
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 33daaec7be..7c9fa34560 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -298,7 +298,7 @@ static void closePGconn(PGconn *conn);
static PQconninfoOption *conninfo_init(PQExpBuffer errorMessage);
static PQconninfoOption *parse_connection_string(const char *conninfo,
PQExpBuffer errorMessage, bool use_defaults);
-static int uri_prefix_length(const char *connstr);
+static int uri_prefix_length(const char *connstr);
static bool recognized_connection_string(const char *connstr);
static PQconninfoOption *conninfo_parse(const char *conninfo,
PQExpBuffer errorMessage, bool use_defaults);
@@ -308,7 +308,7 @@ static PQconninfoOption *conninfo_array_parse(const char *const * keywords,
static bool conninfo_add_defaults(PQconninfoOption *options,
PQExpBuffer errorMessage);
static PQconninfoOption *conninfo_uri_parse(const char *uri,
- PQExpBuffer errorMessage, bool use_defaults);
+ PQExpBuffer errorMessage, bool use_defaults);
static bool conninfo_uri_parse_options(PQconninfoOption *options,
const char *uri, PQExpBuffer errorMessage);
static bool conninfo_uri_parse_params(char *params,
@@ -319,8 +319,8 @@ static bool get_hexdigit(char digit, int *value);
static const char *conninfo_getval(PQconninfoOption *connOptions,
const char *keyword);
static PQconninfoOption *conninfo_storeval(PQconninfoOption *connOptions,
- const char *keyword, const char *value,
- PQExpBuffer errorMessage, bool ignoreMissing, bool uri_decode);
+ const char *keyword, const char *value,
+ PQExpBuffer errorMessage, bool ignoreMissing, bool uri_decode);
static PQconninfoOption *conninfo_find(PQconninfoOption *connOptions,
const char *keyword);
static void defaultNoticeReceiver(void *arg, const PGresult *res);
@@ -605,7 +605,7 @@ PQconnectStart(const char *conninfo)
static void
fillPGconn(PGconn *conn, PQconninfoOption *connOptions)
{
- const char *tmp;
+ const char *tmp;
/*
* Move option values into conn structure
@@ -903,8 +903,8 @@ PQsetdbLogin(const char *pghost, const char *pgport, const char *pgoptions,
return NULL;
/*
- * If the dbName parameter contains what looks like a connection
- * string, parse it into conn struct using connectOptions1.
+ * If the dbName parameter contains what looks like a connection string,
+ * parse it into conn struct using connectOptions1.
*/
if (dbName && recognized_connection_string(dbName))
{
@@ -2094,7 +2094,7 @@ keep_going: /* We will come back to here until there is
{
/*
* Server failure of some sort, such as failure to
- * fork a backend process. We need to process and
+ * fork a backend process. We need to process and
* report the error message, which might be formatted
* according to either protocol 2 or protocol 3.
* Rather than duplicate the code for that, we flip
@@ -4305,9 +4305,9 @@ conninfo_array_parse(const char *const * keywords, const char *const * values,
if (strcmp(pname, "dbname") == 0 && pvalue)
{
/*
- * If value is a connection string, parse it, but do not use defaults
- * here -- those get picked up later. We only want to override for
- * those parameters actually passed.
+ * If value is a connection string, parse it, but do not use
+ * defaults here -- those get picked up later. We only want to
+ * override for those parameters actually passed.
*/
if (recognized_connection_string(pvalue))
{
@@ -4558,14 +4558,15 @@ static bool
conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
PQExpBuffer errorMessage)
{
- int prefix_len;
- char *p;
- char *buf = strdup(uri); /* need a modifiable copy of the input URI */
- char *start = buf;
- char prevchar = '\0';
- char *user = NULL;
- char *host = NULL;
- bool retval = false;
+ int prefix_len;
+ char *p;
+ char *buf = strdup(uri); /* need a modifiable copy of the input
+ * URI */
+ char *start = buf;
+ char prevchar = '\0';
+ char *user = NULL;
+ char *host = NULL;
+ bool retval = false;
if (buf == NULL)
{
@@ -4657,7 +4658,7 @@ conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
if (p == host)
{
printfPQExpBuffer(errorMessage,
- libpq_gettext("IPv6 host address may not be empty in URI: %s\n"),
+ libpq_gettext("IPv6 host address may not be empty in URI: %s\n"),
uri);
goto cleanup;
}
@@ -4683,8 +4684,8 @@ conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
host = p;
/*
- * Look for port specifier (colon) or end of host specifier
- * (slash), or query (question mark).
+ * Look for port specifier (colon) or end of host specifier (slash),
+ * or query (question mark).
*/
while (*p && *p != ':' && *p != '/' && *p != '?')
++p;
@@ -4718,7 +4719,7 @@ conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
if (prevchar && prevchar != '?')
{
- const char *dbname = ++p; /* advance past host terminator */
+ const char *dbname = ++p; /* advance past host terminator */
/* Look for query parameters */
while (*p && *p != '?')
@@ -4740,7 +4741,7 @@ conninfo_uri_parse_options(PQconninfoOption *options, const char *uri,
if (prevchar)
{
- ++p; /* advance past terminator */
+ ++p; /* advance past terminator */
if (!conninfo_uri_parse_params(p, options, errorMessage))
goto cleanup;
@@ -4758,7 +4759,7 @@ cleanup:
* Connection URI parameters parser routine
*
* If successful, returns true while connOptions is filled with parsed
- * parameters. Otherwise, returns false and fills errorMessage appropriately.
+ * parameters. Otherwise, returns false and fills errorMessage appropriately.
*
* Destructively modifies 'params' buffer.
*/
@@ -4769,10 +4770,10 @@ conninfo_uri_parse_params(char *params,
{
while (*params)
{
- char *keyword = params;
- char *value = NULL;
- char *p = params;
- bool malloced = false;
+ char *keyword = params;
+ char *value = NULL;
+ char *p = params;
+ bool malloced = false;
/*
* Scan the params string for '=' and '&', marking the end of keyword
@@ -4796,7 +4797,7 @@ conninfo_uri_parse_params(char *params,
}
else if (*p == '&' || *p == '\0')
{
- char prevchar;
+ char prevchar;
/* Cut off value, remember old value */
prevchar = *p;
@@ -4810,6 +4811,7 @@ conninfo_uri_parse_params(char *params,
params);
return false;
}
+
/*
* If not at the end, advance; now pointing to start of the
* next parameter, if any.
@@ -4876,7 +4878,7 @@ conninfo_uri_parse_params(char *params,
printfPQExpBuffer(errorMessage,
libpq_gettext(
- "invalid URI query parameter \"%s\"\n"),
+ "invalid URI query parameter \"%s\"\n"),
keyword);
return false;
}
@@ -4908,8 +4910,8 @@ conninfo_uri_parse_params(char *params,
static char *
conninfo_uri_decode(const char *str, PQExpBuffer errorMessage)
{
- char *buf = malloc(strlen(str) + 1);
- char *p = buf;
+ char *buf = malloc(strlen(str) + 1);
+ char *p = buf;
const char *q = str;
if (buf == NULL)
@@ -4928,20 +4930,20 @@ conninfo_uri_decode(const char *str, PQExpBuffer errorMessage)
}
else
{
- int hi;
- int lo;
- int c;
+ int hi;
+ int lo;
+ int c;
- ++q; /* skip the percent sign itself */
+ ++q; /* skip the percent sign itself */
/*
- * Possible EOL will be caught by the first call to get_hexdigit(),
- * so we never dereference an invalid q pointer.
+ * Possible EOL will be caught by the first call to
+ * get_hexdigit(), so we never dereference an invalid q pointer.
*/
if (!(get_hexdigit(*q++, &hi) && get_hexdigit(*q++, &lo)))
{
printfPQExpBuffer(errorMessage,
- libpq_gettext("invalid percent-encoded token: %s\n"),
+ libpq_gettext("invalid percent-encoded token: %s\n"),
str);
free(buf);
return NULL;
@@ -5025,14 +5027,14 @@ conninfo_storeval(PQconninfoOption *connOptions,
bool uri_decode)
{
PQconninfoOption *option;
- char *value_copy;
+ char *value_copy;
option = conninfo_find(connOptions, keyword);
if (option == NULL)
{
if (!ignoreMissing)
printfPQExpBuffer(errorMessage,
- libpq_gettext("invalid connection option \"%s\"\n"),
+ libpq_gettext("invalid connection option \"%s\"\n"),
keyword);
return NULL;
}
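
The decode loop above (conninfo_uri_decode plus get_hexdigit) is standard percent-decoding; a self-contained sketch follows, with a local hexval() standing in for get_hexdigit(). The real routine additionally rejects an encoded NUL ("%00") and reports failures through the PQExpBuffer rather than silently returning NULL.

#include <stdlib.h>
#include <string.h>

static int
hexval(char c)
{
    if (c >= '0' && c <= '9')
        return c - '0';
    if (c >= 'a' && c <= 'f')
        return c - 'a' + 10;
    if (c >= 'A' && c <= 'F')
        return c - 'A' + 10;
    return -1;                  /* not a hex digit (including '\0' at EOL) */
}

/* sketch: percent-decode str into a freshly malloc'd buffer */
static char *
percent_decode(const char *str)
{
    char       *buf = malloc(strlen(str) + 1);
    char       *p = buf;
    const char *q = str;

    if (buf == NULL)
        return NULL;
    while (*q)
    {
        if (*q != '%')
            *p++ = *q++;
        else
        {
            int hi, lo;

            ++q;                /* skip the percent sign itself */
            /* EOL is caught by the first hexval() call, as in the original */
            if ((hi = hexval(*q++)) < 0 || (lo = hexval(*q++)) < 0)
            {
                free(buf);
                return NULL;    /* invalid percent-encoded token */
            }
            *p++ = (char) ((hi << 4) | lo);
        }
    }
    *p = '\0';
    return buf;
}
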
diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c
index c643b8e69c..badc0b32a8 100644
--- a/src/interfaces/libpq/fe-exec.c
+++ b/src/interfaces/libpq/fe-exec.c
@@ -1052,7 +1052,7 @@ pqStdRowProcessor(PGresult *res, const PGdataValue *columns,
* copy the data over.
*
* Note: on malloc failure, we return -1 leaving *errmsgp still NULL,
- * which caller will take to mean "out of memory". This is preferable to
+ * which caller will take to mean "out of memory". This is preferable to
* trying to set up such a message here, because evidently there's not
* enough memory for gettext() to do anything.
*/
@@ -1063,7 +1063,7 @@ pqStdRowProcessor(PGresult *res, const PGdataValue *columns,
for (i = 0; i < nfields; i++)
{
- int clen = columns[i].len;
+ int clen = columns[i].len;
if (clen < 0)
{
@@ -1743,7 +1743,7 @@ PQgetResult(PGconn *conn)
*
* This is mainly useful for cleaning up after a longjmp out of a row
* processor, when resuming processing of the current query result isn't
- * wanted. Note that this is of little value in an async-style application,
+ * wanted. Note that this is of little value in an async-style application,
* since any preceding calls to PQisBusy would have already called the regular
* row processor.
*/
diff --git a/src/interfaces/libpq/fe-protocol2.c b/src/interfaces/libpq/fe-protocol2.c
index 43f9954dd1..8dbd6b6982 100644
--- a/src/interfaces/libpq/fe-protocol2.c
+++ b/src/interfaces/libpq/fe-protocol2.c
@@ -773,10 +773,11 @@ getRowDescriptions(PGconn *conn)
goto set_error_result;
advance_and_error:
+
/*
- * Discard the failed message. Unfortunately we don't know for sure
- * where the end is, so just throw away everything in the input buffer.
- * This is not very desirable but it's the best we can do in protocol v2.
+ * Discard the failed message. Unfortunately we don't know for sure where
+ * the end is, so just throw away everything in the input buffer. This is
+ * not very desirable but it's the best we can do in protocol v2.
*/
conn->inStart = conn->inEnd;
@@ -829,6 +830,7 @@ getAnotherTuple(PGconn *conn, bool binary)
int nfields = result->numAttributes;
const char *errmsg;
PGdataValue *rowbuf;
+
/* the backend sends us a bitmap of which attributes are null */
char std_bitmap[64]; /* used unless it doesn't fit */
char *bitmap = std_bitmap;
@@ -962,10 +964,11 @@ getAnotherTuple(PGconn *conn, bool binary)
goto set_error_result;
advance_and_error:
+
/*
- * Discard the failed message. Unfortunately we don't know for sure
- * where the end is, so just throw away everything in the input buffer.
- * This is not very desirable but it's the best we can do in protocol v2.
+ * Discard the failed message. Unfortunately we don't know for sure where
+ * the end is, so just throw away everything in the input buffer. This is
+ * not very desirable but it's the best we can do in protocol v2.
*/
conn->inStart = conn->inEnd;
diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c
index a773d7a524..173af2e0a7 100644
--- a/src/interfaces/libpq/fe-protocol3.c
+++ b/src/interfaces/libpq/fe-protocol3.c
@@ -627,7 +627,7 @@ set_error_result:
pqSaveErrorResult(conn);
/*
- * Return zero to allow input parsing to continue. Subsequent "D"
+ * Return zero to allow input parsing to continue. Subsequent "D"
* messages will be ignored until we get to end of data, since an error
* result is already set up.
*/
@@ -829,7 +829,7 @@ set_error_result:
pqSaveErrorResult(conn);
/*
- * Return zero to allow input parsing to continue. Subsequent "D"
+ * Return zero to allow input parsing to continue. Subsequent "D"
* messages will be ignored until we get to end of data, since an error
* result is already set up.
*/
diff --git a/src/interfaces/libpq/fe-secure.c b/src/interfaces/libpq/fe-secure.c
index 5c4d73c3ac..b1ad776a23 100644
--- a/src/interfaces/libpq/fe-secure.c
+++ b/src/interfaces/libpq/fe-secure.c
@@ -361,19 +361,19 @@ rloop:
result_errno == ECONNRESET)
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.\n"));
+ "server closed the connection unexpectedly\n"
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.\n"));
else
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: %s\n"),
+ libpq_gettext("SSL SYSCALL error: %s\n"),
SOCK_STRERROR(result_errno,
- sebuf, sizeof(sebuf)));
+ sebuf, sizeof(sebuf)));
}
else
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: EOF detected\n"));
+ libpq_gettext("SSL SYSCALL error: EOF detected\n"));
/* assume the connection is broken */
result_errno = ECONNRESET;
n = -1;
@@ -392,6 +392,7 @@ rloop:
break;
}
case SSL_ERROR_ZERO_RETURN:
+
/*
* Per OpenSSL documentation, this error code is only returned
* for a clean connection closure, so we should not report it
@@ -415,7 +416,7 @@ rloop:
RESTORE_SIGPIPE(conn, spinfo);
}
else
-#endif /* USE_SSL */
+#endif /* USE_SSL */
{
n = recv(conn->sock, ptr, len, 0);
@@ -440,15 +441,15 @@ rloop:
case ECONNRESET:
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.\n"));
+ "server closed the connection unexpectedly\n"
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.\n"));
break;
#endif
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not receive data from server: %s\n"),
+ libpq_gettext("could not receive data from server: %s\n"),
SOCK_STRERROR(result_errno,
sebuf, sizeof(sebuf)));
break;
@@ -521,19 +522,19 @@ pqsecure_write(PGconn *conn, const void *ptr, size_t len)
result_errno == ECONNRESET)
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.\n"));
+ "server closed the connection unexpectedly\n"
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.\n"));
else
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: %s\n"),
+ libpq_gettext("SSL SYSCALL error: %s\n"),
SOCK_STRERROR(result_errno,
- sebuf, sizeof(sebuf)));
+ sebuf, sizeof(sebuf)));
}
else
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL SYSCALL error: EOF detected\n"));
+ libpq_gettext("SSL SYSCALL error: EOF detected\n"));
/* assume the connection is broken */
result_errno = ECONNRESET;
n = -1;
@@ -552,6 +553,7 @@ pqsecure_write(PGconn *conn, const void *ptr, size_t len)
break;
}
case SSL_ERROR_ZERO_RETURN:
+
/*
* Per OpenSSL documentation, this error code is only returned
* for a clean connection closure, so we should not report it
@@ -573,7 +575,7 @@ pqsecure_write(PGconn *conn, const void *ptr, size_t len)
}
}
else
-#endif /* USE_SSL */
+#endif /* USE_SSL */
{
int flags = 0;
@@ -629,14 +631,14 @@ retry_masked:
#endif
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext(
- "server closed the connection unexpectedly\n"
- "\tThis probably means the server terminated abnormally\n"
- "\tbefore or while processing the request.\n"));
+ "server closed the connection unexpectedly\n"
+ "\tThis probably means the server terminated abnormally\n"
+ "\tbefore or while processing the request.\n"));
break;
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not send data to server: %s\n"),
+ libpq_gettext("could not send data to server: %s\n"),
SOCK_STRERROR(result_errno,
sebuf, sizeof(sebuf)));
break;
@@ -1346,11 +1348,12 @@ initialize_SSL(PGconn *conn)
}
/*
- * If the OpenSSL version used supports it (from 1.0.0 on)
- * and the user requested it, disable SSL compression.
+ * If the OpenSSL version used supports it (from 1.0.0 on) and the user
+ * requested it, disable SSL compression.
*/
#ifdef SSL_OP_NO_COMPRESSION
- if (conn->sslcompression && conn->sslcompression[0] == '0') {
+ if (conn->sslcompression && conn->sslcompression[0] == '0')
+ {
SSL_set_options(conn->ssl, SSL_OP_NO_COMPRESSION);
}
#endif
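
From the client side, the new branch above is controlled per connection; a one-line sketch, assuming the option keyword is spelled "sslcompression" as the conn->sslcompression field later in this patch suggests, with a made-up host:

/* sketch: open an SSL connection with compression explicitly disabled */
PGconn *conn = PQconnectdb("host=db.example.net sslmode=require sslcompression=0");
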
diff --git a/src/interfaces/libpq/libpq-fe.h b/src/interfaces/libpq/libpq-fe.h
index 32b466e245..67db6119bb 100644
--- a/src/interfaces/libpq/libpq-fe.h
+++ b/src/interfaces/libpq/libpq-fe.h
@@ -163,7 +163,7 @@ typedef struct pgNotify
/* Function type for row-processor callback */
typedef int (*PQrowProcessor) (PGresult *res, const PGdataValue *columns,
- const char **errmsgp, void *param);
+ const char **errmsgp, void *param);
/* Function types for notice-handling callbacks */
typedef void (*PQnoticeReceiver) (void *arg, const PGresult *res);
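
A hedged sketch of a callback matching the PQrowProcessor typedef above. The return convention is inferred from the pqStdRowProcessor comments earlier in this patch (-1 with *errmsgp left NULL is read as out-of-memory); how the callback gets installed into pg_conn's rowProcessor field is not shown here and is left out.

/* sketch: a row processor that tallies rows instead of storing them */
static int
count_rows_processor(PGresult *res, const PGdataValue *columns,
                     const char **errmsgp, void *param)
{
    int *counter = (int *) param;

    (void) res;
    (void) columns;
    (void) errmsgp;             /* set only when reporting an error */
    (*counter)++;
    return 1;                   /* nonzero: row handled successfully */
}
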
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index 0b6e6769c0..4bc89269fa 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -310,7 +310,7 @@ struct pg_conn
char *keepalives_count; /* maximum number of TCP keepalive
* retransmits */
char *sslmode; /* SSL mode (require,prefer,allow,disable) */
- char *sslcompression; /* SSL compression (0 or 1) */
+ char *sslcompression; /* SSL compression (0 or 1) */
char *sslkey; /* client key filename */
char *sslcert; /* client certificate filename */
char *sslrootcert; /* root certificate filename */
@@ -326,7 +326,7 @@ struct pg_conn
/* Callback procedure for per-row processing */
PQrowProcessor rowProcessor; /* function pointer */
- void *rowProcessorParam; /* passthrough argument */
+ void *rowProcessorParam; /* passthrough argument */
/* Callback procedures for notice message processing */
PGNoticeHooks noticeHooks;
diff --git a/src/interfaces/libpq/test/uri-regress.c b/src/interfaces/libpq/test/uri-regress.c
index 17fcce9fb2..a0ba9e4583 100644
--- a/src/interfaces/libpq/test/uri-regress.c
+++ b/src/interfaces/libpq/test/uri-regress.c
@@ -1,8 +1,8 @@
/*
* uri-regress.c
- * A test program for libpq URI format
+ * A test program for libpq URI format
*
- * This is a helper for libpq conninfo regression testing. It takes a single
+ * This is a helper for libpq conninfo regression testing. It takes a single
* conninfo string as a parameter, parses it using PQconninfoParse, and then
* prints out the values from the parsed PQconninfoOption struct that differ
* from the defaults (obtained from PQconndefaults).
@@ -10,7 +10,7 @@
* Portions Copyright (c) 2012, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * src/interfaces/libpq/test/uri-regress.c
+ * src/interfaces/libpq/test/uri-regress.c
*/
#include "postgres_fe.h"
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 7a92f3d8e2..db584c4e7e 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -1259,19 +1259,19 @@ plperl_sv_to_datum(SV *sv, Oid typid, int32 typmod,
if (!type_is_rowtype(typid))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot convert Perl hash to non-composite type %s",
- format_type_be(typid))));
+ errmsg("cannot convert Perl hash to non-composite type %s",
+ format_type_be(typid))));
td = lookup_rowtype_tupdesc_noerror(typid, typmod, true);
if (td == NULL)
{
/* Try to look it up based on our result type */
if (fcinfo == NULL ||
- get_call_result_type(fcinfo, NULL, &td) != TYPEFUNC_COMPOSITE)
+ get_call_result_type(fcinfo, NULL, &td) != TYPEFUNC_COMPOSITE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("function returning record called in context "
- "that cannot accept type record")));
+ errmsg("function returning record called in context "
+ "that cannot accept type record")));
}
ret = plperl_hash_to_datum(sv, td);
diff --git a/src/pl/plperl/plperl_helpers.h b/src/pl/plperl/plperl_helpers.h
index 6b714e52a1..1b6648be1d 100644
--- a/src/pl/plperl/plperl_helpers.h
+++ b/src/pl/plperl/plperl_helpers.h
@@ -7,15 +7,15 @@
static inline char *
utf_u2e(const char *utf8_str, size_t len)
{
- int enc = GetDatabaseEncoding();
+ int enc = GetDatabaseEncoding();
char *ret = (char *) pg_do_encoding_conversion((unsigned char *) utf8_str, len, PG_UTF8, enc);
/*
- * when we are a PG_UTF8 or SQL_ASCII database
- * pg_do_encoding_conversion() will not do any conversion or
- * verification. we need to do it manually instead.
- */
+ * when we are a PG_UTF8 or SQL_ASCII database pg_do_encoding_conversion()
+ * will not do any conversion or verification. we need to do it manually
+ * instead.
+ */
if (enc == PG_UTF8 || enc == PG_SQL_ASCII)
pg_verify_mbstr_len(PG_UTF8, utf8_str, len, false);
@@ -45,7 +45,8 @@ utf_e2u(const char *str)
static inline char *
sv2cstr(SV *sv)
{
- char *val, *res;
+ char *val,
+ *res;
STRLEN len;
/*
@@ -54,23 +55,26 @@ sv2cstr(SV *sv)
* SvPVutf8() croaks nastily on certain things, like typeglobs and
* readonly objects such as $^V. That's a perl bug - it's not supposed to
* happen. To avoid crashing the backend, we make a copy of the sv before
- * passing it to SvPVutf8(). The copy is garbage collected
- * when we're done with it.
+ * passing it to SvPVutf8(). The copy is garbage collected when we're done
+ * with it.
*/
if (SvREADONLY(sv) ||
isGV_with_GP(sv) ||
(SvTYPE(sv) > SVt_PVLV && SvTYPE(sv) != SVt_PVFM))
sv = newSVsv(sv);
else
- /* increase the reference count so we can just SvREFCNT_dec() it when
- * we are done */
+
+ /*
+ * increase the reference count so we can just SvREFCNT_dec() it when
+ * we are done
+ */
SvREFCNT_inc_simple_void(sv);
val = SvPVutf8(sv, len);
/*
- * we use perl's length in the event we had an embedded null byte to ensure
- * we error out properly
+ * we use perl's length in the event we had an embedded null byte to
+ * ensure we error out properly
*/
res = utf_u2e(val, len);
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index d43b8e0b1a..5d2f818dac 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -77,7 +77,7 @@ typedef struct
} ExceptionLabelMap;
static const ExceptionLabelMap exception_label_map[] = {
-#include "plerrcodes.h" /* pgrminclude ignore */
+#include "plerrcodes.h" /* pgrminclude ignore */
{NULL, 0}
};
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 95e74b38dc..8ca791ce3f 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -881,8 +881,8 @@ copy_plpgsql_datum(PLpgSQL_datum *datum)
/*
* These datum records are read-only at runtime, so no need to
- * copy them (well, ARRAYELEM contains some cached type data,
- * but we'd just as soon centralize the caching anyway)
+ * copy them (well, ARRAYELEM contains some cached type data, but
+ * we'd just as soon centralize the caching anyway)
*/
result = datum;
break;
@@ -1441,8 +1441,8 @@ exec_stmt_getdiag(PLpgSQL_execstate *estate, PLpgSQL_stmt_getdiag *stmt)
*/
if (stmt->is_stacked && estate->cur_error == NULL)
ereport(ERROR,
- (errcode(ERRCODE_STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER),
- errmsg("GET STACKED DIAGNOSTICS cannot be used outside an exception handler")));
+ (errcode(ERRCODE_STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER),
+ errmsg("GET STACKED DIAGNOSTICS cannot be used outside an exception handler")));
foreach(lc, stmt->diag_items)
{
@@ -1481,7 +1481,7 @@ exec_stmt_getdiag(PLpgSQL_execstate *estate, PLpgSQL_stmt_getdiag *stmt)
case PLPGSQL_GETDIAG_RETURNED_SQLSTATE:
exec_assign_c_string(estate, var,
- unpack_sql_state(estate->cur_error->sqlerrcode));
+ unpack_sql_state(estate->cur_error->sqlerrcode));
break;
case PLPGSQL_GETDIAG_MESSAGE_TEXT:
@@ -2676,8 +2676,8 @@ exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt)
ReThrowError(estate->cur_error);
/* oops, we're not inside a handler */
ereport(ERROR,
- (errcode(ERRCODE_STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER),
- errmsg("RAISE without parameters cannot be used outside an exception handler")));
+ (errcode(ERRCODE_STACKED_DIAGNOSTICS_ACCESSED_WITHOUT_ACTIVE_HANDLER),
+ errmsg("RAISE without parameters cannot be used outside an exception handler")));
}
if (stmt->condname)
@@ -3036,7 +3036,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
foreach(l2, plansource->query_list)
{
- Query *q = (Query *) lfirst(l2);
+ Query *q = (Query *) lfirst(l2);
Assert(IsA(q, Query));
if (q->canSetTag)
@@ -3288,9 +3288,9 @@ exec_stmt_dynexecute(PLpgSQL_execstate *estate,
* a functional limitation because CREATE TABLE AS is allowed.
*/
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("EXECUTE of SELECT ... INTO is not implemented"),
- errhint("You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... AS instead.")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("EXECUTE of SELECT ... INTO is not implemented"),
+ errhint("You might want to use EXECUTE ... INTO or EXECUTE CREATE TABLE ... AS instead.")));
break;
/* Some SPI errors deserve specific error messages */
@@ -3771,8 +3771,8 @@ exec_assign_value(PLpgSQL_execstate *estate,
/*
* If type is by-reference, copy the new value (which is
- * probably in the eval_econtext) into the procedure's
- * memory context.
+ * probably in the eval_econtext) into the procedure's memory
+ * context.
*/
if (!var->datatype->typbyval && !*isNull)
newvalue = datumCopy(newvalue,
@@ -4051,7 +4051,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
if (!OidIsValid(elemtypoid))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("subscripted object is not an array")));
+ errmsg("subscripted object is not an array")));
/* Collect needed data about the types */
arraytyplen = get_typlen(arraytypoid);
@@ -4124,7 +4124,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
* array, either, so that's a no-op too. This is all ugly but
* corresponds to the current behavior of ExecEvalArrayRef().
*/
- if (arrayelem->arraytyplen > 0 && /* fixed-length array? */
+ if (arrayelem->arraytyplen > 0 && /* fixed-length array? */
(oldarrayisnull || *isNull))
return;
@@ -5358,7 +5358,7 @@ convert_value_to_string(PLpgSQL_execstate *estate, Datum value, Oid valtype)
*
* Note: the estate's eval_econtext is used for temporary storage, and may
* also contain the result Datum if we have to do a conversion to a pass-
- * by-reference data type. Be sure to do an exec_eval_cleanup() call when
+ * by-reference data type. Be sure to do an exec_eval_cleanup() call when
* done with the result.
* ----------
*/
@@ -5708,8 +5708,8 @@ exec_simple_check_plan(PLpgSQL_expr *expr)
/*
* Initialize to "not simple", and remember the plan generation number we
- * last checked. (If we don't get as far as obtaining a plan to check,
- * we just leave expr_simple_generation set to 0.)
+ * last checked. (If we don't get as far as obtaining a plan to check, we
+ * just leave expr_simple_generation set to 0.)
*/
expr->expr_simple_expr = NULL;
expr->expr_simple_generation = 0;
@@ -5722,12 +5722,12 @@ exec_simple_check_plan(PLpgSQL_expr *expr)
plansource = (CachedPlanSource *) linitial(expr->plan->plancache_list);
/*
- * Do some checking on the analyzed-and-rewritten form of the query.
- * These checks are basically redundant with the tests in
+ * Do some checking on the analyzed-and-rewritten form of the query. These
+ * checks are basically redundant with the tests in
* exec_simple_recheck_plan, but the point is to avoid building a plan if
- * possible. Since this function is only
- * called immediately after creating the CachedPlanSource, we need not
- * worry about the query being stale.
+ * possible. Since this function is only called immediately after
+ * creating the CachedPlanSource, we need not worry about the query being
+ * stale.
*/
/*
diff --git a/src/pl/plpython/plpy_cursorobject.c b/src/pl/plpython/plpy_cursorobject.c
index e8240b63c9..910e63b199 100644
--- a/src/pl/plpython/plpy_cursorobject.c
+++ b/src/pl/plpython/plpy_cursorobject.c
@@ -40,7 +40,7 @@ static PyMethodDef PLy_cursor_methods[] = {
static PyTypeObject PLy_CursorType = {
PyVarObject_HEAD_INIT(NULL, 0)
- "PLyCursor", /* tp_name */
+ "PLyCursor", /* tp_name */
sizeof(PLyCursorObject), /* tp_size */
0, /* tp_itemsize */
@@ -103,7 +103,7 @@ PLy_cursor(PyObject *self, PyObject *args)
static PyObject *
PLy_cursor_query(const char *query)
{
- PLyCursorObject *cursor;
+ PLyCursorObject *cursor;
volatile MemoryContext oldcontext;
volatile ResourceOwner oldowner;
@@ -120,7 +120,7 @@ PLy_cursor_query(const char *query)
PG_TRY();
{
- PLyExecutionContext *exec_ctx = PLy_current_execution_context();
+ PLyExecutionContext *exec_ctx = PLy_current_execution_context();
SPIPlanPtr plan;
Portal portal;
@@ -157,7 +157,7 @@ PLy_cursor_query(const char *query)
static PyObject *
PLy_cursor_plan(PyObject *ob, PyObject *args)
{
- PLyCursorObject *cursor;
+ PLyCursorObject *cursor;
volatile int nargs;
int i;
PLyPlanObject *plan;
@@ -187,8 +187,8 @@ PLy_cursor_plan(PyObject *ob, PyObject *args)
PLy_elog(ERROR, "could not execute plan");
sv = PyString_AsString(so);
PLy_exception_set_plural(PyExc_TypeError,
- "Expected sequence of %d argument, got %d: %s",
- "Expected sequence of %d arguments, got %d: %s",
+ "Expected sequence of %d argument, got %d: %s",
+ "Expected sequence of %d arguments, got %d: %s",
plan->nargs,
plan->nargs, nargs, sv);
Py_DECREF(so);
@@ -305,7 +305,7 @@ static void
PLy_cursor_dealloc(PyObject *arg)
{
PLyCursorObject *cursor;
- Portal portal;
+ Portal portal;
cursor = (PLyCursorObject *) arg;
@@ -328,10 +328,10 @@ static PyObject *
PLy_cursor_iternext(PyObject *self)
{
PLyCursorObject *cursor;
- PyObject *ret;
+ PyObject *ret;
volatile MemoryContext oldcontext;
volatile ResourceOwner oldowner;
- Portal portal;
+ Portal portal;
cursor = (PLyCursorObject *) self;
@@ -391,11 +391,11 @@ static PyObject *
PLy_cursor_fetch(PyObject *self, PyObject *args)
{
PLyCursorObject *cursor;
- int count;
- PLyResultObject *ret;
+ int count;
+ PLyResultObject *ret;
volatile MemoryContext oldcontext;
volatile ResourceOwner oldowner;
- Portal portal;
+ Portal portal;
if (!PyArg_ParseTuple(args, "i", &count))
return NULL;
@@ -440,7 +440,7 @@ PLy_cursor_fetch(PyObject *self, PyObject *args)
if (SPI_processed != 0)
{
- int i;
+ int i;
Py_DECREF(ret->rows);
ret->rows = PyList_New(SPI_processed);
@@ -450,6 +450,7 @@ PLy_cursor_fetch(PyObject *self, PyObject *args)
PyObject *row = PLyDict_FromTuple(&cursor->result,
SPI_tuptable->vals[i],
SPI_tuptable->tupdesc);
+
PyList_SetItem(ret->rows, i, row);
}
}
@@ -477,12 +478,12 @@ PLy_cursor_close(PyObject *self, PyObject *unused)
if (!cursor->closed)
{
- Portal portal = GetPortalByName(cursor->portalname);
+ Portal portal = GetPortalByName(cursor->portalname);
if (!PortalIsValid(portal))
{
PLy_exception_set(PyExc_ValueError,
- "closing a cursor in an aborted subtransaction");
+ "closing a cursor in an aborted subtransaction");
return NULL;
}
diff --git a/src/pl/plpython/plpy_cursorobject.h b/src/pl/plpython/plpy_cursorobject.h
index 1dd9d48fd5..3c28f4f8e7 100644
--- a/src/pl/plpython/plpy_cursorobject.h
+++ b/src/pl/plpython/plpy_cursorobject.h
@@ -11,7 +11,7 @@
typedef struct PLyCursorObject
{
PyObject_HEAD
- char *portalname;
+ char *portalname;
PLyTypeInfo result;
bool closed;
} PLyCursorObject;
@@ -19,4 +19,4 @@ typedef struct PLyCursorObject
extern void PLy_cursor_init_type(void);
extern PyObject *PLy_cursor(PyObject *self, PyObject *args);
-#endif /* PLPY_CURSOROBJECT_H */
+#endif /* PLPY_CURSOROBJECT_H */
diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c
index f7d321289d..c375ac07fa 100644
--- a/src/pl/plpython/plpy_elog.c
+++ b/src/pl/plpython/plpy_elog.c
@@ -16,15 +16,15 @@
#include "plpy_procedure.h"
-PyObject *PLy_exc_error = NULL;
-PyObject *PLy_exc_fatal = NULL;
-PyObject *PLy_exc_spi_error = NULL;
+PyObject *PLy_exc_error = NULL;
+PyObject *PLy_exc_fatal = NULL;
+PyObject *PLy_exc_spi_error = NULL;
static void PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth);
static void PLy_get_spi_error_data(PyObject *exc, int *sqlerrcode, char **detail,
- char **hint, char **query, int *position);
-static char * get_source_line(const char *src, int lineno);
+ char **hint, char **query, int *position);
+static char *get_source_line(const char *src, int lineno);
/*
@@ -46,7 +46,7 @@ PLy_elog(int elevel, const char *fmt,...)
*val,
*tb;
const char *primary = NULL;
- int sqlerrcode = 0;
+ int sqlerrcode = 0;
char *detail = NULL;
char *hint = NULL;
char *query = NULL;
@@ -98,7 +98,7 @@ PLy_elog(int elevel, const char *fmt,...)
{
ereport(elevel,
(errcode(sqlerrcode ? sqlerrcode : ERRCODE_INTERNAL_ERROR),
- errmsg_internal("%s", primary ? primary : "no exception data"),
+ errmsg_internal("%s", primary ? primary : "no exception data"),
(detail) ? errdetail_internal("%s", detail) : 0,
(tb_depth > 0 && tbmsg) ? errcontext("%s", tbmsg) : 0,
(hint) ? errhint("%s", hint) : 0,
@@ -256,7 +256,7 @@ PLy_traceback(char **xmsg, char **tbmsg, int *tb_depth)
/* The first frame always points at <module>, skip it. */
if (*tb_depth > 0)
{
- PLyExecutionContext *exec_ctx = PLy_current_execution_context();
+ PLyExecutionContext *exec_ctx = PLy_current_execution_context();
char *proname;
char *fname;
char *line;
diff --git a/src/pl/plpython/plpy_elog.h b/src/pl/plpython/plpy_elog.h
index f7223b0056..6b8d485625 100644
--- a/src/pl/plpython/plpy_elog.h
+++ b/src/pl/plpython/plpy_elog.h
@@ -10,15 +10,18 @@ extern PyObject *PLy_exc_error;
extern PyObject *PLy_exc_fatal;
extern PyObject *PLy_exc_spi_error;
-extern void PLy_elog(int elevel, const char *fmt,...)
+extern void
+PLy_elog(int elevel, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
-extern void PLy_exception_set(PyObject *exc, const char *fmt,...)
+extern void
+PLy_exception_set(PyObject *exc, const char *fmt,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
-extern void PLy_exception_set_plural(PyObject *exc, const char *fmt_singular, const char *fmt_plural,
- unsigned long n,...)
+extern void
+PLy_exception_set_plural(PyObject *exc, const char *fmt_singular, const char *fmt_plural,
+ unsigned long n,...)
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 5)))
__attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 5)));
-#endif /* PLPY_ELOG_H */
+#endif /* PLPY_ELOG_H */
diff --git a/src/pl/plpython/plpy_exec.c b/src/pl/plpython/plpy_exec.c
index ad30fc0065..96ee26c35c 100644
--- a/src/pl/plpython/plpy_exec.c
+++ b/src/pl/plpython/plpy_exec.c
@@ -30,9 +30,9 @@ static void PLy_function_delete_args(PLyProcedure *proc);
static void plpython_return_error_callback(void *arg);
static PyObject *PLy_trigger_build_args(FunctionCallInfo fcinfo, PLyProcedure *proc,
- HeapTuple *rv);
+ HeapTuple *rv);
static HeapTuple PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd,
- TriggerData *tdata, HeapTuple otup);
+ TriggerData *tdata, HeapTuple otup);
static void plpython_trigger_error_callback(void *arg);
static PyObject *PLy_procedure_call(PLyProcedure *proc, char *kargs, PyObject *vargs);
@@ -180,7 +180,7 @@ PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc)
}
else if (proc->result.is_rowtype >= 1)
{
- TupleDesc desc;
+ TupleDesc desc;
/* make sure it's not an unnamed record */
Assert((proc->result.out.d.typoid == RECORDOID &&
diff --git a/src/pl/plpython/plpy_exec.h b/src/pl/plpython/plpy_exec.h
index f3dec074c1..439a1d801f 100644
--- a/src/pl/plpython/plpy_exec.h
+++ b/src/pl/plpython/plpy_exec.h
@@ -10,4 +10,4 @@
extern Datum PLy_exec_function(FunctionCallInfo fcinfo, PLyProcedure *proc);
extern HeapTuple PLy_exec_trigger(FunctionCallInfo fcinfo, PLyProcedure *proc);
-#endif /* PLPY_EXEC_H */
+#endif /* PLPY_EXEC_H */
diff --git a/src/pl/plpython/plpy_main.c b/src/pl/plpython/plpy_main.c
index c126db995a..494ec37ea7 100644
--- a/src/pl/plpython/plpy_main.c
+++ b/src/pl/plpython/plpy_main.c
@@ -73,7 +73,7 @@ static void PLy_pop_execution_context(void);
static const int plpython_python_version = PY_MAJOR_VERSION;
/* initialize global variables */
-PyObject *PLy_interp_globals = NULL;
+PyObject *PLy_interp_globals = NULL;
/* this doesn't need to be global; use PLy_current_execution_context() */
static PLyExecutionContext *PLy_execution_contexts = NULL;
@@ -284,8 +284,8 @@ plpython_inline_handler(PG_FUNCTION_ARGS)
* Push execution context onto stack. It is important that this get
* popped again, so avoid putting anything that could throw error between
* here and the PG_TRY. (plpython_inline_error_callback doesn't currently
- * need the stack entry, but for consistency with plpython_call_handler
- * we do it in this order.)
+ * need the stack entry, but for consistency with plpython_call_handler we
+ * do it in this order.)
*/
exec_ctx = PLy_push_execution_context();
@@ -330,7 +330,8 @@ plpython2_inline_handler(PG_FUNCTION_ARGS)
}
#endif /* PY_MAJOR_VERSION < 3 */
-static bool PLy_procedure_is_trigger(Form_pg_proc procStruct)
+static bool
+PLy_procedure_is_trigger(Form_pg_proc procStruct)
{
return (procStruct->prorettype == TRIGGEROID ||
(procStruct->prorettype == OPAQUEOID &&
@@ -365,7 +366,7 @@ PLy_current_execution_context(void)
static PLyExecutionContext *
PLy_push_execution_context(void)
{
- PLyExecutionContext *context = PLy_malloc(sizeof(PLyExecutionContext));
+ PLyExecutionContext *context = PLy_malloc(sizeof(PLyExecutionContext));
context->curr_proc = NULL;
context->scratch_ctx = AllocSetContextCreate(TopTransactionContext,
@@ -381,7 +382,7 @@ PLy_push_execution_context(void)
static void
PLy_pop_execution_context(void)
{
- PLyExecutionContext *context = PLy_execution_contexts;
+ PLyExecutionContext *context = PLy_execution_contexts;
if (context == NULL)
elog(ERROR, "no Python function is currently executing");
diff --git a/src/pl/plpython/plpy_main.h b/src/pl/plpython/plpy_main.h
index cb214bf83c..b13e2c21a1 100644
--- a/src/pl/plpython/plpy_main.h
+++ b/src/pl/plpython/plpy_main.h
@@ -17,12 +17,12 @@ extern PyObject *PLy_interp_globals;
*/
typedef struct PLyExecutionContext
{
- PLyProcedure *curr_proc; /* the currently executing procedure */
- MemoryContext scratch_ctx; /* a context for things like type I/O */
+ PLyProcedure *curr_proc; /* the currently executing procedure */
+ MemoryContext scratch_ctx; /* a context for things like type I/O */
struct PLyExecutionContext *next; /* previous stack level */
} PLyExecutionContext;
/* Get the current execution context */
extern PLyExecutionContext *PLy_current_execution_context(void);
-#endif /* PLPY_MAIN_H */
+#endif /* PLPY_MAIN_H */
diff --git a/src/pl/plpython/plpy_planobject.h b/src/pl/plpython/plpy_planobject.h
index febc5c25ef..7a89ffc2c1 100644
--- a/src/pl/plpython/plpy_planobject.h
+++ b/src/pl/plpython/plpy_planobject.h
@@ -23,4 +23,4 @@ extern void PLy_plan_init_type(void);
extern PyObject *PLy_plan_new(void);
extern bool is_PLyPlanObject(PyObject *ob);
-#endif /* PLPY_PLANOBJECT_H */
+#endif /* PLPY_PLANOBJECT_H */
diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c
index bc0b9e6f84..37ea2a490d 100644
--- a/src/pl/plpython/plpy_plpymodule.c
+++ b/src/pl/plpython/plpy_plpymodule.c
@@ -21,7 +21,7 @@
#include "plpy_subxactobject.h"
-HTAB *PLy_spi_exceptions = NULL;
+HTAB *PLy_spi_exceptions = NULL;
static void PLy_add_exceptions(PyObject *plpy);
@@ -137,7 +137,7 @@ PyInit_plpy(void)
return m;
}
-#endif /* PY_MAJOR_VERSION >= 3 */
+#endif /* PY_MAJOR_VERSION >= 3 */
void
PLy_init_plpy(void)
@@ -145,6 +145,7 @@ PLy_init_plpy(void)
PyObject *main_mod,
*main_dict,
*plpy_mod;
+
#if PY_MAJOR_VERSION < 3
PyObject *plpy;
#endif
diff --git a/src/pl/plpython/plpy_plpymodule.h b/src/pl/plpython/plpy_plpymodule.h
index 930ecfd1b1..ee089b78a1 100644
--- a/src/pl/plpython/plpy_plpymodule.h
+++ b/src/pl/plpython/plpy_plpymodule.h
@@ -16,4 +16,4 @@ PyMODINIT_FUNC PyInit_plpy(void);
#endif
extern void PLy_init_plpy(void);
-#endif /* PLPY_PLPYMODULE_H */
+#endif /* PLPY_PLPYMODULE_H */
diff --git a/src/pl/plpython/plpy_procedure.h b/src/pl/plpython/plpy_procedure.h
index c7405e064e..40a0314cdf 100644
--- a/src/pl/plpython/plpy_procedure.h
+++ b/src/pl/plpython/plpy_procedure.h
@@ -45,4 +45,4 @@ extern PLyProcedure *PLy_procedure_get(Oid fn_oid, bool is_trigger);
extern void PLy_procedure_compile(PLyProcedure *proc, const char *src);
extern void PLy_procedure_delete(PLyProcedure *proc);
-#endif /* PLPY_PROCEDURE_H */
+#endif /* PLPY_PROCEDURE_H */
diff --git a/src/pl/plpython/plpy_resultobject.c b/src/pl/plpython/plpy_resultobject.c
index deaddb7980..6b01e717c4 100644
--- a/src/pl/plpython/plpy_resultobject.c
+++ b/src/pl/plpython/plpy_resultobject.c
@@ -24,7 +24,7 @@ static PyObject *PLy_result_slice(PyObject *arg, Py_ssize_t lidx, Py_ssize_t hid
static int PLy_result_ass_item(PyObject *arg, Py_ssize_t idx, PyObject *item);
static int PLy_result_ass_slice(PyObject *rg, Py_ssize_t lidx, Py_ssize_t hidx, PyObject *slice);
static PyObject *PLy_result_subscript(PyObject *arg, PyObject *item);
-static int PLy_result_ass_subscript(PyObject* self, PyObject* item, PyObject* value);
+static int PLy_result_ass_subscript(PyObject *self, PyObject *item, PyObject *value);
static char PLy_result_doc[] = {
"Results of a PostgreSQL query"
@@ -263,7 +263,7 @@ PLy_result_ass_slice(PyObject *arg, Py_ssize_t lidx, Py_ssize_t hidx, PyObject *
static PyObject *
PLy_result_subscript(PyObject *arg, PyObject *item)
{
- PLyResultObject *ob = (PLyResultObject *) arg;
+ PLyResultObject *ob = (PLyResultObject *) arg;
return PyObject_GetItem(ob->rows, item);
}
@@ -271,7 +271,7 @@ PLy_result_subscript(PyObject *arg, PyObject *item)
static int
PLy_result_ass_subscript(PyObject *arg, PyObject *item, PyObject *value)
{
- PLyResultObject *ob = (PLyResultObject *) arg;
+ PLyResultObject *ob = (PLyResultObject *) arg;
return PyObject_SetItem(ob->rows, item, value);
}
diff --git a/src/pl/plpython/plpy_resultobject.h b/src/pl/plpython/plpy_resultobject.h
index c5ba999887..314510c40f 100644
--- a/src/pl/plpython/plpy_resultobject.h
+++ b/src/pl/plpython/plpy_resultobject.h
@@ -13,7 +13,8 @@ typedef struct PLyResultObject
PyObject_HEAD
/* HeapTuple *tuples; */
PyObject *nrows; /* number of rows returned by query */
- PyObject *rows; /* data rows, or empty list if no data returned */
+ PyObject *rows; /* data rows, or empty list if no data
+ * returned */
PyObject *status; /* query status, SPI_OK_*, or SPI_ERR_* */
TupleDesc tupdesc;
} PLyResultObject;
@@ -21,4 +22,4 @@ typedef struct PLyResultObject
extern void PLy_result_init_type(void);
extern PyObject *PLy_result_new(void);
-#endif /* PLPY_RESULTOBJECT_H */
+#endif /* PLPY_RESULTOBJECT_H */
diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c
index 4bc3d96d58..00156e6658 100644
--- a/src/pl/plpython/plpy_spi.c
+++ b/src/pl/plpython/plpy_spi.c
@@ -350,7 +350,7 @@ PLy_spi_execute_query(char *query, long limit)
PG_TRY();
{
- PLyExecutionContext *exec_ctx = PLy_current_execution_context();
+ PLyExecutionContext *exec_ctx = PLy_current_execution_context();
pg_verifymbstr(query, strlen(query), false);
rv = SPI_execute(query, exec_ctx->curr_proc->fn_readonly, limit);
@@ -456,22 +456,22 @@ PLy_spi_execute_fetch_result(SPITupleTable *tuptable, int rows, int status)
*
* Usage:
*
- * MemoryContext oldcontext = CurrentMemoryContext;
- * ResourceOwner oldowner = CurrentResourceOwner;
+ * MemoryContext oldcontext = CurrentMemoryContext;
+ * ResourceOwner oldowner = CurrentResourceOwner;
*
- * PLy_spi_subtransaction_begin(oldcontext, oldowner);
- * PG_TRY();
- * {
- * <call SPI functions>
- * PLy_spi_subtransaction_commit(oldcontext, oldowner);
- * }
- * PG_CATCH();
- * {
- * <do cleanup>
- * PLy_spi_subtransaction_abort(oldcontext, oldowner);
- * return NULL;
- * }
- * PG_END_TRY();
+ * PLy_spi_subtransaction_begin(oldcontext, oldowner);
+ * PG_TRY();
+ * {
+ * <call SPI functions>
+ * PLy_spi_subtransaction_commit(oldcontext, oldowner);
+ * }
+ * PG_CATCH();
+ * {
+ * <do cleanup>
+ * PLy_spi_subtransaction_abort(oldcontext, oldowner);
+ * return NULL;
+ * }
+ * PG_END_TRY();
*
* These utilities take care of restoring connection to the SPI manager and
* setting a Python exception in case of an abort.
@@ -493,8 +493,8 @@ PLy_spi_subtransaction_commit(MemoryContext oldcontext, ResourceOwner oldowner)
CurrentResourceOwner = oldowner;
/*
- * AtEOSubXact_SPI() should not have popped any SPI context, but just
- * in case it did, make sure we remain connected.
+ * AtEOSubXact_SPI() should not have popped any SPI context, but just in
+ * case it did, make sure we remain connected.
*/
SPI_restore_connection();
}
@@ -517,8 +517,8 @@ PLy_spi_subtransaction_abort(MemoryContext oldcontext, ResourceOwner oldowner)
CurrentResourceOwner = oldowner;
/*
- * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will have
- * left us in a disconnected state. We need this hack to return to
+ * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will
+ * have left us in a disconnected state. We need this hack to return to
* connected state.
*/
SPI_restore_connection();
diff --git a/src/pl/plpython/plpy_spi.h b/src/pl/plpython/plpy_spi.h
index f8d31638ec..b0427947ef 100644
--- a/src/pl/plpython/plpy_spi.h
+++ b/src/pl/plpython/plpy_spi.h
@@ -22,4 +22,4 @@ extern void PLy_spi_subtransaction_begin(MemoryContext oldcontext, ResourceOwner
extern void PLy_spi_subtransaction_commit(MemoryContext oldcontext, ResourceOwner oldowner);
extern void PLy_spi_subtransaction_abort(MemoryContext oldcontext, ResourceOwner oldowner);
-#endif /* PLPY_SPI_H */
+#endif /* PLPY_SPI_H */
diff --git a/src/pl/plpython/plpy_subxactobject.c b/src/pl/plpython/plpy_subxactobject.c
index 9feeddb723..2e7ec4fdab 100644
--- a/src/pl/plpython/plpy_subxactobject.c
+++ b/src/pl/plpython/plpy_subxactobject.c
@@ -16,7 +16,7 @@
#include "plpy_elog.h"
-List *explicit_subtransactions = NIL;
+List *explicit_subtransactions = NIL;
static void PLy_subtransaction_dealloc(PyObject *subxact);
diff --git a/src/pl/plpython/plpy_subxactobject.h b/src/pl/plpython/plpy_subxactobject.h
index 7e3002fc2f..b8591c7bf0 100644
--- a/src/pl/plpython/plpy_subxactobject.h
+++ b/src/pl/plpython/plpy_subxactobject.h
@@ -26,4 +26,4 @@ typedef struct PLySubtransactionData
extern void PLy_subtransaction_init_type(void);
extern PyObject *PLy_subtransaction_new(PyObject *self, PyObject *unused);
-#endif /* PLPY_SUBXACTOBJECT */
+#endif /* PLPY_SUBXACTOBJECT */
diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c
index c5f6c4e5a3..2cc7bbbd4d 100644
--- a/src/pl/plpython/plpy_typeio.c
+++ b/src/pl/plpython/plpy_typeio.c
@@ -293,8 +293,8 @@ PLyDict_FromTuple(PLyTypeInfo *info, HeapTuple tuple, TupleDesc desc)
PG_TRY();
{
/*
- * Do the work in the scratch context to avoid leaking memory from
- * the datatype output function calls.
+ * Do the work in the scratch context to avoid leaking memory from the
+ * datatype output function calls.
*/
MemoryContextSwitchTo(exec_ctx->scratch_ctx);
for (i = 0; i < info->in.r.natts; i++)
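For reference, the scratch-context discipline the comment above describes boils down to this sketch; "scratch_ctx" stands in for exec_ctx->scratch_ctx, and do_output_calls is a hypothetical stand-in for the datatype output function calls:

	/* Sketch of the scratch-context pattern, assuming scratch_ctx is set up. */
	static void
	with_scratch_context(MemoryContext scratch_ctx)
	{
		MemoryContext oldcontext = MemoryContextSwitchTo(scratch_ctx);

		do_output_calls();			/* allocations land in scratch_ctx */

		MemoryContextSwitchTo(oldcontext);
		MemoryContextReset(scratch_ctx);	/* reclaim it all in one sweep */
	}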
@@ -341,7 +341,7 @@ PLyDict_FromTuple(PLyTypeInfo *info, HeapTuple tuple, TupleDesc desc)
Datum
PLyObject_ToCompositeDatum(PLyTypeInfo *info, TupleDesc desc, PyObject *plrv)
{
- Datum datum;
+ Datum datum;
if (PyString_Check(plrv) || PyUnicode_Check(plrv))
datum = PLyString_ToComposite(info, desc, plrv);
diff --git a/src/pl/plpython/plpy_typeio.h b/src/pl/plpython/plpy_typeio.h
index 11532b8c20..d2dfa66e0b 100644
--- a/src/pl/plpython/plpy_typeio.h
+++ b/src/pl/plpython/plpy_typeio.h
@@ -104,4 +104,4 @@ extern Datum PLyObject_ToCompositeDatum(PLyTypeInfo *info, TupleDesc desc, PyObj
/* conversion from heap tuples to Python dictionaries */
extern PyObject *PLyDict_FromTuple(PLyTypeInfo *info, HeapTuple tuple, TupleDesc desc);
-#endif /* PLPY_TYPEIO_H */
+#endif /* PLPY_TYPEIO_H */
diff --git a/src/pl/plpython/plpy_util.c b/src/pl/plpython/plpy_util.c
index 414b9d5445..9a4901ecb2 100644
--- a/src/pl/plpython/plpy_util.c
+++ b/src/pl/plpython/plpy_util.c
@@ -122,4 +122,5 @@ PLyUnicode_FromString(const char *s)
return o;
}
+
#endif /* PY_MAJOR_VERSION >= 3 */
diff --git a/src/pl/plpython/plpy_util.h b/src/pl/plpython/plpy_util.h
index 9b9eca0050..f93e8379fb 100644
--- a/src/pl/plpython/plpy_util.h
+++ b/src/pl/plpython/plpy_util.h
@@ -18,4 +18,4 @@ extern char *PLyUnicode_AsString(PyObject *unicode);
extern PyObject *PLyUnicode_FromString(const char *s);
#endif
-#endif /* PLPY_UTIL_H */
+#endif /* PLPY_UTIL_H */
diff --git a/src/pl/plpython/plpython.h b/src/pl/plpython/plpython.h
index 15ec85e805..e788cd9a89 100644
--- a/src/pl/plpython/plpython.h
+++ b/src/pl/plpython/plpython.h
@@ -132,11 +132,11 @@ typedef int Py_ssize_t;
#undef vsnprintf
#endif
#ifdef __GNUC__
-#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__)
-#define snprintf(...) pg_snprintf(__VA_ARGS__)
+#define vsnprintf(...) pg_vsnprintf(__VA_ARGS__)
+#define snprintf(...) pg_snprintf(__VA_ARGS__)
#else
-#define vsnprintf pg_vsnprintf
-#define snprintf pg_snprintf
+#define vsnprintf pg_vsnprintf
+#define snprintf pg_snprintf
#endif /* __GNUC__ */
#endif /* USE_REPL_SNPRINTF */
diff --git a/src/port/erand48.c b/src/port/erand48.c
index 9d471197c3..524911edd1 100644
--- a/src/port/erand48.c
+++ b/src/port/erand48.c
@@ -5,7 +5,7 @@
* This file supplies pg_erand48(), pg_lrand48(), and pg_srand48(), which
* are just like erand48(), lrand48(), and srand48() except that we use
* our own implementation rather than the one provided by the operating
- * system. We used to test for an operating system version rather than
+ * system. We used to test for an operating system version rather than
* unconditionally using our own, but (1) some versions of Cygwin have a
* buggy erand48() that always returns zero and (2) as of 2011, glibc's
* erand48() is strangely coded to be almost-but-not-quite thread-safe,
diff --git a/src/port/fls.c b/src/port/fls.c
index 4a2d6737cf..8be2c51ef3 100644
--- a/src/port/fls.c
+++ b/src/port/fls.c
@@ -10,7 +10,7 @@
* src/port/fls.c
*
* This file was taken from FreeBSD to provide an implementation of fls()
- * for platforms that lack it. Note that the operating system's version may
+ * for platforms that lack it. Note that the operating system's version may
* be substantially more efficient than ours, since some platforms have an
* assembly instruction that does exactly this.
*
@@ -25,18 +25,18 @@
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
@@ -54,11 +54,11 @@
int
fls(int mask)
{
- int bit;
+ int bit;
if (mask == 0)
return (0);
for (bit = 1; mask != 1; bit++)
- mask = (unsigned int)mask >> 1;
+ mask = (unsigned int) mask >> 1;
return (bit);
}
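A quick, self-contained check of the loop above — a hypothetical test harness, not part of the patch — showing that fls() returns the 1-based index of the most significant set bit:

	#include <stdio.h>

	extern int fls(int mask);

	int
	main(void)
	{
		printf("%d\n", fls(0));			/* 0: no bits set */
		printf("%d\n", fls(1));			/* 1 */
		printf("%d\n", fls(0x10));		/* 5 */
		printf("%d\n", fls(0x7fffffff));	/* 31 */
		return 0;
	}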
diff --git a/src/port/getaddrinfo.c b/src/port/getaddrinfo.c
index 579d855648..c117012ec7 100644
--- a/src/port/getaddrinfo.c
+++ b/src/port/getaddrinfo.c
@@ -328,7 +328,7 @@ gai_strerror(int errcode)
case EAI_MEMORY:
return "Not enough memory";
#endif
-#if defined(EAI_NODATA) && EAI_NODATA != EAI_NONAME /* MSVC/WIN64 duplicate */
+#if defined(EAI_NODATA) && EAI_NODATA != EAI_NONAME /* MSVC/WIN64 duplicate */
case EAI_NODATA:
return "No host data of that type was found";
#endif
diff --git a/src/port/path.c b/src/port/path.c
index be55e4af60..738b5cc547 100644
--- a/src/port/path.c
+++ b/src/port/path.c
@@ -212,7 +212,7 @@ join_path_components(char *ret_path,
}
if (*tail)
snprintf(ret_path + strlen(ret_path), MAXPGPATH - strlen(ret_path),
- /* only add slash if there is something already in head */
+ /* only add slash if there is something already in head */
"%s%s", head[0] ? "/" : "", tail);
}
diff --git a/src/port/win32setlocale.c b/src/port/win32setlocale.c
index f8b1762371..844891df53 100644
--- a/src/port/win32setlocale.c
+++ b/src/port/win32setlocale.c
@@ -27,12 +27,11 @@
struct locale_map
{
- const char *locale_name_part; /* string in locale name to replace */
- const char *replacement; /* string to replace it with */
+ const char *locale_name_part; /* string in locale name to replace */
+ const char *replacement; /* string to replace it with */
};
static const struct locale_map locale_map_list[] = {
-
/*
* "HKG" is listed here:
* http://msdn.microsoft.com/en-us/library/cdax410z%28v=vs.71%29.aspx
@@ -41,26 +40,26 @@ static const struct locale_map locale_map_list[] = {
* "ARE" is the ISO-3166 three-letter code for U.A.E. It is not on the
* above list, but seems to work anyway.
*/
- { "Hong Kong S.A.R.", "HKG" },
- { "U.A.E.", "ARE" },
+ {"Hong Kong S.A.R.", "HKG"},
+ {"U.A.E.", "ARE"},
/*
* The ISO-3166 country code for Macau S.A.R. is MAC, but Windows doesn't
- * seem to recognize that. And Macau isn't listed in the table of
- * accepted abbreviations linked above. Fortunately, "ZHM" seems to be
- * accepted as an alias for "Chinese (Traditional)_Macau S.A.R..950". I'm
- * not sure where "ZHM" comes from, must be some legacy naming scheme. But
- * hey, it works.
+ * seem to recognize that. And Macau isn't listed in the table of accepted
+ * abbreviations linked above. Fortunately, "ZHM" seems to be accepted as
+ * an alias for "Chinese (Traditional)_Macau S.A.R..950". I'm not sure
+ * where "ZHM" comes from, must be some legacy naming scheme. But hey, it
+ * works.
*
* Note that unlike HKG and ARE, ZHM is an alias for the *whole* locale
* name, not just the country part.
*
* Some versions of Windows spell it "Macau", others "Macao".
*/
- { "Chinese (Traditional)_Macau S.A.R..950", "ZHM" },
- { "Chinese_Macau S.A.R..950", "ZHM" },
- { "Chinese (Traditional)_Macao S.A.R..950", "ZHM" },
- { "Chinese_Macao S.A.R..950", "ZHM" }
+ {"Chinese (Traditional)_Macau S.A.R..950", "ZHM"},
+ {"Chinese_Macau S.A.R..950", "ZHM"},
+ {"Chinese (Traditional)_Macao S.A.R..950", "ZHM"},
+ {"Chinese_Macao S.A.R..950", "ZHM"}
};
char *
@@ -85,10 +84,10 @@ pgwin32_setlocale(int category, const char *locale)
if (match != NULL)
{
/* Found a match. Replace the matched string. */
- int matchpos = match - locale;
- int replacementlen = strlen(replacement);
- char *rest = match + strlen(needle);
- int restlen = strlen(rest);
+ int matchpos = match - locale;
+ int replacementlen = strlen(replacement);
+ char *rest = match + strlen(needle);
+ int restlen = strlen(rest);
alias = malloc(matchpos + replacementlen + restlen + 1);
if (!alias)
@@ -96,7 +95,8 @@ pgwin32_setlocale(int category, const char *locale)
memcpy(&alias[0], &locale[0], matchpos);
memcpy(&alias[matchpos], replacement, replacementlen);
- memcpy(&alias[matchpos + replacementlen], rest, restlen + 1); /* includes null terminator */
+ memcpy(&alias[matchpos + replacementlen], rest, restlen + 1); /* includes null
+ * terminator */
break;
}
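The alias construction above amounts to a bounded string splice. A standalone sketch (splice_locale is a hypothetical name) shows the arithmetic on one of the mapped locales:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/*
	 * Replace the first occurrence of "needle" in "locale" with
	 * "replacement", returning a freshly malloc'd string.
	 */
	static char *
	splice_locale(const char *locale, const char *needle, const char *replacement)
	{
		const char *match = strstr(locale, needle);
		int			matchpos;
		int			replacementlen;
		const char *rest;
		int			restlen;
		char	   *alias;

		if (match == NULL)
			return NULL;

		matchpos = match - locale;
		replacementlen = strlen(replacement);
		rest = match + strlen(needle);
		restlen = strlen(rest);

		alias = malloc(matchpos + replacementlen + restlen + 1);
		if (!alias)
			return NULL;
		memcpy(&alias[0], &locale[0], matchpos);
		memcpy(&alias[matchpos], replacement, replacementlen);
		memcpy(&alias[matchpos + replacementlen], rest, restlen + 1);	/* includes NUL */
		return alias;
	}

	int
	main(void)
	{
		char	   *s = splice_locale("Chinese_Hong Kong S.A.R..950",
									  "Hong Kong S.A.R.", "HKG");

		printf("%s\n", s ? s : "(no match)");	/* prints Chinese_HKG.950 */
		free(s);
		return 0;
	}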
diff --git a/src/test/isolation/isolationtester.c b/src/test/isolation/isolationtester.c
index 0e681639ba..98f89da6bf 100644
--- a/src/test/isolation/isolationtester.c
+++ b/src/test/isolation/isolationtester.c
@@ -18,7 +18,6 @@
#ifdef HAVE_GETOPT_H
#include <getopt.h>
#endif
-
#else
int getopt(int argc, char *const argv[], const char *optstring);
#endif /* ! WIN32 */
@@ -47,16 +46,16 @@ static int nconns = 0;
/* In dry run only output permutations to be run by the tester. */
static int dry_run = false;
-static void run_testspec(TestSpec *testspec);
+static void run_testspec(TestSpec * testspec);
static void run_all_permutations(TestSpec * testspec);
static void run_all_permutations_recurse(TestSpec * testspec, int nsteps,
Step ** steps);
static void run_named_permutations(TestSpec * testspec);
static void run_permutation(TestSpec * testspec, int nsteps, Step ** steps);
-#define STEP_NONBLOCK 0x1 /* return 0 as soon as cmd waits for a lock */
-#define STEP_RETRY 0x2 /* this is a retry of a previously-waiting cmd */
-static bool try_complete_step(Step *step, int flags);
+#define STEP_NONBLOCK 0x1 /* return 0 as soon as cmd waits for a lock */
+#define STEP_RETRY 0x2 /* this is a retry of a previously-waiting cmd */
+static bool try_complete_step(Step * step, int flags);
static int step_qsort_cmp(const void *a, const void *b);
static int step_bsearch_cmp(const void *a, const void *b);
@@ -82,7 +81,7 @@ main(int argc, char **argv)
int i;
PGresult *res;
PQExpBufferData wait_query;
- int opt;
+ int opt;
while ((opt = getopt(argc, argv, "n")) != -1)
{
@@ -99,8 +98,8 @@ main(int argc, char **argv)
/*
* If the user supplies a non-option parameter on the command line, use it
- * as the conninfo string; otherwise default to setting dbname=postgres and
- * using environment variables or defaults for all other connection
+ * as the conninfo string; otherwise default to setting dbname=postgres
+ * and using environment variables or defaults for all other connection
* parameters.
*/
if (argc > optind)
@@ -125,8 +124,8 @@ main(int argc, char **argv)
printf("Parsed test spec with %d sessions\n", testspec->nsessions);
/*
- * Establish connections to the database, one for each session and an extra
- * for lock wait detection and global work.
+ * Establish connections to the database, one for each session and an
+ * extra for lock wait detection and global work.
*/
nconns = 1 + testspec->nsessions;
conns = calloc(nconns, sizeof(PGconn *));
@@ -187,7 +186,7 @@ main(int argc, char **argv)
/*
* Build the query we'll use to detect lock contention among sessions in
- * the test specification. Most of the time, we could get away with
+ * the test specification. Most of the time, we could get away with
* simply checking whether a session is waiting for *any* lock: we don't
* exactly expect concurrent use of test tables. However, autovacuum will
* occasionally take AccessExclusiveLock to truncate a table, and we must
@@ -254,16 +253,16 @@ main(int argc, char **argv)
"'ExclusiveLock',"
"'AccessExclusiveLock'] END) "
- "AND holder.locktype IS NOT DISTINCT FROM waiter.locktype "
- "AND holder.database IS NOT DISTINCT FROM waiter.database "
- "AND holder.relation IS NOT DISTINCT FROM waiter.relation "
+ "AND holder.locktype IS NOT DISTINCT FROM waiter.locktype "
+ "AND holder.database IS NOT DISTINCT FROM waiter.database "
+ "AND holder.relation IS NOT DISTINCT FROM waiter.relation "
"AND holder.page IS NOT DISTINCT FROM waiter.page "
"AND holder.tuple IS NOT DISTINCT FROM waiter.tuple "
- "AND holder.virtualxid IS NOT DISTINCT FROM waiter.virtualxid "
- "AND holder.transactionid IS NOT DISTINCT FROM waiter.transactionid "
- "AND holder.classid IS NOT DISTINCT FROM waiter.classid "
+ "AND holder.virtualxid IS NOT DISTINCT FROM waiter.virtualxid "
+ "AND holder.transactionid IS NOT DISTINCT FROM waiter.transactionid "
+ "AND holder.classid IS NOT DISTINCT FROM waiter.classid "
"AND holder.objid IS NOT DISTINCT FROM waiter.objid "
- "AND holder.objsubid IS NOT DISTINCT FROM waiter.objsubid ");
+ "AND holder.objsubid IS NOT DISTINCT FROM waiter.objsubid ");
res = PQprepare(conns[0], PREP_WAITING, wait_query.data, 0, NULL);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
@@ -294,7 +293,7 @@ static int *piles;
* explicitly specified.
*/
static void
-run_testspec(TestSpec *testspec)
+run_testspec(TestSpec * testspec)
{
if (testspec->permutations)
run_named_permutations(testspec);
@@ -400,9 +399,10 @@ run_named_permutations(TestSpec * testspec)
/* Find all the named steps using the lookup table */
for (j = 0; j < p->nsteps; j++)
{
- Step **this = (Step **) bsearch(p->stepnames[j], allsteps,
- nallsteps, sizeof(Step *),
- &step_bsearch_cmp);
+ Step **this = (Step **) bsearch(p->stepnames[j], allsteps,
+ nallsteps, sizeof(Step *),
+ &step_bsearch_cmp);
+
if (this == NULL)
{
fprintf(stderr, "undefined step \"%s\" specified in permutation\n",
@@ -441,7 +441,7 @@ step_bsearch_cmp(const void *a, const void *b)
* If a step caused an error to be reported, print it out and clear it.
*/
static void
-report_error_message(Step *step)
+report_error_message(Step * step)
{
if (step->errormsg)
{
@@ -458,9 +458,9 @@ report_error_message(Step *step)
* one fails due to a timeout such as deadlock timeout.
*/
static void
-report_two_error_messages(Step *step1, Step *step2)
+report_two_error_messages(Step * step1, Step * step2)
{
- char *prefix;
+ char *prefix;
prefix = malloc(strlen(step1->name) + strlen(step2->name) + 2);
sprintf(prefix, "%s %s", step1->name, step2->name);
@@ -494,8 +494,8 @@ run_permutation(TestSpec * testspec, int nsteps, Step ** steps)
Step *waiting = NULL;
/*
- * In dry run mode, just display the permutation in the same format used by
- * spec files, and return.
+ * In dry run mode, just display the permutation in the same format used
+ * by spec files, and return.
*/
if (dry_run)
{
@@ -547,21 +547,22 @@ run_permutation(TestSpec * testspec, int nsteps, Step ** steps)
/* Perform steps */
for (i = 0; i < nsteps; i++)
{
- Step *step = steps[i];
- PGconn *conn = conns[1 + step->session];
+ Step *step = steps[i];
+ PGconn *conn = conns[1 + step->session];
if (waiting != NULL && step->session == waiting->session)
{
- PGcancel *cancel;
- PGresult *res;
- int j;
+ PGcancel *cancel;
+ PGresult *res;
+ int j;
/*
* This permutation is invalid: it can never happen in real life.
*
- * A session is blocked on an earlier step (waiting) and no further
- * steps from this session can run until it is unblocked, but it
- * can only be unblocked by running steps from other sessions.
+ * A session is blocked on an earlier step (waiting) and no
+ * further steps from this session can run until it is unblocked,
+ * but it can only be unblocked by running steps from other
+ * sessions.
*/
fprintf(stderr, "invalid permutation detected\n");
@@ -569,7 +570,7 @@ run_permutation(TestSpec * testspec, int nsteps, Step ** steps)
cancel = PQgetCancel(conn);
if (cancel != NULL)
{
- char buf[256];
+ char buf[256];
PQcancel(cancel, buf, sizeof(buf));
@@ -673,11 +674,11 @@ teardown:
/*
* Our caller already sent the query associated with this step. Wait for it
* to either complete or (if given the STEP_NONBLOCK flag) to block while
- * waiting for a lock. We assume that any lock wait will persist until we
+ * waiting for a lock. We assume that any lock wait will persist until we
* have executed additional steps in the permutation.
*
* When calling this function on behalf of a given step for a second or later
- * time, pass the STEP_RETRY flag. This only affects the messages printed.
+ * time, pass the STEP_RETRY flag. This only affects the messages printed.
*
* If the connection returns an error, the message is saved in step->errormsg.
* Caller should call report_error_message shortly after this, to have it
@@ -687,7 +688,7 @@ teardown:
* a lock, returns true. Otherwise, returns false.
*/
static bool
-try_complete_step(Step *step, int flags)
+try_complete_step(Step * step, int flags)
{
PGconn *conn = conns[1 + step->session];
fd_set read_set;
@@ -705,12 +706,12 @@ try_complete_step(Step *step, int flags)
timeout.tv_usec = 10000; /* Check for lock waits every 10ms. */
ret = select(sock + 1, &read_set, NULL, NULL, &timeout);
- if (ret < 0) /* error in select() */
+ if (ret < 0) /* error in select() */
{
fprintf(stderr, "select failed: %s\n", strerror(errno));
exit_nicely();
}
- else if (ret == 0) /* select() timeout: check for lock wait */
+ else if (ret == 0) /* select() timeout: check for lock wait */
{
int ntuples;
@@ -765,9 +766,9 @@ try_complete_step(Step *step, int flags)
}
/* Detail may contain xid values, so just show primary. */
step->errormsg = malloc(5 +
- strlen(PQresultErrorField(res, PG_DIAG_SEVERITY)) +
+ strlen(PQresultErrorField(res, PG_DIAG_SEVERITY)) +
strlen(PQresultErrorField(res,
- PG_DIAG_MESSAGE_PRIMARY)));
+ PG_DIAG_MESSAGE_PRIMARY)));
sprintf(step->errormsg, "%s: %s",
PQresultErrorField(res, PG_DIAG_SEVERITY),
PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY));
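Taken together, the contract documented before try_complete_step suggests a driver shaped roughly like the following hedged sketch; run_one_step and finish_waiter are hypothetical names, and the real caller's bookkeeping is elided:

	/* Hypothetical driver built on the documented contract. */
	static void
	run_one_step(Step *step, Step **waiting)
	{
		/* the real caller has already sent the step's query */
		if (try_complete_step(step, STEP_NONBLOCK))
			*waiting = step;		/* blocked on a lock; revisit later */
		else
			report_error_message(step);	/* done; surface any saved error */
	}

	static void
	finish_waiter(Step **waiting)
	{
		if (*waiting != NULL)
		{
			/* second call for the same step, so pass STEP_RETRY */
			(void) try_complete_step(*waiting, STEP_RETRY);
			report_error_message(*waiting);
			*waiting = NULL;
		}
	}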
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index 2931a73fdb..7d89318b27 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -475,7 +475,7 @@ convert_sourcefiles_in(char *source_subdir, char *dest_dir, char *dest_subdir, c
/* build the full actual paths to open */
snprintf(prefix, strlen(*name) - 6, "%s", *name);
snprintf(srcfile, MAXPGPATH, "%s/%s", indir, *name);
- snprintf(destfile, MAXPGPATH, "%s/%s/%s.%s", dest_dir, dest_subdir,
+ snprintf(destfile, MAXPGPATH, "%s/%s/%s.%s", dest_dir, dest_subdir,
prefix, suffix);
infile = fopen(srcfile, "r");
diff --git a/src/test/thread/thread_test.c b/src/test/thread/thread_test.c
index bb5b92f142..9041928f08 100644
--- a/src/test/thread/thread_test.c
+++ b/src/test/thread/thread_test.c
@@ -114,8 +114,10 @@ static bool platform_is_threadsafe = true;
int
main(int argc, char *argv[])
{
- pthread_t thread1, thread2;
+ pthread_t thread1,
+ thread2;
int rc;
+
#ifdef WIN32
WSADATA wsaData;
int err;
@@ -199,7 +201,7 @@ main(int argc, char *argv[])
#endif
/* close down threads */
-
+
pthread_mutex_unlock(&init_mutex); /* let children exit */
pthread_join(thread1, NULL); /* clean up children */
@@ -277,7 +279,7 @@ func_call_1(void)
#ifdef WIN32
HANDLE h1;
#else
- int fd;
+ int fd;
#endif
unlink(TEMP_FILENAME_1);
@@ -285,10 +287,11 @@ func_call_1(void)
/* Set errno = EEXIST */
/* create, then try to fail on exclusive create open */
+
/*
* It would be great to check errno here but if errno is not thread-safe
- * we might get a value from the other thread and mis-report the cause
- * of the failure.
+ * we might get a value from the other thread and mis-report the cause of
+ * the failure.
*/
#ifdef WIN32
if ((h1 = CreateFile(TEMP_FILENAME_1, GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, 0, NULL)) ==
@@ -301,7 +304,7 @@ func_call_1(void)
TEMP_FILENAME_1);
exit(1);
}
-
+
#ifdef WIN32
if (CreateFile(TEMP_FILENAME_1, GENERIC_WRITE, 0, NULL, CREATE_NEW, 0, NULL)
!= INVALID_HANDLE_VALUE)
@@ -346,6 +349,7 @@ func_call_1(void)
unlink(TEMP_FILENAME_1);
#ifndef HAVE_STRERROR_R
+
/*
* If strerror() uses sys_errlist, the pointer might change for different
* errno values, so we don't check to see if it varies within the thread.
@@ -428,6 +432,7 @@ func_call_2(void)
}
#ifndef HAVE_STRERROR_R
+
/*
* If strerror() uses sys_errlist, the pointer might change for different
* errno values, so we don't check to see if it varies within the thread.
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index 36e20b834e..3dae9e5e9f 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -295,7 +295,7 @@ pg_tzset(const char *name)
* This is called before GUC variable initialization begins. Its purpose
* is to ensure that log_timezone has a valid value before any logging GUC
* variables could become set to values that require elog.c to provide
- * timestamps (e.g., log_line_prefix). We may as well initialize
+ * timestamps (e.g., log_line_prefix). We may as well initialize
* session_timestamp to something valid, too.
*/
void
@@ -303,9 +303,9 @@ pg_timezone_initialize(void)
{
/*
* We may not yet know where PGSHAREDIR is (in particular this is true in
- * an EXEC_BACKEND subprocess). So use "GMT", which pg_tzset forces to
- * be interpreted without reference to the filesystem. This corresponds
- * to the bootstrap default for these variables in guc.c, although in
+ * an EXEC_BACKEND subprocess). So use "GMT", which pg_tzset forces to be
+ * interpreted without reference to the filesystem. This corresponds to
+ * the bootstrap default for these variables in guc.c, although in
* principle it could be different.
*/
session_timezone = pg_tzset("GMT");
diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm
index 7c743d17b4..058fab3e5a 100644
--- a/src/tools/msvc/Install.pm
+++ b/src/tools/msvc/Install.pm
@@ -19,593 +19,615 @@ our (@ISA,@EXPORT_OK);
sub lcopy
{
- my $src = shift;
- my $target = shift;
+ my $src = shift;
+ my $target = shift;
- if (-f $target)
- {
- unlink $target || confess "Could not delete $target\n";
- }
+ if (-f $target)
+ {
+ unlink $target || confess "Could not delete $target\n";
+ }
- copy($src,$target)
- || confess "Could not copy $src to $target\n";
+ copy($src,$target)
+ || confess "Could not copy $src to $target\n";
}
sub Install
{
- $| = 1;
-
- my $target = shift;
- our $config;
- require "config_default.pl";
- require "config.pl" if (-f "config.pl");
-
- chdir("../../..") if (-f "../../../configure");
- chdir("../../../..") if (-f "../../../../configure");
- my $conf = "";
- if (-d "debug")
- {
- $conf = "debug";
- }
- if (-d "release")
- {
- $conf = "release";
- }
- die "Could not find debug or release binaries" if ($conf eq "");
- my $majorver = DetermineMajorVersion();
- print "Installing version $majorver for $conf in $target\n";
-
- EnsureDirectories($target, 'bin', 'lib', 'share','share/timezonesets','share/extension',
- 'share/contrib','doc','doc/extension', 'doc/contrib','symbols', 'share/tsearch_data');
-
- CopySolutionOutput($conf, $target);
- lcopy($target . '/lib/libpq.dll', $target . '/bin/libpq.dll');
- my $sample_files = [];
- File::Find::find(
- {
- wanted =>sub {
- /^.*\.sample\z/s
- &&push(@$sample_files, $File::Find::name);
- }
- },
- "src"
- );
- CopySetOfFiles('config files', $sample_files, $target . '/share/');
- CopyFiles(
- 'Import libraries',
- $target .'/lib/',
- "$conf\\", "postgres\\postgres.lib","libpq\\libpq.lib", "libecpg\\libecpg.lib",
- "libpgport\\libpgport.lib"
- );
- CopySetOfFiles(
- 'timezone names',
- [ glob('src\timezone\tznames\*.txt') ],
- $target . '/share/timezonesets/'
- );
- CopyFiles(
- 'timezone sets',
- $target . '/share/timezonesets/',
- 'src/timezone/tznames/', 'Default','Australia','India'
- );
- CopySetOfFiles('BKI files', [ glob("src\\backend\\catalog\\postgres.*") ],$target .'/share/');
- CopySetOfFiles('SQL files', [ glob("src\\backend\\catalog\\*.sql") ],$target . '/share/');
- CopyFiles(
- 'Information schema data',
- $target . '/share/',
- 'src/backend/catalog/', 'sql_features.txt'
- );
- GenerateConversionScript($target);
- GenerateTimezoneFiles($target,$conf);
- GenerateTsearchFiles($target);
- CopySetOfFiles(
- 'Stopword files',
- [ glob("src\\backend\\snowball\\stopwords\\*.stop") ],
- $target . '/share/tsearch_data/'
- );
- CopySetOfFiles(
- 'Dictionaries sample files',
- [ glob("src\\backend\\tsearch\\*_sample.*") ],
- $target . '/share/tsearch_data/'
- );
- CopyContribFiles($config,$target);
- CopyIncludeFiles($target);
-
- my $pl_extension_files = [];
- my @pldirs = ('src/pl/plpgsql/src');
- push @pldirs,"src/pl/plperl" if $config->{perl};
- push @pldirs,"src/pl/plpython" if $config->{python};
- push @pldirs,"src/pl/tcl" if $config->{tcl};
- File::Find::find(
- {
- wanted =>sub {
- /^(.*--.*\.sql|.*\.control)\z/s
- &&push(@$pl_extension_files, $File::Find::name);
- }
- },
- @pldirs
- );
- CopySetOfFiles('PL Extension files', $pl_extension_files,$target . '/share/extension/');
-
- GenerateNLSFiles($target,$config->{nls},$majorver) if ($config->{nls});
-
- print "Installation complete.\n";
+ $| = 1;
+
+ my $target = shift;
+ our $config;
+ require "config_default.pl";
+ require "config.pl" if (-f "config.pl");
+
+ chdir("../../..") if (-f "../../../configure");
+ chdir("../../../..") if (-f "../../../../configure");
+ my $conf = "";
+ if (-d "debug")
+ {
+ $conf = "debug";
+ }
+ if (-d "release")
+ {
+ $conf = "release";
+ }
+ die "Could not find debug or release binaries" if ($conf eq "");
+ my $majorver = DetermineMajorVersion();
+ print "Installing version $majorver for $conf in $target\n";
+
+ EnsureDirectories($target, 'bin', 'lib', 'share','share/timezonesets','share/extension',
+ 'share/contrib','doc','doc/extension', 'doc/contrib','symbols',
+ 'share/tsearch_data');
+
+ CopySolutionOutput($conf, $target);
+ lcopy($target . '/lib/libpq.dll', $target . '/bin/libpq.dll');
+ my $sample_files = [];
+ File::Find::find(
+ {
+ wanted =>sub {
+ /^.*\.sample\z/s
+ &&push(@$sample_files, $File::Find::name);
+ }
+ },
+ "src"
+ );
+ CopySetOfFiles('config files', $sample_files, $target . '/share/');
+ CopyFiles(
+ 'Import libraries',
+ $target .'/lib/',
+ "$conf\\", "postgres\\postgres.lib","libpq\\libpq.lib", "libecpg\\libecpg.lib",
+ "libpgport\\libpgport.lib"
+ );
+ CopySetOfFiles(
+ 'timezone names',
+ [ glob('src\timezone\tznames\*.txt') ],
+ $target . '/share/timezonesets/'
+ );
+ CopyFiles(
+ 'timezone sets',
+ $target . '/share/timezonesets/',
+ 'src/timezone/tznames/', 'Default','Australia','India'
+ );
+ CopySetOfFiles(
+ 'BKI files',
+ [ glob("src\\backend\\catalog\\postgres.*") ],
+ $target .'/share/'
+ );
+ CopySetOfFiles('SQL files', [ glob("src\\backend\\catalog\\*.sql") ],$target . '/share/');
+ CopyFiles(
+ 'Information schema data',$target . '/share/',
+ 'src/backend/catalog/', 'sql_features.txt'
+ );
+ GenerateConversionScript($target);
+ GenerateTimezoneFiles($target,$conf);
+ GenerateTsearchFiles($target);
+ CopySetOfFiles(
+ 'Stopword files',
+ [ glob("src\\backend\\snowball\\stopwords\\*.stop") ],
+ $target . '/share/tsearch_data/'
+ );
+ CopySetOfFiles(
+ 'Dictionaries sample files',
+ [ glob("src\\backend\\tsearch\\*_sample.*") ],
+ $target . '/share/tsearch_data/'
+ );
+ CopyContribFiles($config,$target);
+ CopyIncludeFiles($target);
+
+ my $pl_extension_files = [];
+ my @pldirs = ('src/pl/plpgsql/src');
+ push @pldirs,"src/pl/plperl" if $config->{perl};
+ push @pldirs,"src/pl/plpython" if $config->{python};
+ push @pldirs,"src/pl/tcl" if $config->{tcl};
+ File::Find::find(
+ {
+ wanted =>sub {
+ /^(.*--.*\.sql|.*\.control)\z/s
+ &&push(@$pl_extension_files,
+ $File::Find::name);
+ }
+ },
+ @pldirs
+ );
+ CopySetOfFiles('PL Extension files', $pl_extension_files,$target . '/share/extension/');
+
+ GenerateNLSFiles($target,$config->{nls},$majorver) if ($config->{nls});
+
+ print "Installation complete.\n";
}
sub EnsureDirectories
{
- my $target = shift;
- mkdir $target unless -d ($target);
- while (my $d = shift)
- {
- mkdir $target . '/' . $d unless -d ($target . '/' . $d);
- }
+ my $target = shift;
+ mkdir $target unless -d ($target);
+ while (my $d = shift)
+ {
+ mkdir $target . '/' . $d unless -d ($target . '/' . $d);
+ }
}
sub CopyFiles
{
- my $what = shift;
- my $target = shift;
- my $basedir = shift;
-
- print "Copying $what";
- while (my $f = shift)
- {
- print ".";
- $f = $basedir . $f;
- die "No file $f\n" if (!-f $f);
- lcopy($f, $target . basename($f));
- }
- print "\n";
+ my $what = shift;
+ my $target = shift;
+ my $basedir = shift;
+
+ print "Copying $what";
+ while (my $f = shift)
+ {
+ print ".";
+ $f = $basedir . $f;
+ die "No file $f\n" if (!-f $f);
+ lcopy($f, $target . basename($f));
+ }
+ print "\n";
}
sub CopySetOfFiles
{
- my $what = shift;
- my $flist = shift;
- my $target = shift;
- print "Copying $what" if $what;
- foreach (@$flist)
- {
- next if /regress/; # Skip temporary install in regression subdir
- next if /ecpg.test/; # Skip temporary install in regression subdir
- my $tgt = $target . basename($_);
- print ".";
- lcopy($_, $tgt) || croak "Could not copy $_: $!\n";
- }
- print "\n";
+ my $what = shift;
+ my $flist = shift;
+ my $target = shift;
+ print "Copying $what" if $what;
+ foreach (@$flist)
+ {
+ next if /regress/; # Skip temporary install in regression subdir
+ next if /ecpg.test/; # Skip temporary install in regression subdir
+ my $tgt = $target . basename($_);
+ print ".";
+ lcopy($_, $tgt) || croak "Could not copy $_: $!\n";
+ }
+ print "\n";
}
sub CopySolutionOutput
{
- my $conf = shift;
- my $target = shift;
- my $rem = qr{Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}"\) = "([^"]+)"};
-
- my $sln = read_file("pgsql.sln") || croak "Could not open pgsql.sln\n";
-
- my $vcproj = 'vcproj';
- if ($sln =~ /Microsoft Visual Studio Solution File, Format Version (\d+)\.\d+/ && $1 >= 11)
- {
- $vcproj = 'vcxproj';
- }
-
- print "Copying build output files...";
- while ($sln =~ $rem)
- {
- my $pf = $1;
- my $dir;
- my $ext;
-
- $sln =~ s/$rem//;
-
- my $proj = read_file("$pf.$vcproj") || croak "Could not open $pf.$vcproj\n";
- if ($vcproj eq 'vcproj' && $proj =~ qr{ConfigurationType="([^"]+)"})
- {
- if ($1 == 1)
- {
- $dir = "bin";
- $ext = "exe";
- }
- elsif ($1 == 2)
- {
- $dir = "lib";
- $ext = "dll";
- }
- else
- {
-
- # Static lib, such as libpgport, only used internally during build, don't install
- next;
- }
- }
- elsif ($vcproj eq 'vcxproj' && $proj =~ qr{<ConfigurationType>(\w+)</ConfigurationType>})
- {
- if ($1 eq 'Application')
- {
- $dir = "bin";
- $ext = "exe";
- }
- elsif ($1 eq 'DynamicLibrary')
- {
- $dir = "lib";
- $ext = "dll";
- }
- else # 'StaticLibrary'
- {
-
- # Static lib, such as libpgport, only used internally during build, don't install
- next;
- }
- }
- else
- {
- croak "Could not parse $pf.$vcproj\n";
- }
- lcopy("$conf\\$pf\\$pf.$ext","$target\\$dir\\$pf.$ext")
- || croak "Could not copy $pf.$ext\n";
- lcopy("$conf\\$pf\\$pf.pdb","$target\\symbols\\$pf.pdb")
- || croak "Could not copy $pf.pdb\n";
- print ".";
- }
- print "\n";
+ my $conf = shift;
+ my $target = shift;
+ my $rem = qr{Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}"\) = "([^"]+)"};
+
+ my $sln = read_file("pgsql.sln") || croak "Could not open pgsql.sln\n";
+
+ my $vcproj = 'vcproj';
+ if ($sln =~ /Microsoft Visual Studio Solution File, Format Version (\d+)\.\d+/ && $1 >= 11)
+ {
+ $vcproj = 'vcxproj';
+ }
+
+ print "Copying build output files...";
+ while ($sln =~ $rem)
+ {
+ my $pf = $1;
+ my $dir;
+ my $ext;
+
+ $sln =~ s/$rem//;
+
+ my $proj = read_file("$pf.$vcproj") || croak "Could not open $pf.$vcproj\n";
+ if ($vcproj eq 'vcproj' && $proj =~ qr{ConfigurationType="([^"]+)"})
+ {
+ if ($1 == 1)
+ {
+ $dir = "bin";
+ $ext = "exe";
+ }
+ elsif ($1 == 2)
+ {
+ $dir = "lib";
+ $ext = "dll";
+ }
+ else
+ {
+
+ # Static lib, such as libpgport, only used internally during build, don't install
+ next;
+ }
+ }
+ elsif ( $vcproj eq 'vcxproj'
+ && $proj =~ qr{<ConfigurationType>(\w+)</ConfigurationType>})
+ {
+ if ($1 eq 'Application')
+ {
+ $dir = "bin";
+ $ext = "exe";
+ }
+ elsif ($1 eq 'DynamicLibrary')
+ {
+ $dir = "lib";
+ $ext = "dll";
+ }
+ else # 'StaticLibrary'
+ {
+
+ # Static lib, such as libpgport, only used internally during build, don't install
+ next;
+ }
+ }
+ else
+ {
+ croak "Could not parse $pf.$vcproj\n";
+ }
+ lcopy("$conf\\$pf\\$pf.$ext","$target\\$dir\\$pf.$ext")
+ || croak "Could not copy $pf.$ext\n";
+ lcopy("$conf\\$pf\\$pf.pdb","$target\\symbols\\$pf.pdb")
+ || croak "Could not copy $pf.pdb\n";
+ print ".";
+ }
+ print "\n";
}
sub GenerateConversionScript
{
- my $target = shift;
- my $sql = "";
- my $F;
-
- print "Generating conversion proc script...";
- my $mf = read_file('src/backend/utils/mb/conversion_procs/Makefile');
- $mf =~ s{\\\s*[\r\n]+}{}mg;
- $mf =~ /^CONVERSIONS\s*=\s*(.*)$/m
- || die "Could not find CONVERSIONS line in conversions Makefile\n";
- my @pieces = split /\s+/,$1;
- while ($#pieces > 0)
- {
- my $name = shift @pieces;
- my $se = shift @pieces;
- my $de = shift @pieces;
- my $func = shift @pieces;
- my $obj = shift @pieces;
- $sql .= "-- $se --> $de\n";
- $sql .=
+ my $target = shift;
+ my $sql = "";
+ my $F;
+
+ print "Generating conversion proc script...";
+ my $mf = read_file('src/backend/utils/mb/conversion_procs/Makefile');
+ $mf =~ s{\\\s*[\r\n]+}{}mg;
+ $mf =~ /^CONVERSIONS\s*=\s*(.*)$/m
+ || die "Could not find CONVERSIONS line in conversions Makefile\n";
+ my @pieces = split /\s+/,$1;
+ while ($#pieces > 0)
+ {
+ my $name = shift @pieces;
+ my $se = shift @pieces;
+ my $de = shift @pieces;
+ my $func = shift @pieces;
+ my $obj = shift @pieces;
+ $sql .= "-- $se --> $de\n";
+ $sql .=
"CREATE OR REPLACE FUNCTION $func (INTEGER, INTEGER, CSTRING, INTERNAL, INTEGER) RETURNS VOID AS '\$libdir/$obj', '$func' LANGUAGE C STRICT;\n";
- $sql .=
+ $sql .=
"COMMENT ON FUNCTION $func(INTEGER, INTEGER, CSTRING, INTERNAL, INTEGER) IS 'internal conversion function for $se to $de';\n";
- $sql .= "DROP CONVERSION pg_catalog.$name;\n";
- $sql .= "CREATE DEFAULT CONVERSION pg_catalog.$name FOR '$se' TO '$de' FROM $func;\n";
- $sql .= "COMMENT ON CONVERSION pg_catalog.$name IS 'conversion for $se to $de';\n";
- }
- open($F,">$target/share/conversion_create.sql")
- || die "Could not write to conversion_create.sql\n";
- print $F $sql;
- close($F);
- print "\n";
+ $sql .= "DROP CONVERSION pg_catalog.$name;\n";
+ $sql .=
+ "CREATE DEFAULT CONVERSION pg_catalog.$name FOR '$se' TO '$de' FROM $func;\n";
+ $sql .= "COMMENT ON CONVERSION pg_catalog.$name IS 'conversion for $se to $de';\n";
+ }
+ open($F,">$target/share/conversion_create.sql")
+ || die "Could not write to conversion_create.sql\n";
+ print $F $sql;
+ close($F);
+ print "\n";
}
sub GenerateTimezoneFiles
{
- my $target = shift;
- my $conf = shift;
- my $mf = read_file("src/timezone/Makefile");
- $mf =~ s{\\\s*[\r\n]+}{}mg;
- $mf =~ /^TZDATA\s*:?=\s*(.*)$/m || die "Could not find TZDATA row in timezone makefile\n";
- my @tzfiles = split /\s+/,$1;
- unshift @tzfiles,'';
- print "Generating timezone files...";
- system(
- "$conf\\zic\\zic -d \"$target/share/timezone\" " . join(" src/timezone/data/", @tzfiles));
- print "\n";
+ my $target = shift;
+ my $conf = shift;
+ my $mf = read_file("src/timezone/Makefile");
+ $mf =~ s{\\\s*[\r\n]+}{}mg;
+ $mf =~ /^TZDATA\s*:?=\s*(.*)$/m || die "Could not find TZDATA row in timezone makefile\n";
+ my @tzfiles = split /\s+/,$1;
+ unshift @tzfiles,'';
+ print "Generating timezone files...";
+ system("$conf\\zic\\zic -d \"$target/share/timezone\" "
+ . join(" src/timezone/data/", @tzfiles));
+ print "\n";
}
sub GenerateTsearchFiles
{
- my $target = shift;
-
- print "Generating tsearch script...";
- my $F;
- my $tmpl = read_file('src/backend/snowball/snowball.sql.in');
- my $mf = read_file('src/backend/snowball/Makefile');
- $mf =~ s{\\\s*[\r\n]+}{}mg;
- $mf =~ /^LANGUAGES\s*=\s*(.*)$/m
- || die "Could not find LANGUAGES line in snowball Makefile\n";
- my @pieces = split /\s+/,$1;
- open($F,">$target/share/snowball_create.sql")
- || die "Could not write snowball_create.sql";
- print $F read_file('src/backend/snowball/snowball_func.sql.in');
-
- while ($#pieces > 0)
- {
- my $lang = shift @pieces || last;
- my $asclang = shift @pieces || last;
- my $txt = $tmpl;
- my $stop = '';
-
- if (-s "src/backend/snowball/stopwords/$lang.stop")
- {
- $stop = ", StopWords=$lang";
- }
-
- $txt =~ s#_LANGNAME_#${lang}#gs;
- $txt =~ s#_DICTNAME_#${lang}_stem#gs;
- $txt =~ s#_CFGNAME_#${lang}#gs;
- $txt =~ s#_ASCDICTNAME_#${asclang}_stem#gs;
- $txt =~ s#_NONASCDICTNAME_#${lang}_stem#gs;
- $txt =~ s#_STOPWORDS_#$stop#gs;
- print $F $txt;
- print ".";
- }
- close($F);
- print "\n";
+ my $target = shift;
+
+ print "Generating tsearch script...";
+ my $F;
+ my $tmpl = read_file('src/backend/snowball/snowball.sql.in');
+ my $mf = read_file('src/backend/snowball/Makefile');
+ $mf =~ s{\\\s*[\r\n]+}{}mg;
+ $mf =~ /^LANGUAGES\s*=\s*(.*)$/m
+ || die "Could not find LANGUAGES line in snowball Makefile\n";
+ my @pieces = split /\s+/,$1;
+ open($F,">$target/share/snowball_create.sql")
+ || die "Could not write snowball_create.sql";
+ print $F read_file('src/backend/snowball/snowball_func.sql.in');
+
+ while ($#pieces > 0)
+ {
+ my $lang = shift @pieces || last;
+ my $asclang = shift @pieces || last;
+ my $txt = $tmpl;
+ my $stop = '';
+
+ if (-s "src/backend/snowball/stopwords/$lang.stop")
+ {
+ $stop = ", StopWords=$lang";
+ }
+
+ $txt =~ s#_LANGNAME_#${lang}#gs;
+ $txt =~ s#_DICTNAME_#${lang}_stem#gs;
+ $txt =~ s#_CFGNAME_#${lang}#gs;
+ $txt =~ s#_ASCDICTNAME_#${asclang}_stem#gs;
+ $txt =~ s#_NONASCDICTNAME_#${lang}_stem#gs;
+ $txt =~ s#_STOPWORDS_#$stop#gs;
+ print $F $txt;
+ print ".";
+ }
+ close($F);
+ print "\n";
}
sub CopyContribFiles
{
- my $config = shift;
- my $target = shift;
-
- print "Copying contrib data files...";
- my $D;
- opendir($D, 'contrib') || croak "Could not opendir on contrib!\n";
- while (my $d = readdir($D))
- {
- next if ($d =~ /^\./);
- next unless (-f "contrib/$d/Makefile");
- next if ($d eq "uuid-ossp"&& !defined($config->{uuid}));
- next if ($d eq "sslinfo" && !defined($config->{openssl}));
- next if ($d eq "xml2" && !defined($config->{xml}));
- next if ($d eq "sepgsql");
-
- my $mf = read_file("contrib/$d/Makefile");
- $mf =~ s{\\s*[\r\n]+}{}mg;
-
- # Note: we currently don't support setting MODULEDIR in the makefile
- my $moduledir = 'contrib';
-
- my $flist = '';
- if ($mf =~ /^EXTENSION\s*=\s*(.*)$/m) {$flist .= $1}
- if ($flist ne '')
- {
- $moduledir = 'extension';
- $flist = ParseAndCleanRule($flist, $mf);
-
- foreach my $f (split /\s+/,$flist)
- {
- lcopy(
- 'contrib/' . $d . '/' . $f . '.control',
- $target . '/share/extension/' . $f . '.control'
- )|| croak("Could not copy file $f.control in contrib $d");
- print '.';
- }
- }
-
- $flist = '';
- if ($mf =~ /^DATA_built\s*=\s*(.*)$/m) {$flist .= $1}
- if ($mf =~ /^DATA\s*=\s*(.*)$/m) {$flist .= " $1"}
- $flist =~ s/^\s*//; # Remove leading spaces if we had only DATA_built
-
- if ($flist ne '')
- {
- $flist = ParseAndCleanRule($flist, $mf);
-
- foreach my $f (split /\s+/,$flist)
- {
- lcopy('contrib/' . $d . '/' . $f,
- $target . '/share/' . $moduledir . '/' . basename($f))
- || croak("Could not copy file $f in contrib $d");
- print '.';
- }
- }
-
- $flist = '';
- if ($mf =~ /^DATA_TSEARCH\s*=\s*(.*)$/m) {$flist .= $1}
- if ($flist ne '')
- {
- $flist = ParseAndCleanRule($flist, $mf);
-
- foreach my $f (split /\s+/,$flist)
- {
- lcopy('contrib/' . $d . '/' . $f,$target . '/share/tsearch_data/' . basename($f))
- || croak("Could not copy file $f in contrib $d");
- print '.';
- }
- }
-
- $flist = '';
- if ($mf =~ /^DOCS\s*=\s*(.*)$/mg) {$flist .= $1}
- if ($flist ne '')
- {
- $flist = ParseAndCleanRule($flist, $mf);
-
- # Special case for contrib/spi
- $flist =
+ my $config = shift;
+ my $target = shift;
+
+ print "Copying contrib data files...";
+ my $D;
+ opendir($D, 'contrib') || croak "Could not opendir on contrib!\n";
+ while (my $d = readdir($D))
+ {
+ next if ($d =~ /^\./);
+ next unless (-f "contrib/$d/Makefile");
+ next if ($d eq "uuid-ossp"&& !defined($config->{uuid}));
+ next if ($d eq "sslinfo" && !defined($config->{openssl}));
+ next if ($d eq "xml2" && !defined($config->{xml}));
+ next if ($d eq "sepgsql");
+
+ my $mf = read_file("contrib/$d/Makefile");
+ $mf =~ s{\\s*[\r\n]+}{}mg;
+
+ # Note: we currently don't support setting MODULEDIR in the makefile
+ my $moduledir = 'contrib';
+
+ my $flist = '';
+ if ($mf =~ /^EXTENSION\s*=\s*(.*)$/m) {$flist .= $1}
+ if ($flist ne '')
+ {
+ $moduledir = 'extension';
+ $flist = ParseAndCleanRule($flist, $mf);
+
+ foreach my $f (split /\s+/,$flist)
+ {
+ lcopy(
+ 'contrib/' . $d . '/' . $f . '.control',
+ $target . '/share/extension/' . $f . '.control'
+ )|| croak("Could not copy file $f.control in contrib $d");
+ print '.';
+ }
+ }
+
+ $flist = '';
+ if ($mf =~ /^DATA_built\s*=\s*(.*)$/m) {$flist .= $1}
+ if ($mf =~ /^DATA\s*=\s*(.*)$/m) {$flist .= " $1"}
+ $flist =~ s/^\s*//; # Remove leading spaces if we had only DATA_built
+
+ if ($flist ne '')
+ {
+ $flist = ParseAndCleanRule($flist, $mf);
+
+ foreach my $f (split /\s+/,$flist)
+ {
+ lcopy('contrib/' . $d . '/' . $f,
+ $target . '/share/' . $moduledir . '/' . basename($f))
+ || croak("Could not copy file $f in contrib $d");
+ print '.';
+ }
+ }
+
+ $flist = '';
+ if ($mf =~ /^DATA_TSEARCH\s*=\s*(.*)$/m) {$flist .= $1}
+ if ($flist ne '')
+ {
+ $flist = ParseAndCleanRule($flist, $mf);
+
+ foreach my $f (split /\s+/,$flist)
+ {
+ lcopy('contrib/' . $d . '/' . $f,
+ $target . '/share/tsearch_data/' . basename($f))
+ || croak("Could not copy file $f in contrib $d");
+ print '.';
+ }
+ }
+
+ $flist = '';
+ if ($mf =~ /^DOCS\s*=\s*(.*)$/mg) {$flist .= $1}
+ if ($flist ne '')
+ {
+ $flist = ParseAndCleanRule($flist, $mf);
+
+ # Special case for contrib/spi
+ $flist =
"autoinc.example insert_username.example moddatetime.example refint.example timetravel.example"
- if ($d eq 'spi');
- foreach my $f (split /\s+/,$flist)
- {
- lcopy('contrib/' . $d . '/' . $f,$target . '/doc/' . $moduledir . '/' . $f)
- || croak("Could not copy file $f in contrib $d");
- print '.';
- }
- }
- }
- closedir($D);
- print "\n";
+ if ($d eq 'spi');
+ foreach my $f (split /\s+/,$flist)
+ {
+ lcopy('contrib/' . $d . '/' . $f,
+ $target . '/doc/' . $moduledir . '/' . $f)
+ || croak("Could not copy file $f in contrib $d");
+ print '.';
+ }
+ }
+ }
+ closedir($D);
+ print "\n";
}
sub ParseAndCleanRule
{
- my $flist = shift;
- my $mf = shift;
-
- # Strip out $(addsuffix) rules
- if (index($flist, '$(addsuffix ') >= 0)
- {
- my $pcount = 0;
- my $i;
- for ($i = index($flist, '$(addsuffix ') + 12; $i < length($flist); $i++)
- {
- $pcount++ if (substr($flist, $i, 1) eq '(');
- $pcount-- if (substr($flist, $i, 1) eq ')');
- last if ($pcount < 0);
- }
- $flist = substr($flist, 0, index($flist, '$(addsuffix ')) . substr($flist, $i+1);
- }
- return $flist;
+ my $flist = shift;
+ my $mf = shift;
+
+ # Strip out $(addsuffix) rules
+ if (index($flist, '$(addsuffix ') >= 0)
+ {
+ my $pcount = 0;
+ my $i;
+ for ($i = index($flist, '$(addsuffix ') + 12; $i < length($flist); $i++)
+ {
+ $pcount++ if (substr($flist, $i, 1) eq '(');
+ $pcount-- if (substr($flist, $i, 1) eq ')');
+ last if ($pcount < 0);
+ }
+ $flist = substr($flist, 0, index($flist, '$(addsuffix ')) . substr($flist, $i+1);
+ }
+ return $flist;
}
sub CopyIncludeFiles
{
- my $target = shift;
-
- EnsureDirectories($target, 'include', 'include/libpq','include/internal',
- 'include/internal/libpq','include/server', 'include/server/parser');
-
- CopyFiles(
- 'Public headers',
- $target . '/include/',
- 'src/include/', 'postgres_ext.h', 'pg_config.h', 'pg_config_os.h', 'pg_config_manual.h'
- );
- lcopy('src/include/libpq/libpq-fs.h', $target . '/include/libpq/')
- || croak 'Could not copy libpq-fs.h';
-
- CopyFiles(
- 'Libpq headers',
- $target . '/include/',
- 'src/interfaces/libpq/','libpq-fe.h', 'libpq-events.h'
- );
- CopyFiles(
- 'Libpq internal headers',
- $target .'/include/internal/',
- 'src/interfaces/libpq/', 'libpq-int.h', 'pqexpbuffer.h'
- );
-
- CopyFiles(
- 'Internal headers',
- $target . '/include/internal/',
- 'src/include/', 'c.h', 'port.h', 'postgres_fe.h'
- );
- lcopy('src/include/libpq/pqcomm.h', $target . '/include/internal/libpq/')
- || croak 'Could not copy pqcomm.h';
-
- CopyFiles(
- 'Server headers',
- $target . '/include/server/',
- 'src/include/', 'pg_config.h', 'pg_config_os.h'
- );
- CopyFiles('Grammar header', $target . '/include/server/parser/','src/backend/parser/','gram.h');
- CopySetOfFiles('',[ glob("src\\include\\*.h") ],$target . '/include/server/');
- my $D;
- opendir($D, 'src/include') || croak "Could not opendir on src/include!\n";
-
- CopyFiles('PL/pgSQL header', $target . '/include/server/','src/pl/plpgsql/src/', 'plpgsql.h');
-
- # some xcopy progs don't like mixed slash style paths
- (my $ctarget = $target) =~ s!/!\\!g;
- while (my $d = readdir($D))
- {
- next if ($d =~ /^\./);
- next if ($d eq '.git');
- next if ($d eq 'CVS');
- next unless (-d "src/include/$d");
-
- EnsureDirectories("$target/include/server/$d");
- system(qq{xcopy /s /i /q /r /y src\\include\\$d\\*.h "$ctarget\\include\\server\\$d\\"})
- && croak("Failed to copy include directory $d\n");
- }
- closedir($D);
-
- my $mf = read_file('src/interfaces/ecpg/include/Makefile');
- $mf =~ s{\\s*[\r\n]+}{}mg;
- $mf =~ /^ecpg_headers\s*=\s*(.*)$/m || croak "Could not find ecpg_headers line\n";
- CopyFiles(
- 'ECPG headers',
- $target . '/include/',
- 'src/interfaces/ecpg/include/',
- 'ecpg_config.h', split /\s+/,$1
- );
- $mf =~ /^informix_headers\s*=\s*(.*)$/m || croak "Could not find informix_headers line\n";
- EnsureDirectories($target . '/include', 'informix', 'informix/esql');
- CopyFiles(
- 'ECPG informix headers',
- $target .'/include/informix/esql/',
- 'src/interfaces/ecpg/include/',
- split /\s+/,$1
- );
+ my $target = shift;
+
+ EnsureDirectories($target, 'include', 'include/libpq','include/internal',
+ 'include/internal/libpq','include/server', 'include/server/parser');
+
+ CopyFiles(
+ 'Public headers',
+ $target . '/include/',
+ 'src/include/', 'postgres_ext.h', 'pg_config.h', 'pg_config_os.h',
+ 'pg_config_manual.h'
+ );
+ lcopy('src/include/libpq/libpq-fs.h', $target . '/include/libpq/')
+ || croak 'Could not copy libpq-fs.h';
+
+ CopyFiles(
+ 'Libpq headers',
+ $target . '/include/',
+ 'src/interfaces/libpq/','libpq-fe.h', 'libpq-events.h'
+ );
+ CopyFiles(
+ 'Libpq internal headers',
+ $target .'/include/internal/',
+ 'src/interfaces/libpq/', 'libpq-int.h', 'pqexpbuffer.h'
+ );
+
+ CopyFiles(
+ 'Internal headers',
+ $target . '/include/internal/',
+ 'src/include/', 'c.h', 'port.h', 'postgres_fe.h'
+ );
+ lcopy('src/include/libpq/pqcomm.h', $target . '/include/internal/libpq/')
+ || croak 'Could not copy pqcomm.h';
+
+ CopyFiles(
+ 'Server headers',
+ $target . '/include/server/',
+ 'src/include/', 'pg_config.h', 'pg_config_os.h'
+ );
+ CopyFiles(
+ 'Grammar header',
+ $target . '/include/server/parser/',
+ 'src/backend/parser/','gram.h'
+ );
+ CopySetOfFiles('',[ glob("src\\include\\*.h") ],$target . '/include/server/');
+ my $D;
+ opendir($D, 'src/include') || croak "Could not opendir on src/include!\n";
+
+ CopyFiles(
+ 'PL/pgSQL header',
+ $target . '/include/server/',
+ 'src/pl/plpgsql/src/', 'plpgsql.h'
+ );
+
+ # some xcopy progs don't like mixed slash style paths
+ (my $ctarget = $target) =~ s!/!\\!g;
+ while (my $d = readdir($D))
+ {
+ next if ($d =~ /^\./);
+ next if ($d eq '.git');
+ next if ($d eq 'CVS');
+ next unless (-d "src/include/$d");
+
+ EnsureDirectories("$target/include/server/$d");
+ system(
+qq{xcopy /s /i /q /r /y src\\include\\$d\\*.h "$ctarget\\include\\server\\$d\\"}
+ )&& croak("Failed to copy include directory $d\n");
+ }
+ closedir($D);
+
+ my $mf = read_file('src/interfaces/ecpg/include/Makefile');
+ $mf =~ s{\\s*[\r\n]+}{}mg;
+ $mf =~ /^ecpg_headers\s*=\s*(.*)$/m || croak "Could not find ecpg_headers line\n";
+ CopyFiles(
+ 'ECPG headers',
+ $target . '/include/',
+ 'src/interfaces/ecpg/include/',
+ 'ecpg_config.h', split /\s+/,$1
+ );
+ $mf =~ /^informix_headers\s*=\s*(.*)$/m || croak "Could not find informix_headers line\n";
+ EnsureDirectories($target . '/include', 'informix', 'informix/esql');
+ CopyFiles(
+ 'ECPG informix headers',
+ $target .'/include/informix/esql/',
+ 'src/interfaces/ecpg/include/',
+ split /\s+/,$1
+ );
}
sub GenerateNLSFiles
{
- my $target = shift;
- my $nlspath = shift;
- my $majorver = shift;
-
- print "Installing NLS files...";
- EnsureDirectories($target, "share/locale");
- my @flist;
- File::Find::find(
- {
- wanted =>sub {
- /^nls\.mk\z/s
- &&!push(@flist, $File::Find::name);
- }
- },
- "src"
- );
- foreach (@flist)
- {
- my $prgm = DetermineCatalogName($_);
- s/nls.mk/po/;
- my $dir = $_;
- next unless ($dir =~ /([^\/]+)\/po$/);
- foreach (glob("$dir/*.po"))
- {
- my $lang;
- next unless /([^\/]+)\.po/;
- $lang = $1;
-
- EnsureDirectories($target, "share/locale/$lang", "share/locale/$lang/LC_MESSAGES");
- system(
+ my $target = shift;
+ my $nlspath = shift;
+ my $majorver = shift;
+
+ print "Installing NLS files...";
+ EnsureDirectories($target, "share/locale");
+ my @flist;
+ File::Find::find(
+ {
+ wanted =>sub {
+ /^nls\.mk\z/s
+ &&!push(@flist, $File::Find::name);
+ }
+ },
+ "src"
+ );
+ foreach (@flist)
+ {
+ my $prgm = DetermineCatalogName($_);
+ s/nls.mk/po/;
+ my $dir = $_;
+ next unless ($dir =~ /([^\/]+)\/po$/);
+ foreach (glob("$dir/*.po"))
+ {
+ my $lang;
+ next unless /([^\/]+)\.po/;
+ $lang = $1;
+
+ EnsureDirectories($target, "share/locale/$lang",
+ "share/locale/$lang/LC_MESSAGES");
+ system(
"\"$nlspath\\bin\\msgfmt\" -o \"$target\\share\\locale\\$lang\\LC_MESSAGES\\$prgm-$majorver.mo\" $_"
- )&& croak("Could not run msgfmt on $dir\\$_");
- print ".";
- }
- }
- print "\n";
+ )&& croak("Could not run msgfmt on $dir\\$_");
+ print ".";
+ }
+ }
+ print "\n";
}
sub DetermineMajorVersion
{
- my $f = read_file('src/include/pg_config.h') || croak 'Could not open pg_config.h';
- $f =~ /^#define\s+PG_MAJORVERSION\s+"([^"]+)"/m || croak 'Could not determine major version';
- return $1;
+ my $f = read_file('src/include/pg_config.h') || croak 'Could not open pg_config.h';
+ $f =~ /^#define\s+PG_MAJORVERSION\s+"([^"]+)"/m
+ || croak 'Could not determine major version';
+ return $1;
}
sub DetermineCatalogName
{
- my $filename = shift;
+ my $filename = shift;
- my $f = read_file($filename) || croak "Could not open $filename";
- $f =~ /CATALOG_NAME\s*\:?=\s*(\S+)/m || croak "Could not determine catalog name in $filename";
- return $1;
+ my $f = read_file($filename) || croak "Could not open $filename";
+ $f =~ /CATALOG_NAME\s*\:?=\s*(\S+)/m
+ || croak "Could not determine catalog name in $filename";
+ return $1;
}
sub read_file
{
- my $filename = shift;
- my $F;
- my $t = $/;
+ my $filename = shift;
+ my $F;
+ my $t = $/;
- undef $/;
- open($F, $filename) || die "Could not open file $filename\n";
- my $txt = <$F>;
- close($F);
- $/ = $t;
+ undef $/;
+ open($F, $filename) || die "Could not open file $filename\n";
+ my $txt = <$F>;
+ close($F);
+ $/ = $t;
- return $txt;
+ return $txt;
}
1;
diff --git a/src/tools/msvc/MSBuildProject.pm b/src/tools/msvc/MSBuildProject.pm
index fcce9ebb68..4e6ea1f740 100644
--- a/src/tools/msvc/MSBuildProject.pm
+++ b/src/tools/msvc/MSBuildProject.pm
@@ -13,158 +13,159 @@ use base qw(Project);
sub _new
{
- my $classname = shift;
- my $self = $classname->SUPER::_new(@_);
- bless($self, $classname);
+ my $classname = shift;
+ my $self = $classname->SUPER::_new(@_);
+ bless($self, $classname);
- $self->{filenameExtension} = '.vcxproj';
+ $self->{filenameExtension} = '.vcxproj';
- return $self;
+ return $self;
}
sub WriteHeader
{
- my ($self, $f) = @_;
+ my ($self, $f) = @_;
- print $f <<EOF;
+ print $f <<EOF;
<?xml version="1.0" encoding="Windows-1252"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
EOF
- $self->WriteConfigurationHeader($f, 'Debug');
- $self->WriteConfigurationHeader($f, 'Release');
- print $f <<EOF;
+ $self->WriteConfigurationHeader($f, 'Debug');
+ $self->WriteConfigurationHeader($f, 'Release');
+ print $f <<EOF;
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>$self->{guid}</ProjectGuid>
</PropertyGroup>
<Import Project="\$(VCTargetsPath)\\Microsoft.Cpp.Default.props" />
EOF
- $self->WriteConfigurationPropertyGroup($f, 'Release',{ wholeopt=>'false' });
- $self->WriteConfigurationPropertyGroup($f, 'Debug',{ wholeopt=>'false' });
- print $f <<EOF;
+ $self->WriteConfigurationPropertyGroup($f, 'Release',{wholeopt=>'false'});
+ $self->WriteConfigurationPropertyGroup($f, 'Debug',{wholeopt=>'false'});
+ print $f <<EOF;
<Import Project="\$(VCTargetsPath)\\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
EOF
- $self->WritePropertySheetsPropertyGroup($f, 'Release');
- $self->WritePropertySheetsPropertyGroup($f, 'Debug');
- print $f <<EOF;
+ $self->WritePropertySheetsPropertyGroup($f, 'Release');
+ $self->WritePropertySheetsPropertyGroup($f, 'Debug');
+ print $f <<EOF;
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>
EOF
- $self->WriteAdditionalProperties($f, 'Debug');
- $self->WriteAdditionalProperties($f, 'Release');
- print $f <<EOF;
+ $self->WriteAdditionalProperties($f, 'Debug');
+ $self->WriteAdditionalProperties($f, 'Release');
+ print $f <<EOF;
</PropertyGroup>
EOF
- $self->WriteItemDefinitionGroup(
- $f, 'Debug',
- {
- defs=>'_DEBUG;DEBUG=1;',
- opt=>'Disabled',
- strpool=>'false',
- runtime=>'MultiThreadedDebugDLL'
- }
- );
- $self->WriteItemDefinitionGroup($f, 'Release',
- { defs=>'', opt=>'Full', strpool=>'true', runtime=>'MultiThreadedDLL' });
+ $self->WriteItemDefinitionGroup(
+ $f, 'Debug',
+ {
+ defs=>'_DEBUG;DEBUG=1;',
+ opt=>'Disabled',
+ strpool=>'false',
+ runtime=>'MultiThreadedDebugDLL'
+ }
+ );
+ $self->WriteItemDefinitionGroup($f, 'Release',
+ {defs=>'', opt=>'Full', strpool=>'true', runtime=>'MultiThreadedDLL'});
}
sub AddDefine
{
- my ($self, $def) = @_;
+ my ($self, $def) = @_;
- $self->{defines} .= $def . ';';
+ $self->{defines} .= $def . ';';
}
sub WriteReferences
{
- my ($self, $f) = @_;
+ my ($self, $f) = @_;
- my @references = @{$self->{references}};
+ my @references = @{$self->{references}};
- if (scalar(@references))
- {
- print $f <<EOF;
+ if (scalar(@references))
+ {
+ print $f <<EOF;
<ItemGroup>
EOF
- foreach my $ref (@references)
- {
- print $f <<EOF;
+ foreach my $ref (@references)
+ {
+ print $f <<EOF;
<ProjectReference Include="$ref->{name}$ref->{filenameExtension}">
<Project>$ref->{guid}</Project>
</ProjectReference>
EOF
- }
- print $f <<EOF;
+ }
+ print $f <<EOF;
</ItemGroup>
EOF
- }
+ }
}
sub WriteFiles
{
- my ($self, $f) = @_;
- print $f <<EOF;
+ my ($self, $f) = @_;
+ print $f <<EOF;
<ItemGroup>
EOF
- my @grammarFiles = ();
- my @resourceFiles = ();
- my %uniquefiles;
- foreach my $fileNameWithPath (sort keys %{ $self->{files} })
- {
- confess "Bad format filename '$fileNameWithPath'\n"
- unless ($fileNameWithPath =~ /^(.*)\\([^\\]+)\.[r]?[cyl]$/);
- my $dir = $1;
- my $fileName = $2;
- if ($fileNameWithPath =~ /\.y$/ or $fileNameWithPath =~ /\.l$/)
- {
- push @grammarFiles, $fileNameWithPath;
- }
- elsif ($fileNameWithPath =~ /\.rc$/)
- {
- push @resourceFiles, $fileNameWithPath;
- }
- elsif (defined($uniquefiles{$fileName}))
- {
-
- # File already exists, so fake a new name
- my $obj = $dir;
- $obj =~ s/\\/_/g;
-
- print $f <<EOF;
+ my @grammarFiles = ();
+ my @resourceFiles = ();
+ my %uniquefiles;
+ foreach my $fileNameWithPath (sort keys %{$self->{files}})
+ {
+ confess "Bad format filename '$fileNameWithPath'\n"
+ unless ($fileNameWithPath =~ /^(.*)\\([^\\]+)\.[r]?[cyl]$/);
+ my $dir = $1;
+ my $fileName = $2;
+ if ($fileNameWithPath =~ /\.y$/ or $fileNameWithPath =~ /\.l$/)
+ {
+ push @grammarFiles, $fileNameWithPath;
+ }
+ elsif ($fileNameWithPath =~ /\.rc$/)
+ {
+ push @resourceFiles, $fileNameWithPath;
+ }
+ elsif (defined($uniquefiles{$fileName}))
+ {
+
+ # File already exists, so fake a new name
+ my $obj = $dir;
+ $obj =~ s/\\/_/g;
+
+ print $f <<EOF;
<ClCompile Include="$fileNameWithPath">
<ObjectFileName Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">.\\debug\\$self->{name}\\${obj}_$fileName.obj</ObjectFileName>
<ObjectFileName Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">.\\release\\$self->{name}\\${obj}_$fileName.obj</ObjectFileName>
</ClCompile>
EOF
- }
- else
- {
- $uniquefiles{$fileName} = 1;
- print $f <<EOF;
+ }
+ else
+ {
+ $uniquefiles{$fileName} = 1;
+ print $f <<EOF;
<ClCompile Include="$fileNameWithPath" />
EOF
- }
+ }
- }
- print $f <<EOF;
+ }
+ print $f <<EOF;
</ItemGroup>
EOF
- if (scalar(@grammarFiles))
- {
- print $f <<EOF;
+ if (scalar(@grammarFiles))
+ {
+ print $f <<EOF;
<ItemGroup>
EOF
- foreach my $grammarFile (@grammarFiles)
- {
- (my $outputFile = $grammarFile) =~ s/\.(y|l)$/.c/;
- if ($grammarFile =~ /\.y$/)
- {
- $outputFile =~ s{^src\\pl\\plpgsql\\src\\gram.c$}{src\\pl\\plpgsql\\src\\pl_gram.c};
- print $f <<EOF;
+ foreach my $grammarFile (@grammarFiles)
+ {
+ (my $outputFile = $grammarFile) =~ s/\.(y|l)$/.c/;
+ if ($grammarFile =~ /\.y$/)
+ {
+ $outputFile =~
+s{^src\\pl\\plpgsql\\src\\gram.c$}{src\\pl\\plpgsql\\src\\pl_gram.c};
+ print $f <<EOF;
<CustomBuild Include="$grammarFile">
<Message Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">Running bison on $grammarFile</Message>
<Command Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">perl "src\\tools\\msvc\\pgbison.pl" "$grammarFile"</Command>
@@ -176,10 +177,10 @@ EOF
<Outputs Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">$outputFile;%(Outputs)</Outputs>
</CustomBuild>
EOF
- }
- else #if ($grammarFile =~ /\.l$/)
- {
- print $f <<EOF;
+ }
+ else #if ($grammarFile =~ /\.l$/)
+ {
+ print $f <<EOF;
<CustomBuild Include="$grammarFile">
<Message Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">Running flex on $grammarFile</Message>
<Command Condition="'\$(Configuration)|\$(Platform)'=='Debug|$self->{platform}'">perl "src\\tools\\msvc\\pgflex.pl" "$grammarFile"</Command>
@@ -191,33 +192,33 @@ EOF
<Outputs Condition="'\$(Configuration)|\$(Platform)'=='Release|$self->{platform}'">$outputFile;%(Outputs)</Outputs>
</CustomBuild>
EOF
- }
- }
- print $f <<EOF;
+ }
+ }
+ print $f <<EOF;
</ItemGroup>
EOF
- }
- if (scalar(@resourceFiles))
- {
- print $f <<EOF;
+ }
+ if (scalar(@resourceFiles))
+ {
+ print $f <<EOF;
<ItemGroup>
EOF
- foreach my $rcFile (@resourceFiles)
- {
- print $f <<EOF;
+ foreach my $rcFile (@resourceFiles)
+ {
+ print $f <<EOF;
<ResourceCompile Include="$rcFile" />
EOF
- }
- print $f <<EOF;
+ }
+ print $f <<EOF;
</ItemGroup>
EOF
- }
+ }
}
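
WriteFiles sorts each source into one of three buckets (bison/flex grammars, resource scripts, plain C) and, when two sources share a basename, folds the directory into the object name so the .obj files cannot collide. The classification in isolation (the paths are hypothetical):

    use strict;
    use warnings;

    my (%uniquefiles, @grammarFiles, @resourceFiles);
    foreach my $fileNameWithPath ('src\a\scan.l', 'src\a\win32ver.rc',
        'src\a\util.c', 'src\b\util.c')
    {
        $fileNameWithPath =~ /^(.*)\\([^\\]+)\.[r]?[cyl]$/
          or die "Bad format filename '$fileNameWithPath'\n";
        my ($dir, $fileName) = ($1, $2);
        if ($fileNameWithPath =~ /\.[yl]$/)
        {
            push @grammarFiles, $fileNameWithPath;
        }
        elsif ($fileNameWithPath =~ /\.rc$/)
        {
            push @resourceFiles, $fileNameWithPath;
        }
        elsif (defined($uniquefiles{$fileName}))
        {
            # second util.c: fake a distinct object name from the directory
            (my $obj = $dir) =~ s/\\/_/g;
            print "compile $fileNameWithPath as ${obj}_$fileName.obj\n";
        }
        else
        {
            $uniquefiles{$fileName} = 1;
        }
    }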
sub WriteConfigurationHeader
{
- my ($self, $f, $cfgname) = @_;
- print $f <<EOF;
+ my ($self, $f, $cfgname) = @_;
+ print $f <<EOF;
<ProjectConfiguration Include="$cfgname|$self->{platform}">
<Configuration>$cfgname</Configuration>
<Platform>$self->{platform}</Platform>
@@ -227,13 +228,13 @@ EOF
sub WriteConfigurationPropertyGroup
{
- my ($self, $f, $cfgname, $p) = @_;
- my $cfgtype =
- ($self->{type} eq "exe")
- ?'Application'
- :($self->{type} eq "dll"?'DynamicLibrary':'StaticLibrary');
+ my ($self, $f, $cfgname, $p) = @_;
+ my $cfgtype =
+ ($self->{type} eq "exe")
+ ?'Application'
+ :($self->{type} eq "dll"?'DynamicLibrary':'StaticLibrary');
- print $f <<EOF;
+ print $f <<EOF;
<PropertyGroup Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'" Label="Configuration">
<ConfigurationType>$cfgtype</ConfigurationType>
<UseOfMfc>false</UseOfMfc>
@@ -245,8 +246,8 @@ EOF
sub WritePropertySheetsPropertyGroup
{
- my ($self, $f, $cfgname) = @_;
- print $f <<EOF;
+ my ($self, $f, $cfgname) = @_;
+ print $f <<EOF;
<ImportGroup Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'" Label="PropertySheets">
<Import Project="\$(UserRootDir)\\Microsoft.Cpp.\$(Platform).user.props" Condition="exists('\$(UserRootDir)\\Microsoft.Cpp.\$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
@@ -255,8 +256,8 @@ EOF
sub WriteAdditionalProperties
{
- my ($self, $f, $cfgname) = @_;
- print $f <<EOF;
+ my ($self, $f, $cfgname) = @_;
+ print $f <<EOF;
<OutDir Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">.\\$cfgname\\$self->{name}\\</OutDir>
<IntDir Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">.\\$cfgname\\$self->{name}\\</IntDir>
<LinkIncremental Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">false</LinkIncremental>
@@ -265,21 +266,21 @@ EOF
sub WriteItemDefinitionGroup
{
- my ($self, $f, $cfgname, $p) = @_;
- my $cfgtype =
- ($self->{type} eq "exe")
- ?'Application'
- :($self->{type} eq "dll"?'DynamicLibrary':'StaticLibrary');
- my $libs = $self->GetAdditionalLinkerDependencies($cfgname, ';');
-
- my $targetmachine = $self->{platform} eq 'Win32' ? 'MachineX86' : 'MachineX64';
-
- my $includes = $self->{includes};
- unless ($includes eq '' or $includes =~ /;$/)
- {
- $includes .= ';';
- }
- print $f <<EOF;
+ my ($self, $f, $cfgname, $p) = @_;
+ my $cfgtype =
+ ($self->{type} eq "exe")
+ ?'Application'
+ :($self->{type} eq "dll"?'DynamicLibrary':'StaticLibrary');
+ my $libs = $self->GetAdditionalLinkerDependencies($cfgname, ';');
+
+ my $targetmachine = $self->{platform} eq 'Win32' ? 'MachineX86' : 'MachineX64';
+
+ my $includes = $self->{includes};
+ unless ($includes eq '' or $includes =~ /;$/)
+ {
+ $includes .= ';';
+ }
+ print $f <<EOF;
<ItemDefinitionGroup Condition="'\$(Configuration)|\$(Platform)'=='$cfgname|$self->{platform}'">
<ClCompile>
<Optimization>$p->{opt}</Optimization>
@@ -314,49 +315,49 @@ sub WriteItemDefinitionGroup
<SubSystem>Console</SubSystem>
<TargetMachine>$targetmachine</TargetMachine>
EOF
- if ($self->{disablelinkerwarnings})
- {
- print $f
+ if ($self->{disablelinkerwarnings})
+ {
+ print $f
" <AdditionalOptions>/ignore:$self->{disablelinkerwarnings} \%(AdditionalOptions)</AdditionalOptions>\n";
- }
- if ($self->{implib})
- {
- my $l = $self->{implib};
- $l =~ s/__CFGNAME__/$cfgname/g;
- print $f " <ImportLibrary>$l</ImportLibrary>\n";
- }
- if ($self->{def})
- {
- my $d = $self->{def};
- $d =~ s/__CFGNAME__/$cfgname/g;
- print $f " <ModuleDefinitionFile>$d</ModuleDefinitionFile>\n";
- }
- print $f <<EOF;
+ }
+ if ($self->{implib})
+ {
+ my $l = $self->{implib};
+ $l =~ s/__CFGNAME__/$cfgname/g;
+ print $f " <ImportLibrary>$l</ImportLibrary>\n";
+ }
+ if ($self->{def})
+ {
+ my $d = $self->{def};
+ $d =~ s/__CFGNAME__/$cfgname/g;
+ print $f " <ModuleDefinitionFile>$d</ModuleDefinitionFile>\n";
+ }
+ print $f <<EOF;
</Link>
<ResourceCompile>
<AdditionalIncludeDirectories>src\\include;\%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
</ResourceCompile>
EOF
- if ($self->{builddef})
- {
- print $f <<EOF;
+ if ($self->{builddef})
+ {
+ print $f <<EOF;
<PreLinkEvent>
<Message>Generate DEF file</Message>
<Command>perl src\\tools\\msvc\\gendef.pl $cfgname\\$self->{name} $self->{platform}</Command>
</PreLinkEvent>
EOF
- }
- print $f <<EOF;
+ }
+ print $f <<EOF;
</ItemDefinitionGroup>
EOF
}
sub Footer
{
- my ($self, $f) = @_;
- $self->WriteReferences($f);
+ my ($self, $f) = @_;
+ $self->WriteReferences($f);
- print $f <<EOF;
+ print $f <<EOF;
<Import Project="\$(VCTargetsPath)\\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
@@ -376,13 +377,13 @@ use base qw(MSBuildProject);
sub new
{
- my $classname = shift;
- my $self = $classname->SUPER::_new(@_);
- bless($self, $classname);
+ my $classname = shift;
+ my $self = $classname->SUPER::_new(@_);
+ bless($self, $classname);
- $self->{vcver} = '10.00';
+ $self->{vcver} = '10.00';
- return $self;
+ return $self;
}
1;
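
VC2010Project's constructor shows the subclassing idiom used throughout these modules: delegate to the parent's _new, re-bless into the subclass, then override a field or two. The same idiom reduced to a self-contained example (the class names are invented):

    use strict;
    use warnings;

    package BaseProject;
    sub _new
    {
        my $classname = shift;
        my $self = {@_};
        bless($self, $classname);
        return $self;
    }

    package TidyProject;
    our @ISA = ('BaseProject');
    sub new
    {
        my $classname = shift;
        my $self      = $classname->SUPER::_new(@_);
        bless($self, $classname);    # re-bless into the subclass
        $self->{vcver} = '10.00';    # subclass-specific default
        return $self;
    }

    package main;
    my $proj = TidyProject->new(name => 'demo');
    print "$proj->{name} targets VC $proj->{vcver}\n";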
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index f0fad43fea..23023e54b7 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -30,488 +30,506 @@ my $libpq;
my $contrib_defines = {'refint' => 'REFINT_VERBOSE'};
my @contrib_uselibpq = ('dblink', 'oid2name', 'pgbench', 'pg_upgrade','vacuumlo');
my @contrib_uselibpgport =(
- 'oid2name', 'pgbench', 'pg_standby','pg_archivecleanup',
- 'pg_test_fsync', 'pg_test_timing', 'pg_upgrade', 'vacuumlo'
+ 'oid2name', 'pgbench', 'pg_standby','pg_archivecleanup',
+ 'pg_test_fsync', 'pg_test_timing', 'pg_upgrade', 'vacuumlo'
);
my $contrib_extralibs = {'pgbench' => ['wsock32.lib']};
my $contrib_extraincludes = {'tsearch2' => ['contrib/tsearch2'], 'dblink' => ['src/backend']};
my $contrib_extrasource = {
- 'cube' => ['cubescan.l','cubeparse.y'],
- 'seg' => ['segscan.l','segparse.y']
+ 'cube' => ['cubescan.l','cubeparse.y'],
+ 'seg' => ['segscan.l','segparse.y']
};
my @contrib_excludes = ('pgcrypto','intagg','sepgsql');
sub mkvcbuild
{
- our $config = shift;
-
- chdir('..\..\..') if (-d '..\msvc' && -d '..\..\..\src');
- die 'Must run from root or msvc directory' unless (-d 'src\tools\msvc' && -d 'src');
-
- my $vsVersion = DetermineVisualStudioVersion();
-
- $solution = CreateSolution($vsVersion, $config);
-
- our @pgportfiles = qw(
- chklocale.c crypt.c fls.c fseeko.c getrusage.c inet_aton.c random.c
- srandom.c getaddrinfo.c gettimeofday.c inet_net_ntop.c kill.c open.c
- erand48.c snprintf.c strlcat.c strlcpy.c dirmod.c exec.c noblock.c path.c
- pgcheckdir.c pg_crc.c pgmkdirp.c pgsleep.c pgstrcasecmp.c qsort.c qsort_arg.c
- sprompt.c thread.c getopt.c getopt_long.c dirent.c rint.c win32env.c
- win32error.c win32setlocale.c);
-
- $libpgport = $solution->AddProject('libpgport','lib','misc');
- $libpgport->AddDefine('FRONTEND');
- $libpgport->AddFiles('src\port',@pgportfiles);
-
- $postgres = $solution->AddProject('postgres','exe','','src\backend');
- $postgres->AddIncludeDir('src\backend');
- $postgres->AddDir('src\backend\port\win32');
- $postgres->AddFile('src\backend\utils\fmgrtab.c');
- $postgres->ReplaceFile('src\backend\port\dynloader.c','src\backend\port\dynloader\win32.c');
- $postgres->ReplaceFile('src\backend\port\pg_sema.c','src\backend\port\win32_sema.c');
- $postgres->ReplaceFile('src\backend\port\pg_shmem.c','src\backend\port\win32_shmem.c');
- $postgres->ReplaceFile('src\backend\port\pg_latch.c','src\backend\port\win32_latch.c');
- $postgres->AddFiles('src\port',@pgportfiles);
- $postgres->AddDir('src\timezone');
- $postgres->AddFiles('src\backend\parser','scan.l','gram.y');
- $postgres->AddFiles('src\backend\bootstrap','bootscanner.l','bootparse.y');
- $postgres->AddFiles('src\backend\utils\misc','guc-file.l');
- $postgres->AddFiles('src\backend\replication', 'repl_scanner.l', 'repl_gram.y');
- $postgres->AddDefine('BUILDING_DLL');
- $postgres->AddLibrary('wsock32.lib');
- $postgres->AddLibrary('ws2_32.lib');
- $postgres->AddLibrary('secur32.lib');
- $postgres->AddLibrary('wldap32.lib') if ($solution->{options}->{ldap});
- $postgres->FullExportDLL('postgres.lib');
-
- my $snowball = $solution->AddProject('dict_snowball','dll','','src\backend\snowball');
- $snowball->RelocateFiles(
- 'src\backend\snowball\libstemmer',
- sub {
- return shift !~ /dict_snowball.c$/;
- }
- );
- $snowball->AddIncludeDir('src\include\snowball');
- $snowball->AddReference($postgres);
-
- my $plpgsql = $solution->AddProject('plpgsql','dll','PLs','src\pl\plpgsql\src');
- $plpgsql->AddFiles('src\pl\plpgsql\src', 'gram.y');
- $plpgsql->AddReference($postgres);
-
- if ($solution->{options}->{perl})
- {
- my $plperlsrc = "src\\pl\\plperl\\";
- my $plperl = $solution->AddProject('plperl','dll','PLs','src\pl\plperl');
- $plperl->AddIncludeDir($solution->{options}->{perl} . '/lib/CORE');
- $plperl->AddDefine('PLPERL_HAVE_UID_GID');
- foreach my $xs ('SPI.xs', 'Util.xs')
- {
- (my $xsc = $xs) =~ s/\.xs/.c/;
- if (Solution::IsNewer("$plperlsrc$xsc","$plperlsrc$xs"))
- {
- my $xsubppdir = first { -e "$_\\ExtUtils\\xsubpp" } @INC;
- print "Building $plperlsrc$xsc...\n";
- system( $solution->{options}->{perl}
- . '/bin/perl '
- . "$xsubppdir/ExtUtils/xsubpp -typemap "
- . $solution->{options}->{perl}
- . '/lib/ExtUtils/typemap '
- . "$plperlsrc$xs "
- . ">$plperlsrc$xsc");
- if ((!(-f "$plperlsrc$xsc")) || -z "$plperlsrc$xsc")
- {
- unlink("$plperlsrc$xsc"); # if zero size
- die "Failed to create $xsc.\n";
- }
- }
- }
- if ( Solution::IsNewer('src\pl\plperl\perlchunks.h','src\pl\plperl\plc_perlboot.pl')
- ||Solution::IsNewer('src\pl\plperl\perlchunks.h','src\pl\plperl\plc_trusted.pl'))
- {
- print 'Building src\pl\plperl\perlchunks.h ...' . "\n";
- my $basedir = getcwd;
- chdir 'src\pl\plperl';
- system( $solution->{options}->{perl}
- . '/bin/perl '
- . 'text2macro.pl '
- . '--strip="^(\#.*|\s*)$$" '
- . 'plc_perlboot.pl plc_trusted.pl '
- . '>perlchunks.h');
- chdir $basedir;
- if ((!(-f 'src\pl\plperl\perlchunks.h')) || -z 'src\pl\plperl\perlchunks.h')
- {
- unlink('src\pl\plperl\perlchunks.h'); # if zero size
- die 'Failed to create perlchunks.h' . "\n";
- }
- }
- if ( Solution::IsNewer('src\pl\plperl\plperl_opmask.h','src\pl\plperl\plperl_opmask.pl'))
- {
- print 'Building src\pl\plperl\plperl_opmask.h ...' . "\n";
- my $basedir = getcwd;
- chdir 'src\pl\plperl';
- system( $solution->{options}->{perl}
- . '/bin/perl '
- . 'plperl_opmask.pl '
- . 'plperl_opmask.h');
- chdir $basedir;
- if ((!(-f 'src\pl\plperl\plperl_opmask.h')) || -z 'src\pl\plperl\plperl_opmask.h')
- {
- unlink('src\pl\plperl\plperl_opmask.h'); # if zero size
- die 'Failed to create plperl_opmask.h' . "\n";
- }
- }
- $plperl->AddReference($postgres);
- my @perl_libs =
- grep {/perl\d+.lib$/ }glob($solution->{options}->{perl} . '\lib\CORE\perl*.lib');
- if (@perl_libs == 1)
- {
- $plperl->AddLibrary($perl_libs[0]);
- }
- else
- {
- die "could not identify perl library version";
- }
- }
-
- if ($solution->{options}->{python})
- {
-
- # Attempt to get python version and location.
- # Assume python.exe in specified dir.
- open(P,
- $solution->{options}->{python}
- . "\\python -c \"import sys;print(sys.prefix);print(str(sys.version_info[0])+str(sys.version_info[1]))\" |"
- ) || die "Could not query for python version!\n";
- my $pyprefix = <P>;
- chomp($pyprefix);
- my $pyver = <P>;
- chomp($pyver);
- close(P);
-
- # Sometimes (always?) if python is not present, the execution
- # appears to work, but gives no data...
- die "Failed to query python for version information\n"
- if (!(defined($pyprefix) && defined($pyver)));
-
- my $pymajorver = substr($pyver, 0, 1);
- my $plpython =
- $solution->AddProject('plpython' . $pymajorver, 'dll','PLs', 'src\pl\plpython');
- $plpython->AddIncludeDir($pyprefix . '\include');
- $plpython->AddLibrary($pyprefix . "\\Libs\\python$pyver.lib");
- $plpython->AddReference($postgres);
- }
-
- if ($solution->{options}->{tcl})
- {
- my $pltcl = $solution->AddProject('pltcl','dll','PLs','src\pl\tcl');
- $pltcl->AddIncludeDir($solution->{options}->{tcl} . '\include');
- $pltcl->AddReference($postgres);
- if (-e $solution->{options}->{tcl} . '\lib\tcl85.lib')
- {
- $pltcl->AddLibrary($solution->{options}->{tcl} . '\lib\tcl85.lib');
- }
- else
- {
- $pltcl->AddLibrary($solution->{options}->{tcl} . '\lib\tcl84.lib');
- }
- }
-
- $libpq = $solution->AddProject('libpq','dll','interfaces','src\interfaces\libpq');
- $libpq->AddDefine('FRONTEND');
- $libpq->AddDefine('UNSAFE_STAT_OK');
- $libpq->AddIncludeDir('src\port');
- $libpq->AddLibrary('wsock32.lib');
- $libpq->AddLibrary('secur32.lib');
- $libpq->AddLibrary('ws2_32.lib');
- $libpq->AddLibrary('wldap32.lib') if ($solution->{options}->{ldap});
- $libpq->UseDef('src\interfaces\libpq\libpqdll.def');
- $libpq->ReplaceFile('src\interfaces\libpq\libpqrc.c','src\interfaces\libpq\libpq.rc');
- $libpq->AddReference($libpgport);
-
- my $libpqwalreceiver = $solution->AddProject('libpqwalreceiver', 'dll', '',
- 'src\backend\replication\libpqwalreceiver');
- $libpqwalreceiver->AddIncludeDir('src\interfaces\libpq');
- $libpqwalreceiver->AddReference($postgres,$libpq);
-
- my $pgtypes =
- $solution->AddProject('libpgtypes','dll','interfaces','src\interfaces\ecpg\pgtypeslib');
- $pgtypes->AddDefine('FRONTEND');
- $pgtypes->AddReference($libpgport);
- $pgtypes->UseDef('src\interfaces\ecpg\pgtypeslib\pgtypeslib.def');
- $pgtypes->AddIncludeDir('src\interfaces\ecpg\include');
-
- my $libecpg =$solution->AddProject('libecpg','dll','interfaces','src\interfaces\ecpg\ecpglib');
- $libecpg->AddDefine('FRONTEND');
- $libecpg->AddIncludeDir('src\interfaces\ecpg\include');
- $libecpg->AddIncludeDir('src\interfaces\libpq');
- $libecpg->AddIncludeDir('src\port');
- $libecpg->UseDef('src\interfaces\ecpg\ecpglib\ecpglib.def');
- $libecpg->AddLibrary('wsock32.lib');
- $libecpg->AddReference($libpq,$pgtypes,$libpgport);
-
- my $libecpgcompat =
- $solution->AddProject('libecpg_compat','dll','interfaces','src\interfaces\ecpg\compatlib');
- $libecpgcompat->AddIncludeDir('src\interfaces\ecpg\include');
- $libecpgcompat->AddIncludeDir('src\interfaces\libpq');
- $libecpgcompat->UseDef('src\interfaces\ecpg\compatlib\compatlib.def');
- $libecpgcompat->AddReference($pgtypes,$libecpg,$libpgport);
-
- my $ecpg = $solution->AddProject('ecpg','exe','interfaces','src\interfaces\ecpg\preproc');
- $ecpg->AddIncludeDir('src\interfaces\ecpg\include');
- $ecpg->AddIncludeDir('src\interfaces\libpq');
- $ecpg->AddPrefixInclude('src\interfaces\ecpg\preproc');
- $ecpg->AddFiles('src\interfaces\ecpg\preproc','pgc.l','preproc.y');
- $ecpg->AddDefine('MAJOR_VERSION=4');
- $ecpg->AddDefine('MINOR_VERSION=2');
- $ecpg->AddDefine('PATCHLEVEL=1');
- $ecpg->AddDefine('ECPG_COMPILE');
- $ecpg->AddReference($libpgport);
-
- my $pgregress_ecpg = $solution->AddProject('pg_regress_ecpg','exe','misc');
- $pgregress_ecpg->AddFile('src\interfaces\ecpg\test\pg_regress_ecpg.c');
- $pgregress_ecpg->AddFile('src\test\regress\pg_regress.c');
- $pgregress_ecpg->AddIncludeDir('src\port');
- $pgregress_ecpg->AddIncludeDir('src\test\regress');
- $pgregress_ecpg->AddDefine('HOST_TUPLE="i686-pc-win32vc"');
- $pgregress_ecpg->AddDefine('FRONTEND');
- $pgregress_ecpg->AddReference($libpgport);
-
- my $isolation_tester = $solution->AddProject('isolationtester','exe','misc');
- $isolation_tester->AddFile('src\test\isolation\isolationtester.c');
- $isolation_tester->AddFile('src\test\isolation\specparse.y');
- $isolation_tester->AddFile('src\test\isolation\specscanner.l');
- $isolation_tester->AddFile('src\test\isolation\specparse.c');
- $isolation_tester->AddIncludeDir('src\test\isolation');
- $isolation_tester->AddIncludeDir('src\port');
- $isolation_tester->AddIncludeDir('src\test\regress');
- $isolation_tester->AddIncludeDir('src\interfaces\libpq');
- $isolation_tester->AddDefine('HOST_TUPLE="i686-pc-win32vc"');
- $isolation_tester->AddDefine('FRONTEND');
- $isolation_tester->AddLibrary('wsock32.lib');
- $isolation_tester->AddReference($libpq, $libpgport);
-
- my $pgregress_isolation = $solution->AddProject('pg_isolation_regress','exe','misc');
- $pgregress_isolation->AddFile('src\test\isolation\isolation_main.c');
- $pgregress_isolation->AddFile('src\test\regress\pg_regress.c');
- $pgregress_isolation->AddIncludeDir('src\port');
- $pgregress_isolation->AddIncludeDir('src\test\regress');
- $pgregress_isolation->AddDefine('HOST_TUPLE="i686-pc-win32vc"');
- $pgregress_isolation->AddDefine('FRONTEND');
- $pgregress_isolation->AddReference($libpgport);
-
- # src/bin
- my $initdb = AddSimpleFrontend('initdb');
- $initdb->AddIncludeDir('src\interfaces\libpq');
- $initdb->AddIncludeDir('src\timezone');
- $initdb->AddDefine('FRONTEND');
- $initdb->AddLibrary('wsock32.lib');
- $initdb->AddLibrary('ws2_32.lib');
-
- my $pgbasebackup = AddSimpleFrontend('pg_basebackup', 1);
- $pgbasebackup->AddFile('src\bin\pg_basebackup\pg_basebackup.c');
- $pgbasebackup->AddLibrary('ws2_32.lib');
-
- my $pgreceivexlog = AddSimpleFrontend('pg_basebackup', 1);
- $pgreceivexlog->{name} = 'pg_receivexlog';
- $pgreceivexlog->AddFile('src\bin\pg_basebackup\pg_receivexlog.c');
- $pgreceivexlog->AddLibrary('ws2_32.lib');
-
- my $pgconfig = AddSimpleFrontend('pg_config');
-
- my $pgcontrol = AddSimpleFrontend('pg_controldata');
-
- my $pgctl = AddSimpleFrontend('pg_ctl', 1);
-
- my $pgreset = AddSimpleFrontend('pg_resetxlog');
-
- my $pgevent = $solution->AddProject('pgevent','dll','bin');
- $pgevent->AddFiles('src\bin\pgevent','pgevent.c','pgmsgevent.rc');
- $pgevent->AddResourceFile('src\bin\pgevent','Eventlog message formatter');
- $pgevent->RemoveFile('src\bin\pgevent\win32ver.rc');
- $pgevent->UseDef('src\bin\pgevent\pgevent.def');
- $pgevent->DisableLinkerWarnings('4104');
-
- my $psql = AddSimpleFrontend('psql', 1);
- $psql->AddIncludeDir('src\bin\pg_dump');
- $psql->AddIncludeDir('src\backend');
- $psql->AddFile('src\bin\psql\psqlscan.l');
-
- my $pgdump = AddSimpleFrontend('pg_dump', 1);
- $pgdump->AddIncludeDir('src\backend');
- $pgdump->AddFile('src\bin\pg_dump\pg_dump.c');
- $pgdump->AddFile('src\bin\pg_dump\common.c');
- $pgdump->AddFile('src\bin\pg_dump\pg_dump_sort.c');
- $pgdump->AddFile('src\bin\pg_dump\keywords.c');
- $pgdump->AddFile('src\backend\parser\kwlookup.c');
-
- my $pgdumpall = AddSimpleFrontend('pg_dump', 1);
-
- # pg_dumpall doesn't use the files in the Makefile's $(OBJS), unlike
- # pg_dump and pg_restore.
- # So remove their sources from the project, keeping the other setup that
- # AddSimpleFrontend() has done.
- my @nodumpall = grep { m/src\\bin\\pg_dump\\.*\.c$/ }
- keys %{$pgdumpall->{files}};
- delete @{$pgdumpall->{files}}{@nodumpall};
- $pgdumpall->{name} = 'pg_dumpall';
- $pgdumpall->AddIncludeDir('src\backend');
- $pgdumpall->AddFile('src\bin\pg_dump\pg_dumpall.c');
- $pgdumpall->AddFile('src\bin\pg_dump\dumputils.c');
- $pgdumpall->AddFile('src\bin\pg_dump\dumpmem.c');
- $pgdumpall->AddFile('src\bin\pg_dump\keywords.c');
- $pgdumpall->AddFile('src\backend\parser\kwlookup.c');
-
- my $pgrestore = AddSimpleFrontend('pg_dump', 1);
- $pgrestore->{name} = 'pg_restore';
- $pgrestore->AddIncludeDir('src\backend');
- $pgrestore->AddFile('src\bin\pg_dump\pg_restore.c');
- $pgrestore->AddFile('src\bin\pg_dump\keywords.c');
- $pgrestore->AddFile('src\backend\parser\kwlookup.c');
-
- my $zic = $solution->AddProject('zic','exe','utils');
- $zic->AddFiles('src\timezone','zic.c','ialloc.c','scheck.c','localtime.c');
- $zic->AddReference($libpgport);
-
- if ($solution->{options}->{xml})
- {
- $contrib_extraincludes->{'pgxml'} = [
- $solution->{options}->{xml} . '\include',
- $solution->{options}->{xslt} . '\include',
- $solution->{options}->{iconv} . '\include'
- ];
-
- $contrib_extralibs->{'pgxml'} = [
- $solution->{options}->{xml} . '\lib\libxml2.lib',
- $solution->{options}->{xslt} . '\lib\libxslt.lib'
- ];
- }
- else
- {
- push @contrib_excludes,'xml2';
- }
-
- if (!$solution->{options}->{openssl})
- {
- push @contrib_excludes,'sslinfo';
- }
-
- if ($solution->{options}->{uuid})
- {
- $contrib_extraincludes->{'uuid-ossp'} = [ $solution->{options}->{uuid} . '\include' ];
- $contrib_extralibs->{'uuid-ossp'} = [ $solution->{options}->{uuid} . '\lib\uuid.lib' ];
- }
- else
- {
- push @contrib_excludes,'uuid-ossp';
- }
-
- # Pgcrypto makefile too complex to parse....
- my $pgcrypto = $solution->AddProject('pgcrypto','dll','crypto');
- $pgcrypto->AddFiles(
- 'contrib\pgcrypto','pgcrypto.c','px.c','px-hmac.c',
- 'px-crypt.c','crypt-gensalt.c','crypt-blowfish.c','crypt-des.c',
- 'crypt-md5.c','mbuf.c','pgp.c','pgp-armor.c',
- 'pgp-cfb.c','pgp-compress.c','pgp-decrypt.c','pgp-encrypt.c',
- 'pgp-info.c','pgp-mpi.c','pgp-pubdec.c','pgp-pubenc.c',
- 'pgp-pubkey.c','pgp-s2k.c','pgp-pgsql.c'
- );
- if ($solution->{options}->{openssl})
- {
- $pgcrypto->AddFiles('contrib\pgcrypto', 'openssl.c','pgp-mpi-openssl.c');
- }
- else
- {
- $pgcrypto->AddFiles(
- 'contrib\pgcrypto', 'md5.c','sha1.c','sha2.c',
- 'internal.c','internal-sha2.c','blf.c','rijndael.c',
- 'fortuna.c','random.c','pgp-mpi-internal.c','imath.c'
- );
- }
- $pgcrypto->AddReference($postgres);
- $pgcrypto->AddLibrary('wsock32.lib');
- my $mf = Project::read_file('contrib/pgcrypto/Makefile');
- GenerateContribSqlFiles('pgcrypto', $mf);
-
- my $D;
- opendir($D, 'contrib') || croak "Could not opendir on contrib!\n";
- while (my $d = readdir($D))
- {
- next if ($d =~ /^\./);
- next unless (-f "contrib/$d/Makefile");
- next if (grep {/^$d$/} @contrib_excludes);
- AddContrib($d);
- }
- closedir($D);
-
- $mf = Project::read_file('src\backend\utils\mb\conversion_procs\Makefile');
- $mf =~ s{\\\s*[\r\n]+}{}mg;
- $mf =~ m{SUBDIRS\s*=\s*(.*)$}m || die 'Could not match in conversion makefile' . "\n";
- foreach my $sub (split /\s+/,$1)
- {
- my $mf = Project::read_file('src\backend\utils\mb\conversion_procs\\' . $sub . '\Makefile');
- my $p = $solution->AddProject($sub, 'dll', 'conversion procs');
- $p->AddFile('src\backend\utils\mb\conversion_procs\\' . $sub . '\\' . $sub . '.c');
- if ($mf =~ m{^SRCS\s*\+=\s*(.*)$}m)
- {
- $p->AddFile('src\backend\utils\mb\conversion_procs\\' . $sub . '\\' . $1);
- }
- $p->AddReference($postgres);
- }
-
- $mf = Project::read_file('src\bin\scripts\Makefile');
- $mf =~ s{\\\s*[\r\n]+}{}mg;
- $mf =~ m{PROGRAMS\s*=\s*(.*)$}m || die 'Could not match in bin\scripts\Makefile' . "\n";
- foreach my $prg (split /\s+/,$1)
- {
- my $proj = $solution->AddProject($prg,'exe','bin');
- $mf =~ m{$prg\s*:\s*(.*)$}m || die "Could not find script define for $prg" . "\n";
- my @files = split /\s+/,$1;
- foreach my $f (@files)
- {
- $f =~ s/\.o$/\.c/;
- if ($f eq 'keywords.c')
- {
- $proj->AddFile('src\bin\pg_dump\keywords.c');
- }
- elsif ($f eq 'kwlookup.c')
- {
- $proj->AddFile('src\backend\parser\kwlookup.c');
- }
- elsif ($f eq 'dumputils.c')
- {
- $proj->AddFile('src\bin\pg_dump\dumputils.c');
- }
- elsif ($f =~ /print\.c$/)
- { # Also catches mbprint.c
- $proj->AddFile('src\bin\psql\\' . $f);
- }
- elsif ($f =~ /\.c$/)
- {
- $proj->AddFile('src\bin\scripts\\' . $f);
- }
- }
- $proj->AddIncludeDir('src\interfaces\libpq');
- $proj->AddIncludeDir('src\bin\pg_dump');
- $proj->AddIncludeDir('src\bin\psql');
- $proj->AddReference($libpq,$libpgport);
- $proj->AddResourceFile('src\bin\scripts','PostgreSQL Utility');
- }
-
- # Regression DLL and EXE
- my $regress = $solution->AddProject('regress','dll','misc');
- $regress->AddFile('src\test\regress\regress.c');
- $regress->AddReference($postgres);
-
- my $pgregress = $solution->AddProject('pg_regress','exe','misc');
- $pgregress->AddFile('src\test\regress\pg_regress.c');
- $pgregress->AddFile('src\test\regress\pg_regress_main.c');
- $pgregress->AddIncludeDir('src\port');
- $pgregress->AddDefine('HOST_TUPLE="i686-pc-win32vc"');
- $pgregress->AddReference($libpgport);
-
- $solution->Save();
- return $solution->{vcver};
+ our $config = shift;
+
+ chdir('..\..\..') if (-d '..\msvc' && -d '..\..\..\src');
+ die 'Must run from root or msvc directory' unless (-d 'src\tools\msvc' && -d 'src');
+
+ my $vsVersion = DetermineVisualStudioVersion();
+
+ $solution = CreateSolution($vsVersion, $config);
+
+ our @pgportfiles = qw(
+ chklocale.c crypt.c fls.c fseeko.c getrusage.c inet_aton.c random.c
+ srandom.c getaddrinfo.c gettimeofday.c inet_net_ntop.c kill.c open.c
+ erand48.c snprintf.c strlcat.c strlcpy.c dirmod.c exec.c noblock.c path.c
+ pgcheckdir.c pg_crc.c pgmkdirp.c pgsleep.c pgstrcasecmp.c qsort.c qsort_arg.c
+ sprompt.c thread.c getopt.c getopt_long.c dirent.c rint.c win32env.c
+ win32error.c win32setlocale.c);
+
+ $libpgport = $solution->AddProject('libpgport','lib','misc');
+ $libpgport->AddDefine('FRONTEND');
+ $libpgport->AddFiles('src\port',@pgportfiles);
+
+ $postgres = $solution->AddProject('postgres','exe','','src\backend');
+ $postgres->AddIncludeDir('src\backend');
+ $postgres->AddDir('src\backend\port\win32');
+ $postgres->AddFile('src\backend\utils\fmgrtab.c');
+ $postgres->ReplaceFile('src\backend\port\dynloader.c','src\backend\port\dynloader\win32.c');
+ $postgres->ReplaceFile('src\backend\port\pg_sema.c','src\backend\port\win32_sema.c');
+ $postgres->ReplaceFile('src\backend\port\pg_shmem.c','src\backend\port\win32_shmem.c');
+ $postgres->ReplaceFile('src\backend\port\pg_latch.c','src\backend\port\win32_latch.c');
+ $postgres->AddFiles('src\port',@pgportfiles);
+ $postgres->AddDir('src\timezone');
+ $postgres->AddFiles('src\backend\parser','scan.l','gram.y');
+ $postgres->AddFiles('src\backend\bootstrap','bootscanner.l','bootparse.y');
+ $postgres->AddFiles('src\backend\utils\misc','guc-file.l');
+ $postgres->AddFiles('src\backend\replication', 'repl_scanner.l', 'repl_gram.y');
+ $postgres->AddDefine('BUILDING_DLL');
+ $postgres->AddLibrary('wsock32.lib');
+ $postgres->AddLibrary('ws2_32.lib');
+ $postgres->AddLibrary('secur32.lib');
+ $postgres->AddLibrary('wldap32.lib') if ($solution->{options}->{ldap});
+ $postgres->FullExportDLL('postgres.lib');
+
+ my $snowball = $solution->AddProject('dict_snowball','dll','','src\backend\snowball');
+ $snowball->RelocateFiles(
+ 'src\backend\snowball\libstemmer',
+ sub {
+ return shift !~ /dict_snowball.c$/;
+ }
+ );
+ $snowball->AddIncludeDir('src\include\snowball');
+ $snowball->AddReference($postgres);
+
+ my $plpgsql = $solution->AddProject('plpgsql','dll','PLs','src\pl\plpgsql\src');
+ $plpgsql->AddFiles('src\pl\plpgsql\src', 'gram.y');
+ $plpgsql->AddReference($postgres);
+
+ if ($solution->{options}->{perl})
+ {
+ my $plperlsrc = "src\\pl\\plperl\\";
+ my $plperl = $solution->AddProject('plperl','dll','PLs','src\pl\plperl');
+ $plperl->AddIncludeDir($solution->{options}->{perl} . '/lib/CORE');
+ $plperl->AddDefine('PLPERL_HAVE_UID_GID');
+ foreach my $xs ('SPI.xs', 'Util.xs')
+ {
+ (my $xsc = $xs) =~ s/\.xs/.c/;
+ if (Solution::IsNewer("$plperlsrc$xsc","$plperlsrc$xs"))
+ {
+ my $xsubppdir = first { -e "$_\\ExtUtils\\xsubpp" } @INC;
+ print "Building $plperlsrc$xsc...\n";
+ system( $solution->{options}->{perl}
+ . '/bin/perl '
+ . "$xsubppdir/ExtUtils/xsubpp -typemap "
+ . $solution->{options}->{perl}
+ . '/lib/ExtUtils/typemap '
+ . "$plperlsrc$xs "
+ . ">$plperlsrc$xsc");
+ if ((!(-f "$plperlsrc$xsc")) || -z "$plperlsrc$xsc")
+ {
+ unlink("$plperlsrc$xsc"); # if zero size
+ die "Failed to create $xsc.\n";
+ }
+ }
+ }
+ if (
+ Solution::IsNewer('src\pl\plperl\perlchunks.h',
+ 'src\pl\plperl\plc_perlboot.pl')
+ ||Solution::IsNewer(
+ 'src\pl\plperl\perlchunks.h','src\pl\plperl\plc_trusted.pl'
+ )
+ )
+ {
+ print 'Building src\pl\plperl\perlchunks.h ...' . "\n";
+ my $basedir = getcwd;
+ chdir 'src\pl\plperl';
+ system( $solution->{options}->{perl}
+ . '/bin/perl '
+ . 'text2macro.pl '
+ . '--strip="^(\#.*|\s*)$$" '
+ . 'plc_perlboot.pl plc_trusted.pl '
+ . '>perlchunks.h');
+ chdir $basedir;
+ if ((!(-f 'src\pl\plperl\perlchunks.h')) || -z 'src\pl\plperl\perlchunks.h')
+ {
+ unlink('src\pl\plperl\perlchunks.h'); # if zero size
+ die 'Failed to create perlchunks.h' . "\n";
+ }
+ }
+ if (
+ Solution::IsNewer(
+ 'src\pl\plperl\plperl_opmask.h',
+ 'src\pl\plperl\plperl_opmask.pl'
+ )
+ )
+ {
+ print 'Building src\pl\plperl\plperl_opmask.h ...' . "\n";
+ my $basedir = getcwd;
+ chdir 'src\pl\plperl';
+ system( $solution->{options}->{perl}
+ . '/bin/perl '
+ . 'plperl_opmask.pl '
+ . 'plperl_opmask.h');
+ chdir $basedir;
+ if ((!(-f 'src\pl\plperl\plperl_opmask.h'))
+ || -z 'src\pl\plperl\plperl_opmask.h')
+ {
+ unlink('src\pl\plperl\plperl_opmask.h'); # if zero size
+ die 'Failed to create plperl_opmask.h' . "\n";
+ }
+ }
+ $plperl->AddReference($postgres);
+ my @perl_libs =
+ grep {/perl\d+.lib$/ }glob($solution->{options}->{perl} . '\lib\CORE\perl*.lib');
+ if (@perl_libs == 1)
+ {
+ $plperl->AddLibrary($perl_libs[0]);
+ }
+ else
+ {
+ die "could not identify perl library version";
+ }
+ }
+
+ if ($solution->{options}->{python})
+ {
+
+ # Attempt to get python version and location.
+ # Assume python.exe in specified dir.
+ open(P,
+ $solution->{options}->{python}
+ . "\\python -c \"import sys;print(sys.prefix);print(str(sys.version_info[0])+str(sys.version_info[1]))\" |"
+ ) || die "Could not query for python version!\n";
+ my $pyprefix = <P>;
+ chomp($pyprefix);
+ my $pyver = <P>;
+ chomp($pyver);
+ close(P);
+
+ # Sometimes (always?) if python is not present, the execution
+ # appears to work, but gives no data...
+ die "Failed to query python for version information\n"
+ if (!(defined($pyprefix) && defined($pyver)));
+
+ my $pymajorver = substr($pyver, 0, 1);
+ my $plpython =
+ $solution->AddProject('plpython' . $pymajorver, 'dll','PLs', 'src\pl\plpython');
+ $plpython->AddIncludeDir($pyprefix . '\include');
+ $plpython->AddLibrary($pyprefix . "\\Libs\\python$pyver.lib");
+ $plpython->AddReference($postgres);
+ }
+
+ if ($solution->{options}->{tcl})
+ {
+ my $pltcl = $solution->AddProject('pltcl','dll','PLs','src\pl\tcl');
+ $pltcl->AddIncludeDir($solution->{options}->{tcl} . '\include');
+ $pltcl->AddReference($postgres);
+ if (-e $solution->{options}->{tcl} . '\lib\tcl85.lib')
+ {
+ $pltcl->AddLibrary($solution->{options}->{tcl} . '\lib\tcl85.lib');
+ }
+ else
+ {
+ $pltcl->AddLibrary($solution->{options}->{tcl} . '\lib\tcl84.lib');
+ }
+ }
+
+ $libpq = $solution->AddProject('libpq','dll','interfaces','src\interfaces\libpq');
+ $libpq->AddDefine('FRONTEND');
+ $libpq->AddDefine('UNSAFE_STAT_OK');
+ $libpq->AddIncludeDir('src\port');
+ $libpq->AddLibrary('wsock32.lib');
+ $libpq->AddLibrary('secur32.lib');
+ $libpq->AddLibrary('ws2_32.lib');
+ $libpq->AddLibrary('wldap32.lib') if ($solution->{options}->{ldap});
+ $libpq->UseDef('src\interfaces\libpq\libpqdll.def');
+ $libpq->ReplaceFile('src\interfaces\libpq\libpqrc.c','src\interfaces\libpq\libpq.rc');
+ $libpq->AddReference($libpgport);
+
+ my $libpqwalreceiver = $solution->AddProject('libpqwalreceiver', 'dll', '',
+ 'src\backend\replication\libpqwalreceiver');
+ $libpqwalreceiver->AddIncludeDir('src\interfaces\libpq');
+ $libpqwalreceiver->AddReference($postgres,$libpq);
+
+ my $pgtypes =
+ $solution->AddProject('libpgtypes','dll','interfaces','src\interfaces\ecpg\pgtypeslib');
+ $pgtypes->AddDefine('FRONTEND');
+ $pgtypes->AddReference($libpgport);
+ $pgtypes->UseDef('src\interfaces\ecpg\pgtypeslib\pgtypeslib.def');
+ $pgtypes->AddIncludeDir('src\interfaces\ecpg\include');
+
+ my $libecpg =
+ $solution->AddProject('libecpg','dll','interfaces','src\interfaces\ecpg\ecpglib');
+ $libecpg->AddDefine('FRONTEND');
+ $libecpg->AddIncludeDir('src\interfaces\ecpg\include');
+ $libecpg->AddIncludeDir('src\interfaces\libpq');
+ $libecpg->AddIncludeDir('src\port');
+ $libecpg->UseDef('src\interfaces\ecpg\ecpglib\ecpglib.def');
+ $libecpg->AddLibrary('wsock32.lib');
+ $libecpg->AddReference($libpq,$pgtypes,$libpgport);
+
+ my $libecpgcompat =$solution->AddProject('libecpg_compat','dll','interfaces',
+ 'src\interfaces\ecpg\compatlib');
+ $libecpgcompat->AddIncludeDir('src\interfaces\ecpg\include');
+ $libecpgcompat->AddIncludeDir('src\interfaces\libpq');
+ $libecpgcompat->UseDef('src\interfaces\ecpg\compatlib\compatlib.def');
+ $libecpgcompat->AddReference($pgtypes,$libecpg,$libpgport);
+
+ my $ecpg = $solution->AddProject('ecpg','exe','interfaces','src\interfaces\ecpg\preproc');
+ $ecpg->AddIncludeDir('src\interfaces\ecpg\include');
+ $ecpg->AddIncludeDir('src\interfaces\libpq');
+ $ecpg->AddPrefixInclude('src\interfaces\ecpg\preproc');
+ $ecpg->AddFiles('src\interfaces\ecpg\preproc','pgc.l','preproc.y');
+ $ecpg->AddDefine('MAJOR_VERSION=4');
+ $ecpg->AddDefine('MINOR_VERSION=2');
+ $ecpg->AddDefine('PATCHLEVEL=1');
+ $ecpg->AddDefine('ECPG_COMPILE');
+ $ecpg->AddReference($libpgport);
+
+ my $pgregress_ecpg = $solution->AddProject('pg_regress_ecpg','exe','misc');
+ $pgregress_ecpg->AddFile('src\interfaces\ecpg\test\pg_regress_ecpg.c');
+ $pgregress_ecpg->AddFile('src\test\regress\pg_regress.c');
+ $pgregress_ecpg->AddIncludeDir('src\port');
+ $pgregress_ecpg->AddIncludeDir('src\test\regress');
+ $pgregress_ecpg->AddDefine('HOST_TUPLE="i686-pc-win32vc"');
+ $pgregress_ecpg->AddDefine('FRONTEND');
+ $pgregress_ecpg->AddReference($libpgport);
+
+ my $isolation_tester = $solution->AddProject('isolationtester','exe','misc');
+ $isolation_tester->AddFile('src\test\isolation\isolationtester.c');
+ $isolation_tester->AddFile('src\test\isolation\specparse.y');
+ $isolation_tester->AddFile('src\test\isolation\specscanner.l');
+ $isolation_tester->AddFile('src\test\isolation\specparse.c');
+ $isolation_tester->AddIncludeDir('src\test\isolation');
+ $isolation_tester->AddIncludeDir('src\port');
+ $isolation_tester->AddIncludeDir('src\test\regress');
+ $isolation_tester->AddIncludeDir('src\interfaces\libpq');
+ $isolation_tester->AddDefine('HOST_TUPLE="i686-pc-win32vc"');
+ $isolation_tester->AddDefine('FRONTEND');
+ $isolation_tester->AddLibrary('wsock32.lib');
+ $isolation_tester->AddReference($libpq, $libpgport);
+
+ my $pgregress_isolation = $solution->AddProject('pg_isolation_regress','exe','misc');
+ $pgregress_isolation->AddFile('src\test\isolation\isolation_main.c');
+ $pgregress_isolation->AddFile('src\test\regress\pg_regress.c');
+ $pgregress_isolation->AddIncludeDir('src\port');
+ $pgregress_isolation->AddIncludeDir('src\test\regress');
+ $pgregress_isolation->AddDefine('HOST_TUPLE="i686-pc-win32vc"');
+ $pgregress_isolation->AddDefine('FRONTEND');
+ $pgregress_isolation->AddReference($libpgport);
+
+ # src/bin
+ my $initdb = AddSimpleFrontend('initdb');
+ $initdb->AddIncludeDir('src\interfaces\libpq');
+ $initdb->AddIncludeDir('src\timezone');
+ $initdb->AddDefine('FRONTEND');
+ $initdb->AddLibrary('wsock32.lib');
+ $initdb->AddLibrary('ws2_32.lib');
+
+ my $pgbasebackup = AddSimpleFrontend('pg_basebackup', 1);
+ $pgbasebackup->AddFile('src\bin\pg_basebackup\pg_basebackup.c');
+ $pgbasebackup->AddLibrary('ws2_32.lib');
+
+ my $pgreceivexlog = AddSimpleFrontend('pg_basebackup', 1);
+ $pgreceivexlog->{name} = 'pg_receivexlog';
+ $pgreceivexlog->AddFile('src\bin\pg_basebackup\pg_receivexlog.c');
+ $pgreceivexlog->AddLibrary('ws2_32.lib');
+
+ my $pgconfig = AddSimpleFrontend('pg_config');
+
+ my $pgcontrol = AddSimpleFrontend('pg_controldata');
+
+ my $pgctl = AddSimpleFrontend('pg_ctl', 1);
+
+ my $pgreset = AddSimpleFrontend('pg_resetxlog');
+
+ my $pgevent = $solution->AddProject('pgevent','dll','bin');
+ $pgevent->AddFiles('src\bin\pgevent','pgevent.c','pgmsgevent.rc');
+ $pgevent->AddResourceFile('src\bin\pgevent','Eventlog message formatter');
+ $pgevent->RemoveFile('src\bin\pgevent\win32ver.rc');
+ $pgevent->UseDef('src\bin\pgevent\pgevent.def');
+ $pgevent->DisableLinkerWarnings('4104');
+
+ my $psql = AddSimpleFrontend('psql', 1);
+ $psql->AddIncludeDir('src\bin\pg_dump');
+ $psql->AddIncludeDir('src\backend');
+ $psql->AddFile('src\bin\psql\psqlscan.l');
+
+ my $pgdump = AddSimpleFrontend('pg_dump', 1);
+ $pgdump->AddIncludeDir('src\backend');
+ $pgdump->AddFile('src\bin\pg_dump\pg_dump.c');
+ $pgdump->AddFile('src\bin\pg_dump\common.c');
+ $pgdump->AddFile('src\bin\pg_dump\pg_dump_sort.c');
+ $pgdump->AddFile('src\bin\pg_dump\keywords.c');
+ $pgdump->AddFile('src\backend\parser\kwlookup.c');
+
+ my $pgdumpall = AddSimpleFrontend('pg_dump', 1);
+
+ # pg_dumpall doesn't use the files in the Makefile's $(OBJS), unlike
+ # pg_dump and pg_restore.
+ # So remove their sources from the project, keeping the other setup that
+ # AddSimpleFrontend() has done.
+ my @nodumpall = grep { m/src\\bin\\pg_dump\\.*\.c$/ }
+ keys %{$pgdumpall->{files}};
+ delete @{$pgdumpall->{files}}{@nodumpall};
+ $pgdumpall->{name} = 'pg_dumpall';
+ $pgdumpall->AddIncludeDir('src\backend');
+ $pgdumpall->AddFile('src\bin\pg_dump\pg_dumpall.c');
+ $pgdumpall->AddFile('src\bin\pg_dump\dumputils.c');
+ $pgdumpall->AddFile('src\bin\pg_dump\dumpmem.c');
+ $pgdumpall->AddFile('src\bin\pg_dump\keywords.c');
+ $pgdumpall->AddFile('src\backend\parser\kwlookup.c');
+
+ my $pgrestore = AddSimpleFrontend('pg_dump', 1);
+ $pgrestore->{name} = 'pg_restore';
+ $pgrestore->AddIncludeDir('src\backend');
+ $pgrestore->AddFile('src\bin\pg_dump\pg_restore.c');
+ $pgrestore->AddFile('src\bin\pg_dump\keywords.c');
+ $pgrestore->AddFile('src\backend\parser\kwlookup.c');
+
+ my $zic = $solution->AddProject('zic','exe','utils');
+ $zic->AddFiles('src\timezone','zic.c','ialloc.c','scheck.c','localtime.c');
+ $zic->AddReference($libpgport);
+
+ if ($solution->{options}->{xml})
+ {
+ $contrib_extraincludes->{'pgxml'} = [
+ $solution->{options}->{xml} . '\include',
+ $solution->{options}->{xslt} . '\include',
+ $solution->{options}->{iconv} . '\include'
+ ];
+
+ $contrib_extralibs->{'pgxml'} = [
+ $solution->{options}->{xml} . '\lib\libxml2.lib',
+ $solution->{options}->{xslt} . '\lib\libxslt.lib'
+ ];
+ }
+ else
+ {
+ push @contrib_excludes,'xml2';
+ }
+
+ if (!$solution->{options}->{openssl})
+ {
+ push @contrib_excludes,'sslinfo';
+ }
+
+ if ($solution->{options}->{uuid})
+ {
+ $contrib_extraincludes->{'uuid-ossp'} =
+ [ $solution->{options}->{uuid} . '\include' ];
+ $contrib_extralibs->{'uuid-ossp'} =
+ [ $solution->{options}->{uuid} . '\lib\uuid.lib' ];
+ }
+ else
+ {
+ push @contrib_excludes,'uuid-ossp';
+ }
+
+ # Pgcrypto makefile too complex to parse....
+ my $pgcrypto = $solution->AddProject('pgcrypto','dll','crypto');
+ $pgcrypto->AddFiles(
+ 'contrib\pgcrypto','pgcrypto.c','px.c','px-hmac.c',
+ 'px-crypt.c','crypt-gensalt.c','crypt-blowfish.c','crypt-des.c',
+ 'crypt-md5.c','mbuf.c','pgp.c','pgp-armor.c',
+ 'pgp-cfb.c','pgp-compress.c','pgp-decrypt.c','pgp-encrypt.c',
+ 'pgp-info.c','pgp-mpi.c','pgp-pubdec.c','pgp-pubenc.c',
+ 'pgp-pubkey.c','pgp-s2k.c','pgp-pgsql.c'
+ );
+ if ($solution->{options}->{openssl})
+ {
+ $pgcrypto->AddFiles('contrib\pgcrypto', 'openssl.c','pgp-mpi-openssl.c');
+ }
+ else
+ {
+ $pgcrypto->AddFiles(
+ 'contrib\pgcrypto', 'md5.c',
+ 'sha1.c','sha2.c',
+ 'internal.c','internal-sha2.c',
+ 'blf.c','rijndael.c',
+ 'fortuna.c','random.c',
+ 'pgp-mpi-internal.c','imath.c'
+ );
+ }
+ $pgcrypto->AddReference($postgres);
+ $pgcrypto->AddLibrary('wsock32.lib');
+ my $mf = Project::read_file('contrib/pgcrypto/Makefile');
+ GenerateContribSqlFiles('pgcrypto', $mf);
+
+ my $D;
+ opendir($D, 'contrib') || croak "Could not opendir on contrib!\n";
+ while (my $d = readdir($D))
+ {
+ next if ($d =~ /^\./);
+ next unless (-f "contrib/$d/Makefile");
+ next if (grep {/^$d$/} @contrib_excludes);
+ AddContrib($d);
+ }
+ closedir($D);
+
+ $mf = Project::read_file('src\backend\utils\mb\conversion_procs\Makefile');
+ $mf =~ s{\\\s*[\r\n]+}{}mg;
+ $mf =~ m{SUBDIRS\s*=\s*(.*)$}m || die 'Could not match in conversion makefile' . "\n";
+ foreach my $sub (split /\s+/,$1)
+ {
+ my $mf = Project::read_file(
+ 'src\backend\utils\mb\conversion_procs\\' . $sub . '\Makefile');
+ my $p = $solution->AddProject($sub, 'dll', 'conversion procs');
+ $p->AddFile('src\backend\utils\mb\conversion_procs\\' . $sub . '\\' . $sub . '.c');
+ if ($mf =~ m{^SRCS\s*\+=\s*(.*)$}m)
+ {
+ $p->AddFile('src\backend\utils\mb\conversion_procs\\' . $sub . '\\' . $1);
+ }
+ $p->AddReference($postgres);
+ }
+
+ $mf = Project::read_file('src\bin\scripts\Makefile');
+ $mf =~ s{\\\s*[\r\n]+}{}mg;
+ $mf =~ m{PROGRAMS\s*=\s*(.*)$}m || die 'Could not match in bin\scripts\Makefile' . "\n";
+ foreach my $prg (split /\s+/,$1)
+ {
+ my $proj = $solution->AddProject($prg,'exe','bin');
+ $mf =~ m{$prg\s*:\s*(.*)$}m || die "Could not find script define for $prg" . "\n";
+ my @files = split /\s+/,$1;
+ foreach my $f (@files)
+ {
+ $f =~ s/\.o$/\.c/;
+ if ($f eq 'keywords.c')
+ {
+ $proj->AddFile('src\bin\pg_dump\keywords.c');
+ }
+ elsif ($f eq 'kwlookup.c')
+ {
+ $proj->AddFile('src\backend\parser\kwlookup.c');
+ }
+ elsif ($f eq 'dumputils.c')
+ {
+ $proj->AddFile('src\bin\pg_dump\dumputils.c');
+ }
+ elsif ($f =~ /print\.c$/)
+ { # Also catches mbprint.c
+ $proj->AddFile('src\bin\psql\\' . $f);
+ }
+ elsif ($f =~ /\.c$/)
+ {
+ $proj->AddFile('src\bin\scripts\\' . $f);
+ }
+ }
+ $proj->AddIncludeDir('src\interfaces\libpq');
+ $proj->AddIncludeDir('src\bin\pg_dump');
+ $proj->AddIncludeDir('src\bin\psql');
+ $proj->AddReference($libpq,$libpgport);
+ $proj->AddResourceFile('src\bin\scripts','PostgreSQL Utility');
+ }
+
+ # Regression DLL and EXE
+ my $regress = $solution->AddProject('regress','dll','misc');
+ $regress->AddFile('src\test\regress\regress.c');
+ $regress->AddReference($postgres);
+
+ my $pgregress = $solution->AddProject('pg_regress','exe','misc');
+ $pgregress->AddFile('src\test\regress\pg_regress.c');
+ $pgregress->AddFile('src\test\regress\pg_regress_main.c');
+ $pgregress->AddIncludeDir('src\port');
+ $pgregress->AddDefine('HOST_TUPLE="i686-pc-win32vc"');
+ $pgregress->AddReference($libpgport);
+
+ $solution->Save();
+ return $solution->{vcver};
}
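
One step in mkvcbuild above deserves a closer look: pg_dumpall starts as a clone of the pg_dump project, and every pg_dump source is then pruned in a single hash-slice delete. That operation in isolation (the file table here is a made-up miniature):

    use strict;
    use warnings;

    my $pgdumpall = { files => {
        'src\bin\pg_dump\pg_dump.c' => 1,
        'src\bin\pg_dump\common.c'  => 1,
        'src\port\snprintf.c'       => 1,
    } };
    my @nodumpall = grep { m/src\\bin\\pg_dump\\.*\.c$/ }
      keys %{ $pgdumpall->{files} };
    delete @{ $pgdumpall->{files} }{@nodumpall};    # slice delete
    # only src\port\snprintf.c survives
    print "$_\n" for sort keys %{ $pgdumpall->{files} };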
#####################
@@ -521,174 +539,178 @@ sub mkvcbuild
# Add a simple frontend project (exe)
sub AddSimpleFrontend
{
- my $n = shift;
- my $uselibpq= shift;
-
- my $p = $solution->AddProject($n,'exe','bin');
- $p->AddDir('src\bin\\' . $n);
- $p->AddReference($libpgport);
- if ($uselibpq)
- {
- $p->AddIncludeDir('src\interfaces\libpq');
- $p->AddReference($libpq);
- }
- return $p;
+ my $n = shift;
+ my $uselibpq= shift;
+
+ my $p = $solution->AddProject($n,'exe','bin');
+ $p->AddDir('src\bin\\' . $n);
+ $p->AddReference($libpgport);
+ if ($uselibpq)
+ {
+ $p->AddIncludeDir('src\interfaces\libpq');
+ $p->AddReference($libpq);
+ }
+ return $p;
}
# Add a simple contrib project
sub AddContrib
{
- my $n = shift;
- my $mf = Project::read_file('contrib\\' . $n . '\Makefile');
-
- if ($mf =~ /^MODULE_big\s*=\s*(.*)$/mg)
- {
- my $dn = $1;
- $mf =~ s{\\\s*[\r\n]+}{}mg;
- my $proj = $solution->AddProject($dn, 'dll', 'contrib');
- $mf =~ /^OBJS\s*=\s*(.*)$/gm || croak "Could not find objects in MODULE_big for $n\n";
- my $objs = $1;
- while ($objs =~ /\b([\w-]+\.o)\b/g)
- {
- my $o = $1;
- $o =~ s/\.o$/.c/;
- $proj->AddFile('contrib\\' . $n . '\\' . $o);
- }
- $proj->AddReference($postgres);
- if ($mf =~ /^SUBDIRS\s*:?=\s*(.*)$/mg)
- {
- foreach my $d (split /\s+/, $1)
- {
- my $mf2 = Project::read_file('contrib\\' . $n . '\\' . $d . '\Makefile');
- $mf2 =~ s{\\\s*[\r\n]+}{}mg;
- $mf2 =~ /^SUBOBJS\s*=\s*(.*)$/gm
- || croak "Could not find objects in MODULE_big for $n, subdir $d\n";
- $objs = $1;
- while ($objs =~ /\b([\w-]+\.o)\b/g)
- {
- my $o = $1;
- $o =~ s/\.o$/.c/;
- $proj->AddFile('contrib\\' . $n . '\\' . $d . '\\' . $o);
- }
- }
- }
- AdjustContribProj($proj);
- }
- elsif ($mf =~ /^MODULES\s*=\s*(.*)$/mg)
- {
- foreach my $mod (split /\s+/, $1)
- {
- my $proj = $solution->AddProject($mod, 'dll', 'contrib');
- $proj->AddFile('contrib\\' . $n . '\\' . $mod . '.c');
- $proj->AddReference($postgres);
- AdjustContribProj($proj);
- }
- }
- elsif ($mf =~ /^PROGRAM\s*=\s*(.*)$/mg)
- {
- my $proj = $solution->AddProject($1, 'exe', 'contrib');
- $mf =~ s{\\\s*[\r\n]+}{}mg;
- $mf =~ /^OBJS\s*=\s*(.*)$/gm || croak "Could not find objects in PROGRAM for $n\n";
- my $objs = $1;
- while ($objs =~ /\b([\w-]+\.o)\b/g)
- {
- my $o = $1;
- $o =~ s/\.o$/.c/;
- $proj->AddFile('contrib\\' . $n . '\\' . $o);
- }
- AdjustContribProj($proj);
- }
- else
- {
- croak "Could not determine contrib module type for $n\n";
- }
-
- # Are there any output data files to build?
- GenerateContribSqlFiles($n, $mf);
+ my $n = shift;
+ my $mf = Project::read_file('contrib\\' . $n . '\Makefile');
+
+ if ($mf =~ /^MODULE_big\s*=\s*(.*)$/mg)
+ {
+ my $dn = $1;
+ $mf =~ s{\\\s*[\r\n]+}{}mg;
+ my $proj = $solution->AddProject($dn, 'dll', 'contrib');
+ $mf =~ /^OBJS\s*=\s*(.*)$/gm
+ || croak "Could not find objects in MODULE_big for $n\n";
+ my $objs = $1;
+ while ($objs =~ /\b([\w-]+\.o)\b/g)
+ {
+ my $o = $1;
+ $o =~ s/\.o$/.c/;
+ $proj->AddFile('contrib\\' . $n . '\\' . $o);
+ }
+ $proj->AddReference($postgres);
+ if ($mf =~ /^SUBDIRS\s*:?=\s*(.*)$/mg)
+ {
+ foreach my $d (split /\s+/, $1)
+ {
+ my $mf2 =
+ Project::read_file('contrib\\' . $n . '\\' . $d . '\Makefile');
+ $mf2 =~ s{\\\s*[\r\n]+}{}mg;
+ $mf2 =~ /^SUBOBJS\s*=\s*(.*)$/gm
+ || croak
+ "Could not find objects in MODULE_big for $n, subdir $d\n";
+ $objs = $1;
+ while ($objs =~ /\b([\w-]+\.o)\b/g)
+ {
+ my $o = $1;
+ $o =~ s/\.o$/.c/;
+ $proj->AddFile('contrib\\' . $n . '\\' . $d . '\\' . $o);
+ }
+ }
+ }
+ AdjustContribProj($proj);
+ }
+ elsif ($mf =~ /^MODULES\s*=\s*(.*)$/mg)
+ {
+ foreach my $mod (split /\s+/, $1)
+ {
+ my $proj = $solution->AddProject($mod, 'dll', 'contrib');
+ $proj->AddFile('contrib\\' . $n . '\\' . $mod . '.c');
+ $proj->AddReference($postgres);
+ AdjustContribProj($proj);
+ }
+ }
+ elsif ($mf =~ /^PROGRAM\s*=\s*(.*)$/mg)
+ {
+ my $proj = $solution->AddProject($1, 'exe', 'contrib');
+ $mf =~ s{\\\s*[\r\n]+}{}mg;
+ $mf =~ /^OBJS\s*=\s*(.*)$/gm || croak "Could not find objects in PROGRAM for $n\n";
+ my $objs = $1;
+ while ($objs =~ /\b([\w-]+\.o)\b/g)
+ {
+ my $o = $1;
+ $o =~ s/\.o$/.c/;
+ $proj->AddFile('contrib\\' . $n . '\\' . $o);
+ }
+ AdjustContribProj($proj);
+ }
+ else
+ {
+ croak "Could not determine contrib module type for $n\n";
+ }
+
+ # Are there any output data files to build?
+ GenerateContribSqlFiles($n, $mf);
}
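
AddContrib is driven entirely by regex scrapes of each contrib Makefile: splice continuation lines, capture OBJS (or MODULES, or PROGRAM), then map every .o back to its .c. The core of that scrape over an inline Makefile fragment (the fragment is hypothetical):

    use strict;
    use warnings;

    my $mf = "MODULE_big = demo\nOBJS = foo.o \\\n\tbar-baz.o\n";
    $mf =~ s{\\\s*[\r\n]+}{}mg;    # splice line continuations
    $mf =~ /^OBJS\s*=\s*(.*)$/m
      || die "Could not find objects in MODULE_big\n";
    my $objs = $1;
    while ($objs =~ /\b([\w-]+\.o)\b/g)
    {
        my $o = $1;
        $o =~ s/\.o$/.c/;    # object name back to source name
        print "would add contrib\\demo\\$o\n";
    }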
sub GenerateContribSqlFiles
{
- my $n = shift;
- my $mf = shift;
- if ($mf =~ /^DATA_built\s*=\s*(.*)$/mg)
- {
- my $l = $1;
-
- # Strip out $(addsuffix) rules
- if (index($l, '$(addsuffix ') >= 0)
- {
- my $pcount = 0;
- my $i;
- for ($i = index($l, '$(addsuffix ') + 12; $i < length($l); $i++)
- {
- $pcount++ if (substr($l, $i, 1) eq '(');
- $pcount-- if (substr($l, $i, 1) eq ')');
- last if ($pcount < 0);
- }
- $l = substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i+1);
- }
-
- foreach my $d (split /\s+/, $l)
- {
- my $in = "$d.in";
- my $out = "$d";
-
- if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in"))
- {
- print "Building $out from $in (contrib/$n)...\n";
- my $cont = Project::read_file("contrib/$n/$in");
- my $dn = $out;
- $dn =~ s/\.sql$//;
- $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
- my $o;
- open($o,">contrib/$n/$out") || croak "Could not write to contrib/$n/$d";
- print $o $cont;
- close($o);
- }
- }
- }
+ my $n = shift;
+ my $mf = shift;
+ if ($mf =~ /^DATA_built\s*=\s*(.*)$/mg)
+ {
+ my $l = $1;
+
+ # Strip out $(addsuffix) rules
+ if (index($l, '$(addsuffix ') >= 0)
+ {
+ my $pcount = 0;
+ my $i;
+ for ($i = index($l, '$(addsuffix ') + 12; $i < length($l); $i++)
+ {
+ $pcount++ if (substr($l, $i, 1) eq '(');
+ $pcount-- if (substr($l, $i, 1) eq ')');
+ last if ($pcount < 0);
+ }
+ $l = substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i+1);
+ }
+
+ foreach my $d (split /\s+/, $l)
+ {
+ my $in = "$d.in";
+ my $out = "$d";
+
+ if (Solution::IsNewer("contrib/$n/$out", "contrib/$n/$in"))
+ {
+ print "Building $out from $in (contrib/$n)...\n";
+ my $cont = Project::read_file("contrib/$n/$in");
+ my $dn = $out;
+ $dn =~ s/\.sql$//;
+ $cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
+ my $o;
+ open($o,">contrib/$n/$out")
+ || croak "Could not write to contrib/$n/$d";
+ print $o $cont;
+ close($o);
+ }
+ }
+ }
}
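
The $(addsuffix ...) stripper above cannot use a simple regex, because the make expression may itself contain parenthesized variable references; instead it scans character by character, counting parenthesis depth until the count goes negative at addsuffix's own closing paren. The scanner in isolation (the DATA_built value is invented):

    use strict;
    use warnings;

    my $l = 'a.sql $(addsuffix .sql, $(MODULES)) b.sql';
    if (index($l, '$(addsuffix ') >= 0)
    {
        my $pcount = 0;
        my $i;
        for ($i = index($l, '$(addsuffix ') + 12; $i < length($l); $i++)
        {
            $pcount++ if (substr($l, $i, 1) eq '(');
            $pcount-- if (substr($l, $i, 1) eq ')');
            last if ($pcount < 0);    # the paren closing $(addsuffix itself
        }
        $l = substr($l, 0, index($l, '$(addsuffix ')) . substr($l, $i + 1);
    }
    print "$l\n";    # prints "a.sql  b.sql"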
sub AdjustContribProj
{
- my $proj = shift;
- my $n = $proj->{name};
-
- if ($contrib_defines->{$n})
- {
- foreach my $d ($contrib_defines->{$n})
- {
- $proj->AddDefine($d);
- }
- }
- if (grep {/^$n$/} @contrib_uselibpq)
- {
- $proj->AddIncludeDir('src\interfaces\libpq');
- $proj->AddReference($libpq);
- }
- if (grep {/^$n$/} @contrib_uselibpgport)
- {
- $proj->AddReference($libpgport);
- }
- if ($contrib_extralibs->{$n})
- {
- foreach my $l (@{$contrib_extralibs->{$n}})
- {
- $proj->AddLibrary($l);
- }
- }
- if ($contrib_extraincludes->{$n})
- {
- foreach my $i (@{$contrib_extraincludes->{$n}})
- {
- $proj->AddIncludeDir($i);
- }
- }
- if ($contrib_extrasource->{$n})
- {
- $proj->AddFiles('contrib\\' . $n, @{$contrib_extrasource->{$n}});
- }
+ my $proj = shift;
+ my $n = $proj->{name};
+
+ if ($contrib_defines->{$n})
+ {
+ foreach my $d ($contrib_defines->{$n})
+ {
+ $proj->AddDefine($d);
+ }
+ }
+ if (grep {/^$n$/} @contrib_uselibpq)
+ {
+ $proj->AddIncludeDir('src\interfaces\libpq');
+ $proj->AddReference($libpq);
+ }
+ if (grep {/^$n$/} @contrib_uselibpgport)
+ {
+ $proj->AddReference($libpgport);
+ }
+ if ($contrib_extralibs->{$n})
+ {
+ foreach my $l (@{$contrib_extralibs->{$n}})
+ {
+ $proj->AddLibrary($l);
+ }
+ }
+ if ($contrib_extraincludes->{$n})
+ {
+ foreach my $i (@{$contrib_extraincludes->{$n}})
+ {
+ $proj->AddIncludeDir($i);
+ }
+ }
+ if ($contrib_extrasource->{$n})
+ {
+ $proj->AddFiles('contrib\\' . $n, @{$contrib_extrasource->{$n}});
+ }
}
1;
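
Note how little logic AdjustContribProj itself contains: the hashes and arrays declared at the top of Mkvcbuild.pm fully decide which defines, libraries, and include paths each contrib project receives. The lookup pattern reduced to a runnable miniature (the tables are invented stand-ins):

    use strict;
    use warnings;

    my $extra_defines = { refint => 'REFINT_VERBOSE' };
    my @uses_libpq    = ('dblink', 'pgbench');

    sub describe
    {
        my $n = shift;
        print "$n: define $extra_defines->{$n}\n" if $extra_defines->{$n};
        print "$n: link against libpq\n" if grep { /^$n$/ } @uses_libpq;
    }

    describe('refint');    # define REFINT_VERBOSE
    describe('dblink');    # link against libpq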
diff --git a/src/tools/msvc/Project.pm b/src/tools/msvc/Project.pm
index 98db076e58..53cfdb1753 100644
--- a/src/tools/msvc/Project.pm
+++ b/src/tools/msvc/Project.pm
@@ -12,393 +12,394 @@ use File::Basename;
sub _new
{
- my ($classname, $name, $type, $solution) = @_;
- my $good_types = {
- lib => 1,
- exe => 1,
- dll => 1,
- };
- confess("Bad project type: $type\n") unless exists $good_types->{$type};
- my $self = {
- name => $name,
- type => $type,
- guid => Win32::GuidGen(),
- files => {},
- references => [],
- libraries => [],
- suffixlib => [],
- includes => '',
- prefixincludes => '',
- defines => ';',
- solution => $solution,
- disablewarnings => '4018;4244;4273;4102;4090;4267',
- disablelinkerwarnings => '',
- platform => $solution->{platform},
- };
-
- bless($self, $classname);
- return $self;
+ my ($classname, $name, $type, $solution) = @_;
+ my $good_types = {
+ lib => 1,
+ exe => 1,
+ dll => 1,
+ };
+ confess("Bad project type: $type\n") unless exists $good_types->{$type};
+ my $self = {
+ name => $name,
+ type => $type,
+ guid => Win32::GuidGen(),
+ files => {},
+ references => [],
+ libraries => [],
+ suffixlib => [],
+ includes => '',
+ prefixincludes => '',
+ defines => ';',
+ solution => $solution,
+ disablewarnings => '4018;4244;4273;4102;4090;4267',
+ disablelinkerwarnings => '',
+ platform => $solution->{platform},
+ };
+
+ bless($self, $classname);
+ return $self;
}
sub AddFile
{
- my ($self, $filename) = @_;
+ my ($self, $filename) = @_;
- $self->{files}->{$filename} = 1;
+ $self->{files}->{$filename} = 1;
}
sub AddFiles
{
- my $self = shift;
- my $dir = shift;
+ my $self = shift;
+ my $dir = shift;
- while (my $f = shift)
- {
- $self->{files}->{$dir . "\\" . $f} = 1;
- }
+ while (my $f = shift)
+ {
+ $self->{files}->{$dir . "\\" . $f} = 1;
+ }
}
sub ReplaceFile
{
- my ($self, $filename, $newname) = @_;
- my $re = "\\\\$filename\$";
-
- foreach my $file ( keys %{ $self->{files} } )
- {
-
- # Match complete filename
- if ($filename =~ /\\/)
- {
- if ($file eq $filename)
- {
- delete $self->{files}{$file};
- $self->{files}{$newname} = 1;
- return;
- }
- }
- elsif ($file =~ m/($re)/)
- {
- delete $self->{files}{$file};
- $self->{files}{"$newname\\$filename"} = 1;
- return;
- }
- }
- confess("Could not find file $filename to replace\n");
+ my ($self, $filename, $newname) = @_;
+ my $re = "\\\\$filename\$";
+
+ foreach my $file (keys %{$self->{files}})
+ {
+
+ # Match complete filename
+ if ($filename =~ /\\/)
+ {
+ if ($file eq $filename)
+ {
+ delete $self->{files}{$file};
+ $self->{files}{$newname} = 1;
+ return;
+ }
+ }
+ elsif ($file =~ m/($re)/)
+ {
+ delete $self->{files}{$file};
+ $self->{files}{"$newname\\$filename"} = 1;
+ return;
+ }
+ }
+ confess("Could not find file $filename to replace\n");
}
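
ReplaceFile treats a path-qualified argument as an exact key, but a bare filename is matched as a backslash-delimited suffix anywhere in the file table; the doubled escaping is needed because the pattern is first built in a double-quoted string. The suffix match in isolation (the paths are examples):

    use strict;
    use warnings;

    my $filename = 'pg_sema.c';
    my $re       = "\\\\$filename\$";    # the string \\pg_sema.c$, used as a regex
    foreach my $file ('src\backend\port\pg_sema.c', 'src\port\open.c')
    {
        print "would replace $file\n" if $file =~ m/($re)/;
    }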
sub RemoveFile
{
- my ($self, $filename) = @_;
- my $orig = scalar keys %{ $self->{files} };
- delete $self->{files}->{$filename};
- if ($orig > scalar keys %{$self->{files}} )
- {
- return;
- }
- confess("Could not find file $filename to remove\n");
+ my ($self, $filename) = @_;
+ my $orig = scalar keys %{$self->{files}};
+ delete $self->{files}->{$filename};
+ if ($orig > scalar keys %{$self->{files}})
+ {
+ return;
+ }
+ confess("Could not find file $filename to remove\n");
}
sub RelocateFiles
{
- my ($self, $targetdir, $proc) = @_;
- foreach my $f (keys %{$self->{files}})
- {
- my $r = &$proc($f);
- if ($r)
- {
- $self->RemoveFile($f);
- $self->AddFile($targetdir . '\\' . basename($f));
- }
- }
+ my ($self, $targetdir, $proc) = @_;
+ foreach my $f (keys %{$self->{files}})
+ {
+ my $r = &$proc($f);
+ if ($r)
+ {
+ $self->RemoveFile($f);
+ $self->AddFile($targetdir . '\\' . basename($f));
+ }
+ }
}
sub AddReference
{
- my $self = shift;
+ my $self = shift;
- while (my $ref = shift)
- {
- push @{$self->{references}},$ref;
- $self->AddLibrary("__CFGNAME__\\" . $ref->{name} . "\\" . $ref->{name} . ".lib");
- }
+ while (my $ref = shift)
+ {
+ push @{$self->{references}},$ref;
+ $self->AddLibrary("__CFGNAME__\\" . $ref->{name} . "\\" . $ref->{name} . ".lib");
+ }
}
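
(Annotation: AddReference does double duty — it records the project-to-project reference for the IDE and immediately queues the referenced project's import library for the linker. A sketch, assuming $libpq is another Project object named "libpq":)

    $proj->AddReference($libpq);
    # Besides the <ProjectReference>, this adds the library path
    # "__CFGNAME__\libpq\libpq.lib"; the __CFGNAME__ placeholder is
    # substituted with Debug or Release when linker dependencies are emitted.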
sub AddLibrary
{
- my ($self, $lib, $dbgsuffix) = @_;
-
- if ($lib =~ m/\s/)
- {
- $lib = '&quot;' . $lib . "&quot;";
- }
-
- push @{$self->{libraries}}, $lib;
- if ($dbgsuffix)
- {
- push @{$self->{suffixlib}}, $lib;
- }
+ my ($self, $lib, $dbgsuffix) = @_;
+
+ if ($lib =~ m/\s/)
+ {
+ $lib = '&quot;' . $lib . "&quot;";
+ }
+
+ push @{$self->{libraries}}, $lib;
+ if ($dbgsuffix)
+ {
+ push @{$self->{suffixlib}}, $lib;
+ }
}
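
(Annotation: the &quot; wrapping exists because the library list is ultimately written into an XML attribute of the project file, where a path containing spaces would otherwise split into two arguments. A sketch with a hypothetical path:)

    # Quoted automatically because of the embedded space:
    $proj->AddLibrary('C:\Program Files\OpenSSL\lib\ssleay32.lib', 1);
    # The true second argument also registers the library in suffixlib, so
    # its ".lib" becomes "MD.lib" or "MDd.lib" to match the configured C runtime.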
sub AddIncludeDir
{
- my ($self, $inc) = @_;
+ my ($self, $inc) = @_;
- if ($self->{includes} ne '')
- {
- $self->{includes} .= ';';
- }
- $self->{includes} .= $inc;
+ if ($self->{includes} ne '')
+ {
+ $self->{includes} .= ';';
+ }
+ $self->{includes} .= $inc;
}
sub AddPrefixInclude
{
- my ($self, $inc) = @_;
+ my ($self, $inc) = @_;
- $self->{prefixincludes} = $inc . ';' . $self->{prefixincludes};
+ $self->{prefixincludes} = $inc . ';' . $self->{prefixincludes};
}
sub AddDefine
{
- my ($self, $def) = @_;
+ my ($self, $def) = @_;
- $def =~ s/"/&quot;&quot;/g;
- $self->{defines} .= $def . ';';
+ $def =~ s/"/&quot;&quot;/g;
+ $self->{defines} .= $def . ';';
}
sub FullExportDLL
{
- my ($self, $libname) = @_;
+ my ($self, $libname) = @_;
- $self->{builddef} = 1;
- $self->{def} = ".\\__CFGNAME__\\$self->{name}\\$self->{name}.def";
- $self->{implib} = "__CFGNAME__\\$self->{name}\\$libname";
+ $self->{builddef} = 1;
+ $self->{def} = ".\\__CFGNAME__\\$self->{name}\\$self->{name}.def";
+ $self->{implib} = "__CFGNAME__\\$self->{name}\\$libname";
}
sub UseDef
{
- my ($self, $def) = @_;
+ my ($self, $def) = @_;
- $self->{def} = $def;
+ $self->{def} = $def;
}
sub AddDir
{
- my ($self, $reldir) = @_;
- my $MF;
-
- my $t = $/;
- undef $/;
- open($MF,"$reldir\\Makefile")
- || open($MF,"$reldir\\GNUMakefile")
- || croak "Could not open $reldir\\Makefile\n";
- my $mf = <$MF>;
- close($MF);
-
- $mf =~ s{\\\s*[\r\n]+}{}mg;
- if ($mf =~ m{^(?:SUB)?DIRS[^=]*=\s*(.*)$}mg)
- {
- foreach my $subdir (split /\s+/,$1)
- {
- next
- if $subdir eq "\$(top_builddir)/src/timezone"; #special case for non-standard include
- next
- if $reldir . "\\" . $subdir eq "src\\backend\\port\\darwin";
-
- $self->AddDir($reldir . "\\" . $subdir);
- }
- }
- while ($mf =~ m{^(?:EXTRA_)?OBJS[^=]*=\s*(.*)$}m)
- {
- my $s = $1;
- my $filter_re = qr{\$\(filter ([^,]+),\s+\$\(([^\)]+)\)\)};
- while ($s =~ /$filter_re/)
- {
-
- # Process $(filter a b c, $(VAR)) expressions
- my $list = $1;
- my $filter = $2;
- $list =~ s/\.o/\.c/g;
- my @pieces = split /\s+/, $list;
- my $matches = "";
- foreach my $p (@pieces)
- {
-
- if ($filter eq "LIBOBJS")
- {
- if (grep(/$p/, @main::pgportfiles) == 1)
- {
- $p =~ s/\.c/\.o/;
- $matches .= $p . " ";
- }
- }
- else
- {
- confess "Unknown filter $filter\n";
- }
- }
- $s =~ s/$filter_re/$matches/;
- }
- foreach my $f (split /\s+/,$s)
- {
- next if $f =~ /^\s*$/;
- next if $f eq "\\";
- next if $f =~ /\/SUBSYS.o$/;
- $f =~ s/,$//; # Remove trailing comma that can show up from filter stuff
- next unless $f =~ /.*\.o$/;
- $f =~ s/\.o$/\.c/;
- if ($f =~ /^\$\(top_builddir\)\/(.*)/)
- {
- $f = $1;
- $f =~ s/\//\\/g;
- $self->{files}->{$f} = 1;
- }
- else
- {
- $f =~ s/\//\\/g;
- $self->{files}->{"$reldir\\$f"} = 1;
- }
- }
- $mf =~ s{OBJS[^=]*=\s*(.*)$}{}m;
- }
-
- # Match rules that pull in source files from different directories, eg
- # pgstrcasecmp.c rint.c snprintf.c: % : $(top_srcdir)/src/port/%
- my $replace_re = qr{^([^:\n\$]+\.c)\s*:\s*(?:%\s*: )?\$(\([^\)]+\))\/(.*)\/[^\/]+$}m;
- while ($mf =~ m{$replace_re}m)
- {
- my $match = $1;
- my $top = $2;
- my $target = $3;
- $target =~ s{/}{\\}g;
- my @pieces = split /\s+/,$match;
- foreach my $fn (@pieces)
- {
- if ($top eq "(top_srcdir)")
- {
- eval { $self->ReplaceFile($fn, $target) };
- }
- elsif ($top eq "(backend_src)")
- {
- eval { $self->ReplaceFile($fn, "src\\backend\\$target") };
- }
- else
- {
- confess "Bad replacement top: $top, on line $_\n";
- }
- }
- $mf =~ s{$replace_re}{}m;
- }
-
- # See if this Makefile contains a description, and should have a RC file
- if ($mf =~ /^PGFILEDESC\s*=\s*\"([^\"]+)\"/m)
- {
- my $desc = $1;
- my $ico;
- if ($mf =~ /^PGAPPICON\s*=\s*(.*)$/m) { $ico = $1; }
- $self->AddResourceFile($reldir,$desc,$ico);
- }
- $/ = $t;
+ my ($self, $reldir) = @_;
+ my $MF;
+
+ my $t = $/;
+ undef $/;
+ open($MF,"$reldir\\Makefile")
+ || open($MF,"$reldir\\GNUMakefile")
+ || croak "Could not open $reldir\\Makefile\n";
+ my $mf = <$MF>;
+ close($MF);
+
+ $mf =~ s{\\\s*[\r\n]+}{}mg;
+ if ($mf =~ m{^(?:SUB)?DIRS[^=]*=\s*(.*)$}mg)
+ {
+ foreach my $subdir (split /\s+/,$1)
+ {
+ next
+ if $subdir eq "\$(top_builddir)/src/timezone"
+ ; #special case for non-standard include
+ next
+ if $reldir . "\\" . $subdir eq "src\\backend\\port\\darwin";
+
+ $self->AddDir($reldir . "\\" . $subdir);
+ }
+ }
+ while ($mf =~ m{^(?:EXTRA_)?OBJS[^=]*=\s*(.*)$}m)
+ {
+ my $s = $1;
+ my $filter_re = qr{\$\(filter ([^,]+),\s+\$\(([^\)]+)\)\)};
+ while ($s =~ /$filter_re/)
+ {
+
+ # Process $(filter a b c, $(VAR)) expressions
+ my $list = $1;
+ my $filter = $2;
+ $list =~ s/\.o/\.c/g;
+ my @pieces = split /\s+/, $list;
+ my $matches = "";
+ foreach my $p (@pieces)
+ {
+
+ if ($filter eq "LIBOBJS")
+ {
+ if (grep(/$p/, @main::pgportfiles) == 1)
+ {
+ $p =~ s/\.c/\.o/;
+ $matches .= $p . " ";
+ }
+ }
+ else
+ {
+ confess "Unknown filter $filter\n";
+ }
+ }
+ $s =~ s/$filter_re/$matches/;
+ }
+ foreach my $f (split /\s+/,$s)
+ {
+ next if $f =~ /^\s*$/;
+ next if $f eq "\\";
+ next if $f =~ /\/SUBSYS.o$/;
+ $f =~ s/,$//; # Remove trailing comma that can show up from filter stuff
+ next unless $f =~ /.*\.o$/;
+ $f =~ s/\.o$/\.c/;
+ if ($f =~ /^\$\(top_builddir\)\/(.*)/)
+ {
+ $f = $1;
+ $f =~ s/\//\\/g;
+ $self->{files}->{$f} = 1;
+ }
+ else
+ {
+ $f =~ s/\//\\/g;
+ $self->{files}->{"$reldir\\$f"} = 1;
+ }
+ }
+ $mf =~ s{OBJS[^=]*=\s*(.*)$}{}m;
+ }
+
+ # Match rules that pull in source files from different directories, eg
+ # pgstrcasecmp.c rint.c snprintf.c: % : $(top_srcdir)/src/port/%
+ my $replace_re = qr{^([^:\n\$]+\.c)\s*:\s*(?:%\s*: )?\$(\([^\)]+\))\/(.*)\/[^\/]+$}m;
+ while ($mf =~ m{$replace_re}m)
+ {
+ my $match = $1;
+ my $top = $2;
+ my $target = $3;
+ $target =~ s{/}{\\}g;
+ my @pieces = split /\s+/,$match;
+ foreach my $fn (@pieces)
+ {
+ if ($top eq "(top_srcdir)")
+ {
+ eval { $self->ReplaceFile($fn, $target) };
+ }
+ elsif ($top eq "(backend_src)")
+ {
+ eval { $self->ReplaceFile($fn, "src\\backend\\$target") };
+ }
+ else
+ {
+ confess "Bad replacement top: $top, on line $_\n";
+ }
+ }
+ $mf =~ s{$replace_re}{}m;
+ }
+
+ # See if this Makefile contains a description, and should have a RC file
+ if ($mf =~ /^PGFILEDESC\s*=\s*\"([^\"]+)\"/m)
+ {
+ my $desc = $1;
+ my $ico;
+ if ($mf =~ /^PGAPPICON\s*=\s*(.*)$/m) { $ico = $1; }
+ $self->AddResourceFile($reldir,$desc,$ico);
+ }
+ $/ = $t;
}
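
(Annotation: AddDir is a small Makefile scanner — it joins continuation lines, recurses into (SUB)DIRS, maps OBJS entries back to .c sources, and expands $(filter ...) expressions, which it supports only against LIBOBJS. A worked sketch of that expansion on a hypothetical OBJS line:)

    # Hypothetical Makefile fragment:
    #   OBJS = foo.o $(filter snprintf.o getopt.o, $(LIBOBJS))
    # The .o names in the filter list are first mapped to .c, each piece is
    # checked against @main::pgportfiles, and survivors are mapped back to .o.
    # If pgportfiles contains snprintf.c but not getopt.c, the line becomes:
    #   OBJS = foo.o snprintf.o
    # before the usual OBJS-to-source conversion runs.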
sub AddResourceFile
{
- my ($self, $dir, $desc, $ico) = @_;
-
- my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
- my $d = ($year - 100) . "$yday";
-
- if (Solution::IsNewer("$dir\\win32ver.rc",'src\port\win32ver.rc'))
- {
- print "Generating win32ver.rc for $dir\n";
- open(I,'src\port\win32ver.rc') || confess "Could not open win32ver.rc";
- open(O,">$dir\\win32ver.rc") || confess "Could not write win32ver.rc";
- my $icostr = $ico?"IDI_ICON ICON \"src/port/$ico.ico\"":"";
- while (<I>)
- {
- s/FILEDESC/"$desc"/gm;
- s/_ICO_/$icostr/gm;
- s/(VERSION.*),0/$1,$d/;
- if ($self->{type} eq "dll")
- {
- s/VFT_APP/VFT_DLL/gm;
- }
- print O;
- }
- }
- close(O);
- close(I);
- $self->AddFile("$dir\\win32ver.rc");
+ my ($self, $dir, $desc, $ico) = @_;
+
+ my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
+ my $d = ($year - 100) . "$yday";
+
+ if (Solution::IsNewer("$dir\\win32ver.rc",'src\port\win32ver.rc'))
+ {
+ print "Generating win32ver.rc for $dir\n";
+ open(I,'src\port\win32ver.rc') || confess "Could not open win32ver.rc";
+ open(O,">$dir\\win32ver.rc") || confess "Could not write win32ver.rc";
+ my $icostr = $ico?"IDI_ICON ICON \"src/port/$ico.ico\"":"";
+ while (<I>)
+ {
+ s/FILEDESC/"$desc"/gm;
+ s/_ICO_/$icostr/gm;
+ s/(VERSION.*),0/$1,$d/;
+ if ($self->{type} eq "dll")
+ {
+ s/VFT_APP/VFT_DLL/gm;
+ }
+ print O;
+ }
+ }
+ close(O);
+ close(I);
+ $self->AddFile("$dir\\win32ver.rc");
}
sub DisableLinkerWarnings
{
- my ($self, $warnings) = @_;
+ my ($self, $warnings) = @_;
- $self->{disablelinkerwarnings} .= ',' unless ($self->{disablelinkerwarnings} eq '');
- $self->{disablelinkerwarnings} .= $warnings;
+ $self->{disablelinkerwarnings} .= ',' unless ($self->{disablelinkerwarnings} eq '');
+ $self->{disablelinkerwarnings} .= $warnings;
}
sub Save
{
- my ($self) = @_;
-
- # If doing DLL and haven't specified a DEF file, do a full export of all symbols
- # in the project.
- if ($self->{type} eq "dll" && !$self->{def})
- {
- $self->FullExportDLL($self->{name} . ".lib");
- }
-
- # Warning 4197 is about double exporting, disable this per
- # http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=99193
- $self->DisableLinkerWarnings('4197') if ($self->{platform} eq 'x64');
-
- # Dump the project
- open(F, ">$self->{name}$self->{filenameExtension}")
- || croak("Could not write to $self->{name}$self->{filenameExtension}\n");
- $self->WriteHeader(*F);
- $self->WriteFiles(*F);
- $self->Footer(*F);
- close(F);
+ my ($self) = @_;
+
+ # If doing DLL and haven't specified a DEF file, do a full export of all symbols
+ # in the project.
+ if ($self->{type} eq "dll" && !$self->{def})
+ {
+ $self->FullExportDLL($self->{name} . ".lib");
+ }
+
+ # Warning 4197 is about double exporting, disable this per
+ # http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=99193
+ $self->DisableLinkerWarnings('4197') if ($self->{platform} eq 'x64');
+
+ # Dump the project
+ open(F, ">$self->{name}$self->{filenameExtension}")
+ || croak("Could not write to $self->{name}$self->{filenameExtension}\n");
+ $self->WriteHeader(*F);
+ $self->WriteFiles(*F);
+ $self->Footer(*F);
+ close(F);
}
sub GetAdditionalLinkerDependencies
{
- my ($self, $cfgname, $seperator) = @_;
- my $libcfg = (uc $cfgname eq "RELEASE")?"MD":"MDd";
- my $libs = '';
- foreach my $lib (@{$self->{libraries}})
- {
- my $xlib = $lib;
- foreach my $slib (@{$self->{suffixlib}})
- {
- if ($slib eq $lib)
- {
- $xlib =~ s/\.lib$/$libcfg.lib/;
- last;
- }
- }
- $libs .= $xlib . $seperator;
- }
- $libs =~ s/.$//;
- $libs =~ s/__CFGNAME__/$cfgname/g;
- return $libs;
+ my ($self, $cfgname, $seperator) = @_;
+ my $libcfg = (uc $cfgname eq "RELEASE")?"MD":"MDd";
+ my $libs = '';
+ foreach my $lib (@{$self->{libraries}})
+ {
+ my $xlib = $lib;
+ foreach my $slib (@{$self->{suffixlib}})
+ {
+ if ($slib eq $lib)
+ {
+ $xlib =~ s/\.lib$/$libcfg.lib/;
+ last;
+ }
+ }
+ $libs .= $xlib . $seperator;
+ }
+ $libs =~ s/.$//;
+ $libs =~ s/__CFGNAME__/$cfgname/g;
+ return $libs;
}
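
(Annotation: putting AddReference and AddLibrary together, GetAdditionalLinkerDependencies resolves both the __CFGNAME__ placeholder and the runtime suffix per configuration. A sketch of the expected output for a hypothetical state:)

    # With libraries = ('__CFGNAME__\libpq\libpq.lib', 'ssleay32.lib')
    # and ssleay32.lib registered in suffixlib:
    my $libs = $proj->GetAdditionalLinkerDependencies('Debug', ';');
    # yields "Debug\libpq\libpq.lib;ssleay32MDd.lib" -- MDd because the
    # configuration is not RELEASE; the trailing separator is trimmed off.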
# Utility function that loads a complete file
sub read_file
{
- my $filename = shift;
- my $F;
- my $t = $/;
+ my $filename = shift;
+ my $F;
+ my $t = $/;
- undef $/;
- open($F, $filename) || croak "Could not open file $filename\n";
- my $txt = <$F>;
- close($F);
- $/ = $t;
+ undef $/;
+ open($F, $filename) || croak "Could not open file $filename\n";
+ my $txt = <$F>;
+ close($F);
+ $/ = $t;
- return $txt;
+ return $txt;
}
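
(Annotation: read_file clobbers the global record separator and restores it by hand. An equivalent slurp that scopes the change with local(), shown only as a sketch — the helper name is invented:)

    use Carp;

    sub read_file_scoped
    {
        my $filename = shift;
        open(my $fh, '<', $filename) || croak "Could not open file $filename\n";
        my $txt = do { local $/; <$fh> };    # slurp; $/ restored on scope exit
        close($fh);
        return $txt;
    }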
1;
diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index e1d85c85ad..0c50c05734 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -12,346 +12,359 @@ use VSObjectFactory;
sub _new
{
- my $classname = shift;
- my $options = shift;
- my $self = {
- projects => {},
- options => $options,
- numver => '',
- strver => '',
- vcver => undef,
- platform => undef,
- };
- bless($self, $classname);
-
- # integer_datetimes is now the default
- $options->{integer_datetimes} = 1
- unless exists $options->{integer_datetimes};
- $options->{float4byval} = 1
- unless exists $options->{float4byval};
- if ($options->{xml})
- {
- if (!($options->{xslt} && $options->{iconv}))
- {
- die "XML requires both XSLT and ICONV\n";
- }
- }
- $options->{blocksize} = 8
- unless $options->{blocksize}; # undef or 0 means default
- die "Bad blocksize $options->{blocksize}"
- unless grep {$_ == $options->{blocksize}} (1,2,4,8,16,32);
- $options->{segsize} = 1
- unless $options->{segsize}; # undef or 0 means default
- # only allow segsize 1 for now, as we can't do large files yet in windows
- die "Bad segsize $options->{segsize}"
- unless $options->{segsize} == 1;
- $options->{wal_blocksize} = 8
- unless $options->{wal_blocksize}; # undef or 0 means default
- die "Bad wal_blocksize $options->{wal_blocksize}"
- unless grep {$_ == $options->{wal_blocksize}} (1,2,4,8,16,32,64);
- $options->{wal_segsize} = 16
- unless $options->{wal_segsize}; # undef or 0 means default
- die "Bad wal_segsize $options->{wal_segsize}"
- unless grep {$_ == $options->{wal_segsize}} (1,2,4,8,16,32,64);
-
- $self->DeterminePlatform();
-
- return $self;
+ my $classname = shift;
+ my $options = shift;
+ my $self = {
+ projects => {},
+ options => $options,
+ numver => '',
+ strver => '',
+ vcver => undef,
+ platform => undef,
+ };
+ bless($self, $classname);
+
+ # integer_datetimes is now the default
+ $options->{integer_datetimes} = 1
+ unless exists $options->{integer_datetimes};
+ $options->{float4byval} = 1
+ unless exists $options->{float4byval};
+ if ($options->{xml})
+ {
+ if (!($options->{xslt} && $options->{iconv}))
+ {
+ die "XML requires both XSLT and ICONV\n";
+ }
+ }
+ $options->{blocksize} = 8
+ unless $options->{blocksize}; # undef or 0 means default
+ die "Bad blocksize $options->{blocksize}"
+ unless grep {$_ == $options->{blocksize}} (1,2,4,8,16,32);
+ $options->{segsize} = 1
+ unless $options->{segsize}; # undef or 0 means default
+ # only allow segsize 1 for now, as we can't do large files yet in windows
+ die "Bad segsize $options->{segsize}"
+ unless $options->{segsize} == 1;
+ $options->{wal_blocksize} = 8
+ unless $options->{wal_blocksize}; # undef or 0 means default
+ die "Bad wal_blocksize $options->{wal_blocksize}"
+ unless grep {$_ == $options->{wal_blocksize}} (1,2,4,8,16,32,64);
+ $options->{wal_segsize} = 16
+ unless $options->{wal_segsize}; # undef or 0 means default
+ die "Bad wal_segsize $options->{wal_segsize}"
+ unless grep {$_ == $options->{wal_segsize}} (1,2,4,8,16,32,64);
+
+ $self->DeterminePlatform();
+
+ return $self;
}
sub DeterminePlatform
{
- my $self = shift;
-
- # Determine if we are in 32 or 64-bit mode. Do this by seeing if CL has
- # 64-bit only parameters.
- $self->{platform} = 'Win32';
- open(P,"cl /? 2>NUL|") || die "cl command not found";
- while (<P>)
- {
- if (/^\/favor:</)
- {
- $self->{platform} = 'x64';
- last;
- }
- }
- close(P);
- print "Detected hardware platform: $self->{platform}\n";
+ my $self = shift;
+
+ # Determine if we are in 32 or 64-bit mode. Do this by seeing if CL has
+ # 64-bit only parameters.
+ $self->{platform} = 'Win32';
+ open(P,"cl /? 2>NUL|") || die "cl command not found";
+ while (<P>)
+ {
+ if (/^\/favor:</)
+ {
+ $self->{platform} = 'x64';
+ last;
+ }
+ }
+ close(P);
+ print "Detected hardware platform: $self->{platform}\n";
}
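
(Annotation: the detection trick is that the /favor:<...> switch is accepted only by 64-bit editions of cl, so scanning the compiler's own help output distinguishes the toolchains without compiling anything. Illustrative, abridged output:)

    #   > cl /?
    #   ...
    #   /favor:<blend|AMD64|...> optimize for architecture
    # A 32-bit cl prints no /favor line, so the loop leaves platform = Win32.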
# Return 1 if $oldfile is newer than $newfile, or if $newfile doesn't exist.
# Special case - if config.pl has changed, always return 1
sub IsNewer
{
- my ($newfile, $oldfile) = @_;
- if ($oldfile ne 'src\tools\msvc\config.pl' && $oldfile ne 'src\tools\msvc\config_default.pl')
- {
- return 1
- if (-f 'src\tools\msvc\config.pl') && IsNewer($newfile, 'src\tools\msvc\config.pl');
- return 1
- if (-f 'src\tools\msvc\config_default.pl')
- && IsNewer($newfile, 'src\tools\msvc\config_default.pl');
- }
- return 1 if (!(-e $newfile));
- my @nstat = stat($newfile);
- my @ostat = stat($oldfile);
- return 1 if ($nstat[9] < $ostat[9]);
- return 0;
+ my ($newfile, $oldfile) = @_;
+ if ( $oldfile ne 'src\tools\msvc\config.pl'
+ && $oldfile ne 'src\tools\msvc\config_default.pl')
+ {
+ return 1
+ if (-f 'src\tools\msvc\config.pl')
+ && IsNewer($newfile, 'src\tools\msvc\config.pl');
+ return 1
+ if (-f 'src\tools\msvc\config_default.pl')
+ && IsNewer($newfile, 'src\tools\msvc\config_default.pl');
+ }
+ return 1 if (!(-e $newfile));
+ my @nstat = stat($newfile);
+ my @ostat = stat($oldfile);
+ return 1 if ($nstat[9] < $ostat[9]);
+ return 0;
}
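
(Annotation: despite the name, IsNewer answers "does $newfile need to be regenerated from $oldfile?" — yes if it is missing, older than its source, or older than a config.pl that could change the generation. A usage sketch mirroring the callers below:)

    # Regenerate only when the derived file is stale:
    if (IsNewer('src\include\utils\errcodes.h', 'src\backend\utils\errcodes.txt'))
    {
        # run the generator, then copyFile() the result into place
    }
    # Note the argument order: derived file first, its source second;
    # stat()[9] is the mtime, so $nstat[9] < $ostat[9] means out of date.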
# Copy a file, *not* preserving date. Only works for text files.
sub copyFile
{
- my ($src, $dest) = @_;
- open(I,$src) || croak "Could not open $src";
- open(O,">$dest") || croak "Could not open $dest";
- while (<I>)
- {
- print O;
- }
- close(I);
- close(O);
+ my ($src, $dest) = @_;
+ open(I,$src) || croak "Could not open $src";
+ open(O,">$dest") || croak "Could not open $dest";
+ while (<I>)
+ {
+ print O;
+ }
+ close(I);
+ close(O);
}
sub GenerateFiles
{
- my $self = shift;
- my $bits = $self->{platform} eq 'Win32' ? 32 : 64;
-
- # Parse configure.in to get version numbers
- open(C,"configure.in") || confess("Could not open configure.in for reading\n");
- while (<C>)
- {
- if (/^AC_INIT\(\[PostgreSQL\], \[([^\]]+)\]/)
- {
- $self->{strver} = $1;
- if ($self->{strver} !~ /^(\d+)\.(\d+)(?:\.(\d+))?/)
- {
- confess "Bad format of version: $self->{strver}\n";
- }
- $self->{numver} = sprintf("%d%02d%02d", $1, $2, $3?$3:0);
- $self->{majorver} = sprintf("%d.%d", $1, $2);
- }
- }
- close(C);
- confess "Unable to parse configure.in for all variables!"
- if ($self->{strver} eq '' || $self->{numver} eq '');
-
- if (IsNewer("src\\include\\pg_config_os.h","src\\include\\port\\win32.h"))
- {
- print "Copying pg_config_os.h...\n";
- copyFile("src\\include\\port\\win32.h","src\\include\\pg_config_os.h");
- }
-
- if (IsNewer("src\\include\\pg_config.h","src\\include\\pg_config.h.win32"))
- {
- print "Generating pg_config.h...\n";
- open(I,"src\\include\\pg_config.h.win32") || confess "Could not open pg_config.h.win32\n";
- open(O,">src\\include\\pg_config.h") || confess "Could not write to pg_config.h\n";
- while (<I>)
- {
- s{PG_VERSION "[^"]+"}{PG_VERSION "$self->{strver}"};
- s{PG_VERSION_NUM \d+}{PG_VERSION_NUM $self->{numver}};
+ my $self = shift;
+ my $bits = $self->{platform} eq 'Win32' ? 32 : 64;
+
+ # Parse configure.in to get version numbers
+ open(C,"configure.in") || confess("Could not open configure.in for reading\n");
+ while (<C>)
+ {
+ if (/^AC_INIT\(\[PostgreSQL\], \[([^\]]+)\]/)
+ {
+ $self->{strver} = $1;
+ if ($self->{strver} !~ /^(\d+)\.(\d+)(?:\.(\d+))?/)
+ {
+ confess "Bad format of version: $self->{strver}\n";
+ }
+ $self->{numver} = sprintf("%d%02d%02d", $1, $2, $3?$3:0);
+ $self->{majorver} = sprintf("%d.%d", $1, $2);
+ }
+ }
+ close(C);
+ confess "Unable to parse configure.in for all variables!"
+ if ($self->{strver} eq '' || $self->{numver} eq '');
+
+ if (IsNewer("src\\include\\pg_config_os.h","src\\include\\port\\win32.h"))
+ {
+ print "Copying pg_config_os.h...\n";
+ copyFile("src\\include\\port\\win32.h","src\\include\\pg_config_os.h");
+ }
+
+ if (IsNewer("src\\include\\pg_config.h","src\\include\\pg_config.h.win32"))
+ {
+ print "Generating pg_config.h...\n";
+ open(I,"src\\include\\pg_config.h.win32")
+ || confess "Could not open pg_config.h.win32\n";
+ open(O,">src\\include\\pg_config.h") || confess "Could not write to pg_config.h\n";
+ while (<I>)
+ {
+ s{PG_VERSION "[^"]+"}{PG_VERSION "$self->{strver}"};
+ s{PG_VERSION_NUM \d+}{PG_VERSION_NUM $self->{numver}};
s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY(z)\n#define PG_VERSION_STR "PostgreSQL $self->{strver}, compiled by Visual C++ build " __STRINGIFY2(_MSC_VER) ", $bits-bit"};
- print O;
- }
- print O "#define PG_MAJORVERSION \"$self->{majorver}\"\n";
- print O "#define LOCALEDIR \"/share/locale\"\n" if ($self->{options}->{nls});
- print O "/* defines added by config steps */\n";
- print O "#ifndef IGNORE_CONFIGURED_SETTINGS\n";
- print O "#define USE_ASSERT_CHECKING 1\n" if ($self->{options}->{asserts});
- print O "#define USE_INTEGER_DATETIMES 1\n" if ($self->{options}->{integer_datetimes});
- print O "#define USE_LDAP 1\n" if ($self->{options}->{ldap});
- print O "#define HAVE_LIBZ 1\n" if ($self->{options}->{zlib});
- print O "#define USE_SSL 1\n" if ($self->{options}->{openssl});
- print O "#define ENABLE_NLS 1\n" if ($self->{options}->{nls});
-
- print O "#define BLCKSZ ",1024 * $self->{options}->{blocksize},"\n";
- print O "#define RELSEG_SIZE ",
- (1024 / $self->{options}->{blocksize}) *$self->{options}->{segsize} * 1024, "\n";
- print O "#define XLOG_BLCKSZ ",1024 * $self->{options}->{wal_blocksize},"\n";
- print O "#define XLOG_SEG_SIZE (",$self->{options}->{wal_segsize}," * 1024 * 1024)\n";
-
- if ($self->{options}->{float4byval})
- {
- print O "#define USE_FLOAT4_BYVAL 1\n";
- print O "#define FLOAT4PASSBYVAL true\n";
- }
- else
- {
- print O "#define FLOAT4PASSBYVAL false\n";
- }
- if ($self->{options}->{float8byval})
- {
- print O "#define USE_FLOAT8_BYVAL 1\n";
- print O "#define FLOAT8PASSBYVAL true\n";
- }
- else
- {
- print O "#define FLOAT8PASSBYVAL false\n";
- }
-
- if ($self->{options}->{uuid})
- {
- print O "#define HAVE_UUID_H\n";
- }
- if ($self->{options}->{xml})
- {
- print O "#define HAVE_LIBXML2\n";
- print O "#define USE_LIBXML\n";
- }
- if ($self->{options}->{xslt})
- {
- print O "#define HAVE_LIBXSLT\n";
- print O "#define USE_LIBXSLT\n";
- }
- if ($self->{options}->{krb5})
- {
- print O "#define KRB5 1\n";
- print O "#define HAVE_KRB5_ERROR_TEXT_DATA 1\n";
- print O "#define HAVE_KRB5_TICKET_ENC_PART2 1\n";
- print O "#define HAVE_KRB5_FREE_UNPARSED_NAME 1\n";
- print O "#define ENABLE_GSS 1\n";
- }
- if (my $port = $self->{options}->{"--with-pgport"})
- {
- print O "#undef DEF_PGPORT\n";
- print O "#undef DEF_PGPORT_STR\n";
- print O "#define DEF_PGPORT $port\n";
- print O "#define DEF_PGPORT_STR \"$port\"\n";
- }
- print O "#define VAL_CONFIGURE \"" . $self->GetFakeConfigure() . "\"\n";
- print O "#endif /* IGNORE_CONFIGURED_SETTINGS */\n";
- close(O);
- close(I);
- }
-
- $self->GenerateDefFile("src\\interfaces\\libpq\\libpqdll.def",
- "src\\interfaces\\libpq\\exports.txt","LIBPQ");
- $self->GenerateDefFile(
- "src\\interfaces\\ecpg\\ecpglib\\ecpglib.def",
- "src\\interfaces\\ecpg\\ecpglib\\exports.txt",
- "LIBECPG"
- );
- $self->GenerateDefFile(
- "src\\interfaces\\ecpg\\compatlib\\compatlib.def",
- "src\\interfaces\\ecpg\\compatlib\\exports.txt",
- "LIBECPG_COMPAT"
- );
- $self->GenerateDefFile(
- "src\\interfaces\\ecpg\\pgtypeslib\\pgtypeslib.def",
- "src\\interfaces\\ecpg\\pgtypeslib\\exports.txt",
- "LIBPGTYPES"
- );
-
- if (IsNewer('src\backend\utils\fmgrtab.c','src\include\catalog\pg_proc.h'))
- {
- print "Generating fmgrtab.c and fmgroids.h...\n";
- chdir('src\backend\utils');
- system("perl -I ../catalog Gen_fmgrtab.pl ../../../src/include/catalog/pg_proc.h");
- chdir('..\..\..');
- copyFile('src\backend\utils\fmgroids.h','src\include\utils\fmgroids.h');
- }
-
- if (IsNewer('src\include\utils\probes.h','src\backend\utils\probes.d'))
- {
- print "Generating probes.h...\n";
- system(
+ print O;
+ }
+ print O "#define PG_MAJORVERSION \"$self->{majorver}\"\n";
+ print O "#define LOCALEDIR \"/share/locale\"\n" if ($self->{options}->{nls});
+ print O "/* defines added by config steps */\n";
+ print O "#ifndef IGNORE_CONFIGURED_SETTINGS\n";
+ print O "#define USE_ASSERT_CHECKING 1\n" if ($self->{options}->{asserts});
+ print O "#define USE_INTEGER_DATETIMES 1\n"
+ if ($self->{options}->{integer_datetimes});
+ print O "#define USE_LDAP 1\n" if ($self->{options}->{ldap});
+ print O "#define HAVE_LIBZ 1\n" if ($self->{options}->{zlib});
+ print O "#define USE_SSL 1\n" if ($self->{options}->{openssl});
+ print O "#define ENABLE_NLS 1\n" if ($self->{options}->{nls});
+
+ print O "#define BLCKSZ ",1024 * $self->{options}->{blocksize},"\n";
+ print O "#define RELSEG_SIZE ",
+ (1024 / $self->{options}->{blocksize}) *$self->{options}->{segsize} * 1024, "\n";
+ print O "#define XLOG_BLCKSZ ",1024 * $self->{options}->{wal_blocksize},"\n";
+ print O "#define XLOG_SEG_SIZE (",$self->{options}->{wal_segsize},
+ " * 1024 * 1024)\n";
+
+ if ($self->{options}->{float4byval})
+ {
+ print O "#define USE_FLOAT4_BYVAL 1\n";
+ print O "#define FLOAT4PASSBYVAL true\n";
+ }
+ else
+ {
+ print O "#define FLOAT4PASSBYVAL false\n";
+ }
+ if ($self->{options}->{float8byval})
+ {
+ print O "#define USE_FLOAT8_BYVAL 1\n";
+ print O "#define FLOAT8PASSBYVAL true\n";
+ }
+ else
+ {
+ print O "#define FLOAT8PASSBYVAL false\n";
+ }
+
+ if ($self->{options}->{uuid})
+ {
+ print O "#define HAVE_UUID_H\n";
+ }
+ if ($self->{options}->{xml})
+ {
+ print O "#define HAVE_LIBXML2\n";
+ print O "#define USE_LIBXML\n";
+ }
+ if ($self->{options}->{xslt})
+ {
+ print O "#define HAVE_LIBXSLT\n";
+ print O "#define USE_LIBXSLT\n";
+ }
+ if ($self->{options}->{krb5})
+ {
+ print O "#define KRB5 1\n";
+ print O "#define HAVE_KRB5_ERROR_TEXT_DATA 1\n";
+ print O "#define HAVE_KRB5_TICKET_ENC_PART2 1\n";
+ print O "#define HAVE_KRB5_FREE_UNPARSED_NAME 1\n";
+ print O "#define ENABLE_GSS 1\n";
+ }
+ if (my $port = $self->{options}->{"--with-pgport"})
+ {
+ print O "#undef DEF_PGPORT\n";
+ print O "#undef DEF_PGPORT_STR\n";
+ print O "#define DEF_PGPORT $port\n";
+ print O "#define DEF_PGPORT_STR \"$port\"\n";
+ }
+ print O "#define VAL_CONFIGURE \"" . $self->GetFakeConfigure() . "\"\n";
+ print O "#endif /* IGNORE_CONFIGURED_SETTINGS */\n";
+ close(O);
+ close(I);
+ }
+
+ $self->GenerateDefFile("src\\interfaces\\libpq\\libpqdll.def",
+ "src\\interfaces\\libpq\\exports.txt","LIBPQ");
+ $self->GenerateDefFile(
+ "src\\interfaces\\ecpg\\ecpglib\\ecpglib.def",
+ "src\\interfaces\\ecpg\\ecpglib\\exports.txt",
+ "LIBECPG"
+ );
+ $self->GenerateDefFile(
+ "src\\interfaces\\ecpg\\compatlib\\compatlib.def",
+ "src\\interfaces\\ecpg\\compatlib\\exports.txt",
+ "LIBECPG_COMPAT"
+ );
+ $self->GenerateDefFile(
+ "src\\interfaces\\ecpg\\pgtypeslib\\pgtypeslib.def",
+ "src\\interfaces\\ecpg\\pgtypeslib\\exports.txt",
+ "LIBPGTYPES"
+ );
+
+ if (IsNewer('src\backend\utils\fmgrtab.c','src\include\catalog\pg_proc.h'))
+ {
+ print "Generating fmgrtab.c and fmgroids.h...\n";
+ chdir('src\backend\utils');
+ system("perl -I ../catalog Gen_fmgrtab.pl ../../../src/include/catalog/pg_proc.h");
+ chdir('..\..\..');
+ copyFile('src\backend\utils\fmgroids.h','src\include\utils\fmgroids.h');
+ }
+
+ if (IsNewer('src\include\utils\probes.h','src\backend\utils\probes.d'))
+ {
+ print "Generating probes.h...\n";
+ system(
'psed -f src\backend\utils\Gen_dummy_probes.sed src\backend\utils\probes.d > src\include\utils\probes.h'
- );
- }
-
- if ($self->{options}->{python}
- && IsNewer('src\pl\plpython\spiexceptions.h','src\include\backend\errcodes.txt'))
- {
- print "Generating spiexceptions.h...\n";
- system(
+ );
+ }
+
+ if ($self->{options}->{python}
+ && IsNewer('src\pl\plpython\spiexceptions.h','src\include\backend\errcodes.txt'))
+ {
+ print "Generating spiexceptions.h...\n";
+ system(
'perl src\pl\plpython\generate-spiexceptions.pl src\backend\utils\errcodes.txt > src\pl\plpython\spiexceptions.h'
- );
- }
+ );
+ }
- if (IsNewer('src\include\utils\errcodes.h','src\backend\utils\errcodes.txt'))
- {
- print "Generating errcodes.h...\n";
- system(
+ if (IsNewer('src\include\utils\errcodes.h','src\backend\utils\errcodes.txt'))
+ {
+ print "Generating errcodes.h...\n";
+ system(
'perl src\backend\utils\generate-errcodes.pl src\backend\utils\errcodes.txt > src\backend\utils\errcodes.h'
- );
- copyFile('src\backend\utils\errcodes.h','src\include\utils\errcodes.h');
- }
-
- if (IsNewer('src\pl\plpgsql\src\plerrcodes.h','src\backend\utils\errcodes.txt'))
- {
- print "Generating plerrcodes.h...\n";
- system(
+ );
+ copyFile('src\backend\utils\errcodes.h','src\include\utils\errcodes.h');
+ }
+
+ if (IsNewer('src\pl\plpgsql\src\plerrcodes.h','src\backend\utils\errcodes.txt'))
+ {
+ print "Generating plerrcodes.h...\n";
+ system(
'perl src\pl\plpgsql\src\generate-plerrcodes.pl src\backend\utils\errcodes.txt > src\pl\plpgsql\src\plerrcodes.h'
- );
- }
-
- if (IsNewer('src\backend\utils\sort\qsort_tuple.c','src\backend\utils\sort\gen_qsort_tuple.pl'))
- {
- print "Generating qsort_tuple.c...\n";
- system(
+ );
+ }
+
+ if (
+ IsNewer(
+ 'src\backend\utils\sort\qsort_tuple.c',
+ 'src\backend\utils\sort\gen_qsort_tuple.pl'
+ )
+ )
+ {
+ print "Generating qsort_tuple.c...\n";
+ system(
'perl src\backend\utils\sort\gen_qsort_tuple.pl > src\backend\utils\sort\qsort_tuple.c'
- );
- }
-
- if (IsNewer('src\interfaces\libpq\libpq.rc','src\interfaces\libpq\libpq.rc.in'))
- {
- print "Generating libpq.rc...\n";
- my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
- my $d = ($year - 100) . "$yday";
- open(I,'<', 'src\interfaces\libpq\libpq.rc.in') || confess "Could not open libpq.rc.in";
- open(O,'>', 'src\interfaces\libpq\libpq.rc') || confess "Could not open libpq.rc";
- while (<I>)
- {
- s/(VERSION.*),0/$1,$d/;
- print O;
- }
- close(I);
- close(O);
- }
-
- if (IsNewer('src\bin\psql\sql_help.h','src\bin\psql\create_help.pl'))
- {
- print "Generating sql_help.h...\n";
- chdir('src\bin\psql');
- system("perl create_help.pl ../../../doc/src/sgml/ref sql_help");
- chdir('..\..\..');
- }
-
- if (IsNewer('src\interfaces\ecpg\preproc\preproc.y','src\backend\parser\gram.y'))
- {
- print "Generating preproc.y...\n";
- chdir('src\interfaces\ecpg\preproc');
- system('perl parse.pl < ..\..\..\backend\parser\gram.y > preproc.y');
- chdir('..\..\..\..');
- }
-
- if (
- IsNewer(
- 'src\interfaces\ecpg\include\ecpg_config.h',
- 'src\interfaces\ecpg\include\ecpg_config.h.in'
- )
- )
- {
- print "Generating ecpg_config.h...\n";
- open(O,'>','src\interfaces\ecpg\include\ecpg_config.h')
- || confess "Could not open ecpg_config.h";
- print O <<EOF;
+ );
+ }
+
+ if (IsNewer('src\interfaces\libpq\libpq.rc','src\interfaces\libpq\libpq.rc.in'))
+ {
+ print "Generating libpq.rc...\n";
+ my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time);
+ my $d = ($year - 100) . "$yday";
+ open(I,'<', 'src\interfaces\libpq\libpq.rc.in')
+ || confess "Could not open libpq.rc.in";
+ open(O,'>', 'src\interfaces\libpq\libpq.rc') || confess "Could not open libpq.rc";
+ while (<I>)
+ {
+ s/(VERSION.*),0/$1,$d/;
+ print O;
+ }
+ close(I);
+ close(O);
+ }
+
+ if (IsNewer('src\bin\psql\sql_help.h','src\bin\psql\create_help.pl'))
+ {
+ print "Generating sql_help.h...\n";
+ chdir('src\bin\psql');
+ system("perl create_help.pl ../../../doc/src/sgml/ref sql_help");
+ chdir('..\..\..');
+ }
+
+ if (IsNewer('src\interfaces\ecpg\preproc\preproc.y','src\backend\parser\gram.y'))
+ {
+ print "Generating preproc.y...\n";
+ chdir('src\interfaces\ecpg\preproc');
+ system('perl parse.pl < ..\..\..\backend\parser\gram.y > preproc.y');
+ chdir('..\..\..\..');
+ }
+
+ if (
+ IsNewer(
+ 'src\interfaces\ecpg\include\ecpg_config.h',
+ 'src\interfaces\ecpg\include\ecpg_config.h.in'
+ )
+ )
+ {
+ print "Generating ecpg_config.h...\n";
+ open(O,'>','src\interfaces\ecpg\include\ecpg_config.h')
+ || confess "Could not open ecpg_config.h";
+ print O <<EOF;
#if (_MSC_VER > 1200)
#define HAVE_LONG_LONG_INT_64
#define ENABLE_THREAD_SAFETY 1
EOF
- print O "#define USE_INTEGER_DATETIMES 1\n" if ($self->{options}->{integer_datetimes});
- print O "#endif\n";
- close(O);
- }
-
- unless (-f "src\\port\\pg_config_paths.h")
- {
- print "Generating pg_config_paths.h...\n";
- open(O,'>', 'src\port\pg_config_paths.h') || confess "Could not open pg_config_paths.h";
- print O <<EOF;
+ print O "#define USE_INTEGER_DATETIMES 1\n"
+ if ($self->{options}->{integer_datetimes});
+ print O "#endif\n";
+ close(O);
+ }
+
+ unless (-f "src\\port\\pg_config_paths.h")
+ {
+ print "Generating pg_config_paths.h...\n";
+ open(O,'>', 'src\port\pg_config_paths.h')
+ || confess "Could not open pg_config_paths.h";
+ print O <<EOF;
#define PGBINDIR "/bin"
#define PGSHAREDIR "/share"
#define SYSCONFDIR "/etc"
@@ -365,149 +378,150 @@ EOF
#define HTMLDIR "/doc"
#define MANDIR "/man"
EOF
- close(O);
- }
-
- my $mf = Project::read_file('src\backend\catalog\Makefile');
- $mf =~ s{\\s*[\r\n]+}{}mg;
- $mf =~ /^POSTGRES_BKI_SRCS\s*:?=[^,]+,(.*)\)$/gm
- || croak "Could not find POSTGRES_BKI_SRCS in Makefile\n";
- my @allbki = split /\s+/, $1;
- foreach my $bki (@allbki)
- {
- next if $bki eq "";
- if (IsNewer('src/backend/catalog/postgres.bki', "src/include/catalog/$bki"))
- {
- print "Generating postgres.bki and schemapg.h...\n";
- chdir('src\backend\catalog');
- my $bki_srcs = join(' ../../../src/include/catalog/', @allbki);
- system(
+ close(O);
+ }
+
+ my $mf = Project::read_file('src\backend\catalog\Makefile');
+ $mf =~ s{\\s*[\r\n]+}{}mg;
+ $mf =~ /^POSTGRES_BKI_SRCS\s*:?=[^,]+,(.*)\)$/gm
+ || croak "Could not find POSTGRES_BKI_SRCS in Makefile\n";
+ my @allbki = split /\s+/, $1;
+ foreach my $bki (@allbki)
+ {
+ next if $bki eq "";
+ if (IsNewer('src/backend/catalog/postgres.bki', "src/include/catalog/$bki"))
+ {
+ print "Generating postgres.bki and schemapg.h...\n";
+ chdir('src\backend\catalog');
+ my $bki_srcs = join(' ../../../src/include/catalog/', @allbki);
+ system(
"perl genbki.pl -I../../../src/include/catalog --set-version=$self->{majorver} $bki_srcs"
- );
- chdir('..\..\..');
- copyFile('src\backend\catalog\schemapg.h', 'src\include\catalog\schemapg.h');
- last;
- }
- }
-
- open(O, ">doc/src/sgml/version.sgml") || croak "Could not write to version.sgml\n";
- print O <<EOF;
+ );
+ chdir('..\..\..');
+ copyFile('src\backend\catalog\schemapg.h',
+ 'src\include\catalog\schemapg.h');
+ last;
+ }
+ }
+
+ open(O, ">doc/src/sgml/version.sgml") || croak "Could not write to version.sgml\n";
+ print O <<EOF;
<!ENTITY version "$self->{strver}">
<!ENTITY majorversion "$self->{majorver}">
EOF
- close(O);
+ close(O);
}
sub GenerateDefFile
{
- my ($self, $deffile, $txtfile, $libname) = @_;
-
- if (IsNewer($deffile,$txtfile))
- {
- print "Generating $deffile...\n";
- open(I,$txtfile) || confess("Could not open $txtfile\n");
- open(O,">$deffile") || confess("Could not open $deffile\n");
- print O "LIBRARY $libname\nEXPORTS\n";
- while (<I>)
- {
- next if (/^#/);
- next if (/^\s*$/);
- my ($f, $o) = split;
- print O " $f @ $o\n";
- }
- close(O);
- close(I);
- }
+ my ($self, $deffile, $txtfile, $libname) = @_;
+
+ if (IsNewer($deffile,$txtfile))
+ {
+ print "Generating $deffile...\n";
+ open(I,$txtfile) || confess("Could not open $txtfile\n");
+ open(O,">$deffile") || confess("Could not open $deffile\n");
+ print O "LIBRARY $libname\nEXPORTS\n";
+ while (<I>)
+ {
+ next if (/^#/);
+ next if (/^\s*$/);
+ my ($f, $o) = split;
+ print O " $f @ $o\n";
+ }
+ close(O);
+ close(I);
+ }
}
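
(Annotation: GenerateDefFile converts an exports.txt of "symbol ordinal" pairs into a linker module-definition file, skipping comments and blank lines. A sketch of the transformation, symbol names illustrative:)

    # exports.txt input:          generated libpqdll.def output:
    #   # comment, skipped          LIBRARY LIBPQ
    #   PQconnectdb       1         EXPORTS
    #   PQfinish          2           PQconnectdb @ 1
    #                                 PQfinish @ 2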
sub AddProject
{
- my ($self, $name, $type, $folder, $initialdir) = @_;
-
- my $proj = VSObjectFactory::CreateProject($self->{vcver}, $name, $type, $self);
- push @{$self->{projects}->{$folder}}, $proj;
- $proj->AddDir($initialdir) if ($initialdir);
- if ($self->{options}->{zlib})
- {
- $proj->AddIncludeDir($self->{options}->{zlib} . '\include');
- $proj->AddLibrary($self->{options}->{zlib} . '\lib\zdll.lib');
- }
- if ($self->{options}->{openssl})
- {
- $proj->AddIncludeDir($self->{options}->{openssl} . '\include');
- $proj->AddLibrary($self->{options}->{openssl} . '\lib\VC\ssleay32.lib', 1);
- $proj->AddLibrary($self->{options}->{openssl} . '\lib\VC\libeay32.lib', 1);
- }
- if ($self->{options}->{nls})
- {
- $proj->AddIncludeDir($self->{options}->{nls} . '\include');
- $proj->AddLibrary($self->{options}->{nls} . '\lib\libintl.lib');
- }
- if ($self->{options}->{krb5})
- {
- $proj->AddIncludeDir($self->{options}->{krb5} . '\inc\krb5');
- $proj->AddLibrary($self->{options}->{krb5} . '\lib\i386\krb5_32.lib');
- $proj->AddLibrary($self->{options}->{krb5} . '\lib\i386\comerr32.lib');
- $proj->AddLibrary($self->{options}->{krb5} . '\lib\i386\gssapi32.lib');
- }
- if ($self->{options}->{iconv})
- {
- $proj->AddIncludeDir($self->{options}->{iconv} . '\include');
- $proj->AddLibrary($self->{options}->{iconv} . '\lib\iconv.lib');
- }
- if ($self->{options}->{xml})
- {
- $proj->AddIncludeDir($self->{options}->{xml} . '\include');
- $proj->AddLibrary($self->{options}->{xml} . '\lib\libxml2.lib');
- }
- if ($self->{options}->{xslt})
- {
- $proj->AddIncludeDir($self->{options}->{xslt} . '\include');
- $proj->AddLibrary($self->{options}->{xslt} . '\lib\libxslt.lib');
- }
- return $proj;
+ my ($self, $name, $type, $folder, $initialdir) = @_;
+
+ my $proj = VSObjectFactory::CreateProject($self->{vcver}, $name, $type, $self);
+ push @{$self->{projects}->{$folder}}, $proj;
+ $proj->AddDir($initialdir) if ($initialdir);
+ if ($self->{options}->{zlib})
+ {
+ $proj->AddIncludeDir($self->{options}->{zlib} . '\include');
+ $proj->AddLibrary($self->{options}->{zlib} . '\lib\zdll.lib');
+ }
+ if ($self->{options}->{openssl})
+ {
+ $proj->AddIncludeDir($self->{options}->{openssl} . '\include');
+ $proj->AddLibrary($self->{options}->{openssl} . '\lib\VC\ssleay32.lib', 1);
+ $proj->AddLibrary($self->{options}->{openssl} . '\lib\VC\libeay32.lib', 1);
+ }
+ if ($self->{options}->{nls})
+ {
+ $proj->AddIncludeDir($self->{options}->{nls} . '\include');
+ $proj->AddLibrary($self->{options}->{nls} . '\lib\libintl.lib');
+ }
+ if ($self->{options}->{krb5})
+ {
+ $proj->AddIncludeDir($self->{options}->{krb5} . '\inc\krb5');
+ $proj->AddLibrary($self->{options}->{krb5} . '\lib\i386\krb5_32.lib');
+ $proj->AddLibrary($self->{options}->{krb5} . '\lib\i386\comerr32.lib');
+ $proj->AddLibrary($self->{options}->{krb5} . '\lib\i386\gssapi32.lib');
+ }
+ if ($self->{options}->{iconv})
+ {
+ $proj->AddIncludeDir($self->{options}->{iconv} . '\include');
+ $proj->AddLibrary($self->{options}->{iconv} . '\lib\iconv.lib');
+ }
+ if ($self->{options}->{xml})
+ {
+ $proj->AddIncludeDir($self->{options}->{xml} . '\include');
+ $proj->AddLibrary($self->{options}->{xml} . '\lib\libxml2.lib');
+ }
+ if ($self->{options}->{xslt})
+ {
+ $proj->AddIncludeDir($self->{options}->{xslt} . '\include');
+ $proj->AddLibrary($self->{options}->{xslt} . '\lib\libxslt.lib');
+ }
+ return $proj;
}
sub Save
{
- my ($self) = @_;
- my %flduid;
-
- $self->GenerateFiles();
- foreach my $fld (keys %{$self->{projects}})
- {
- foreach my $proj (@{$self->{projects}->{$fld}})
- {
- $proj->Save();
- }
- }
-
- open(SLN,">pgsql.sln") || croak "Could not write to pgsql.sln\n";
- print SLN <<EOF;
+ my ($self) = @_;
+ my %flduid;
+
+ $self->GenerateFiles();
+ foreach my $fld (keys %{$self->{projects}})
+ {
+ foreach my $proj (@{$self->{projects}->{$fld}})
+ {
+ $proj->Save();
+ }
+ }
+
+ open(SLN,">pgsql.sln") || croak "Could not write to pgsql.sln\n";
+ print SLN <<EOF;
Microsoft Visual Studio Solution File, Format Version $self->{solutionFileVersion}
# $self->{visualStudioName}
EOF
- foreach my $fld (keys %{$self->{projects}})
- {
- foreach my $proj (@{$self->{projects}->{$fld}})
- {
- print SLN <<EOF;
+ foreach my $fld (keys %{$self->{projects}})
+ {
+ foreach my $proj (@{$self->{projects}->{$fld}})
+ {
+ print SLN <<EOF;
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "$proj->{name}", "$proj->{name}$proj->{filenameExtension}", "$proj->{guid}"
EndProject
EOF
- }
- if ($fld ne "")
- {
- $flduid{$fld} = Win32::GuidGen();
- print SLN <<EOF;
+ }
+ if ($fld ne "")
+ {
+ $flduid{$fld} = Win32::GuidGen();
+ print SLN <<EOF;
Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "$fld", "$fld", "$flduid{$fld}"
EndProject
EOF
- }
- }
+ }
+ }
- print SLN <<EOF;
+ print SLN <<EOF;
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|$self->{platform}= Debug|$self->{platform}
@@ -516,20 +530,20 @@ Global
GlobalSection(ProjectConfigurationPlatforms) = postSolution
EOF
- foreach my $fld (keys %{$self->{projects}})
- {
- foreach my $proj (@{$self->{projects}->{$fld}})
- {
- print SLN <<EOF;
+ foreach my $fld (keys %{$self->{projects}})
+ {
+ foreach my $proj (@{$self->{projects}->{$fld}})
+ {
+ print SLN <<EOF;
$proj->{guid}.Debug|$self->{platform}.ActiveCfg = Debug|$self->{platform}
$proj->{guid}.Debug|$self->{platform}.Build.0 = Debug|$self->{platform}
$proj->{guid}.Release|$self->{platform}.ActiveCfg = Release|$self->{platform}
$proj->{guid}.Release|$self->{platform}.Build.0 = Release|$self->{platform}
EOF
- }
- }
+ }
+ }
- print SLN <<EOF;
+ print SLN <<EOF;
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@@ -537,42 +551,42 @@ EOF
GlobalSection(NestedProjects) = preSolution
EOF
- foreach my $fld (keys %{$self->{projects}})
- {
- next if ($fld eq "");
- foreach my $proj (@{$self->{projects}->{$fld}})
- {
- print SLN "\t\t$proj->{guid} = $flduid{$fld}\n";
- }
- }
+ foreach my $fld (keys %{$self->{projects}})
+ {
+ next if ($fld eq "");
+ foreach my $proj (@{$self->{projects}->{$fld}})
+ {
+ print SLN "\t\t$proj->{guid} = $flduid{$fld}\n";
+ }
+ }
- print SLN <<EOF;
+ print SLN <<EOF;
EndGlobalSection
EndGlobal
EOF
- close(SLN);
+ close(SLN);
}
sub GetFakeConfigure
{
- my $self = shift;
-
- my $cfg = '--enable-thread-safety';
- $cfg .= ' --enable-cassert' if ($self->{options}->{asserts});
- $cfg .= ' --enable-integer-datetimes' if ($self->{options}->{integer_datetimes});
- $cfg .= ' --enable-nls' if ($self->{options}->{nls});
- $cfg .= ' --with-ldap' if ($self->{options}->{ldap});
- $cfg .= ' --without-zlib' unless ($self->{options}->{zlib});
- $cfg .= ' --with-openssl' if ($self->{options}->{ssl});
- $cfg .= ' --with-ossp-uuid' if ($self->{options}->{uuid});
- $cfg .= ' --with-libxml' if ($self->{options}->{xml});
- $cfg .= ' --with-libxslt' if ($self->{options}->{xslt});
- $cfg .= ' --with-krb5' if ($self->{options}->{krb5});
- $cfg .= ' --with-tcl' if ($self->{options}->{tcl});
- $cfg .= ' --with-perl' if ($self->{options}->{perl});
- $cfg .= ' --with-python' if ($self->{options}->{python});
-
- return $cfg;
+ my $self = shift;
+
+ my $cfg = '--enable-thread-safety';
+ $cfg .= ' --enable-cassert' if ($self->{options}->{asserts});
+ $cfg .= ' --enable-integer-datetimes' if ($self->{options}->{integer_datetimes});
+ $cfg .= ' --enable-nls' if ($self->{options}->{nls});
+ $cfg .= ' --with-ldap' if ($self->{options}->{ldap});
+ $cfg .= ' --without-zlib' unless ($self->{options}->{zlib});
+ $cfg .= ' --with-openssl' if ($self->{options}->{ssl});
+ $cfg .= ' --with-ossp-uuid' if ($self->{options}->{uuid});
+ $cfg .= ' --with-libxml' if ($self->{options}->{xml});
+ $cfg .= ' --with-libxslt' if ($self->{options}->{xslt});
+ $cfg .= ' --with-krb5' if ($self->{options}->{krb5});
+ $cfg .= ' --with-tcl' if ($self->{options}->{tcl});
+ $cfg .= ' --with-perl' if ($self->{options}->{perl});
+ $cfg .= ' --with-python' if ($self->{options}->{python});
+
+ return $cfg;
}
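
(Annotation: GetFakeConfigure fabricates the configure invocation string that Unix builds would embed, so pg_config output stays plausible on Windows. A sketch of the result for a hypothetical option set:)

    # With asserts and nls enabled, zlib left out, and the defaults from
    # _new (integer_datetimes on), VAL_CONFIGURE becomes:
    #   --enable-thread-safety --enable-cassert --enable-integer-datetimes --enable-nls --without-zlib
    # (--enable-thread-safety is unconditional; zlib is the one flag
    # expressed negatively.)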
package VS2005Solution;
@@ -587,15 +601,15 @@ use base qw(Solution);
sub new
{
- my $classname = shift;
- my $self = $classname->SUPER::_new(@_);
- bless($self, $classname);
+ my $classname = shift;
+ my $self = $classname->SUPER::_new(@_);
+ bless($self, $classname);
- $self->{solutionFileVersion} = '9.00';
- $self->{vcver} = '8.00';
- $self->{visualStudioName} = 'Visual Studio 2005';
+ $self->{solutionFileVersion} = '9.00';
+ $self->{vcver} = '8.00';
+ $self->{visualStudioName} = 'Visual Studio 2005';
- return $self;
+ return $self;
}
package VS2008Solution;
@@ -610,15 +624,15 @@ use base qw(Solution);
sub new
{
- my $classname = shift;
- my $self = $classname->SUPER::_new(@_);
- bless($self, $classname);
+ my $classname = shift;
+ my $self = $classname->SUPER::_new(@_);
+ bless($self, $classname);
- $self->{solutionFileVersion} = '10.00';
- $self->{vcver} = '9.00';
- $self->{visualStudioName} = 'Visual Studio 2008';
+ $self->{solutionFileVersion} = '10.00';
+ $self->{vcver} = '9.00';
+ $self->{visualStudioName} = 'Visual Studio 2008';
- return $self;
+ return $self;
}
package VS2010Solution;
@@ -634,15 +648,15 @@ use base qw(Solution);
sub new
{
- my $classname = shift;
- my $self = $classname->SUPER::_new(@_);
- bless($self, $classname);
+ my $classname = shift;
+ my $self = $classname->SUPER::_new(@_);
+ bless($self, $classname);
- $self->{solutionFileVersion} = '11.00';
- $self->{vcver} = '10.00';
- $self->{visualStudioName} = 'Visual Studio 2010';
+ $self->{solutionFileVersion} = '11.00';
+ $self->{vcver} = '10.00';
+ $self->{visualStudioName} = 'Visual Studio 2010';
- return $self;
+ return $self;
}
1;
diff --git a/src/tools/msvc/VCBuildProject.pm b/src/tools/msvc/VCBuildProject.pm
index 97439d9d5c..a7fd0c0d9d 100644
--- a/src/tools/msvc/VCBuildProject.pm
+++ b/src/tools/msvc/VCBuildProject.pm
@@ -13,124 +13,124 @@ use base qw(Project);
sub _new
{
- my $classname = shift;
- my $self = $classname->SUPER::_new(@_);
- bless($self, $classname);
+ my $classname = shift;
+ my $self = $classname->SUPER::_new(@_);
+ bless($self, $classname);
- $self->{filenameExtension} = '.vcproj';
+ $self->{filenameExtension} = '.vcproj';
- return $self;
+ return $self;
}
sub WriteHeader
{
- my ($self, $f) = @_;
+ my ($self, $f) = @_;
- print $f <<EOF;
+ print $f <<EOF;
<?xml version="1.0" encoding="Windows-1252"?>
<VisualStudioProject ProjectType="Visual C++" Version="$self->{vcver}" Name="$self->{name}" ProjectGUID="$self->{guid}">
<Platforms><Platform Name="$self->{platform}"/></Platforms>
<Configurations>
EOF
- $self->WriteConfiguration($f, 'Debug',
- { defs=>'_DEBUG;DEBUG=1;', wholeopt=>0, opt=>0, strpool=>'false', runtime=>3 });
- $self->WriteConfiguration($f, 'Release',
- { defs=>'', wholeopt=>0, opt=>3, strpool=>'true', runtime=>2 });
- print $f <<EOF;
+ $self->WriteConfiguration($f, 'Debug',
+ {defs=>'_DEBUG;DEBUG=1;', wholeopt=>0, opt=>0, strpool=>'false', runtime=>3});
+ $self->WriteConfiguration($f, 'Release',
+ {defs=>'', wholeopt=>0, opt=>3, strpool=>'true', runtime=>2});
+ print $f <<EOF;
</Configurations>
EOF
- $self->WriteReferences($f);
+ $self->WriteReferences($f);
}
sub WriteFiles
{
- my ($self, $f) = @_;
- print $f <<EOF;
+ my ($self, $f) = @_;
+ print $f <<EOF;
<Files>
EOF
- my @dirstack = ();
- my %uniquefiles;
- foreach my $fileNameWithPath (sort keys %{ $self->{files} })
- {
- confess "Bad format filename '$fileNameWithPath'\n"
- unless ($fileNameWithPath =~ /^(.*)\\([^\\]+)\.[r]?[cyl]$/);
- my $dir = $1;
- my $file = $2;
-
- # Walk backwards down the directory stack and close any dirs we're done with
- while ($#dirstack >= 0)
- {
- if (join('\\',@dirstack) eq substr($dir, 0, length(join('\\',@dirstack))))
- {
- last if (length($dir) == length(join('\\',@dirstack)));
- last if (substr($dir, length(join('\\',@dirstack)),1) eq '\\');
- }
- print $f ' ' x $#dirstack . " </Filter>\n";
- pop @dirstack;
- }
-
- # Now walk forwards and create whatever directories are needed
- while (join('\\',@dirstack) ne $dir)
- {
- my $left = substr($dir, length(join('\\',@dirstack)));
- $left =~ s/^\\//;
- my @pieces = split /\\/, $left;
- push @dirstack, $pieces[0];
- print $f ' ' x $#dirstack . " <Filter Name=\"$pieces[0]\" Filter=\"\">\n";
- }
-
- print $f ' ' x $#dirstack . " <File RelativePath=\"$fileNameWithPath\"";
- if ($fileNameWithPath =~ /\.y$/)
- {
- my $of = $fileNameWithPath;
- $of =~ s/\.y$/.c/;
- $of =~ s{^src\\pl\\plpgsql\\src\\gram.c$}{src\\pl\\plpgsql\\src\\pl_gram.c};
- print $f '>'
- . $self->GenerateCustomTool('Running bison on ' . $fileNameWithPath,
- "perl src\\tools\\msvc\\pgbison.pl $fileNameWithPath", $of)
- . '</File>' . "\n";
- }
- elsif ($fileNameWithPath =~ /\.l$/)
- {
- my $of = $fileNameWithPath;
- $of =~ s/\.l$/.c/;
- print $f '>'
- . $self->GenerateCustomTool('Running flex on ' . $fileNameWithPath,
- "perl src\\tools\\msvc\\pgflex.pl $fileNameWithPath", $of)
- . '</File>' . "\n";
- }
- elsif (defined($uniquefiles{$file}))
- {
-
- # File already exists, so fake a new name
- my $obj = $dir;
- $obj =~ s/\\/_/g;
- print $f
+ my @dirstack = ();
+ my %uniquefiles;
+ foreach my $fileNameWithPath (sort keys %{$self->{files}})
+ {
+ confess "Bad format filename '$fileNameWithPath'\n"
+ unless ($fileNameWithPath =~ /^(.*)\\([^\\]+)\.[r]?[cyl]$/);
+ my $dir = $1;
+ my $file = $2;
+
+ # Walk backwards down the directory stack and close any dirs we're done with
+ while ($#dirstack >= 0)
+ {
+ if (join('\\',@dirstack) eq substr($dir, 0, length(join('\\',@dirstack))))
+ {
+ last if (length($dir) == length(join('\\',@dirstack)));
+ last if (substr($dir, length(join('\\',@dirstack)),1) eq '\\');
+ }
+ print $f ' ' x $#dirstack . " </Filter>\n";
+ pop @dirstack;
+ }
+
+ # Now walk forwards and create whatever directories are needed
+ while (join('\\',@dirstack) ne $dir)
+ {
+ my $left = substr($dir, length(join('\\',@dirstack)));
+ $left =~ s/^\\//;
+ my @pieces = split /\\/, $left;
+ push @dirstack, $pieces[0];
+ print $f ' ' x $#dirstack . " <Filter Name=\"$pieces[0]\" Filter=\"\">\n";
+ }
+
+ print $f ' ' x $#dirstack . " <File RelativePath=\"$fileNameWithPath\"";
+ if ($fileNameWithPath =~ /\.y$/)
+ {
+ my $of = $fileNameWithPath;
+ $of =~ s/\.y$/.c/;
+ $of =~ s{^src\\pl\\plpgsql\\src\\gram.c$}{src\\pl\\plpgsql\\src\\pl_gram.c};
+ print $f '>'
+ . $self->GenerateCustomTool('Running bison on ' . $fileNameWithPath,
+ "perl src\\tools\\msvc\\pgbison.pl $fileNameWithPath", $of)
+ . '</File>' . "\n";
+ }
+ elsif ($fileNameWithPath =~ /\.l$/)
+ {
+ my $of = $fileNameWithPath;
+ $of =~ s/\.l$/.c/;
+ print $f '>'
+ . $self->GenerateCustomTool('Running flex on ' . $fileNameWithPath,
+ "perl src\\tools\\msvc\\pgflex.pl $fileNameWithPath", $of)
+ . '</File>' . "\n";
+ }
+ elsif (defined($uniquefiles{$file}))
+ {
+
+ # File already exists, so fake a new name
+ my $obj = $dir;
+ $obj =~ s/\\/_/g;
+ print $f
"><FileConfiguration Name=\"Debug|$self->{platform}\"><Tool Name=\"VCCLCompilerTool\" ObjectFile=\".\\debug\\$self->{name}\\$obj"
- . "_$file.obj\" /></FileConfiguration><FileConfiguration Name=\"Release|$self->{platform}\"><Tool Name=\"VCCLCompilerTool\" ObjectFile=\".\\release\\$self->{name}\\$obj"
- . "_$file.obj\" /></FileConfiguration></File>\n";
- }
- else
- {
- $uniquefiles{$file} = 1;
- print $f " />\n";
- }
- }
- while ($#dirstack >= 0)
- {
- print $f ' ' x $#dirstack . " </Filter>\n";
- pop @dirstack;
- }
- print $f <<EOF;
+ . "_$file.obj\" /></FileConfiguration><FileConfiguration Name=\"Release|$self->{platform}\"><Tool Name=\"VCCLCompilerTool\" ObjectFile=\".\\release\\$self->{name}\\$obj"
+ . "_$file.obj\" /></FileConfiguration></File>\n";
+ }
+ else
+ {
+ $uniquefiles{$file} = 1;
+ print $f " />\n";
+ }
+ }
+ while ($#dirstack >= 0)
+ {
+ print $f ' ' x $#dirstack . " </Filter>\n";
+ pop @dirstack;
+ }
+ print $f <<EOF;
</Files>
EOF
}
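
(Annotation: WriteFiles depends on emitting files in sorted path order, so the <Filter> stack only ever pops back to the common prefix and pushes the new tail. A trace sketch for two hypothetical paths:)

    # src\backend\main\main.c      open: <Filter src><Filter backend><Filter main>
    # src\backend\tcop\postgres.c  pop "main", push "tcop"; src\backend stays open
    # end of loop                  unwind: close tcop, backend, src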
sub Footer
{
- my ($self, $f) = @_;
+ my ($self, $f) = @_;
- print $f <<EOF;
+ print $f <<EOF;
<Globals/>
</VisualStudioProject>
EOF
@@ -138,13 +138,13 @@ EOF
sub WriteConfiguration
{
- my ($self, $f, $cfgname, $p) = @_;
- my $cfgtype = ($self->{type} eq "exe")?1:($self->{type} eq "dll"?2:4);
- my $libs = $self->GetAdditionalLinkerDependencies($cfgname, ' ');
+ my ($self, $f, $cfgname, $p) = @_;
+ my $cfgtype = ($self->{type} eq "exe")?1:($self->{type} eq "dll"?2:4);
+ my $libs = $self->GetAdditionalLinkerDependencies($cfgname, ' ');
- my $targetmachine = $self->{platform} eq 'Win32' ? 1 : 17;
+ my $targetmachine = $self->{platform} eq 'Win32' ? 1 : 17;
- print $f <<EOF;
+ print $f <<EOF;
<Configuration Name="$cfgname|$self->{platform}" OutputDirectory=".\\$cfgname\\$self->{name}" IntermediateDirectory=".\\$cfgname\\$self->{name}"
ConfigurationType="$cfgtype" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE" CharacterSet="2" WholeProgramOptimization="$p->{wholeopt}">
<Tool Name="VCCLCompilerTool" Optimization="$p->{opt}"
@@ -154,7 +154,7 @@ sub WriteConfiguration
RuntimeLibrary="$p->{runtime}" DisableSpecificWarnings="$self->{disablewarnings}"
AdditionalOptions="/MP"
EOF
- print $f <<EOF;
+ print $f <<EOF;
AssemblerOutput="0" AssemblerListingLocation=".\\$cfgname\\$self->{name}\\" ObjectFile=".\\$cfgname\\$self->{name}\\"
ProgramDataBaseFileName=".\\$cfgname\\$self->{name}\\" BrowseInformation="0"
WarningLevel="3" SuppressStartupBanner="TRUE" DebugInformationFormat="3" CompileAs="0"/>
@@ -166,59 +166,59 @@ EOF
GenerateMapFile="FALSE" MapFileName=".\\$cfgname\\$self->{name}\\$self->{name}.map"
SubSystem="1" TargetMachine="$targetmachine"
EOF
- if ($self->{disablelinkerwarnings})
- {
- print $f "\t\tAdditionalOptions=\"/ignore:$self->{disablelinkerwarnings}\"\n";
- }
- if ($self->{implib})
- {
- my $l = $self->{implib};
- $l =~ s/__CFGNAME__/$cfgname/g;
- print $f "\t\tImportLibrary=\"$l\"\n";
- }
- if ($self->{def})
- {
- my $d = $self->{def};
- $d =~ s/__CFGNAME__/$cfgname/g;
- print $f "\t\tModuleDefinitionFile=\"$d\"\n";
- }
-
- print $f "\t/>\n";
- print $f
+ if ($self->{disablelinkerwarnings})
+ {
+ print $f "\t\tAdditionalOptions=\"/ignore:$self->{disablelinkerwarnings}\"\n";
+ }
+ if ($self->{implib})
+ {
+ my $l = $self->{implib};
+ $l =~ s/__CFGNAME__/$cfgname/g;
+ print $f "\t\tImportLibrary=\"$l\"\n";
+ }
+ if ($self->{def})
+ {
+ my $d = $self->{def};
+ $d =~ s/__CFGNAME__/$cfgname/g;
+ print $f "\t\tModuleDefinitionFile=\"$d\"\n";
+ }
+
+ print $f "\t/>\n";
+ print $f
"\t<Tool Name=\"VCLibrarianTool\" OutputFile=\".\\$cfgname\\$self->{name}\\$self->{name}.lib\" IgnoreDefaultLibraryNames=\"libc\" />\n";
- print $f
- "\t<Tool Name=\"VCResourceCompilerTool\" AdditionalIncludeDirectories=\"src\\include\" />\n";
- if ($self->{builddef})
- {
- print $f
+ print $f
+"\t<Tool Name=\"VCResourceCompilerTool\" AdditionalIncludeDirectories=\"src\\include\" />\n";
+ if ($self->{builddef})
+ {
+ print $f
"\t<Tool Name=\"VCPreLinkEventTool\" Description=\"Generate DEF file\" CommandLine=\"perl src\\tools\\msvc\\gendef.pl $cfgname\\$self->{name} $self->{platform}\" />\n";
- }
- print $f <<EOF;
+ }
+ print $f <<EOF;
</Configuration>
EOF
}
sub WriteReferences
{
- my ($self, $f) = @_;
- print $f " <References>\n";
- foreach my $ref (@{$self->{references}})
- {
- print $f
+ my ($self, $f) = @_;
+ print $f " <References>\n";
+ foreach my $ref (@{$self->{references}})
+ {
+ print $f
" <ProjectReference ReferencedProjectIdentifier=\"$ref->{guid}\" Name=\"$ref->{name}\" />\n";
- }
- print $f " </References>\n";
+ }
+ print $f " </References>\n";
}
sub GenerateCustomTool
{
- my ($self, $desc, $tool, $output, $cfg) = @_;
- if (!defined($cfg))
- {
- return $self->GenerateCustomTool($desc, $tool, $output, 'Debug')
- .$self->GenerateCustomTool($desc, $tool, $output, 'Release');
- }
- return
+ my ($self, $desc, $tool, $output, $cfg) = @_;
+ if (!defined($cfg))
+ {
+ return $self->GenerateCustomTool($desc, $tool, $output, 'Debug')
+ .$self->GenerateCustomTool($desc, $tool, $output, 'Release');
+ }
+ return
"<FileConfiguration Name=\"$cfg|$self->{platform}\"><Tool Name=\"VCCustomBuildTool\" Description=\"$desc\" CommandLine=\"$tool\" AdditionalDependencies=\"\" Outputs=\"$output\" /></FileConfiguration>";
}
@@ -234,13 +234,13 @@ use base qw(VCBuildProject);
sub new
{
- my $classname = shift;
- my $self = $classname->SUPER::_new(@_);
- bless($self, $classname);
+ my $classname = shift;
+ my $self = $classname->SUPER::_new(@_);
+ bless($self, $classname);
- $self->{vcver} = '8.00';
+ $self->{vcver} = '8.00';
- return $self;
+ return $self;
}
package VC2008Project;
@@ -255,13 +255,13 @@ use base qw(VCBuildProject);
sub new
{
- my $classname = shift;
- my $self = $classname->SUPER::_new(@_);
- bless($self, $classname);
+ my $classname = shift;
+ my $self = $classname->SUPER::_new(@_);
+ bless($self, $classname);
- $self->{vcver} = '9.00';
+ $self->{vcver} = '9.00';
- return $self;
+ return $self;
}
1;
diff --git a/src/tools/msvc/VSObjectFactory.pm b/src/tools/msvc/VSObjectFactory.pm
index 3bed922991..e222b04c68 100644
--- a/src/tools/msvc/VSObjectFactory.pm
+++ b/src/tools/msvc/VSObjectFactory.pm
@@ -22,101 +22,103 @@ our (@ISA, @EXPORT);
sub CreateSolution
{
- my $visualStudioVersion = shift;
+ my $visualStudioVersion = shift;
- if (!defined($visualStudioVersion))
- {
- $visualStudioVersion = DetermineVisualStudioVersion();
- }
+ if (!defined($visualStudioVersion))
+ {
+ $visualStudioVersion = DetermineVisualStudioVersion();
+ }
- if ($visualStudioVersion eq '8.00')
- {
- return new VS2005Solution(@_);
- }
- elsif ($visualStudioVersion eq '9.00')
- {
- return new VS2008Solution(@_);
- }
- elsif ($visualStudioVersion eq '10.00')
- {
- return new VS2010Solution(@_);
- }
- else
- {
- croak "The requested Visual Studio version is not supported.";
- }
+ if ($visualStudioVersion eq '8.00')
+ {
+ return new VS2005Solution(@_);
+ }
+ elsif ($visualStudioVersion eq '9.00')
+ {
+ return new VS2008Solution(@_);
+ }
+ elsif ($visualStudioVersion eq '10.00')
+ {
+ return new VS2010Solution(@_);
+ }
+ else
+ {
+ croak "The requested Visual Studio version is not supported.";
+ }
}
sub CreateProject
{
- my $visualStudioVersion = shift;
+ my $visualStudioVersion = shift;
- if (!defined($visualStudioVersion))
- {
- $visualStudioVersion = DetermineVisualStudioVersion();
- }
+ if (!defined($visualStudioVersion))
+ {
+ $visualStudioVersion = DetermineVisualStudioVersion();
+ }
- if ($visualStudioVersion eq '8.00')
- {
- return new VC2005Project(@_);
- }
- elsif ($visualStudioVersion eq '9.00')
- {
- return new VC2008Project(@_);
- }
- elsif ($visualStudioVersion eq '10.00')
- {
- return new VC2010Project(@_);
- }
- else
- {
- croak "The requested Visual Studio version is not supported.";
- }
+ if ($visualStudioVersion eq '8.00')
+ {
+ return new VC2005Project(@_);
+ }
+ elsif ($visualStudioVersion eq '9.00')
+ {
+ return new VC2008Project(@_);
+ }
+ elsif ($visualStudioVersion eq '10.00')
+ {
+ return new VC2010Project(@_);
+ }
+ else
+ {
+ croak "The requested Visual Studio version is not supported.";
+ }
}
sub DetermineVisualStudioVersion
{
- my $nmakeVersion = shift;
+ my $nmakeVersion = shift;
- if (!defined($nmakeVersion))
- {
+ if (!defined($nmakeVersion))
+ {
-		# Determine the version of the nmake command, to choose the proper Visual Studio version;
-		# we use nmake because it has existed for a long time and still exists in Visual Studio 2010
- open(P,"nmake /? 2>&1 |")
- || croak "Unable to determine Visual Studio version: The nmake command wasn't found.";
- while(<P>)
- {
- chomp;
- if (/(\d+)\.(\d+)\.\d+(\.\d+)?$/)
- {
- return _GetVisualStudioVersion($1, $2);
- }
- }
- close(P);
- }
- elsif($nmakeVersion =~ /(\d+)\.(\d+)\.\d+(\.\d+)?$/)
- {
- return _GetVisualStudioVersion($1, $2);
- }
- croak "Unable to determine Visual Studio version: The nmake version could not be determined.";
+		# Determine the version of the nmake command, to choose the proper Visual Studio version;
+		# we use nmake because it has existed for a long time and still exists in Visual Studio 2010
+ open(P,"nmake /? 2>&1 |")
+ || croak
+ "Unable to determine Visual Studio version: The nmake command wasn't found.";
+ while(<P>)
+ {
+ chomp;
+ if (/(\d+)\.(\d+)\.\d+(\.\d+)?$/)
+ {
+ return _GetVisualStudioVersion($1, $2);
+ }
+ }
+ close(P);
+ }
+ elsif($nmakeVersion =~ /(\d+)\.(\d+)\.\d+(\.\d+)?$/)
+ {
+ return _GetVisualStudioVersion($1, $2);
+ }
+ croak
+ "Unable to determine Visual Studio version: The nmake version could not be determined.";
}
sub _GetVisualStudioVersion
{
- my($major, $minor) = @_;
- if ($major > 10)
- {
- carp
+ my($major, $minor) = @_;
+ if ($major > 10)
+ {
+ carp
"The determined version of Visual Studio is newer than the latest supported version. Returning the latest supported version instead.";
- return '10.00';
- }
- elsif ($major < 6)
- {
- croak
+ return '10.00';
+ }
+ elsif ($major < 6)
+ {
+ croak
"Unable to determine Visual Studio version: Visual Studio versions before 6.0 aren't supported.";
- }
- return "$major.$minor";
+ }
+ return "$major.$minor";
}
1;
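Taken together, DetermineVisualStudioVersion and _GetVisualStudioVersion map an nmake banner line to a supported Visual Studio version string. A standalone sketch of that pipeline; the banner text is invented, and the clamping mirrors the subs above:

    use strict;
    use warnings;

    my $banner = 'Microsoft (R) Program Maintenance Utility Version 10.00.30319.01';

    if ($banner =~ /(\d+)\.(\d+)\.\d+(\.\d+)?$/)
    {
        my ($major, $minor) = ($1, $2);
        my $vsver =
            $major > 10 ? '10.00'    # clamp to the newest supported version
          : $major < 6  ? die("versions before 6.0 aren't supported\n")
          :               "$major.$minor";
        print "nmake $major.$minor => Visual Studio $vsver\n";
    }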
diff --git a/src/tools/msvc/build.pl b/src/tools/msvc/build.pl
index 151849bba5..4fa309738b 100644
--- a/src/tools/msvc/build.pl
+++ b/src/tools/msvc/build.pl
@@ -5,7 +5,7 @@
BEGIN
{
- chdir("../../..") if (-d "../msvc" && -d "../../../src");
+ chdir("../../..") if (-d "../msvc" && -d "../../../src");
}
@@ -19,13 +19,13 @@ use Mkvcbuild;
# it should contain lines like:
# $ENV{PATH} = "c:/path/to/bison/bin;$ENV{PATH}";
-if ( -e "src/tools/msvc/buildenv.pl")
+if (-e "src/tools/msvc/buildenv.pl")
{
- require "src/tools/msvc/buildenv.pl";
+ require "src/tools/msvc/buildenv.pl";
}
-elsif (-e "./buildenv.pl" )
+elsif (-e "./buildenv.pl")
{
- require "./buildenv.pl";
+ require "./buildenv.pl";
}
# set up the project
@@ -41,26 +41,26 @@ my $bconf = $ENV{CONFIG} || "Release";
my $buildwhat = $ARGV[1] || "";
if ($ARGV[0] eq 'DEBUG')
{
- $bconf = "Debug";
+ $bconf = "Debug";
}
elsif ($ARGV[0] ne "RELEASE")
{
- $buildwhat = $ARGV[0] || "";
+ $buildwhat = $ARGV[0] || "";
}
# ... and do it
if ($buildwhat and $vcver eq '10.00')
{
- system("msbuild $buildwhat.vcxproj /verbosity:detailed /p:Configuration=$bconf");
+ system("msbuild $buildwhat.vcxproj /verbosity:detailed /p:Configuration=$bconf");
}
elsif ($buildwhat)
{
- system("vcbuild $buildwhat.vcproj $bconf");
+ system("vcbuild $buildwhat.vcproj $bconf");
}
else
{
- system("msbuild pgsql.sln /verbosity:detailed /p:Configuration=$bconf");
+ system("msbuild pgsql.sln /verbosity:detailed /p:Configuration=$bconf");
}
# report status
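For reference, the builder dispatch above condensed into one expression (the values are assumed for illustration): VS2010 targets go through msbuild and .vcxproj files, anything older through vcbuild and .vcproj files.

    use strict;
    use warnings;

    my ($vcver, $bconf, $buildwhat) = ('10.00', 'Release', 'psql');    # assumed

    my $cmd =
        ($buildwhat && $vcver eq '10.00')
      ? "msbuild $buildwhat.vcxproj /verbosity:detailed /p:Configuration=$bconf"
      : $buildwhat
      ? "vcbuild $buildwhat.vcproj $bconf"
      : "msbuild pgsql.sln /verbosity:detailed /p:Configuration=$bconf";
    print "$cmd\n";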
diff --git a/src/tools/msvc/builddoc.pl b/src/tools/msvc/builddoc.pl
index 8d8d8a35cc..b567f542a7 100644
--- a/src/tools/msvc/builddoc.pl
+++ b/src/tools/msvc/builddoc.pl
@@ -26,7 +26,7 @@ die "bad DOCROOT '$docroot'" unless ($docroot && -d $docroot);
my @notfound;
foreach my $dir ('docbook', $openjade, $dsssl)
{
- push(@notfound,$dir) unless -d "$docroot/$dir";
+ push(@notfound,$dir) unless -d "$docroot/$dir";
}
missing() if @notfound;
@@ -94,28 +94,28 @@ exit;
sub renamefiles
{
- # Rename ISO entity files
- my $savedir = getcwd();
- chdir "$docroot/docbook";
- foreach my $f (glob('ISO*'))
- {
- next if $f =~ /\.gml$/i;
- my $nf = $f;
- $nf =~ s/ISO(.*)/ISO-$1.gml/;
- move $f, $nf;
- }
- chdir $savedir;
+ # Rename ISO entity files
+ my $savedir = getcwd();
+ chdir "$docroot/docbook";
+ foreach my $f (glob('ISO*'))
+ {
+ next if $f =~ /\.gml$/i;
+ my $nf = $f;
+ $nf =~ s/ISO(.*)/ISO-$1.gml/;
+ move $f, $nf;
+ }
+ chdir $savedir;
}
sub missing
{
- print STDERR "could not find $docroot/$_\n" foreach (@notfound);
- exit 1;
+ print STDERR "could not find $docroot/$_\n" foreach (@notfound);
+ exit 1;
}
sub noversion
{
- print STDERR "Could not find version.sgml. ","Please run mkvcbuild.pl first!\n";
- exit 1;
+ print STDERR "Could not find version.sgml. ","Please run mkvcbuild.pl first!\n";
+ exit 1;
}
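The rename rule in renamefiles above turns DocBook's bare ISO entity files into .gml files. A one-line demonstration with an invented file name:

    my $f = 'ISOlat1';
    (my $nf = $f) =~ s/ISO(.*)/ISO-$1.gml/;
    print "$f -> $nf\n";    # ISOlat1 -> ISO-lat1.gml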
diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl
index 971d740a9e..95e9cd93da 100644
--- a/src/tools/msvc/config_default.pl
+++ b/src/tools/msvc/config_default.pl
@@ -3,25 +3,25 @@ use strict;
use warnings;
our $config = {
- asserts=>0, # --enable-cassert
- # integer_datetimes=>1, # --enable-integer-datetimes - on is now default
- # float4byval=>1, # --disable-float4-byval, on by default
- # float8byval=>0, # --disable-float8-byval, off by default
- # blocksize => 8, # --with-blocksize, 8kB by default
- # wal_blocksize => 8, # --with-wal-blocksize, 8kB by default
- # wal_segsize => 16, # --with-wal-segsize, 16MB by default
- ldap=>1, # --with-ldap
- nls=>undef, # --enable-nls=<path>
-	tcl=>undef, # --with-tcl=<path>
- perl=>undef, # --with-perl
- python=>undef, # --with-python=<path>
- krb5=>undef, # --with-krb5=<path>
- openssl=>undef, # --with-ssl=<path>
- uuid=>undef, # --with-ossp-uuid
- xml=>undef, # --with-libxml=<path>
- xslt=>undef, # --with-libxslt=<path>
- iconv=>undef, # (not in configure, path to iconv)
- zlib=>undef # --with-zlib=<path>
+ asserts=>0, # --enable-cassert
+ # integer_datetimes=>1, # --enable-integer-datetimes - on is now default
+ # float4byval=>1, # --disable-float4-byval, on by default
+ # float8byval=>0, # --disable-float8-byval, off by default
+ # blocksize => 8, # --with-blocksize, 8kB by default
+ # wal_blocksize => 8, # --with-wal-blocksize, 8kB by default
+ # wal_segsize => 16, # --with-wal-segsize, 16MB by default
+ ldap=>1, # --with-ldap
+ nls=>undef, # --enable-nls=<path>
+	tcl=>undef, # --with-tcl=<path>
+ perl=>undef, # --with-perl
+ python=>undef, # --with-python=<path>
+ krb5=>undef, # --with-krb5=<path>
+ openssl=>undef, # --with-ssl=<path>
+ uuid=>undef, # --with-ossp-uuid
+ xml=>undef, # --with-libxml=<path>
+ xslt=>undef, # --with-libxslt=<path>
+ iconv=>undef, # (not in configure, path to iconv)
+ zlib=>undef # --with-zlib=<path>
};
1;
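The hash above is a template: a local configuration file in the same format overrides it. A hypothetical example (the paths are placeholders, not defaults):

    use strict;
    use warnings;

    our $config = {
        asserts => 1,               # --enable-cassert for a debug build
        openssl => 'c:\openssl',    # --with-ssl=<path>
        zlib    => 'c:\zlib',       # --with-zlib=<path>
    };
    1;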
diff --git a/src/tools/msvc/gendef.pl b/src/tools/msvc/gendef.pl
index b8538dd79b..2fc8c4a290 100644
--- a/src/tools/msvc/gendef.pl
+++ b/src/tools/msvc/gendef.pl
@@ -13,40 +13,40 @@ my $platform = $ARGV[1];
if (-f "$ARGV[0]/$defname.def")
{
- print "Not re-generating $defname.DEF, file already exists.\n";
- exit(0);
+ print "Not re-generating $defname.DEF, file already exists.\n";
+ exit(0);
}
print "Generating $defname.DEF from directory $ARGV[0], platform $platform\n";
while (<$ARGV[0]/*.obj>)
{
- my $symfile = $_;
- $symfile=~ s/\.obj$/.sym/i;
- print ".";
- system("dumpbin /symbols /out:symbols.out $_ >NUL") && die "Could not call dumpbin";
- open(F, "<symbols.out") || die "Could not open symbols.out for $_\n";
- while (<F>)
- {
- s/\(\)//g;
- my @pieces = split;
- next unless $pieces[0] =~ /^[A-F0-9]{3,}$/;
- next unless $pieces[6];
- next if ($pieces[2] eq "UNDEF");
- next unless ($pieces[4] eq "External");
- next if $pieces[6] =~ /^@/;
- next if $pieces[6] =~ /^\(/;
- next if $pieces[6] =~ /^__real/;
- next if $pieces[6] =~ /^__imp/;
- next if $pieces[6] =~ /NULL_THUNK_DATA$/;
- next if $pieces[6] =~ /^__IMPORT_DESCRIPTOR/;
- next if $pieces[6] =~ /^__NULL_IMPORT/;
- next if $pieces[6] =~ /^\?\?_C/;
+ my $symfile = $_;
+ $symfile=~ s/\.obj$/.sym/i;
+ print ".";
+ system("dumpbin /symbols /out:symbols.out $_ >NUL") && die "Could not call dumpbin";
+ open(F, "<symbols.out") || die "Could not open symbols.out for $_\n";
+ while (<F>)
+ {
+ s/\(\)//g;
+ my @pieces = split;
+ next unless $pieces[0] =~ /^[A-F0-9]{3,}$/;
+ next unless $pieces[6];
+ next if ($pieces[2] eq "UNDEF");
+ next unless ($pieces[4] eq "External");
+ next if $pieces[6] =~ /^@/;
+ next if $pieces[6] =~ /^\(/;
+ next if $pieces[6] =~ /^__real/;
+ next if $pieces[6] =~ /^__imp/;
+ next if $pieces[6] =~ /NULL_THUNK_DATA$/;
+ next if $pieces[6] =~ /^__IMPORT_DESCRIPTOR/;
+ next if $pieces[6] =~ /^__NULL_IMPORT/;
+ next if $pieces[6] =~ /^\?\?_C/;
- push @def, $pieces[6];
- }
- close(F);
- rename("symbols.out",$symfile);
+ push @def, $pieces[6];
+ }
+ close(F);
+ rename("symbols.out",$symfile);
}
print "\n";
@@ -56,13 +56,13 @@ my $i = 0;
my $last = "";
foreach my $f (sort @def)
{
- next if ($f eq $last);
- $last = $f;
- $f =~ s/^_// unless ($platform eq "x64"); # win64 has new format of exports
- $i++;
+ next if ($f eq $last);
+ $last = $f;
+ $f =~ s/^_// unless ($platform eq "x64"); # win64 has new format of exports
+ $i++;
-	# print DEF " $f \@ $i\n"; # ordinal exports?
- print DEF " $f\n";
+	# print DEF " $f \@ $i\n"; # ordinal exports?
+ print DEF " $f\n";
}
close(DEF);
print "Generated $i symbols\n";
diff --git a/src/tools/msvc/install.pl b/src/tools/msvc/install.pl
index 28563a930d..f27a7b3f16 100755
--- a/src/tools/msvc/install.pl
+++ b/src/tools/msvc/install.pl
@@ -13,6 +13,6 @@ Install($target);
sub Usage
{
- print "Usage: install.pl <targetdir>\n";
- exit(1);
+ print "Usage: install.pl <targetdir>\n";
+ exit(1);
}
diff --git a/src/tools/msvc/pgbison.pl b/src/tools/msvc/pgbison.pl
index c48863aff6..f0c9e26007 100644
--- a/src/tools/msvc/pgbison.pl
+++ b/src/tools/msvc/pgbison.pl
@@ -14,21 +14,21 @@ $bisonver=(split(/\s+/,$bisonver))[3]; # grab version number
unless ($bisonver eq '1.875' || $bisonver ge '2.2')
{
- print "WARNING! Bison install not found, or unsupported Bison version.\n";
- print "echo Attempting to build without.\n";
- exit 0;
+ print "WARNING! Bison install not found, or unsupported Bison version.\n";
+ print "echo Attempting to build without.\n";
+ exit 0;
}
my $input = shift;
if ($input !~ /\.y$/)
{
- print "Input must be a .y file\n";
- exit 1;
+ print "Input must be a .y file\n";
+ exit 1;
}
elsif (!-e $input)
{
- print "Input file $input not found\n";
- exit 1;
+ print "Input file $input not found\n";
+ exit 1;
}
(my $output = $input) =~ s/\.y$/.c/;
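The version gate above accepts exactly 1.875 or anything string-greater-or-equal to 2.2. A quick demonstration with invented version strings; note that ge is a string comparison, so a hypothetical Bison '10.0' would compare less than '2.2':

    use strict;
    use warnings;

    for my $bisonver ('1.875', '2.4', '2.1')
    {
        my $ok = ($bisonver eq '1.875' || $bisonver ge '2.2');
        print "$bisonver: ", ($ok ? "supported" : "unsupported"), "\n";
    }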
diff --git a/src/tools/msvc/pgflex.pl b/src/tools/msvc/pgflex.pl
index 7b9021d2ba..551b8f67ae 100644
--- a/src/tools/msvc/pgflex.pl
+++ b/src/tools/msvc/pgflex.pl
@@ -13,26 +13,26 @@ use File::Basename;
require 'src/tools/msvc/buildenv.pl' if -e 'src/tools/msvc/buildenv.pl';
my ($flexver) = `flex -V`; # grab first line
-$flexver=(split(/\s+/,$flexver))[1];
-$flexver =~ s/[^0-9.]//g;
+$flexver=(split(/\s+/,$flexver))[1];
+$flexver =~ s/[^0-9.]//g;
my @verparts = split(/\./,$flexver);
unless ($verparts[0] == 2 && $verparts[1] == 5 && $verparts[2] >= 31)
{
- print "WARNING! Flex install not found, or unsupported Flex version.\n";
- print "echo Attempting to build without.\n";
- exit 0;
+ print "WARNING! Flex install not found, or unsupported Flex version.\n";
+ print "echo Attempting to build without.\n";
+ exit 0;
}
my $input = shift;
if ($input !~ /\.l$/)
{
- print "Input must be a .l file\n";
- exit 1;
+ print "Input must be a .l file\n";
+ exit 1;
}
elsif (!-e $input)
{
- print "Input file $input not found\n";
- exit 1;
+ print "Input file $input not found\n";
+ exit 1;
}
(my $output = $input) =~ s/\.l$/.c/;
@@ -50,25 +50,25 @@ system("flex $flexflags -o$output $input");
if ($? == 0)
{
- # For non-reentrant scanners we need to fix up the yywrap macro definition
- # to keep the MS compiler happy.
- # For reentrant scanners (like the core scanner) we do not
- # need to (and must not) change the yywrap definition.
- my $lfile;
- open($lfile,$input) || die "opening $input for reading: $!";
- my $lcode = <$lfile>;
- close($lfile);
- if ($lcode !~ /\%option\sreentrant/)
- {
- my $cfile;
- open($cfile,$output) || die "opening $output for reading: $!";
- my $ccode = <$cfile>;
- close($cfile);
- $ccode =~ s/yywrap\(n\)/yywrap()/;
- open($cfile,">$output") || die "opening $output for reading: $!";
- print $cfile $ccode;
- close($cfile);
- }
+ # For non-reentrant scanners we need to fix up the yywrap macro definition
+ # to keep the MS compiler happy.
+ # For reentrant scanners (like the core scanner) we do not
+ # need to (and must not) change the yywrap definition.
+ my $lfile;
+ open($lfile,$input) || die "opening $input for reading: $!";
+ my $lcode = <$lfile>;
+ close($lfile);
+ if ($lcode !~ /\%option\sreentrant/)
+ {
+ my $cfile;
+ open($cfile,$output) || die "opening $output for reading: $!";
+ my $ccode = <$cfile>;
+ close($cfile);
+ $ccode =~ s/yywrap\(n\)/yywrap()/;
+ open($cfile,">$output") || die "opening $output for reading: $!";
+ print $cfile $ccode;
+ close($cfile);
+ }
if ($flexflags =~ /\s-b\s/)
{
my $lexback = "lex.backup";
@@ -84,11 +84,11 @@ if ($? == 0)
unlink $lexback;
}
- exit 0;
+ exit 0;
}
else
{
- exit $? >> 8;
+ exit $? >> 8;
}
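The post-processing step above rewrites flex's yywrap macro only for non-reentrant scanners. The substitution itself, applied to an invented one-line sample of flex output:

    my $ccode = '#define yywrap(n) 1';
    $ccode =~ s/yywrap\(n\)/yywrap()/;
    print "$ccode\n";    # #define yywrap() 1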
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index f81611bc3d..ef7035068b 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -20,19 +20,19 @@ require 'src/tools/msvc/config.pl' if (-f 'src/tools/msvc/config.pl');
# it should contain lines like:
# $ENV{PATH} = "c:/path/to/bison/bin;$ENV{PATH}";
-if ( -e "src/tools/msvc/buildenv.pl")
+if (-e "src/tools/msvc/buildenv.pl")
{
- require "src/tools/msvc/buildenv.pl";
+ require "src/tools/msvc/buildenv.pl";
}
my $what = shift || "";
if ($what =~ /^(check|installcheck|plcheck|contribcheck|ecpgcheck|isolationcheck)$/i)
{
- $what = uc $what;
+ $what = uc $what;
}
else
{
- usage();
+ usage();
}
# use a capital C here because config.pl has $config
@@ -48,8 +48,8 @@ $ENV{PATH} = "../../../$Config/libpq;../../$Config/libpq;$ENV{PATH}";
my $schedule = shift;
unless ($schedule)
{
- $schedule = "serial";
- $schedule = "parallel" if ($what eq 'CHECK' || $what =~ /PARALLEL/);
+ $schedule = "serial";
+ $schedule = "parallel" if ($what eq 'CHECK' || $what =~ /PARALLEL/);
}
my $topdir = getcwd();
@@ -67,12 +67,12 @@ $temp_config = "--temp-config=\"$ENV{TEMP_CONFIG}\""
chdir "src/test/regress";
my %command = (
- CHECK => \&check,
- PLCHECK => \&plcheck,
- INSTALLCHECK => \&installcheck,
- ECPGCHECK => \&ecpgcheck,
- CONTRIBCHECK => \&contribcheck,
- ISOLATIONCHECK => \&isolationcheck,
+ CHECK => \&check,
+ PLCHECK => \&plcheck,
+ INSTALLCHECK => \&installcheck,
+ ECPGCHECK => \&ecpgcheck,
+ CONTRIBCHECK => \&contribcheck,
+ ISOLATIONCHECK => \&isolationcheck,
);
my $proc = $command{$what};
@@ -87,229 +87,229 @@ exit 0;
sub installcheck
{
- my @args = (
- "../../../$Config/pg_regress/pg_regress","--dlpath=.",
- "--psqldir=../../../$Config/psql","--schedule=${schedule}_schedule",
- "--encoding=SQL_ASCII","--no-locale"
- );
- push(@args,$maxconn) if $maxconn;
- system(@args);
- my $status = $? >>8;
- exit $status if $status;
+ my @args = (
+ "../../../$Config/pg_regress/pg_regress","--dlpath=.",
+ "--psqldir=../../../$Config/psql","--schedule=${schedule}_schedule",
+ "--encoding=SQL_ASCII","--no-locale"
+ );
+ push(@args,$maxconn) if $maxconn;
+ system(@args);
+ my $status = $? >>8;
+ exit $status if $status;
}
sub check
{
- my @args = (
- "../../../$Config/pg_regress/pg_regress","--dlpath=.",
- "--psqldir=../../../$Config/psql","--schedule=${schedule}_schedule",
- "--encoding=SQL_ASCII","--no-locale",
- "--temp-install=./tmp_check","--top-builddir=\"$topdir\""
- );
- push(@args,$maxconn) if $maxconn;
- push(@args,$temp_config) if $temp_config;
- system(@args);
- my $status = $? >>8;
- exit $status if $status;
+ my @args = (
+ "../../../$Config/pg_regress/pg_regress","--dlpath=.",
+ "--psqldir=../../../$Config/psql","--schedule=${schedule}_schedule",
+ "--encoding=SQL_ASCII","--no-locale",
+ "--temp-install=./tmp_check","--top-builddir=\"$topdir\""
+ );
+ push(@args,$maxconn) if $maxconn;
+ push(@args,$temp_config) if $temp_config;
+ system(@args);
+ my $status = $? >>8;
+ exit $status if $status;
}
sub ecpgcheck
{
- chdir $startdir;
- system("msbuild ecpg_regression.proj /p:config=$Config");
- my $status = $? >>8;
- exit $status if $status;
- chdir "$topdir/src/interfaces/ecpg/test";
- $schedule="ecpg";
- my @args = (
- "../../../../$Config/pg_regress_ecpg/pg_regress_ecpg",
- "--psqldir=../../../$Config/psql",
- "--dbname=regress1,connectdb",
- "--create-role=connectuser,connectdb",
- "--schedule=${schedule}_schedule",
- "--encoding=SQL_ASCII",
- "--no-locale",
- "--temp-install=./tmp_chk",
- "--top-builddir=\"$topdir\""
- );
- push(@args,$maxconn) if $maxconn;
- system(@args);
- $status = $? >>8;
- exit $status if $status;
+ chdir $startdir;
+ system("msbuild ecpg_regression.proj /p:config=$Config");
+ my $status = $? >>8;
+ exit $status if $status;
+ chdir "$topdir/src/interfaces/ecpg/test";
+ $schedule="ecpg";
+ my @args = (
+ "../../../../$Config/pg_regress_ecpg/pg_regress_ecpg",
+ "--psqldir=../../../$Config/psql",
+ "--dbname=regress1,connectdb",
+ "--create-role=connectuser,connectdb",
+ "--schedule=${schedule}_schedule",
+ "--encoding=SQL_ASCII",
+ "--no-locale",
+ "--temp-install=./tmp_chk",
+ "--top-builddir=\"$topdir\""
+ );
+ push(@args,$maxconn) if $maxconn;
+ system(@args);
+ $status = $? >>8;
+ exit $status if $status;
}
sub isolationcheck
{
- chdir "../isolation";
- copy("../../../$Config/isolationtester/isolationtester.exe",".");
- my @args = (
- "../../../$Config/pg_isolation_regress/pg_isolation_regress",
- "--psqldir=../../../$Config/psql",
- "--inputdir=.","--schedule=./isolation_schedule"
- );
- push(@args,$maxconn) if $maxconn;
- system(@args);
- my $status = $? >>8;
- exit $status if $status;
+ chdir "../isolation";
+ copy("../../../$Config/isolationtester/isolationtester.exe",".");
+ my @args = (
+ "../../../$Config/pg_isolation_regress/pg_isolation_regress",
+ "--psqldir=../../../$Config/psql",
+ "--inputdir=.","--schedule=./isolation_schedule"
+ );
+ push(@args,$maxconn) if $maxconn;
+ system(@args);
+ my $status = $? >>8;
+ exit $status if $status;
}
sub plcheck
{
- chdir "../../pl";
-
- foreach my $pl (glob("*"))
- {
- next unless -d "$pl/sql" && -d "$pl/expected";
- my $lang = $pl eq 'tcl' ? 'pltcl' : $pl;
- next unless -d "../../$Config/$lang";
- $lang = 'plpythonu' if $lang eq 'plpython';
- my @lang_args = ("--load-extension=$lang");
- chdir $pl;
- my @tests = fetchTests();
- if ($lang eq 'plperl')
- {
-
- # run both trusted and untrusted perl tests
- push(@lang_args, "--load-extension=plperlu");
-
-		# assume we're using this perl to build postgres
- # test if we can run two interpreters in one backend, and if so
- # run the trusted/untrusted interaction tests
- use Config;
- if ($Config{usemultiplicity} eq 'define')
- {
- push(@tests,'plperl_plperlu');
- }
- }
- print "============================================================\n";
- print "Checking $lang\n";
- my @args = (
- "../../../$Config/pg_regress/pg_regress",
- "--psqldir=../../../$Config/psql",
- "--dbname=pl_regression",@lang_args,@tests
- );
- system(@args);
- my $status = $? >> 8;
- exit $status if $status;
- chdir "..";
- }
-
- chdir "../../..";
+ chdir "../../pl";
+
+ foreach my $pl (glob("*"))
+ {
+ next unless -d "$pl/sql" && -d "$pl/expected";
+ my $lang = $pl eq 'tcl' ? 'pltcl' : $pl;
+ next unless -d "../../$Config/$lang";
+ $lang = 'plpythonu' if $lang eq 'plpython';
+ my @lang_args = ("--load-extension=$lang");
+ chdir $pl;
+ my @tests = fetchTests();
+ if ($lang eq 'plperl')
+ {
+
+ # run both trusted and untrusted perl tests
+ push(@lang_args, "--load-extension=plperlu");
+
+		# assume we're using this perl to build postgres
+ # test if we can run two interpreters in one backend, and if so
+ # run the trusted/untrusted interaction tests
+ use Config;
+ if ($Config{usemultiplicity} eq 'define')
+ {
+ push(@tests,'plperl_plperlu');
+ }
+ }
+ print "============================================================\n";
+ print "Checking $lang\n";
+ my @args = (
+ "../../../$Config/pg_regress/pg_regress",
+ "--psqldir=../../../$Config/psql",
+ "--dbname=pl_regression",@lang_args,@tests
+ );
+ system(@args);
+ my $status = $? >> 8;
+ exit $status if $status;
+ chdir "..";
+ }
+
+ chdir "../../..";
}
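The multiplicity probe in plcheck above asks the running perl whether one backend can host two interpreters. A standalone version of that check (the // '' guard is added here to stay warning-clean if the key is absent):

    use strict;
    use warnings;
    use Config;

    print "multiplicity: ",
      (($Config{usemultiplicity} // '') eq 'define' ? "yes" : "no"), "\n";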
sub contribcheck
{
- chdir "../../../contrib";
- my $mstat = 0;
- foreach my $module (glob("*"))
- {
- next if ($module eq 'sepgsql');
- next if ($module eq 'xml2' && !$config->{xml});
- next
- unless -d "$module/sql"
- &&-d "$module/expected"
- &&(-f "$module/GNUmakefile" || -f "$module/Makefile");
- chdir $module;
- print "============================================================\n";
- print "Checking $module\n";
- my @tests = fetchTests();
- my @opts = fetchRegressOpts();
- my @args = (
- "../../$Config/pg_regress/pg_regress",
- "--psqldir=../../$Config/psql",
- "--dbname=contrib_regression",@opts,@tests
- );
- system(@args);
- my $status = $? >> 8;
- $mstat ||= $status;
- chdir "..";
- }
- exit $mstat if $mstat;
+ chdir "../../../contrib";
+ my $mstat = 0;
+ foreach my $module (glob("*"))
+ {
+ next if ($module eq 'sepgsql');
+ next if ($module eq 'xml2' && !$config->{xml});
+ next
+ unless -d "$module/sql"
+ &&-d "$module/expected"
+ &&(-f "$module/GNUmakefile" || -f "$module/Makefile");
+ chdir $module;
+ print "============================================================\n";
+ print "Checking $module\n";
+ my @tests = fetchTests();
+ my @opts = fetchRegressOpts();
+ my @args = (
+ "../../$Config/pg_regress/pg_regress",
+ "--psqldir=../../$Config/psql",
+ "--dbname=contrib_regression",@opts,@tests
+ );
+ system(@args);
+ my $status = $? >> 8;
+ $mstat ||= $status;
+ chdir "..";
+ }
+ exit $mstat if $mstat;
}
sub fetchRegressOpts
{
- my $handle;
- open($handle,"<GNUmakefile")
- || open($handle,"<Makefile")
- || die "Could not open Makefile";
- local($/) = undef;
- my $m = <$handle>;
- close($handle);
- my @opts;
- if ($m =~ /^\s*REGRESS_OPTS\s*=(.*)/m)
- {
-
- # ignore options that use makefile variables - can't handle those
-		# ignore anything that isn't an option starting with --
- @opts = grep { $_ !~ /\$\(/ && $_ =~ /^--/ } split(/\s+/,$1);
- }
- if ($m =~ /^\s*ENCODING\s*=\s*(\S+)/m)
- {
- push @opts, "--encoding=$1";
- }
- if ($m =~ /^\s*NO_LOCALE\s*=\s*\S+/m)
- {
- push @opts, "--no-locale";
- }
- return @opts;
+ my $handle;
+ open($handle,"<GNUmakefile")
+ || open($handle,"<Makefile")
+ || die "Could not open Makefile";
+ local($/) = undef;
+ my $m = <$handle>;
+ close($handle);
+ my @opts;
+ if ($m =~ /^\s*REGRESS_OPTS\s*=(.*)/m)
+ {
+
+ # ignore options that use makefile variables - can't handle those
+		# ignore anything that isn't an option starting with --
+ @opts = grep { $_ !~ /\$\(/ && $_ =~ /^--/ } split(/\s+/,$1);
+ }
+ if ($m =~ /^\s*ENCODING\s*=\s*(\S+)/m)
+ {
+ push @opts, "--encoding=$1";
+ }
+ if ($m =~ /^\s*NO_LOCALE\s*=\s*\S+/m)
+ {
+ push @opts, "--no-locale";
+ }
+ return @opts;
}
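A standalone sketch of the Makefile scraping fetchRegressOpts does above, run on an invented makefile fragment; note how make-variable references and non-option words are filtered out:

    use strict;
    use warnings;

    my $m =
      "REGRESS_OPTS = --dlpath=\$(top_builddir) --no-locale extra\nENCODING = UTF8\n";
    my @opts;
    if ($m =~ /^\s*REGRESS_OPTS\s*=(.*)/m)
    {
        @opts = grep { $_ !~ /\$\(/ && $_ =~ /^--/ } split(/\s+/, $1);
    }
    push @opts, "--encoding=$1" if ($m =~ /^\s*ENCODING\s*=\s*(\S+)/m);
    print join(' ', @opts), "\n";    # --no-locale --encoding=UTF8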
sub fetchTests
{
- my $handle;
- open($handle,"<GNUmakefile")
- || open($handle,"<Makefile")
- || die "Could not open Makefile";
- local($/) = undef;
- my $m = <$handle>;
- close($handle);
- my $t = "";
-
- $m =~ s/\\[\r\n]*//gs;
- if ($m =~ /^REGRESS\s*=\s*(.*)$/gm)
- {
- $t = $1;
- $t =~ s/\s+/ /g;
-
- if ($m =~ /contrib\/pgcrypto/)
- {
-
- # pgcrypto is special since the tests depend on the
- # configuration of the build
-
- my $cftests =
- $config->{openssl}
- ?GetTests("OSSL_TESTS",$m)
- : GetTests("INT_TESTS",$m);
- my $pgptests =
- $config->{zlib}
- ?GetTests("ZLIB_TST",$m)
- : GetTests("ZLIB_OFF_TST",$m);
- $t =~ s/\$\(CF_TESTS\)/$cftests/;
- $t =~ s/\$\(CF_PGP_TESTS\)/$pgptests/;
- }
- }
-
- return split(/\s+/,$t);
+ my $handle;
+ open($handle,"<GNUmakefile")
+ || open($handle,"<Makefile")
+ || die "Could not open Makefile";
+ local($/) = undef;
+ my $m = <$handle>;
+ close($handle);
+ my $t = "";
+
+ $m =~ s/\\[\r\n]*//gs;
+ if ($m =~ /^REGRESS\s*=\s*(.*)$/gm)
+ {
+ $t = $1;
+ $t =~ s/\s+/ /g;
+
+ if ($m =~ /contrib\/pgcrypto/)
+ {
+
+ # pgcrypto is special since the tests depend on the
+ # configuration of the build
+
+ my $cftests =
+ $config->{openssl}
+ ?GetTests("OSSL_TESTS",$m)
+ : GetTests("INT_TESTS",$m);
+ my $pgptests =
+ $config->{zlib}
+ ?GetTests("ZLIB_TST",$m)
+ : GetTests("ZLIB_OFF_TST",$m);
+ $t =~ s/\$\(CF_TESTS\)/$cftests/;
+ $t =~ s/\$\(CF_PGP_TESTS\)/$pgptests/;
+ }
+ }
+
+ return split(/\s+/,$t);
}
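Likewise, a sketch of the two key moves in fetchTests above: splicing backslash-continued makefile lines back together, then pulling out the REGRESS list. The fragment is invented:

    use strict;
    use warnings;

    my $m = "REGRESS = init \\\n\ttest1 test2\n";
    $m =~ s/\\[\r\n]*//gs;    # undo makefile line continuations
    if ($m =~ /^REGRESS\s*=\s*(.*)$/gm)
    {
        (my $t = $1) =~ s/\s+/ /g;
        print "$t\n";    # init test1 test2
    }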
sub GetTests
{
- my $testname = shift;
- my $m = shift;
- if ($m =~ /^$testname\s*=\s*(.*)$/gm)
- {
- return $1;
- }
- return "";
+ my $testname = shift;
+ my $m = shift;
+ if ($m =~ /^$testname\s*=\s*(.*)$/gm)
+ {
+ return $1;
+ }
+ return "";
}
sub usage
{
- print STDERR
- "Usage: vcregress.pl ",
- "<check|installcheck|plcheck|contribcheck|ecpgcheck> [schedule]\n";
- exit(1);
+ print STDERR
+ "Usage: vcregress.pl ",
+ "<check|installcheck|plcheck|contribcheck|ecpgcheck> [schedule]\n";
+ exit(1);
}
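For reference, a couple of illustrative invocations of the driver; the schedule argument is optional and defaults as shown above:

    perl vcregress.pl check parallel
    perl vcregress.pl contribcheck
    perl vcregress.pl isolationcheck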