pgindent run before PG 9.1 beta 1.
author     Bruce Momjian <bruce@momjian.us>
Sun, 10 Apr 2011 15:42:00 +0000 (11:42 -0400)
committer  Bruce Momjian <bruce@momjian.us>
Sun, 10 Apr 2011 15:42:00 +0000 (11:42 -0400)
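A minimal sketch (not from this commit; ExampleState is an invented struct used only for illustration) of the declaration layout that pgindent normalizes toward, which is the pattern visible throughout the hunks below: type names in a fixed-width column, the pointer star attached to the variable name, and trailing comments sharing a column.

    /*
     * Hypothetical illustration of pgindent-style declaration layout;
     * ExampleState is not part of this commit.
     */
    typedef struct ExampleState
    {
        char       *filename;      /* '*' hugs the variable name */
        int         tuple_width;   /* scalar names share a column */
        double      ntuples;       /* trailing comments share a column */
    } ExampleState;
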
446 files changed:
contrib/adminpack/adminpack.c
contrib/auth_delay/auth_delay.c
contrib/btree_gist/btree_cash.c
contrib/btree_gist/btree_date.c
contrib/btree_gist/btree_float4.c
contrib/btree_gist/btree_float8.c
contrib/btree_gist/btree_int2.c
contrib/btree_gist/btree_int4.c
contrib/btree_gist/btree_int8.c
contrib/btree_gist/btree_interval.c
contrib/btree_gist/btree_oid.c
contrib/btree_gist/btree_time.c
contrib/btree_gist/btree_ts.c
contrib/btree_gist/btree_utils_num.c
contrib/btree_gist/btree_utils_num.h
contrib/btree_gist/btree_utils_var.c
contrib/dummy_seclabel/dummy_seclabel.c
contrib/file_fdw/file_fdw.c
contrib/fuzzystrmatch/levenshtein.c
contrib/hstore/hstore_gin.c
contrib/hstore/hstore_op.c
contrib/intarray/_int_bool.c
contrib/intarray/_int_gin.c
contrib/intarray/_int_tool.c
contrib/isn/ISBN.h
contrib/pg_archivecleanup/pg_archivecleanup.c
contrib/pg_stat_statements/pg_stat_statements.c
contrib/pg_test_fsync/pg_test_fsync.c
contrib/pg_trgm/trgm.h
contrib/pg_trgm/trgm_gin.c
contrib/pg_trgm/trgm_gist.c
contrib/pg_trgm/trgm_op.c
contrib/pg_upgrade/check.c
contrib/pg_upgrade/controldata.c
contrib/pg_upgrade/exec.c
contrib/pg_upgrade/file.c
contrib/pg_upgrade/function.c
contrib/pg_upgrade/info.c
contrib/pg_upgrade/pg_upgrade.c
contrib/pg_upgrade/pg_upgrade.h
contrib/pg_upgrade/relfilenode.c
contrib/pg_upgrade/server.c
contrib/pg_upgrade/tablespace.c
contrib/pg_upgrade/util.c
contrib/pg_upgrade/version_old_8_3.c
contrib/pg_upgrade_support/pg_upgrade_support.c
contrib/pgbench/pgbench.c
contrib/seg/seg.c
contrib/sepgsql/dml.c
contrib/sepgsql/hooks.c
contrib/sepgsql/label.c
contrib/sepgsql/proc.c
contrib/sepgsql/relation.c
contrib/sepgsql/schema.c
contrib/sepgsql/selinux.c
contrib/sepgsql/sepgsql.h
contrib/spi/moddatetime.c
contrib/xml2/xpath.c
contrib/xml2/xslt_proc.c
src/backend/access/common/heaptuple.c
src/backend/access/common/indextuple.c
src/backend/access/gin/ginarrayproc.c
src/backend/access/gin/ginbulk.c
src/backend/access/gin/gindatapage.c
src/backend/access/gin/ginentrypage.c
src/backend/access/gin/ginfast.c
src/backend/access/gin/ginget.c
src/backend/access/gin/gininsert.c
src/backend/access/gin/ginscan.c
src/backend/access/gin/ginutil.c
src/backend/access/gin/ginvacuum.c
src/backend/access/gin/ginxlog.c
src/backend/access/gist/gist.c
src/backend/access/gist/gistget.c
src/backend/access/gist/gistproc.c
src/backend/access/gist/gistscan.c
src/backend/access/gist/gistutil.c
src/backend/access/gist/gistxlog.c
src/backend/access/hash/hash.c
src/backend/access/heap/heapam.c
src/backend/access/heap/hio.c
src/backend/access/heap/rewriteheap.c
src/backend/access/index/indexam.c
src/backend/access/nbtree/nbtinsert.c
src/backend/access/nbtree/nbtpage.c
src/backend/access/nbtree/nbtree.c
src/backend/access/nbtree/nbtsearch.c
src/backend/access/nbtree/nbtsort.c
src/backend/access/nbtree/nbtutils.c
src/backend/access/transam/twophase.c
src/backend/access/transam/varsup.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/catalog/aclchk.c
src/backend/catalog/catalog.c
src/backend/catalog/dependency.c
src/backend/catalog/heap.c
src/backend/catalog/index.c
src/backend/catalog/namespace.c
src/backend/catalog/objectaddress.c
src/backend/catalog/pg_collation.c
src/backend/catalog/pg_constraint.c
src/backend/catalog/pg_depend.c
src/backend/catalog/pg_enum.c
src/backend/catalog/pg_proc.c
src/backend/catalog/pg_type.c
src/backend/catalog/storage.c
src/backend/catalog/toasting.c
src/backend/commands/alter.c
src/backend/commands/analyze.c
src/backend/commands/cluster.c
src/backend/commands/collationcmds.c
src/backend/commands/comment.c
src/backend/commands/conversioncmds.c
src/backend/commands/copy.c
src/backend/commands/dbcommands.c
src/backend/commands/explain.c
src/backend/commands/extension.c
src/backend/commands/foreigncmds.c
src/backend/commands/functioncmds.c
src/backend/commands/indexcmds.c
src/backend/commands/opclasscmds.c
src/backend/commands/operatorcmds.c
src/backend/commands/portalcmds.c
src/backend/commands/prepare.c
src/backend/commands/seclabel.c
src/backend/commands/sequence.c
src/backend/commands/tablecmds.c
src/backend/commands/tablespace.c
src/backend/commands/trigger.c
src/backend/commands/tsearchcmds.c
src/backend/commands/typecmds.c
src/backend/commands/user.c
src/backend/commands/vacuum.c
src/backend/commands/vacuumlazy.c
src/backend/commands/variable.c
src/backend/commands/view.c
src/backend/executor/execMain.c
src/backend/executor/execQual.c
src/backend/executor/execUtils.c
src/backend/executor/functions.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeBitmapIndexscan.c
src/backend/executor/nodeForeignscan.c
src/backend/executor/nodeHash.c
src/backend/executor/nodeHashjoin.c
src/backend/executor/nodeIndexscan.c
src/backend/executor/nodeLimit.c
src/backend/executor/nodeLockRows.c
src/backend/executor/nodeMergeAppend.c
src/backend/executor/nodeMergejoin.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeNestloop.c
src/backend/executor/nodeRecursiveunion.c
src/backend/executor/nodeSetOp.c
src/backend/executor/nodeWindowAgg.c
src/backend/executor/spi.c
src/backend/libpq/auth.c
src/backend/libpq/hba.c
src/backend/libpq/pqcomm.c
src/backend/main/main.c
src/backend/nodes/copyfuncs.c
src/backend/nodes/nodeFuncs.c
src/backend/nodes/params.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/path/equivclass.c
src/backend/optimizer/path/indxpath.c
src/backend/optimizer/path/joinpath.c
src/backend/optimizer/path/joinrels.c
src/backend/optimizer/path/pathkeys.c
src/backend/optimizer/plan/analyzejoins.c
src/backend/optimizer/plan/createplan.c
src/backend/optimizer/plan/initsplan.c
src/backend/optimizer/plan/planagg.c
src/backend/optimizer/plan/planmain.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/prep/prepjointree.c
src/backend/optimizer/prep/prepqual.c
src/backend/optimizer/prep/preptlist.c
src/backend/optimizer/prep/prepunion.c
src/backend/optimizer/util/clauses.c
src/backend/optimizer/util/pathnode.c
src/backend/optimizer/util/placeholder.c
src/backend/optimizer/util/predtest.c
src/backend/optimizer/util/var.c
src/backend/parser/analyze.c
src/backend/parser/parse_agg.c
src/backend/parser/parse_clause.c
src/backend/parser/parse_coerce.c
src/backend/parser/parse_collate.c
src/backend/parser/parse_cte.c
src/backend/parser/parse_expr.c
src/backend/parser/parse_func.c
src/backend/parser/parse_node.c
src/backend/parser/parse_oper.c
src/backend/parser/parse_param.c
src/backend/parser/parse_relation.c
src/backend/parser/parse_target.c
src/backend/parser/parse_utilcmd.c
src/backend/port/dynloader/freebsd.c
src/backend/port/dynloader/netbsd.c
src/backend/port/dynloader/openbsd.c
src/backend/port/pipe.c
src/backend/port/sysv_shmem.c
src/backend/port/unix_latch.c
src/backend/port/win32/crashdump.c
src/backend/port/win32/timer.c
src/backend/port/win32_latch.c
src/backend/postmaster/autovacuum.c
src/backend/postmaster/bgwriter.c
src/backend/postmaster/pgstat.c
src/backend/postmaster/postmaster.c
src/backend/postmaster/syslogger.c
src/backend/regex/regcomp.c
src/backend/replication/basebackup.c
src/backend/replication/syncrep.c
src/backend/replication/walreceiver.c
src/backend/replication/walreceiverfuncs.c
src/backend/replication/walsender.c
src/backend/rewrite/rewriteDefine.c
src/backend/rewrite/rewriteHandler.c
src/backend/rewrite/rewriteSupport.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/buffer/freelist.c
src/backend/storage/buffer/localbuf.c
src/backend/storage/file/fd.c
src/backend/storage/file/reinit.c
src/backend/storage/ipc/pmsignal.c
src/backend/storage/ipc/procarray.c
src/backend/storage/ipc/standby.c
src/backend/storage/large_object/inv_api.c
src/backend/storage/lmgr/lock.c
src/backend/storage/lmgr/predicate.c
src/backend/storage/smgr/md.c
src/backend/storage/smgr/smgr.c
src/backend/tcop/postgres.c
src/backend/tcop/pquery.c
src/backend/tcop/utility.c
src/backend/tsearch/spell.c
src/backend/tsearch/ts_locale.c
src/backend/tsearch/ts_selfuncs.c
src/backend/tsearch/wparser_def.c
src/backend/utils/adt/acl.c
src/backend/utils/adt/arrayfuncs.c
src/backend/utils/adt/cash.c
src/backend/utils/adt/date.c
src/backend/utils/adt/datetime.c
src/backend/utils/adt/dbsize.c
src/backend/utils/adt/enum.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/genfile.c
src/backend/utils/adt/like.c
src/backend/utils/adt/nabstime.c
src/backend/utils/adt/network.c
src/backend/utils/adt/numeric.c
src/backend/utils/adt/numutils.c
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/pgstatfuncs.c
src/backend/utils/adt/ri_triggers.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/selfuncs.c
src/backend/utils/adt/tsginidx.c
src/backend/utils/adt/tsvector_op.c
src/backend/utils/adt/varbit.c
src/backend/utils/adt/varlena.c
src/backend/utils/adt/xml.c
src/backend/utils/cache/inval.c
src/backend/utils/cache/lsyscache.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/syscache.c
src/backend/utils/cache/ts_cache.c
src/backend/utils/cache/typcache.c
src/backend/utils/error/elog.c
src/backend/utils/fmgr/fmgr.c
src/backend/utils/fmgr/funcapi.c
src/backend/utils/init/miscinit.c
src/backend/utils/init/postinit.c
src/backend/utils/mb/mbutils.c
src/backend/utils/misc/guc.c
src/backend/utils/misc/rbtree.c
src/backend/utils/misc/tzparser.c
src/backend/utils/mmgr/aset.c
src/backend/utils/mmgr/portalmem.c
src/backend/utils/resowner/resowner.c
src/backend/utils/sort/tuplesort.c
src/backend/utils/time/snapmgr.c
src/bin/initdb/initdb.c
src/bin/pg_basebackup/pg_basebackup.c
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_dump/compress_io.c
src/bin/pg_dump/compress_io.h
src/bin/pg_dump/dumputils.c
src/bin/pg_dump/dumputils.h
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_archiver.h
src/bin/pg_dump/pg_backup_custom.c
src/bin/pg_dump/pg_backup_directory.c
src/bin/pg_dump/pg_backup_tar.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dump.h
src/bin/pg_dump/pg_dumpall.c
src/bin/psql/command.c
src/bin/psql/describe.c
src/bin/psql/tab-complete.c
src/bin/scripts/droplang.c
src/include/access/gin.h
src/include/access/gin_private.h
src/include/access/gist.h
src/include/access/gist_private.h
src/include/access/hio.h
src/include/access/htup.h
src/include/access/itup.h
src/include/access/relscan.h
src/include/access/tupdesc.h
src/include/access/xact.h
src/include/access/xlog.h
src/include/access/xlogdefs.h
src/include/catalog/catalog.h
src/include/catalog/dependency.h
src/include/catalog/namespace.h
src/include/catalog/objectaccess.h
src/include/catalog/pg_am.h
src/include/catalog/pg_amop.h
src/include/catalog/pg_authid.h
src/include/catalog/pg_class.h
src/include/catalog/pg_collation.h
src/include/catalog/pg_collation_fn.h
src/include/catalog/pg_constraint.h
src/include/catalog/pg_control.h
src/include/catalog/pg_enum.h
src/include/catalog/pg_extension.h
src/include/catalog/pg_index.h
src/include/catalog/pg_operator.h
src/include/catalog/pg_proc.h
src/include/catalog/pg_seclabel.h
src/include/catalog/pg_type.h
src/include/catalog/storage.h
src/include/commands/alter.h
src/include/commands/collationcmds.h
src/include/commands/copy.h
src/include/commands/dbcommands.h
src/include/commands/defrem.h
src/include/commands/explain.h
src/include/commands/extension.h
src/include/commands/proclang.h
src/include/commands/seclabel.h
src/include/commands/trigger.h
src/include/commands/typecmds.h
src/include/commands/vacuum.h
src/include/executor/executor.h
src/include/executor/functions.h
src/include/executor/hashjoin.h
src/include/executor/nodeHash.h
src/include/fmgr.h
src/include/foreign/fdwapi.h
src/include/foreign/foreign.h
src/include/libpq/auth.h
src/include/libpq/libpq-be.h
src/include/libpq/libpq.h
src/include/nodes/execnodes.h
src/include/nodes/makefuncs.h
src/include/nodes/params.h
src/include/nodes/parsenodes.h
src/include/nodes/pg_list.h
src/include/nodes/plannodes.h
src/include/nodes/primnodes.h
src/include/nodes/relation.h
src/include/optimizer/cost.h
src/include/optimizer/pathnode.h
src/include/optimizer/paths.h
src/include/optimizer/placeholder.h
src/include/optimizer/subselect.h
src/include/parser/parse_collate.h
src/include/parser/parse_func.h
src/include/parser/parse_type.h
src/include/parser/parser.h
src/include/pgstat.h
src/include/port/win32.h
src/include/replication/replnodes.h
src/include/replication/syncrep.h
src/include/replication/walprotocol.h
src/include/replication/walreceiver.h
src/include/replication/walsender.h
src/include/rewrite/rewriteSupport.h
src/include/storage/backendid.h
src/include/storage/buf_internals.h
src/include/storage/latch.h
src/include/storage/pmsignal.h
src/include/storage/predicate_internals.h
src/include/storage/proc.h
src/include/storage/procarray.h
src/include/storage/relfilenode.h
src/include/tsearch/dicts/spell.h
src/include/utils/acl.h
src/include/utils/builtins.h
src/include/utils/bytea.h
src/include/utils/datetime.h
src/include/utils/elog.h
src/include/utils/guc.h
src/include/utils/lsyscache.h
src/include/utils/numeric.h
src/include/utils/portal.h
src/include/utils/rbtree.h
src/include/utils/rel.h
src/include/utils/tuplesort.h
src/include/utils/typcache.h
src/include/utils/varbit.h
src/include/utils/xml.h
src/interfaces/ecpg/compatlib/informix.c
src/interfaces/ecpg/ecpglib/connect.c
src/interfaces/ecpg/ecpglib/memory.c
src/interfaces/ecpg/ecpglib/prepare.c
src/interfaces/ecpg/preproc/ecpg.c
src/interfaces/ecpg/preproc/extern.h
src/interfaces/libpq/fe-auth.c
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/fe-exec.c
src/interfaces/libpq/fe-protocol2.c
src/interfaces/libpq/fe-protocol3.c
src/interfaces/libpq/fe-secure.c
src/interfaces/libpq/libpq-fe.h
src/interfaces/libpq/libpq-int.h
src/pl/plperl/plperl.c
src/pl/plperl/plperl.h
src/pl/plperl/plperl_helpers.h
src/pl/plpgsql/src/pl_comp.c
src/pl/plpgsql/src/pl_exec.c
src/pl/plpgsql/src/plpgsql.h
src/pl/plpython/plpython.c
src/pl/tcl/pltcl.c
src/port/chklocale.c
src/port/crypt.c
src/port/dirmod.c
src/port/exec.c
src/port/getaddrinfo.c
src/port/inet_net_ntop.c
src/port/path.c
src/port/pgmkdirp.c
src/port/snprintf.c
src/port/unsetenv.c
src/test/isolation/isolation_main.c
src/test/isolation/isolationtester.c
src/test/isolation/isolationtester.h
src/test/regress/pg_regress.c

contrib/adminpack/adminpack.c
index c149dd6c6352587706067fbc5145f32d856e6f3e..99fa02e81365f0ba04d1a642b6862bc415b1da91 100644 (file)
@@ -78,18 +78,19 @@ convert_and_check_filename(text *arg, bool logAllowed)
                /* Disallow '/a/b/data/..' */
                if (path_contains_parent_reference(filename))
                        ereport(ERROR,
-                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                               (errmsg("reference to parent directory (\"..\") not allowed"))));
+                                       (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                       (errmsg("reference to parent directory (\"..\") not allowed"))));
+
                /*
-                *      Allow absolute paths if within DataDir or Log_directory, even
-                *      though Log_directory might be outside DataDir.
+                * Allow absolute paths if within DataDir or Log_directory, even
+                * though Log_directory might be outside DataDir.
                 */
                if (!path_is_prefix_of_path(DataDir, filename) &&
                        (!logAllowed || !is_absolute_path(Log_directory) ||
                         !path_is_prefix_of_path(Log_directory, filename)))
                        ereport(ERROR,
-                               (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                (errmsg("absolute path not allowed"))));
+                                       (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+                                        (errmsg("absolute path not allowed"))));
        }
        else if (!path_is_relative_and_below_cwd(filename))
                ereport(ERROR,
contrib/auth_delay/auth_delay.c
index ca388c4498414d86cb0fbd889ef1f851e9f93ddf..4e0d5959d19ca9852b60386d995b25af41a3ef8d 100644 (file)
 
 PG_MODULE_MAGIC;
 
-void _PG_init(void);
+void           _PG_init(void);
 
 /* GUC Variables */
 static int     auth_delay_milliseconds;
 
 /* Original Hook */
-static ClientAuthentication_hook_type  original_client_auth_hook = NULL;
+static ClientAuthentication_hook_type original_client_auth_hook = NULL;
 
 /*
  * Check authentication
@@ -55,7 +55,7 @@ _PG_init(void)
 {
        /* Define custom GUC variables */
        DefineCustomIntVariable("auth_delay.milliseconds",
-                                                       "Milliseconds to delay before reporting authentication failure",
+                        "Milliseconds to delay before reporting authentication failure",
                                                        NULL,
                                                        &auth_delay_milliseconds,
                                                        0,
contrib/btree_gist/btree_cash.c
index 7938a70f17a120c1e181df0804b558b6608bef22..2664a2687057d4b001839043480c204952dcb43e 100644 (file)
@@ -169,7 +169,7 @@ gbt_cash_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_date.c
index ccd7e2ad3f383a3fef458454b3bbe5a05fb1cca7..8a675e2f1d884a5e0625c3f00f3ea4715622ba54 100644 (file)
@@ -90,9 +90,9 @@ static float8
 gdb_date_dist(const void *a, const void *b)
 {
        /* we assume the difference can't overflow */
-       Datum diff = DirectFunctionCall2(date_mi,
+       Datum           diff = DirectFunctionCall2(date_mi,
                                                                         DateADTGetDatum(*((const DateADT *) a)),
-                                                                        DateADTGetDatum(*((const DateADT *) b)));
+                                                                       DateADTGetDatum(*((const DateADT *) b)));
 
        return (float8) Abs(DatumGetInt32(diff));
 }
@@ -113,14 +113,14 @@ static const gbtree_ninfo tinfo =
 
 
 PG_FUNCTION_INFO_V1(date_dist);
-Datum       date_dist(PG_FUNCTION_ARGS);
+Datum          date_dist(PG_FUNCTION_ARGS);
 Datum
 date_dist(PG_FUNCTION_ARGS)
 {
        /* we assume the difference can't overflow */
-       Datum diff = DirectFunctionCall2(date_mi,
-                                                                        PG_GETARG_DATUM(0),
-                                                                        PG_GETARG_DATUM(1));
+       Datum           diff = DirectFunctionCall2(date_mi,
+                                                                                  PG_GETARG_DATUM(0),
+                                                                                  PG_GETARG_DATUM(1));
 
        PG_RETURN_INT32(Abs(DatumGetInt32(diff)));
 }
@@ -181,7 +181,7 @@ gbt_date_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_float4.c
index 932a941f889e4b9d3e91fd4f58ad7654221c7187..266256b23cf973cdaebec5b5160164baca5b66a0 100644 (file)
@@ -94,18 +94,18 @@ static const gbtree_ninfo tinfo =
 
 
 PG_FUNCTION_INFO_V1(float4_dist);
-Datum       float4_dist(PG_FUNCTION_ARGS);
+Datum          float4_dist(PG_FUNCTION_ARGS);
 Datum
 float4_dist(PG_FUNCTION_ARGS)
 {
-    float4             a = PG_GETARG_FLOAT4(0);
+       float4          a = PG_GETARG_FLOAT4(0);
        float4          b = PG_GETARG_FLOAT4(1);
        float4          r;
 
        r = a - b;
        CHECKFLOATVAL(r, isinf(a) || isinf(b), true);
 
-       PG_RETURN_FLOAT4( Abs(r) );
+       PG_RETURN_FLOAT4(Abs(r));
 }
 
 
@@ -162,7 +162,7 @@ gbt_float4_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_float8.c
index 0c39980ba1e0ee456c05a0d7644364b5b11bd8b2..efbee0f3e4b66531310bb6f98a8eec991f2954b3 100644 (file)
@@ -76,8 +76,8 @@ gbt_float8key_cmp(const void *a, const void *b)
 static float8
 gbt_float8_dist(const void *a, const void *b)
 {
-       float8          arg1 = *(const float8 *)a;
-       float8          arg2 = *(const float8 *)b;
+       float8          arg1 = *(const float8 *) a;
+       float8          arg2 = *(const float8 *) b;
        float8          r;
 
        r = arg1 - arg2;
@@ -102,7 +102,7 @@ static const gbtree_ninfo tinfo =
 
 
 PG_FUNCTION_INFO_V1(float8_dist);
-Datum       float8_dist(PG_FUNCTION_ARGS);
+Datum          float8_dist(PG_FUNCTION_ARGS);
 Datum
 float8_dist(PG_FUNCTION_ARGS)
 {
@@ -113,7 +113,7 @@ float8_dist(PG_FUNCTION_ARGS)
        r = a - b;
        CHECKFLOATVAL(r, isinf(a) || isinf(b), true);
 
-       PG_RETURN_FLOAT8( Abs(r) );
+       PG_RETURN_FLOAT8(Abs(r));
 }
 
 /**************************************************
@@ -169,7 +169,7 @@ gbt_float8_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_int2.c
index c06d170a5e1af5c272fff56d0d775bdc47ed5ffc..7841145b53f6e49da3d8f9e13918bd1358023f8f 100644 (file)
@@ -94,12 +94,12 @@ static const gbtree_ninfo tinfo =
 
 
 PG_FUNCTION_INFO_V1(int2_dist);
-Datum       int2_dist(PG_FUNCTION_ARGS);
+Datum          int2_dist(PG_FUNCTION_ARGS);
 Datum
 int2_dist(PG_FUNCTION_ARGS)
 {
-       int2        a = PG_GETARG_INT16(0);
-       int2        b = PG_GETARG_INT16(1);
+       int2            a = PG_GETARG_INT16(0);
+       int2            b = PG_GETARG_INT16(1);
        int2            r;
        int2            ra;
 
@@ -169,7 +169,7 @@ gbt_int2_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_int4.c
index ef7af524e76bdfab212fd332b8bd3b0778237ba6..0e4b4f85b0eb448ce004e2863b7b9cdc79c8b40d 100644 (file)
@@ -95,14 +95,14 @@ static const gbtree_ninfo tinfo =
 
 
 PG_FUNCTION_INFO_V1(int4_dist);
-Datum       int4_dist(PG_FUNCTION_ARGS);
+Datum          int4_dist(PG_FUNCTION_ARGS);
 Datum
 int4_dist(PG_FUNCTION_ARGS)
 {
-       int4        a = PG_GETARG_INT32(0);
-       int4        b = PG_GETARG_INT32(1);
-       int4        r;
-       int4        ra;
+       int4            a = PG_GETARG_INT32(0);
+       int4            b = PG_GETARG_INT32(1);
+       int4            r;
+       int4            ra;
 
        r = a - b;
        ra = Abs(r);
@@ -111,7 +111,7 @@ int4_dist(PG_FUNCTION_ARGS)
        if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a)))
                ereport(ERROR,
                                (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
-                                                errmsg("integer out of range")));
+                                errmsg("integer out of range")));
 
        PG_RETURN_INT32(ra);
 }
@@ -170,7 +170,7 @@ gbt_int4_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_int8.c
index 1f14d82891d8c47cd59487d2fd8a1d41a4054e96..d54113d3936660a1b5ab5b5b8a5d700b71e09551 100644 (file)
@@ -95,14 +95,14 @@ static const gbtree_ninfo tinfo =
 
 
 PG_FUNCTION_INFO_V1(int8_dist);
-Datum       int8_dist(PG_FUNCTION_ARGS);
+Datum          int8_dist(PG_FUNCTION_ARGS);
 Datum
 int8_dist(PG_FUNCTION_ARGS)
 {
-       int64       a = PG_GETARG_INT64(0);
-       int64       b = PG_GETARG_INT64(1);
-       int64       r;
-       int64       ra;
+       int64           a = PG_GETARG_INT64(0);
+       int64           b = PG_GETARG_INT64(1);
+       int64           r;
+       int64           ra;
 
        r = a - b;
        ra = Abs(r);
@@ -111,7 +111,7 @@ int8_dist(PG_FUNCTION_ARGS)
        if (ra < 0 || (!SAMESIGN(a, b) && !SAMESIGN(r, a)))
                ereport(ERROR,
                                (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
-                                                errmsg("bigint out of range")));
+                                errmsg("bigint out of range")));
 
        PG_RETURN_INT64(ra);
 }
@@ -170,7 +170,7 @@ gbt_int8_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_interval.c
index 5195284afa1be65f3a66acf779d7a1a6c99236f6..137a5fcd7f4e2e9487782b28d9a8b6ec46ecc506 100644 (file)
@@ -88,7 +88,7 @@ intr2num(const Interval *i)
 static float8
 gbt_intv_dist(const void *a, const void *b)
 {
-       return (float8)Abs(intr2num((Interval*)a) - intr2num((Interval*)b));
+       return (float8) Abs(intr2num((Interval *) a) - intr2num((Interval *) b));
 }
 
 /*
@@ -127,7 +127,7 @@ abs_interval(Interval *a)
 }
 
 PG_FUNCTION_INFO_V1(interval_dist);
-Datum       interval_dist(PG_FUNCTION_ARGS);
+Datum          interval_dist(PG_FUNCTION_ARGS);
 Datum
 interval_dist(PG_FUNCTION_ARGS)
 {
@@ -240,7 +240,7 @@ gbt_intv_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo)
+                        gbt_num_distance(&key, (void *) query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_oid.c
index c81dd31799eb9a2f61b86ab29d2f05adc8a2db31..3b0929b42bb0147a6dbcbb240d676df5a21fb83b 100644 (file)
@@ -101,13 +101,13 @@ static const gbtree_ninfo tinfo =
 
 
 PG_FUNCTION_INFO_V1(oid_dist);
-Datum       oid_dist(PG_FUNCTION_ARGS);
+Datum          oid_dist(PG_FUNCTION_ARGS);
 Datum
 oid_dist(PG_FUNCTION_ARGS)
 {
-    Oid                a = PG_GETARG_OID(0);
-       Oid             b = PG_GETARG_OID(1);
-       Oid             res;
+       Oid                     a = PG_GETARG_OID(0);
+       Oid                     b = PG_GETARG_OID(1);
+       Oid                     res;
 
        if (a < b)
                res = b - a;
@@ -170,7 +170,7 @@ gbt_oid_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_time.c
index 44f6923409f4b6a6fb1682751deae1507f76c5be..e9cfe33f45594fbe5db9077dfe97ef4472f7b025 100644 (file)
@@ -119,7 +119,7 @@ gbt_time_dist(const void *a, const void *b)
 {
        const TimeADT *aa = (const TimeADT *) a;
        const TimeADT *bb = (const TimeADT *) b;
-       Interval          *i;
+       Interval   *i;
 
        i = DatumGetIntervalP(DirectFunctionCall2(time_mi_time,
                                                                                          TimeADTGetDatumFast(*aa),
@@ -143,7 +143,7 @@ static const gbtree_ninfo tinfo =
 
 
 PG_FUNCTION_INFO_V1(time_dist);
-Datum       time_dist(PG_FUNCTION_ARGS);
+Datum          time_dist(PG_FUNCTION_ARGS);
 Datum
 time_dist(PG_FUNCTION_ARGS)
 {
@@ -239,7 +239,7 @@ gbt_time_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_ts.c
index 9a0ec07a1e742bf4003ee5b0a5f1b7ce2ef40d72..9d3a5919a0ee078eb0be8c4097c4a01471ab396e 100644 (file)
@@ -120,7 +120,7 @@ gbt_ts_dist(const void *a, const void *b)
 {
        const Timestamp *aa = (const Timestamp *) a;
        const Timestamp *bb = (const Timestamp *) b;
-       Interval      *i;
+       Interval   *i;
 
        if (TIMESTAMP_NOT_FINITE(*aa) || TIMESTAMP_NOT_FINITE(*bb))
                return get_float8_infinity();
@@ -147,17 +147,17 @@ static const gbtree_ninfo tinfo =
 
 
 PG_FUNCTION_INFO_V1(ts_dist);
-Datum       ts_dist(PG_FUNCTION_ARGS);
+Datum          ts_dist(PG_FUNCTION_ARGS);
 Datum
 ts_dist(PG_FUNCTION_ARGS)
 {
        Timestamp       a = PG_GETARG_TIMESTAMP(0);
        Timestamp       b = PG_GETARG_TIMESTAMP(1);
-    Interval      *r;
+       Interval   *r;
 
        if (TIMESTAMP_NOT_FINITE(a) || TIMESTAMP_NOT_FINITE(b))
        {
-               Interval *p = palloc(sizeof(Interval));
+               Interval   *p = palloc(sizeof(Interval));
 
                p->day = INT_MAX;
                p->month = INT_MAX;
@@ -169,25 +169,24 @@ ts_dist(PG_FUNCTION_ARGS)
                PG_RETURN_INTERVAL_P(p);
        }
        else
-
-       r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi,
-                                                                                         PG_GETARG_DATUM(0),
-                                                                                         PG_GETARG_DATUM(1)));
-       PG_RETURN_INTERVAL_P( abs_interval(r) );
+               r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi,
+                                                                                                 PG_GETARG_DATUM(0),
+                                                                                                 PG_GETARG_DATUM(1)));
+       PG_RETURN_INTERVAL_P(abs_interval(r));
 }
 
 PG_FUNCTION_INFO_V1(tstz_dist);
-Datum       tstz_dist(PG_FUNCTION_ARGS);
+Datum          tstz_dist(PG_FUNCTION_ARGS);
 Datum
 tstz_dist(PG_FUNCTION_ARGS)
 {
-       TimestampTz     a = PG_GETARG_TIMESTAMPTZ(0);
-       TimestampTz     b = PG_GETARG_TIMESTAMPTZ(1);
-    Interval      *r;
+       TimestampTz a = PG_GETARG_TIMESTAMPTZ(0);
+       TimestampTz b = PG_GETARG_TIMESTAMPTZ(1);
+       Interval   *r;
 
        if (TIMESTAMP_NOT_FINITE(a) || TIMESTAMP_NOT_FINITE(b))
        {
-               Interval *p = palloc(sizeof(Interval));
+               Interval   *p = palloc(sizeof(Interval));
 
                p->day = INT_MAX;
                p->month = INT_MAX;
@@ -202,7 +201,7 @@ tstz_dist(PG_FUNCTION_ARGS)
        r = DatumGetIntervalP(DirectFunctionCall2(timestamp_mi,
                                                                                          PG_GETARG_DATUM(0),
                                                                                          PG_GETARG_DATUM(1)));
-       PG_RETURN_INTERVAL_P( abs_interval(r) );
+       PG_RETURN_INTERVAL_P(abs_interval(r));
 }
 
 
@@ -309,7 +308,7 @@ gbt_ts_distance(PG_FUNCTION_ARGS)
        key.upper = (GBT_NUMKEY *) &kkk->upper;
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
+                       gbt_num_distance(&key, (void *) &query, GIST_LEAF(entry), &tinfo)
                );
 }
 
@@ -354,7 +353,7 @@ gbt_tstz_distance(PG_FUNCTION_ARGS)
        qqq = tstz_to_ts_gmt(query);
 
        PG_RETURN_FLOAT8(
-                                  gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo)
+                         gbt_num_distance(&key, (void *) &qqq, GIST_LEAF(entry), &tinfo)
                );
 }
 
contrib/btree_gist/btree_utils_num.c
index 17440a191b271a57afe3e445a3f522a3ead39a69..64c95854df89d15376ecc6dd7994fe6a971276e0 100644 (file)
@@ -223,8 +223,8 @@ gbt_num_consistent(const GBT_NUMKEY_R *key,
                        retval = (*tinfo->f_le) (query, key->upper);
                        break;
                case BtreeGistNotEqualStrategyNumber:
-                       retval = (! ((*tinfo->f_eq) (query, key->lower) &&
-                               (*tinfo->f_eq) (query, key->upper)))  ? true : false;
+                       retval = (!((*tinfo->f_eq) (query, key->lower) &&
+                                               (*tinfo->f_eq) (query, key->upper))) ? true : false;
                        break;
                default:
                        retval = false;
@@ -249,9 +249,9 @@ gbt_num_distance(const GBT_NUMKEY_R *key,
        if (tinfo->f_dist == NULL)
                elog(ERROR, "KNN search is not supported for btree_gist type %d",
                         (int) tinfo->t);
-       if ( tinfo->f_le(query, key->lower) )
+       if (tinfo->f_le(query, key->lower))
                retval = tinfo->f_dist(query, key->lower);
-       else if ( tinfo->f_ge(query, key->upper) )
+       else if (tinfo->f_ge(query, key->upper))
                retval = tinfo->f_dist(query, key->upper);
        else
                retval = 0.0;
contrib/btree_gist/btree_utils_num.h
index 243d3b5cb99087a3c166c922c72031c5bc3cac39..8935ed66306a1079fd29daabc10b435f12462c77 100644 (file)
@@ -46,7 +46,7 @@ typedef struct
        bool            (*f_le) (const void *, const void *);   /* less or equal */
        bool            (*f_lt) (const void *, const void *);   /* less than */
        int                     (*f_cmp) (const void *, const void *);  /* key compare function */
-       float8          (*f_dist) (const void *, const void *); /* key distance function */
+       float8          (*f_dist) (const void *, const void *); /* key distance function */
 } gbtree_ninfo;
 
 
@@ -94,7 +94,7 @@ typedef struct
 
 #define GET_FLOAT_DISTANCE(t, arg1, arg2)      Abs( ((float8) *((const t *) (arg1))) - ((float8) *((const t *) (arg2))) )
 
-#define SAMESIGN(a,b)   (((a) < 0) == ((b) < 0))
+#define SAMESIGN(a,b)  (((a) < 0) == ((b) < 0))
 
 /*
  * check to see if a float4/8 val has underflowed or overflowed
@@ -121,7 +121,7 @@ extern bool gbt_num_consistent(const GBT_NUMKEY_R *key, const void *query,
                                   const gbtree_ninfo *tinfo);
 
 extern float8 gbt_num_distance(const GBT_NUMKEY_R *key, const void *query,
-                                  bool is_leaf, const gbtree_ninfo *tinfo);
+                                bool is_leaf, const gbtree_ninfo *tinfo);
 
 extern GIST_SPLITVEC *gbt_num_picksplit(const GistEntryVector *entryvec, GIST_SPLITVEC *v,
                                  const gbtree_ninfo *tinfo);
contrib/btree_gist/btree_utils_var.c
index 8f3173e499789c02f7d756ff6db42220b5a6cd06..d74013af8828d987a3c0bc4ba5a9b12f74597b21 100644 (file)
@@ -598,7 +598,7 @@ gbt_var_consistent(
                                        || gbt_var_node_pf_match(key, query, tinfo);
                        break;
                case BtreeGistNotEqualStrategyNumber:
-                       retval = ! ((*tinfo->f_eq) (query, key->lower) && (*tinfo->f_eq) (query, key->upper));
+                       retval = !((*tinfo->f_eq) (query, key->lower) && (*tinfo->f_eq) (query, key->upper));
                        break;
                default:
                        retval = FALSE;
contrib/dummy_seclabel/dummy_seclabel.c
index 974806f1b6ca9e3d22b9db47dbfb5e26cbfa5217..5deb43fa9b819299f21635544920ea7ca6f39580 100644 (file)
@@ -18,7 +18,7 @@
 PG_MODULE_MAGIC;
 
 /* Entrypoint of the module */
-void _PG_init(void);
+void           _PG_init(void);
 
 static void
 dummy_object_relabel(const ObjectAddress *object, const char *seclabel)
contrib/file_fdw/file_fdw.c
index 6a84a00e8d39558b07d7767d496fa5f6c334d24d..466c015107db28b46601b759ad4542d6589a597c 100644 (file)
@@ -45,17 +45,17 @@ struct FileFdwOption
  */
 static struct FileFdwOption valid_options[] = {
        /* File options */
-       { "filename",           ForeignTableRelationId },
+       {"filename", ForeignTableRelationId},
 
        /* Format options */
        /* oids option is not supported */
-       { "format",                     ForeignTableRelationId },
-       { "header",                     ForeignTableRelationId },
-       { "delimiter",          ForeignTableRelationId },
-       { "quote",                      ForeignTableRelationId },
-       { "escape",                     ForeignTableRelationId },
-       { "null",                       ForeignTableRelationId },
-       { "encoding",           ForeignTableRelationId },
+       {"format", ForeignTableRelationId},
+       {"header", ForeignTableRelationId},
+       {"delimiter", ForeignTableRelationId},
+       {"quote", ForeignTableRelationId},
+       {"escape", ForeignTableRelationId},
+       {"null", ForeignTableRelationId},
+       {"encoding", ForeignTableRelationId},
 
        /*
         * force_quote is not supported by file_fdw because it's for COPY TO.
@@ -68,7 +68,7 @@ static struct FileFdwOption valid_options[] = {
         */
 
        /* Sentinel */
-       { NULL,                 InvalidOid }
+       {NULL, InvalidOid}
 };
 
 /*
@@ -76,9 +76,9 @@ static struct FileFdwOption valid_options[] = {
  */
 typedef struct FileFdwExecutionState
 {
-       char               *filename;   /* file to read */
-       List               *options;    /* merged COPY options, excluding filename */
-       CopyState               cstate;         /* state of reading file */
+       char       *filename;           /* file to read */
+       List       *options;            /* merged COPY options, excluding filename */
+       CopyState       cstate;                 /* state of reading file */
 } FileFdwExecutionState;
 
 /*
@@ -94,8 +94,8 @@ PG_FUNCTION_INFO_V1(file_fdw_validator);
  * FDW callback routines
  */
 static FdwPlan *filePlanForeignScan(Oid foreigntableid,
-                                                                       PlannerInfo *root,
-                                                                       RelOptInfo *baserel);
+                                       PlannerInfo *root,
+                                       RelOptInfo *baserel);
 static void fileExplainForeignScan(ForeignScanState *node, ExplainState *es);
 static void fileBeginForeignScan(ForeignScanState *node, int eflags);
 static TupleTableSlot *fileIterateForeignScan(ForeignScanState *node);
@@ -109,8 +109,8 @@ static bool is_valid_option(const char *option, Oid context);
 static void fileGetOptions(Oid foreigntableid,
                           char **filename, List **other_options);
 static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
-                                                  const char *filename,
-                                                  Cost *startup_cost, Cost *total_cost);
+                          const char *filename,
+                          Cost *startup_cost, Cost *total_cost);
 
 
 /*
@@ -149,16 +149,16 @@ file_fdw_validator(PG_FUNCTION_ARGS)
 
        /*
         * Only superusers are allowed to set options of a file_fdw foreign table.
-        * This is because the filename is one of those options, and we don't
-        * want non-superusers to be able to determine which file gets read.
+        * This is because the filename is one of those options, and we don't want
+        * non-superusers to be able to determine which file gets read.
         *
         * Putting this sort of permissions check in a validator is a bit of a
         * crock, but there doesn't seem to be any other place that can enforce
         * the check more cleanly.
         *
-        * Note that the valid_options[] array disallows setting filename at
-        * any options level other than foreign table --- otherwise there'd
-        * still be a security hole.
+        * Note that the valid_options[] array disallows setting filename at any
+        * options level other than foreign table --- otherwise there'd still be a
+        * security hole.
         */
        if (catalog == ForeignTableRelationId && !superuser())
                ereport(ERROR,
@@ -171,7 +171,7 @@ file_fdw_validator(PG_FUNCTION_ARGS)
         */
        foreach(cell, options_list)
        {
-               DefElem    *def = (DefElem *) lfirst(cell);
+               DefElem    *def = (DefElem *) lfirst(cell);
 
                if (!is_valid_option(def->defname, catalog))
                {
@@ -276,7 +276,7 @@ fileGetOptions(Oid foreigntableid,
        prev = NULL;
        foreach(lc, options)
        {
-               DefElem    *def = (DefElem *) lfirst(lc);
+               DefElem    *def = (DefElem *) lfirst(lc);
 
                if (strcmp(def->defname, "filename") == 0)
                {
@@ -302,7 +302,7 @@ filePlanForeignScan(Oid foreigntableid,
                                        PlannerInfo *root,
                                        RelOptInfo *baserel)
 {
-       FdwPlan    *fdwplan;
+       FdwPlan    *fdwplan;
        char       *filename;
        List       *options;
 
@@ -313,7 +313,7 @@ filePlanForeignScan(Oid foreigntableid,
        fdwplan = makeNode(FdwPlan);
        estimate_costs(root, baserel, filename,
                                   &fdwplan->startup_cost, &fdwplan->total_cost);
-       fdwplan->fdw_private = NIL;                             /* not used */
+       fdwplan->fdw_private = NIL; /* not used */
 
        return fdwplan;
 }
@@ -337,7 +337,7 @@ fileExplainForeignScan(ForeignScanState *node, ExplainState *es)
        /* Suppress file size if we're not showing cost details */
        if (es->costs)
        {
-               struct stat             stat_buf;
+               struct stat stat_buf;
 
                if (stat(filename, &stat_buf) == 0)
                        ExplainPropertyLong("Foreign File Size", (long) stat_buf.st_size,
@@ -368,8 +368,8 @@ fileBeginForeignScan(ForeignScanState *node, int eflags)
                                   &filename, &options);
 
        /*
-        * Create CopyState from FDW options.  We always acquire all columns,
-        * so as to match the expected ScanTupleSlot signature.
+        * Create CopyState from FDW options.  We always acquire all columns, so
+        * as to match the expected ScanTupleSlot signature.
         */
        cstate = BeginCopyFrom(node->ss.ss_currentRelation,
                                                   filename,
@@ -398,7 +398,7 @@ fileIterateForeignScan(ForeignScanState *node)
 {
        FileFdwExecutionState *festate = (FileFdwExecutionState *) node->fdw_state;
        TupleTableSlot *slot = node->ss.ss_ScanTupleSlot;
-       bool                    found;
+       bool            found;
        ErrorContextCallback errcontext;
 
        /* Set up callback to identify error line number. */
@@ -410,8 +410,8 @@ fileIterateForeignScan(ForeignScanState *node)
        /*
         * The protocol for loading a virtual tuple into a slot is first
         * ExecClearTuple, then fill the values/isnull arrays, then
-        * ExecStoreVirtualTuple.  If we don't find another row in the file,
-        * we just skip the last step, leaving the slot empty as required.
+        * ExecStoreVirtualTuple.  If we don't find another row in the file, we
+        * just skip the last step, leaving the slot empty as required.
         *
         * We can pass ExprContext = NULL because we read all columns from the
         * file, so no need to evaluate default expressions.
@@ -471,17 +471,17 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
                           const char *filename,
                           Cost *startup_cost, Cost *total_cost)
 {
-       struct stat             stat_buf;
-       BlockNumber             pages;
-       int                             tuple_width;
-       double                  ntuples;
-       double                  nrows;
-       Cost                    run_cost = 0;
-       Cost                    cpu_per_tuple;
+       struct stat stat_buf;
+       BlockNumber pages;
+       int                     tuple_width;
+       double          ntuples;
+       double          nrows;
+       Cost            run_cost = 0;
+       Cost            cpu_per_tuple;
 
        /*
-        * Get size of the file.  It might not be there at plan time, though,
-        * in which case we have to use a default estimate.
+        * Get size of the file.  It might not be there at plan time, though, in
+        * which case we have to use a default estimate.
         */
        if (stat(filename, &stat_buf) < 0)
                stat_buf.st_size = 10 * BLCKSZ;
@@ -489,7 +489,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
        /*
         * Convert size to pages for use in I/O cost estimate below.
         */
-       pages = (stat_buf.st_size + (BLCKSZ-1)) / BLCKSZ;
+       pages = (stat_buf.st_size + (BLCKSZ - 1)) / BLCKSZ;
        if (pages < 1)
                pages = 1;
 
@@ -505,10 +505,9 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
        ntuples = clamp_row_est((double) stat_buf.st_size / (double) tuple_width);
 
        /*
-        * Now estimate the number of rows returned by the scan after applying
-        * the baserestrictinfo quals.  This is pretty bogus too, since the
-        * planner will have no stats about the relation, but it's better than
-        * nothing.
+        * Now estimate the number of rows returned by the scan after applying the
+        * baserestrictinfo quals.      This is pretty bogus too, since the planner
+        * will have no stats about the relation, but it's better than nothing.
         */
        nrows = ntuples *
                clauselist_selectivity(root,
@@ -523,7 +522,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
        baserel->rows = nrows;
 
        /*
-        * Now estimate costs.  We estimate costs almost the same way as
+        * Now estimate costs.  We estimate costs almost the same way as
         * cost_seqscan(), thus assuming that I/O costs are equivalent to a
         * regular table file of the same size.  However, we take per-tuple CPU
         * costs as 10x of a seqscan, to account for the cost of parsing records.
contrib/fuzzystrmatch/levenshtein.c
index 3d85d4175fb1e08be0e0b10172a78521df7f1fcf..a84c46a4a408fc7d17ba44137d871e9a45b20e05 100644 (file)
@@ -23,7 +23,7 @@
  */
 #ifdef LEVENSHTEIN_LESS_EQUAL
 static int levenshtein_less_equal_internal(text *s, text *t,
-                                        int ins_c, int del_c, int sub_c, int max_d);
+                                                               int ins_c, int del_c, int sub_c, int max_d);
 #else
 static int levenshtein_internal(text *s, text *t,
                                         int ins_c, int del_c, int sub_c);
@@ -50,7 +50,7 @@ static int levenshtein_internal(text *s, text *t,
  * array.
  *
  * If max_d >= 0, we only need to provide an accurate answer when that answer
- * is less than or equal to the bound.  From any cell in the matrix, there is
+ * is less than or equal to the bound. From any cell in the matrix, there is
  * theoretical "minimum residual distance" from that cell to the last column
  * of the final row.  This minimum residual distance is zero when the
  * untransformed portions of the strings are of equal length (because we might
@@ -87,11 +87,13 @@ levenshtein_internal(text *s, text *t,
 
        /*
         * For levenshtein_less_equal_internal, we have real variables called
-        * start_column and stop_column; otherwise it's just short-hand for 0
-        * and m.
+        * start_column and stop_column; otherwise it's just short-hand for 0 and
+        * m.
         */
 #ifdef LEVENSHTEIN_LESS_EQUAL
-       int        start_column, stop_column;
+       int                     start_column,
+                               stop_column;
+
 #undef START_COLUMN
 #undef STOP_COLUMN
 #define START_COLUMN start_column
@@ -139,16 +141,16 @@ levenshtein_internal(text *s, text *t,
        stop_column = m + 1;
 
        /*
-        * If max_d >= 0, determine whether the bound is impossibly tight.  If so,
+        * If max_d >= 0, determine whether the bound is impossibly tight.      If so,
         * return max_d + 1 immediately.  Otherwise, determine whether it's tight
         * enough to limit the computation we must perform.  If so, figure out
         * initial stop column.
         */
        if (max_d >= 0)
        {
-               int             min_theo_d;             /* Theoretical minimum distance. */
-               int             max_theo_d;             /* Theoretical maximum distance. */
-               int             net_inserts = n - m;
+               int                     min_theo_d; /* Theoretical minimum distance. */
+               int                     max_theo_d; /* Theoretical maximum distance. */
+               int                     net_inserts = n - m;
 
                min_theo_d = net_inserts < 0 ?
                        -net_inserts * del_c : net_inserts * ins_c;
@@ -162,20 +164,20 @@ levenshtein_internal(text *s, text *t,
                else if (ins_c + del_c > 0)
                {
                        /*
-                        * Figure out how much of the first row of the notional matrix
-                        * we need to fill in.  If the string is growing, the theoretical
+                        * Figure out how much of the first row of the notional matrix we
+                        * need to fill in.  If the string is growing, the theoretical
                         * minimum distance already incorporates the cost of deleting the
-                        * number of characters necessary to make the two strings equal
-                        * in length.  Each additional deletion forces another insertion,
-                        * so the best-case total cost increases by ins_c + del_c.
-                        * If the string is shrinking, the minimum theoretical cost
-                        * assumes no excess deletions; that is, we're starting no futher
-                        * right than column n - m.  If we do start further right, the
-                        * best-case total cost increases by ins_c + del_c for each move
-                        * right.
+                        * number of characters necessary to make the two strings equal in
+                        * length.      Each additional deletion forces another insertion, so
+                        * the best-case total cost increases by ins_c + del_c. If the
+                        * string is shrinking, the minimum theoretical cost assumes no
+                        * excess deletions; that is, we're starting no futher right than
+                        * column n - m.  If we do start further right, the best-case
+                        * total cost increases by ins_c + del_c for each move right.
                         */
-                       int slack_d = max_d - min_theo_d;
-                       int best_column = net_inserts < 0 ? -net_inserts : 0;
+                       int                     slack_d = max_d - min_theo_d;
+                       int                     best_column = net_inserts < 0 ? -net_inserts : 0;
+
                        stop_column = best_column + (slack_d / (ins_c + del_c)) + 1;
                        if (stop_column > m)
                                stop_column = m + 1;
@@ -185,15 +187,15 @@ levenshtein_internal(text *s, text *t,
 
        /*
         * In order to avoid calling pg_mblen() repeatedly on each character in s,
-        * we cache all the lengths before starting the main loop -- but if all the
-        * characters in both strings are single byte, then we skip this and use
-        * a fast-path in the main loop.  If only one string contains multi-byte
-        * characters, we still build the array, so that the fast-path needn't
-        * deal with the case where the array hasn't been initialized.
+        * we cache all the lengths before starting the main loop -- but if all
+        * the characters in both strings are single byte, then we skip this and
+        * use a fast-path in the main loop.  If only one string contains
+        * multi-byte characters, we still build the array, so that the fast-path
+        * needn't deal with the case where the array hasn't been initialized.
         */
        if (m != s_bytes || n != t_bytes)
        {
-               int             i;
+               int                     i;
                const char *cp = s_data;
 
                s_char_len = (int *) palloc((m + 1) * sizeof(int));
@@ -214,8 +216,8 @@ levenshtein_internal(text *s, text *t,
        curr = prev + m;
 
        /*
-        * To transform the first i characters of s into the first 0 characters
-        * of t, we must perform i deletions.
+        * To transform the first i characters of s into the first 0 characters of
+        * t, we must perform i deletions.
         */
        for (i = START_COLUMN; i < STOP_COLUMN; i++)
                prev[i] = i * del_c;
@@ -228,6 +230,7 @@ levenshtein_internal(text *s, text *t,
                int                     y_char_len = n != t_bytes + 1 ? pg_mblen(y) : 1;
 
 #ifdef LEVENSHTEIN_LESS_EQUAL
+
                /*
                 * In the best case, values percolate down the diagonal unchanged, so
                 * we must increment stop_column unless it's already on the right end
@@ -241,10 +244,10 @@ levenshtein_internal(text *s, text *t,
                }
 
                /*
-                * The main loop fills in curr, but curr[0] needs a special case:
-                * to transform the first 0 characters of s into the first j
-                * characters of t, we must perform j insertions.  However, if
-                * start_column > 0, this special case does not apply.
+                * The main loop fills in curr, but curr[0] needs a special case: to
+                * transform the first 0 characters of s into the first j characters
+                * of t, we must perform j insertions.  However, if start_column > 0,
+                * this special case does not apply.
                 */
                if (start_column == 0)
                {
@@ -285,7 +288,7 @@ levenshtein_internal(text *s, text *t,
                                 */
                                ins = prev[i] + ins_c;
                                del = curr[i - 1] + del_c;
-                               if (x[x_char_len-1] == y[y_char_len-1]
+                               if (x[x_char_len - 1] == y[y_char_len - 1]
                                        && x_char_len == y_char_len &&
                                        (x_char_len == 1 || rest_of_char_same(x, y, x_char_len)))
                                        sub = prev[i - 1];
@@ -331,6 +334,7 @@ levenshtein_internal(text *s, text *t,
                y += y_char_len;
 
 #ifdef LEVENSHTEIN_LESS_EQUAL
+
                /*
                 * This chunk of code represents a significant performance hit if used
                 * in the case where there is no max_d bound.  This is probably not
@@ -348,15 +352,16 @@ levenshtein_internal(text *s, text *t,
                         * string, so we want to find the value for zp where where (n - 1)
                         * - j = (m - 1) - zp.
                         */
-                       int zp = j - (n - m);
+                       int                     zp = j - (n - m);
 
                        /* Check whether the stop column can slide left. */
                        while (stop_column > 0)
                        {
-                               int     ii = stop_column - 1;
-                               int     net_inserts = ii - zp;
+                               int                     ii = stop_column - 1;
+                               int                     net_inserts = ii - zp;
+
                                if (prev[ii] + (net_inserts > 0 ? net_inserts * ins_c :
-                                       -net_inserts * del_c) <= max_d)
+                                                               -net_inserts * del_c) <= max_d)
                                        break;
                                stop_column--;
                        }
@@ -364,14 +369,16 @@ levenshtein_internal(text *s, text *t,
                        /* Check whether the start column can slide right. */
                        while (start_column < stop_column)
                        {
-                               int net_inserts = start_column - zp;
+                               int                     net_inserts = start_column - zp;
+
                                if (prev[start_column] +
                                        (net_inserts > 0 ? net_inserts * ins_c :
-                                       -net_inserts * del_c) <= max_d)
+                                        -net_inserts * del_c) <= max_d)
                                        break;
+
                                /*
-                                * We'll never again update these values, so we must make
-                                * sure there's nothing here that could confuse any future
+                                * We'll never again update these values, so we must make sure
+                                * there's nothing here that could confuse any future
                                 * iteration of the outer loop.
                                 */
                                prev[start_column] = max_d + 1;
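
The comment block reflowed above describes how the banded Levenshtein computation trims start_column and stop_column: a column stays in the band only if its previous-row value plus the cheapest possible remaining edits (pure insertions when it lies right of the final diagonal, pure deletions when it lies left of it) can still come in under max_d. A minimal stand-alone sketch of that bound check, with invented names and illustrative costs, not the contrib/fuzzystrmatch code itself:

#include <stdbool.h>
#include <stdio.h>

/*
 * Sketch only: can this column remain inside the band?  net_inserts is the
 * signed distance from the final diagonal; a positive value means only
 * insertions can close the gap, a negative value means only deletions can.
 */
static bool
column_can_stay(int prev_value, int net_inserts, int ins_c, int del_c, int max_d)
{
    int remaining = (net_inserts > 0) ? net_inserts * ins_c
                                      : -net_inserts * del_c;

    return prev_value + remaining <= max_d;
}

int
main(void)
{
    /* illustrative numbers only */
    printf("%d\n", column_can_stay(3, 2, 1, 1, 4));     /* 0: 3 + 2 exceeds 4 */
    printf("%d\n", column_can_stay(3, -1, 1, 1, 4));    /* 1: 3 + 1 fits under 4 */
    return 0;
}
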
index d55674c79f902f5a4091899a48e3121adf12dd84..2007801cf0c97300cc201af5a19cdfd9f3c14a04 100644 (file)
@@ -13,7 +13,7 @@
 /*
  * When using a GIN index for hstore, we choose to index both keys and values.
  * The storage format is "text" values, with K, V, or N prepended to the string
- * to indicate key, value, or null values.  (As of 9.1 it might be better to
+ * to indicate key, value, or null values.     (As of 9.1 it might be better to
  * store null values as nulls, but we'll keep it this way for on-disk
  * compatibility.)
  */
@@ -168,7 +168,7 @@ gin_consistent_hstore(PG_FUNCTION_ARGS)
        {
                /*
                 * Index doesn't have information about correspondence of keys and
-                * values, so we need recheck.  However, if not all the keys are
+                * values, so we need recheck.  However, if not all the keys are
                 * present, we can fail at once.
                 */
                *recheck = true;
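
The header comment above spells out the hstore GIN entry encoding: each indexed item is stored as text with a leading 'K', 'V', or 'N' flag marking a key, a value, or a null value. A small sketch of that prefixing idea, using an invented helper and plain C strings rather than the text datums hstore actually stores:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical helper, not the hstore code: build a GIN entry string by
 * prepending the one-character flag described in the comment above.
 */
static char *
make_gin_entry(char flag, const char *s)
{
    size_t      len = strlen(s);
    char       *entry = malloc(len + 2);

    entry[0] = flag;
    memcpy(entry + 1, s, len + 1);      /* copy the string including its NUL */
    return entry;
}

int
main(void)
{
    char       *k = make_gin_entry('K', "tag");
    char       *v = make_gin_entry('V', "red");

    printf("%s %s\n", k, v);            /* prints: Ktag Vred */
    free(k);
    free(v);
    return 0;
}
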
index cb6200ab1dcfbb9807e39c190e9c662215418424..5b278c14ffe249f3e3208f992073b337e9fff622 100644 (file)
@@ -437,7 +437,7 @@ hstore_delete_hstore(PG_FUNCTION_ARGS)
                        if (snullval != HS_VALISNULL(es2, j)
                                || (!snullval
                                        && (svallen != HS_VALLEN(es2, j)
-                                               || memcmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0)))
+                       || memcmp(HS_VAL(es, ps, i), HS_VAL(es2, ps2, j), svallen) != 0)))
                        {
                                HS_COPYITEM(ed, bufd, pd,
                                                        HS_KEY(es, ps, i), HS_KEYLEN(es, i),
@@ -1000,7 +1000,7 @@ hstore_contains(PG_FUNCTION_ARGS)
                        if (nullval != HS_VALISNULL(ve, idx)
                                || (!nullval
                                        && (vallen != HS_VALLEN(ve, idx)
-                       || memcmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen))))
+                        || memcmp(HS_VAL(te, tstr, i), HS_VAL(ve, vstr, idx), vallen))))
                                res = false;
                }
                else
index 072e8cc89773b052be3f0a805067db7ab3b0c8bc..4e63f6d66c145072984360be989723cd682b9924 100644 (file)
@@ -98,7 +98,7 @@ gettoken(WORKSTATE *state, int4 *val)
                                }
                                else
                                {
-                                       long    lval;
+                                       long            lval;
 
                                        nnn[innn] = '\0';
                                        errno = 0;
@@ -355,8 +355,8 @@ gin_bool_consistent(QUERYTYPE *query, bool *check)
                return FALSE;
 
        /*
-        * Set up data for checkcondition_gin.  This must agree with the
-        * query extraction code in ginint4_queryextract.
+        * Set up data for checkcondition_gin.  This must agree with the query
+        * extraction code in ginint4_queryextract.
         */
        gcv.first = items;
        gcv.mapped_check = (bool *) palloc(sizeof(bool) * query->size);
index 3ef5c4635a150b94a90651a375e1e523ee9f78cd..9abe54e55f9f4501c2999237e62b06dcaf86ca66 100644 (file)
@@ -34,8 +34,8 @@ ginint4_queryextract(PG_FUNCTION_ARGS)
 
                /*
                 * If the query doesn't have any required primitive values (for
-                * instance, it's something like '! 42'), we have to do a full
-                * index scan.
+                * instance, it's something like '! 42'), we have to do a full index
+                * scan.
                 */
                if (query_has_required_values(query))
                        *searchMode = GIN_SEARCH_MODE_DEFAULT;
@@ -95,7 +95,7 @@ ginint4_queryextract(PG_FUNCTION_ARGS)
                        case RTOldContainsStrategyNumber:
                                if (*nentries > 0)
                                        *searchMode = GIN_SEARCH_MODE_DEFAULT;
-                               else                            /* everything contains the empty set */
+                               else    /* everything contains the empty set */
                                        *searchMode = GIN_SEARCH_MODE_ALL;
                                break;
                        default:
@@ -116,6 +116,7 @@ ginint4_consistent(PG_FUNCTION_ARGS)
        bool       *check = (bool *) PG_GETARG_POINTER(0);
        StrategyNumber strategy = PG_GETARG_UINT16(1);
        int32           nkeys = PG_GETARG_INT32(3);
+
        /* Pointer         *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
        bool       *recheck = (bool *) PG_GETARG_POINTER(5);
        bool            res = FALSE;
index ddf07f042b24140a51f2311099c4accfae26e1f4..bfc55501dbcdd6f9ba361808b69a712207cb0031 100644 (file)
@@ -183,7 +183,7 @@ rt__int_size(ArrayType *a, float *size)
        *size = (float) ARRNELEMS(a);
 }
 
-/* Sort the given data (len >= 2).  Return true if any duplicates found */
+/* Sort the given data (len >= 2).     Return true if any duplicates found */
 bool
 isort(int4 *a, int len)
 {
@@ -195,7 +195,7 @@ isort(int4 *a, int len)
        bool            r = FALSE;
 
        /*
-        * We use a simple insertion sort.  While this is O(N^2) in the worst
+        * We use a simple insertion sort.      While this is O(N^2) in the worst
         * case, it's quite fast if the input is already sorted or nearly so.
         * Also, for not-too-large inputs it's faster than more complex methods
         * anyhow.
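
The isort() comment above notes that the routine is a plain insertion sort which also reports whether any duplicate values were seen, which is all the caller needs in order to decide whether to de-duplicate afterwards. A simplified stand-alone sketch of that idea (plain int instead of int4, names invented, not the contrib/intarray code):

#include <stdbool.h>
#include <stdio.h>

/* Insertion sort that also reports whether any equal elements were found. */
static bool
isort_sketch(int *a, int len)
{
    bool        dup = false;

    for (int i = 1; i < len; i++)
    {
        int         cur = a[i];
        int         j = i - 1;

        while (j >= 0 && a[j] >= cur)
        {
            if (a[j] == cur)
                dup = true;
            a[j + 1] = a[j];
            j--;
        }
        a[j + 1] = cur;
    }
    return dup;
}

int
main(void)
{
    int         v[] = {3, 1, 3, 2};

    printf("%d\n", isort_sketch(v, 4));                 /* 1: duplicate 3 found */
    printf("%d %d %d %d\n", v[0], v[1], v[2], v[3]);    /* 1 2 3 3 */
    return 0;
}
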
index c0301ced1e0ceea3e8265903603485ed2f1ec151..dbda6fb7241400063b86ca2deb1bd28872a3361c 100644 (file)
@@ -988,4 +988,3 @@ const char *ISBN_range_new[][2] = {
        {"10-976000", "10-999999"},
        {NULL, NULL},
 };
-
index 79892077c8c012225cf82fc716d3208a41b3564a..d96eef2c5a618910bbf80ed43f3a41cac1dd82f4 100644 (file)
@@ -25,9 +25,9 @@
 #ifdef HAVE_GETOPT_H
 #include <getopt.h>
 #endif
-#else  /* WIN32 */
+#else                                                  /* WIN32 */
 extern int     getopt(int argc, char *const argv[], const char *optstring);
-#endif /* ! WIN32 */
+#endif   /* ! WIN32 */
 
 extern char *optarg;
 extern int     optind;
index 87cf8c55cf491831bbf5a9db79ca1eb3e9b81cc9..0236b87498fa27a4c445046469f89b7615e75632 100644 (file)
@@ -137,7 +137,7 @@ typedef enum
        PGSS_TRACK_NONE,                        /* track no statements */
        PGSS_TRACK_TOP,                         /* only top level statements */
        PGSS_TRACK_ALL                          /* all statements, including nested ones */
-} PGSSTrackLevel;
+}      PGSSTrackLevel;
 
 static const struct config_enum_entry track_options[] =
 {
index 49a7b3c2c006d3e2e405cd3dd6d64e3b7febfe8f..305b3d0723ddf23014d56b87c0e4aa2ab3afd6a0 100644 (file)
 
 static const char *progname;
 
-static int             ops_per_test = 2000;
-static char        full_buf[XLOG_SEG_SIZE], *buf, *filename = FSYNC_FILENAME;
-static struct timeval start_t, stop_t;
-
-
-static void    handle_args(int argc, char *argv[]);
-static void    prepare_buf(void);
-static void    test_open(void);
-static void    test_non_sync(void);
-static void    test_sync(int writes_per_op);
-static void    test_open_syncs(void);
-static void    test_open_sync(const char *msg, int writes_size);
-static void    test_file_descriptor_sync(void);
+static int     ops_per_test = 2000;
+static char full_buf[XLOG_SEG_SIZE],
+                  *buf,
+                  *filename = FSYNC_FILENAME;
+static struct timeval start_t,
+                       stop_t;
+
+
+static void handle_args(int argc, char *argv[]);
+static void prepare_buf(void);
+static void test_open(void);
+static void test_non_sync(void);
+static void test_sync(int writes_per_op);
+static void test_open_syncs(void);
+static void test_open_sync(const char *msg, int writes_size);
+static void test_file_descriptor_sync(void);
+
 #ifdef HAVE_FSYNC_WRITETHROUGH
 static int     pg_fsync_writethrough(int fd);
 #endif
-static void    print_elapse(struct timeval start_t, struct timeval stop_t);
-static void    die(const char *str);
+static void print_elapse(struct timeval start_t, struct timeval stop_t);
+static void die(const char *str);
 
 
 int
@@ -103,7 +107,7 @@ handle_args(int argc, char *argv[])
        }
 
        while ((option = getopt_long(argc, argv, "f:o:",
-                       long_options, &optindex)) != -1)
+                                                                long_options, &optindex)) != -1)
        {
                switch (option)
                {
@@ -176,7 +180,9 @@ test_open(void)
 static void
 test_sync(int writes_per_op)
 {
-       int                     tmpfile, ops, writes;
+       int                     tmpfile,
+                               ops,
+                               writes;
        bool            fs_warning = false;
 
        if (writes_per_op == 1)
@@ -353,7 +359,9 @@ test_open_syncs(void)
 static void
 test_open_sync(const char *msg, int writes_size)
 {
-       int             tmpfile, ops, writes;
+       int                     tmpfile,
+                               ops,
+                               writes;
 
        printf(LABEL_FORMAT, msg);
        fflush(stdout);
@@ -377,7 +385,6 @@ test_open_sync(const char *msg, int writes_size)
                close(tmpfile);
                print_elapse(start_t, stop_t);
        }
-
 #else
        printf(NA_FORMAT, "n/a\n");
 #endif
@@ -386,22 +393,22 @@ test_open_sync(const char *msg, int writes_size)
 static void
 test_file_descriptor_sync(void)
 {
-       int                     tmpfile, ops;
+       int                     tmpfile,
+                               ops;
 
        /*
-        * Test whether fsync can sync data written on a different
-        * descriptor for the same file.  This checks the efficiency
-        * of multi-process fsyncs against the same file.
-        * Possibly this should be done with writethrough on platforms
-        * which support it.
+        * Test whether fsync can sync data written on a different descriptor for
+        * the same file.  This checks the efficiency of multi-process fsyncs
+        * against the same file. Possibly this should be done with writethrough
+        * on platforms which support it.
         */
        printf("\nTest if fsync on non-write file descriptor is honored:\n");
        printf("(If the times are similar, fsync() can sync data written\n");
        printf("on a different descriptor.)\n");
 
        /*
-        * first write, fsync and close, which is the
-        * normal behavior without multiple descriptors
+        * first write, fsync and close, which is the normal behavior without
+        * multiple descriptors
         */
        printf(LABEL_FORMAT, "write, fsync, close");
        fflush(stdout);
@@ -416,9 +423,10 @@ test_file_descriptor_sync(void)
                if (fsync(tmpfile) != 0)
                        die("fsync failed");
                close(tmpfile);
+
                /*
-                * open and close the file again to be consistent
-                * with the following test
+                * open and close the file again to be consistent with the following
+                * test
                 */
                if ((tmpfile = open(filename, O_RDWR, 0)) == -1)
                        die("could not open output file");
@@ -428,9 +436,8 @@ test_file_descriptor_sync(void)
        print_elapse(start_t, stop_t);
 
        /*
-        * Now open, write, close, open again and fsync
-        * This simulates processes fsyncing each other's
-        * writes.
+        * Now open, write, close, open again and fsync.  This simulates processes
+        * fsyncing each other's writes.
         */
        printf(LABEL_FORMAT, "write, close, fsync");
        fflush(stdout);
@@ -458,7 +465,8 @@ test_file_descriptor_sync(void)
 static void
 test_non_sync(void)
 {
-       int                     tmpfile, ops;
+       int                     tmpfile,
+                               ops;
 
        /*
         * Test a simple write without fsync
@@ -494,7 +502,6 @@ pg_fsync_writethrough(int fd)
        return -1;
 #endif
 }
-
 #endif
 
 /*
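
The reflowed comments above describe the cross-descriptor fsync test: write through one file descriptor, close it, then open the same file again and fsync through the new descriptor, comparing its timing against the ordinary write-fsync-close case. A stand-alone sketch of that second scenario under POSIX, with a hypothetical scratch file name and no timing code:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    const char *path = "fsync_probe.tmp";      /* hypothetical scratch file */
    char        byte = 0;
    int         fd;

    /* write the data through the first descriptor, then close it */
    if ((fd = open(path, O_RDWR | O_CREAT, 0600)) < 0 ||
        write(fd, &byte, 1) != 1)
    {
        perror("write");
        return 1;
    }
    close(fd);

    /* open the same file again and fsync through the new descriptor */
    if ((fd = open(path, O_RDWR)) < 0 || fsync(fd) != 0)
    {
        perror("fsync");
        return 1;
    }
    close(fd);
    unlink(path);
    return 0;
}
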
index f3644fcce76271f1713791184ad9a072145fb1d2..61de5d89d161bf7e4d9a5965d0211b1447c0c2c3 100644 (file)
@@ -51,8 +51,9 @@ uint32                trgm2int(trgm *ptr);
 #endif
 #define ISPRINTABLETRGM(t)     ( ISPRINTABLECHAR( ((char*)(t)) ) && ISPRINTABLECHAR( ((char*)(t))+1 ) && ISPRINTABLECHAR( ((char*)(t))+2 ) )
 
-#define ISESCAPECHAR(x) (*(x) == '\\') /* Wildcard escape character */
-#define ISWILDCARDCHAR(x) (*(x) == '_' || *(x) == '%')  /* Wildcard meta-character */
+#define ISESCAPECHAR(x) (*(x) == '\\') /* Wildcard escape character */
+#define ISWILDCARDCHAR(x) (*(x) == '_' || *(x) == '%') /* Wildcard
+                                                                                                                * meta-character */
 
 typedef struct
 {
@@ -105,4 +106,4 @@ TRGM           *generate_wildcard_trgm(const char *str, int slen);
 float4         cnt_sml(TRGM *trg1, TRGM *trg2);
 bool           trgm_contained_by(TRGM *trg1, TRGM *trg2);
 
-#endif /* __TRGM_H__ */
+#endif   /* __TRGM_H__ */
index aaca1f97377f23f939b62d270910de033505365f..43ac0b0c6575a78124da79cf4c353a40cc2a0aec 100644 (file)
@@ -67,7 +67,7 @@ gin_extract_value_trgm(PG_FUNCTION_ARGS)
                ptr = GETARR(trg);
                for (i = 0; i < trglen; i++)
                {
-                       int32   item = trgm2int(ptr);
+                       int32           item = trgm2int(ptr);
 
                        entries[i] = Int32GetDatum(item);
                        ptr++;
@@ -83,10 +83,11 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS)
        text       *val = (text *) PG_GETARG_TEXT_P(0);
        int32      *nentries = (int32 *) PG_GETARG_POINTER(1);
        StrategyNumber strategy = PG_GETARG_UINT16(2);
-       /* bool   **pmatch = (bool **) PG_GETARG_POINTER(3); */
-       /* Pointer    *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
-       /* bool   **nullFlags = (bool **) PG_GETARG_POINTER(5); */
-       int32      *searchMode = (int32 *) PG_GETARG_POINTER(6);
+
+       /* bool   **pmatch = (bool **) PG_GETARG_POINTER(3); */
+       /* Pointer        *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
+       /* bool   **nullFlags = (bool **) PG_GETARG_POINTER(5); */
+       int32      *searchMode = (int32 *) PG_GETARG_POINTER(6);
        Datum      *entries = NULL;
        TRGM       *trg;
        int32           trglen;
@@ -104,6 +105,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS)
 #endif
                        /* FALL THRU */
                case LikeStrategyNumber:
+
                        /*
                         * For wildcard search we extract all the trigrams that every
                         * potentially-matching string must include.
@@ -112,7 +114,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS)
                        break;
                default:
                        elog(ERROR, "unrecognized strategy number: %d", strategy);
-                       trg = NULL;             /* keep compiler quiet */
+                       trg = NULL;                     /* keep compiler quiet */
                        break;
        }
 
@@ -125,7 +127,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS)
                ptr = GETARR(trg);
                for (i = 0; i < trglen; i++)
                {
-                       int32   item = trgm2int(ptr);
+                       int32           item = trgm2int(ptr);
 
                        entries[i] = Int32GetDatum(item);
                        ptr++;
@@ -146,9 +148,11 @@ gin_trgm_consistent(PG_FUNCTION_ARGS)
 {
        bool       *check = (bool *) PG_GETARG_POINTER(0);
        StrategyNumber strategy = PG_GETARG_UINT16(1);
+
        /* text    *query = PG_GETARG_TEXT_P(2); */
        int32           nkeys = PG_GETARG_INT32(3);
-       /* Pointer    *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
+
+       /* Pointer        *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
        bool       *recheck = (bool *) PG_GETARG_POINTER(5);
        bool            res;
        int32           i,
index d83265c11c38fcb1c37e17babe4d37f5dfd8da8e..b328a09f41fee50beb96a28835e15ef835222cd6 100644 (file)
@@ -190,17 +190,18 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
        GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
        text       *query = PG_GETARG_TEXT_P(1);
        StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
        /* Oid          subtype = PG_GETARG_OID(3); */
        bool       *recheck = (bool *) PG_GETARG_POINTER(4);
        TRGM       *key = (TRGM *) DatumGetPointer(entry->key);
        TRGM       *qtrg;
        bool            res;
        char       *cache = (char *) fcinfo->flinfo->fn_extra,
-                      *cacheContents = cache + MAXALIGN(sizeof(StrategyNumber));
+                          *cacheContents = cache + MAXALIGN(sizeof(StrategyNumber));
 
        /*
         * Store both the strategy number and extracted trigrams in cache, because
-        * trigram extraction is relatively CPU-expensive.  We must include
+        * trigram extraction is relatively CPU-expensive.      We must include
         * strategy number because trigram extraction depends on strategy.
         */
        if (cache == NULL || strategy != *((StrategyNumber *) cache) ||
@@ -222,7 +223,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
                                break;
                        default:
                                elog(ERROR, "unrecognized strategy number: %d", strategy);
-                               qtrg = NULL;            /* keep compiler quiet */
+                               qtrg = NULL;    /* keep compiler quiet */
                                break;
                }
 
@@ -251,20 +252,20 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
                        *recheck = false;
 
                        if (GIST_LEAF(entry))
-                       {                                                       /* all leafs contains orig trgm */
-                               float4      tmpsml = cnt_sml(key, qtrg);
+                       {                                       /* all leafs contains orig trgm */
+                               float4          tmpsml = cnt_sml(key, qtrg);
 
                                /* strange bug at freebsd 5.2.1 and gcc 3.3.3 */
                                res = (*(int *) &tmpsml == *(int *) &trgm_limit || tmpsml > trgm_limit) ? true : false;
                        }
                        else if (ISALLTRUE(key))
-                       {                                                       /* non-leaf contains signature */
+                       {                                       /* non-leaf contains signature */
                                res = true;
                        }
                        else
-                       {                                                       /* non-leaf contains signature */
-                               int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
-                               int4 len = ARRNELEM(qtrg);
+                       {                                       /* non-leaf contains signature */
+                               int4            count = cnt_sml_sign_common(qtrg, GETSIGN(key));
+                               int4            len = ARRNELEM(qtrg);
 
                                if (len == 0)
                                        res = false;
@@ -286,20 +287,20 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
                         * nodes.
                         */
                        if (GIST_LEAF(entry))
-                       {                                                       /* all leafs contains orig trgm */
+                       {                                       /* all leafs contains orig trgm */
                                res = trgm_contained_by(qtrg, key);
                        }
                        else if (ISALLTRUE(key))
-                       {                                                       /* non-leaf contains signature */
+                       {                                       /* non-leaf contains signature */
                                res = true;
                        }
                        else
-                       {                                                       /* non-leaf contains signature */
-                               int32   k,
-                                               tmp = 0,
-                                               len = ARRNELEM(qtrg);
-                               trgm *ptr = GETARR(qtrg);
-                               BITVECP sign = GETSIGN(key);
+                       {                                       /* non-leaf contains signature */
+                               int32           k,
+                                                       tmp = 0,
+                                                       len = ARRNELEM(qtrg);
+                               trgm       *ptr = GETARR(qtrg);
+                               BITVECP         sign = GETSIGN(key);
 
                                res = true;
                                for (k = 0; k < len; k++)
@@ -328,6 +329,7 @@ gtrgm_distance(PG_FUNCTION_ARGS)
        GISTENTRY  *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
        text       *query = PG_GETARG_TEXT_P(1);
        StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
+
        /* Oid          subtype = PG_GETARG_OID(3); */
        TRGM       *key = (TRGM *) DatumGetPointer(entry->key);
        TRGM       *qtrg;
@@ -355,17 +357,17 @@ gtrgm_distance(PG_FUNCTION_ARGS)
        {
                case DistanceStrategyNumber:
                        if (GIST_LEAF(entry))
-                       {                                                       /* all leafs contains orig trgm */
+                       {                                       /* all leafs contains orig trgm */
                                res = 1.0 - cnt_sml(key, qtrg);
                        }
                        else if (ISALLTRUE(key))
-                       {                                                       /* all leafs contains orig trgm */
+                       {                                       /* all leafs contains orig trgm */
                                res = 0.0;
                        }
                        else
-                       {                                                       /* non-leaf contains signature */
-                               int4 count = cnt_sml_sign_common(qtrg, GETSIGN(key));
-                               int4 len = ARRNELEM(qtrg);
+                       {                                       /* non-leaf contains signature */
+                               int4            count = cnt_sml_sign_common(qtrg, GETSIGN(key));
+                               int4            len = ARRNELEM(qtrg);
 
                                res = (len == 0) ? -1.0 : 1.0 - ((float8) count) / ((float8) len);
                        }
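
The gtrgm_consistent() comment above explains why both the strategy number and the extracted trigrams are kept in fn_extra: trigram extraction is relatively CPU-expensive and its result depends on the strategy, so the cache is rebuilt only when the strategy changes. A toy illustration of that cache-by-key pattern outside PostgreSQL, with invented names and a string standing in for the trigram array:

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy cache: the key (strategy number) is stored in front of the payload,
 * and the payload is rebuilt only when the key no longer matches.
 */
struct cache
{
    int         strategy;       /* which strategy the payload was built for */
    char        payload[64];    /* stand-in for the extracted trigrams */
};

static const char *
lookup(struct cache **cache, int strategy, const char *query)
{
    if (*cache == NULL || (*cache)->strategy != strategy)
    {
        free(*cache);
        *cache = malloc(sizeof(struct cache));
        (*cache)->strategy = strategy;
        /* pretend this is the expensive trigram extraction */
        snprintf((*cache)->payload, sizeof((*cache)->payload),
                 "trigrams(%s, strategy=%d)", query, strategy);
        printf("rebuilt cache\n");
    }
    return (*cache)->payload;
}

int
main(void)
{
    struct cache *c = NULL;

    puts(lookup(&c, 1, "word"));        /* rebuilt */
    puts(lookup(&c, 1, "word"));        /* served from the cache */
    puts(lookup(&c, 2, "word"));        /* strategy changed, rebuilt */
    free(c);
    return 0;
}
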
index 52f9172f6d52d3e307341b1a5446082315ce8e27..dfb2df50488223a576120c1d11bebc5a8d2c2ac1 100644 (file)
@@ -273,9 +273,9 @@ get_wildcard_part(const char *str, int lenstr,
        const char *beginword = str;
        const char *endword;
        char       *s = buf;
-       bool        in_wildcard_meta = false;
-       bool        in_escape = false;
-       int         clen;
+       bool            in_wildcard_meta = false;
+       bool            in_escape = false;
+       int                     clen;
 
        /*
         * Find the first word character remembering whether last character was
@@ -410,14 +410,14 @@ generate_wildcard_trgm(const char *str, int slen)
 {
        TRGM       *trg;
        char       *buf,
-                      *buf2;
+                          *buf2;
        trgm       *tptr;
        int                     len,
                                charlen,
                                bytelen;
        const char *eword;
 
-       trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) * 3);
+       trg = (TRGM *) palloc(TRGMHDRSIZE + sizeof(trgm) * (slen / 2 + 1) *3);
        trg->flag = ARRKEY;
        SET_VARSIZE(trg, TRGMHDRSIZE);
 
@@ -638,6 +638,7 @@ similarity_dist(PG_FUNCTION_ARGS)
        float4          res = DatumGetFloat4(DirectFunctionCall2(similarity,
                                                                                                                 PG_GETARG_DATUM(0),
                                                                                                                 PG_GETARG_DATUM(1)));
+
        PG_RETURN_FLOAT4(1.0 - res);
 }
 
index 05aac8fde9cd5a05c8e351f25b9ab79e0e31eb42..747244072d434f8a5317dc5f37587d7684bb9963 100644 (file)
@@ -212,7 +212,10 @@ check_cluster_versions(void)
        old_cluster.major_version = get_major_server_version(&old_cluster);
        new_cluster.major_version = get_major_server_version(&new_cluster);
 
-       /* We allow upgrades from/to the same major version for alpha/beta upgrades */
+       /*
+        * We allow upgrades from/to the same major version for alpha/beta
+        * upgrades
+        */
 
        if (GET_MAJOR_VERSION(old_cluster.major_version) < 803)
                pg_log(PG_FATAL, "This utility can only upgrade from PostgreSQL version 8.3 and later.\n");
@@ -516,7 +519,7 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
        }
 
        if (script)
-                       fclose(script);
+               fclose(script);
 
        if (found)
        {
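
The version check above works on collapsed major version numbers such as 803 and, per the reformatted comment, allows same-major upgrades for alpha/beta testing. A small sketch of the underlying arithmetic, assuming the usual PostgreSQL convention that 8.3.11 is reported as the integer 80311 and 9.1beta1 as 90100 (the macro name here is invented for the example):

#include <stdio.h>

/* Keep only the major part: 80311 -> 803, 90100 -> 901. */
#define MAJOR_VERSION_OF(v) ((v) / 100)

int
main(void)
{
    int         old_version = 80311;    /* hypothetical old cluster */
    int         new_version = 90100;    /* hypothetical new cluster */

    if (MAJOR_VERSION_OF(old_version) < 803)
        printf("too old to upgrade\n");
    else
        printf("old major %d -> new major %d\n",
               MAJOR_VERSION_OF(old_version), MAJOR_VERSION_OF(new_version));
    return 0;
}
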
index 78c75e8a8438bae5e632c4e40802e0712ab7bd57..3ac2180d49bf76296c4521eb4d87f206314818c9 100644 (file)
@@ -505,8 +505,7 @@ check_control_data(ControlData *oldctrl,
                           "\nOld and new pg_controldata date/time storage types do not match.\n");
 
                /*
-                * This is a common 8.3 -> 8.4 upgrade problem, so we are more
-                * verbose
+                * This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
                 */
                pg_log(PG_FATAL,
                           "You will need to rebuild the new server with configure\n"
index 7095ba62a8013a4d2f21ef4edfb615b784d53642..59a76bc8aec075b72b0848c361ef4e2c2fe7fe50 100644 (file)
@@ -15,7 +15,7 @@
 
 static void check_data_dir(const char *pg_data);
 static void check_bin_dir(ClusterInfo *cluster);
-static void    validate_exec(const char *dir, const char *cmdName);
+static void validate_exec(const char *dir, const char *cmdName);
 
 
 /*
index 0024b6ee00597bbba47a0b7f754c542aa54f7a76..f8f7233593d5ae4dfaf30c955b6c7fc660ac602f 100644 (file)
@@ -377,4 +377,5 @@ win32_pghardlink(const char *src, const char *dst)
        else
                return 0;
 }
+
 #endif
index c01ff046bbd6a780f0f88ffec68285774745d69d..322014cd235cb404f586f72d63a845ba7c1405d7 100644 (file)
 void
 install_support_functions_in_new_db(const char *db_name)
 {
-       PGconn *conn = connectToServer(&new_cluster, db_name);
-       
+       PGconn     *conn = connectToServer(&new_cluster, db_name);
+
        /* suppress NOTICE of dropped objects */
        PQclear(executeQueryOrDie(conn,
                                                          "SET client_min_messages = warning;"));
        PQclear(executeQueryOrDie(conn,
-                                          "DROP SCHEMA IF EXISTS binary_upgrade CASCADE;"));
+                                                  "DROP SCHEMA IF EXISTS binary_upgrade CASCADE;"));
        PQclear(executeQueryOrDie(conn,
                                                          "RESET client_min_messages;"));
 
@@ -42,31 +42,31 @@ install_support_functions_in_new_db(const char *db_name)
                                                          "LANGUAGE C STRICT;"));
        PQclear(executeQueryOrDie(conn,
                                                          "CREATE OR REPLACE FUNCTION "
-                                                         "binary_upgrade.set_next_array_pg_type_oid(OID) "
+                                                       "binary_upgrade.set_next_array_pg_type_oid(OID) "
                                                          "RETURNS VOID "
                                                          "AS '$libdir/pg_upgrade_support' "
                                                          "LANGUAGE C STRICT;"));
        PQclear(executeQueryOrDie(conn,
                                                          "CREATE OR REPLACE FUNCTION "
-                                                         "binary_upgrade.set_next_toast_pg_type_oid(OID) "
+                                                       "binary_upgrade.set_next_toast_pg_type_oid(OID) "
                                                          "RETURNS VOID "
                                                          "AS '$libdir/pg_upgrade_support' "
                                                          "LANGUAGE C STRICT;"));
        PQclear(executeQueryOrDie(conn,
                                                          "CREATE OR REPLACE FUNCTION "
-                                                         "binary_upgrade.set_next_heap_pg_class_oid(OID) "
+                                                       "binary_upgrade.set_next_heap_pg_class_oid(OID) "
                                                          "RETURNS VOID "
                                                          "AS '$libdir/pg_upgrade_support' "
                                                          "LANGUAGE C STRICT;"));
        PQclear(executeQueryOrDie(conn,
                                                          "CREATE OR REPLACE FUNCTION "
-                                                         "binary_upgrade.set_next_index_pg_class_oid(OID) "
+                                                  "binary_upgrade.set_next_index_pg_class_oid(OID) "
                                                          "RETURNS VOID "
                                                          "AS '$libdir/pg_upgrade_support' "
                                                          "LANGUAGE C STRICT;"));
        PQclear(executeQueryOrDie(conn,
                                                          "CREATE OR REPLACE FUNCTION "
-                                                         "binary_upgrade.set_next_toast_pg_class_oid(OID) "
+                                                  "binary_upgrade.set_next_toast_pg_class_oid(OID) "
                                                          "RETURNS VOID "
                                                          "AS '$libdir/pg_upgrade_support' "
                                                          "LANGUAGE C STRICT;"));
index ceb1601cc6d8922bdd0fc33177500e480506bd9d..f0cd8e5ede29a02db5c4f45ce414897f91772e7f 100644 (file)
@@ -13,9 +13,9 @@
 
 
 static void create_rel_filename_map(const char *old_data, const char *new_data,
-                         const DbInfo *old_db, const DbInfo *new_db,
-                         const RelInfo *old_rel, const RelInfo *new_rel,
-                         FileNameMap *map);
+                                               const DbInfo *old_db, const DbInfo *new_db,
+                                               const RelInfo *old_rel, const RelInfo *new_rel,
+                                               FileNameMap *map);
 static void get_db_infos(ClusterInfo *cluster);
 static void get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo);
 static void free_rel_infos(RelInfoArr *rel_arr);
@@ -40,7 +40,7 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
 
        if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
                pg_log(PG_FATAL, "old and new databases \"%s\" have a different number of relations\n",
-                                       old_db->db_name);
+                          old_db->db_name);
 
        maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) *
                                                                         old_db->rel_arr.nrels);
@@ -52,24 +52,24 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
 
                if (old_rel->reloid != new_rel->reloid)
                        pg_log(PG_FATAL, "Mismatch of relation id: database \"%s\", old relid %d, new relid %d\n",
-                       old_db->db_name, old_rel->reloid, new_rel->reloid);
+                                  old_db->db_name, old_rel->reloid, new_rel->reloid);
 
                /*
-                *      In pre-8.4, TOAST table names change during CLUSTER;  in >= 8.4
-                *      TOAST relation names always use heap table oids, hence we
-                *      cannot check relation names when upgrading from pre-8.4.
+                * In pre-8.4, TOAST table names change during CLUSTER;  in >= 8.4
+                * TOAST relation names always use heap table oids, hence we cannot
+                * check relation names when upgrading from pre-8.4.
                 */
                if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
                        ((GET_MAJOR_VERSION(old_cluster.major_version) >= 804 ||
                          strcmp(old_rel->nspname, "pg_toast") != 0) &&
                         strcmp(old_rel->relname, new_rel->relname) != 0))
                        pg_log(PG_FATAL, "Mismatch of relation names: database \"%s\", "
-                               "old rel %s.%s, new rel %s.%s\n",
-                               old_db->db_name, old_rel->nspname, old_rel->relname,
-                               new_rel->nspname, new_rel->relname);
+                                  "old rel %s.%s, new rel %s.%s\n",
+                                  old_db->db_name, old_rel->nspname, old_rel->relname,
+                                  new_rel->nspname, new_rel->relname);
 
                create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db,
-                               old_rel, new_rel, maps + num_maps);
+                                                               old_rel, new_rel, maps + num_maps);
                num_maps++;
        }
 
@@ -85,9 +85,9 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
  */
 static void
 create_rel_filename_map(const char *old_data, const char *new_data,
-                         const DbInfo *old_db, const DbInfo *new_db,
-                         const RelInfo *old_rel, const RelInfo *new_rel,
-                         FileNameMap *map)
+                                               const DbInfo *old_db, const DbInfo *new_db,
+                                               const RelInfo *old_rel, const RelInfo *new_rel,
+                                               FileNameMap *map)
 {
        if (strlen(old_rel->tablespace) == 0)
        {
@@ -110,8 +110,8 @@ create_rel_filename_map(const char *old_data, const char *new_data,
        }
 
        /*
-        *      old_relfilenode might differ from pg_class.oid (and hence
-        *      new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL.
+        * old_relfilenode might differ from pg_class.oid (and hence
+        * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL.
         */
        map->old_relfilenode = old_rel->relfilenode;
 
@@ -185,7 +185,9 @@ get_db_infos(ClusterInfo *cluster)
        int                     ntups;
        int                     tupnum;
        DbInfo     *dbinfos;
-       int                     i_datname, i_oid, i_spclocation;
+       int                     i_datname,
+                               i_oid,
+                               i_spclocation;
 
        res = executeQueryOrDie(conn,
                                                        "SELECT d.oid, d.datname, t.spclocation "
@@ -241,15 +243,19 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
        int                     num_rels = 0;
        char       *nspname = NULL;
        char       *relname = NULL;
-       int                     i_spclocation, i_nspname, i_relname, i_oid, i_relfilenode;
+       int                     i_spclocation,
+                               i_nspname,
+                               i_relname,
+                               i_oid,
+                               i_relfilenode;
        char            query[QUERY_ALLOC];
 
        /*
         * pg_largeobject contains user data that does not appear in pg_dumpall
         * --schema-only output, so we have to copy that system table heap and
-        * index.  We could grab the pg_largeobject oids from template1, but
-        * it is easy to treat it as a normal table.
-        * Order by oid so we can join old/new structures efficiently.
+        * index.  We could grab the pg_largeobject oids from template1, but it is
+        * easy to treat it as a normal table. Order by oid so we can join old/new
+        * structures efficiently.
         */
 
        snprintf(query, sizeof(query),
@@ -263,7 +269,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
                         "  ((n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND "
                         "        c.oid >= %u) "
                         "  OR (n.nspname = 'pg_catalog' AND "
-                        "    relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) "
+       "    relname IN ('pg_largeobject', 'pg_largeobject_loid_pn_index'%s) )) "
        /* we preserve pg_class.oid so we sort by it to match old/new */
                         "ORDER BY 1;",
        /* see the comment at the top of old_8_3_create_sequence_script() */
@@ -273,7 +279,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
                         FirstNormalObjectId,
        /* does pg_largeobject_metadata need to be migrated? */
                         (GET_MAJOR_VERSION(old_cluster.major_version) <= 804) ?
-                        "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'");
+       "" : ", 'pg_largeobject_metadata', 'pg_largeobject_metadata_oid_index'");
 
        res = executeQueryOrDie(conn, query);
 
index 061544cac889fcb1bd77878a9b810f811c146838..e435aaef08c88a581344bedb6aae67093e3f1450 100644 (file)
@@ -18,7 +18,7 @@
  *     FYI, while pg_class.oid and pg_class.relfilenode are initially the same
  *     in a cluster, they can diverge due to CLUSTER, REINDEX, or VACUUM
  *     FULL.  The new cluster will have matching pg_class.oid and
- *     pg_class.relfilenode values and be based on the old oid value.  This can
+ *     pg_class.relfilenode values and be based on the old oid value.  This can
  *     cause the old and new pg_class.relfilenode values to differ.  In summary,
  *     old and new pg_class.oid and new pg_class.relfilenode will have the
  *     same value, and old pg_class.relfilenode might differ.
@@ -34,7 +34,7 @@
  */
 
 
+
 #include "pg_upgrade.h"
 
 #ifdef HAVE_LANGINFO_H
@@ -53,7 +53,8 @@ static void cleanup(void);
 /* This is the database used by pg_dumpall to restore global tables */
 #define GLOBAL_DUMP_DB "postgres"
 
-ClusterInfo old_cluster, new_cluster;
+ClusterInfo old_cluster,
+                       new_cluster;
 OSInfo         os_info;
 
 int
@@ -192,7 +193,7 @@ prepare_new_cluster(void)
        exec_prog(true,
                          SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
                          "--all --analyze >> %s 2>&1" SYSTEMQUOTE,
-                  new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
+         new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
        check_ok();
 
        /*
@@ -205,7 +206,7 @@ prepare_new_cluster(void)
        exec_prog(true,
                          SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
                          "--all --freeze >> %s 2>&1" SYSTEMQUOTE,
-                  new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
+         new_cluster.bindir, new_cluster.port, os_info.user, log_opts.filename);
        check_ok();
 
        get_pg_database_relfilenode(&new_cluster);
@@ -229,16 +230,16 @@ prepare_new_databases(void)
        prep_status("Creating databases in the new cluster");
 
        /*
-        *      Install support functions in the global-restore database
-        *      to preserve pg_authid.oid.
+        * Install support functions in the global-restore database to preserve
+        * pg_authid.oid.
         */
        install_support_functions_in_new_db(GLOBAL_DUMP_DB);
 
        /*
         * We have to create the databases first so we can install support
-        * functions in all the other databases.  Ideally we could create
-        * the support functions in template1 but pg_dumpall creates database
-        * using the template0 template.
+        * functions in all the other databases.  Ideally we could create the
+        * support functions in template1 but pg_dumpall creates database using
+        * the template0 template.
         */
        exec_prog(true,
                          SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on "
index 8f72ea80d7ad9eb448a602de1f33b70a362b23f4..5ca570eb157c7e7287464025d7d526a59c568b89 100644 (file)
@@ -85,6 +85,7 @@ typedef struct
 {
        char            old_dir[MAXPGPATH];
        char            new_dir[MAXPGPATH];
+
        /*
         * old/new relfilenodes might differ for pg_largeobject(_metadata) indexes
         * due to VACUUM FULL or REINDEX.  Other relfilenodes are preserved.
@@ -92,7 +93,7 @@ typedef struct
        Oid                     old_relfilenode;
        Oid                     new_relfilenode;
        /* the rest are used only for logging and error reporting */
-       char            nspname[NAMEDATALEN];           /* namespaces */
+       char            nspname[NAMEDATALEN];   /* namespaces */
        char            relname[NAMEDATALEN];
 } FileNameMap;
 
@@ -180,7 +181,7 @@ typedef struct
        char       *bindir;                     /* pathname for cluster's executable directory */
        unsigned short port;            /* port number where postmaster is waiting */
        uint32          major_version;  /* PG_VERSION of cluster */
-       char       major_version_str[64];               /* string PG_VERSION of cluster */
+       char            major_version_str[64];  /* string PG_VERSION of cluster */
        Oid                     pg_database_oid;        /* OID of pg_database relation */
        char       *libpath;            /* pathname for cluster's pkglibdir */
        char       *tablespace_suffix;          /* directory specification */
@@ -232,9 +233,10 @@ typedef struct
 /*
  * Global variables
  */
-extern LogOpts log_opts;
+extern LogOpts log_opts;
 extern UserOpts user_opts;
-extern ClusterInfo old_cluster, new_cluster;
+extern ClusterInfo old_cluster,
+                       new_cluster;
 extern OSInfo os_info;
 extern char scandir_file_pattern[];
 
@@ -246,8 +248,8 @@ void check_old_cluster(bool live_check,
                                  char **sequence_script_file_name);
 void           check_new_cluster(void);
 void           report_clusters_compatible(void);
-void issue_warnings(char *sequence_script_file_name);
-void output_completion_banner(char *deletion_script_file_name);
+void           issue_warnings(char *sequence_script_file_name);
+void           output_completion_banner(char *deletion_script_file_name);
 void           check_cluster_versions(void);
 void           check_cluster_compatibility(bool live_check);
 void           create_script_for_old_cluster_deletion(char **deletion_script_file_name);
@@ -309,11 +311,11 @@ typedef void *pageCnvCtx;
 
 int                    dir_matching_filenames(const struct dirent * scan_ent);
 int pg_scandir(const char *dirname, struct dirent *** namelist,
-                          int (*selector) (const struct dirent *));
+                  int (*selector) (const struct dirent *));
 const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
                                  const char *dst, bool force);
 const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
-                                                         const char *dst);
+                                 const char *dst);
 
 void           check_hard_link(void);
 
@@ -329,10 +331,10 @@ void              check_loadable_libraries(void);
 FileNameMap *gen_db_file_maps(DbInfo *old_db,
                                 DbInfo *new_db, int *nmaps, const char *old_pgdata,
                                 const char *new_pgdata);
-void           get_db_and_rel_infos(ClusterInfo *cluster);
+void           get_db_and_rel_infos(ClusterInfo *cluster);
 void           free_db_and_rel_infos(DbInfoArr *db_arr);
-void           print_maps(FileNameMap *maps, int n,
-                               const char *db_name);
+void print_maps(FileNameMap *maps, int n,
+                  const char *db_name);
 
 /* option.c */
 
@@ -352,12 +354,12 @@ void              init_tablespaces(void);
 
 /* server.c */
 
-PGconn *connectToServer(ClusterInfo *cluster, const char *db_name);
-PGresult *executeQueryOrDie(PGconn *conn, const char *fmt,...);
+PGconn    *connectToServer(ClusterInfo *cluster, const char *db_name);
+PGresult   *executeQueryOrDie(PGconn *conn, const char *fmt,...);
 
 void           start_postmaster(ClusterInfo *cluster, bool quiet);
 void           stop_postmaster(bool fast, bool quiet);
-uint32 get_major_server_version(ClusterInfo *cluster);
+uint32         get_major_server_version(ClusterInfo *cluster);
 void           check_for_libpq_envvars(void);
 
 
@@ -380,14 +382,14 @@ unsigned int str2uint(const char *str);
 /* version.c */
 
 void new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster,
-                                                                                         bool check_mode);
+                                                                                bool check_mode);
 
 /* version_old_8_3.c */
 
 void           old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster);
 void           old_8_3_check_for_tsquery_usage(ClusterInfo *cluster);
-void old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode);
-void old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode);
+void           old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode);
+void           old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode);
 void old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
-                                                                                                  bool check_mode);
+                                                                                         bool check_mode);
 char      *old_8_3_create_sequence_script(ClusterInfo *cluster);
index d111b13de905ffe8b5022874e5bf37cc87a74605..9a0a3ac18d9557cf696e452d4e61cab1b9c08485 100644 (file)
@@ -30,7 +30,7 @@ char          scandir_file_pattern[MAXPGPATH];
  */
 const char *
 transfer_all_new_dbs(DbInfoArr *old_db_arr,
-                                        DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
+                                  DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
 {
        int                     dbnum;
        const char *msg = NULL;
@@ -39,7 +39,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
 
        if (old_db_arr->ndbs != new_db_arr->ndbs)
                pg_log(PG_FATAL, "old and new clusters have a different number of databases\n");
-       
+
        for (dbnum = 0; dbnum < old_db_arr->ndbs; dbnum++)
        {
                DbInfo     *old_db = &old_db_arr->dbs[dbnum];
@@ -50,8 +50,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
 
                if (strcmp(old_db->db_name, new_db->db_name) != 0)
                        pg_log(PG_FATAL, "old and new databases have different names: old \"%s\", new \"%s\"\n",
-                               old_db->db_name, new_db->db_name);
-               
+                                  old_db->db_name, new_db->db_name);
+
                n_maps = 0;
                mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata,
                                                                        new_pgdata);
@@ -169,7 +169,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
                        for (fileno = 0; fileno < numFiles; fileno++)
                        {
                                if (strncmp(namelist[fileno]->d_name, scandir_file_pattern,
-                                       strlen(scandir_file_pattern)) == 0)
+                                                       strlen(scandir_file_pattern)) == 0)
                                {
                                        snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir,
                                                         namelist[fileno]->d_name);
@@ -178,7 +178,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 
                                        unlink(new_file);
                                        transfer_relfile(pageConverter, old_file, new_file,
-                                                         maps[mapnum].nspname, maps[mapnum].relname);
+                                                                maps[mapnum].nspname, maps[mapnum].relname);
                                }
                        }
                }
@@ -196,7 +196,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
                for (fileno = 0; fileno < numFiles; fileno++)
                {
                        if (strncmp(namelist[fileno]->d_name, scandir_file_pattern,
-                               strlen(scandir_file_pattern)) == 0)
+                                               strlen(scandir_file_pattern)) == 0)
                        {
                                snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir,
                                                 namelist[fileno]->d_name);
@@ -205,7 +205,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 
                                unlink(new_file);
                                transfer_relfile(pageConverter, old_file, new_file,
-                                                         maps[mapnum].nspname, maps[mapnum].relname);
+                                                                maps[mapnum].nspname, maps[mapnum].relname);
                        }
                }
        }
@@ -227,7 +227,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
  */
 static void
 transfer_relfile(pageCnvCtx *pageConverter, const char *old_file,
-                const char *new_file, const char *nspname, const char *relname)
+                         const char *new_file, const char *nspname, const char *relname)
 {
        const char *msg;
 
@@ -249,7 +249,7 @@ transfer_relfile(pageCnvCtx *pageConverter, const char *old_file,
 
                if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
                        pg_log(PG_FATAL,
-                          "error while creating link from %s.%s (%s to %s): %s\n",
+                                  "error while creating link from %s.%s (%s to %s): %s\n",
                                   nspname, relname, old_file, new_file, msg);
        }
        return;
index a7d57872346104fca8e8e324f0bc188fed04cfb0..2a0f50eb2a22b4907e8d462db169530e1f251ea0 100644 (file)
@@ -194,12 +194,12 @@ start_postmaster(ClusterInfo *cluster, bool quiet)
         * because it is being used by another process." so we have to send all
         * other output to 'nul'.
         *
-        * Using autovacuum=off disables cleanup vacuum and analyze, but
-        * freeze vacuums can still happen, so we set
-        * autovacuum_freeze_max_age to its maximum.  We assume all datfrozenxid
-        * and relfrozen values are less than a gap of 2000000000 from the current
-        * xid counter, so autovacuum will not touch them.
-        */     
+        * Using autovacuum=off disables cleanup vacuum and analyze, but freeze
+        * vacuums can still happen, so we set autovacuum_freeze_max_age to its
+        * maximum.  We assume all datfrozenxid and relfrozen values are less than
+        * a gap of 2000000000 from the current xid counter, so autovacuum will
+        * not touch them.
+        */
        snprintf(cmd, sizeof(cmd),
                         SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" "
                         "-o \"-p %d -c autovacuum=off "
@@ -251,7 +251,7 @@ stop_postmaster(bool fast, bool quiet)
                         "\"%s\" 2>&1" SYSTEMQUOTE,
                         bindir,
 #ifndef WIN32
-                        log_opts.filename, datadir, fast ? "-m fast" : "", log_opts.filename);
+          log_opts.filename, datadir, fast ? "-m fast" : "", log_opts.filename);
 #else
                         DEVNULL, datadir, fast ? "-m fast" : "", DEVNULL);
 #endif
index a575487621906cdfd9b85959f83affd73904e485..6cdae51cf1904b419058d57373cc50fdda855d06 100644 (file)
@@ -78,8 +78,8 @@ set_tablespace_directory_suffix(ClusterInfo *cluster)
        {
                /* This cluster has a version-specific subdirectory */
                cluster->tablespace_suffix = pg_malloc(4 +
-                                                                 strlen(cluster->major_version_str) +
-                                                                                                         10 /* OIDCHARS */ + 1);
+                                                                                strlen(cluster->major_version_str) +
+                                                                                          10 /* OIDCHARS */ + 1);
 
                /* The leading slash is needed to start a new directory. */
                sprintf(cluster->tablespace_suffix, "/PG_%s_%d", cluster->major_version_str,
index 804aa0d1e5fa2900cdb59cffea565315294fe503..9a6691ce75f94f0cd353f4e331ac12ab2661febf 100644 (file)
@@ -12,7 +12,7 @@
 #include <signal.h>
 
 
-LogOpts                        log_opts;
+LogOpts                log_opts;
 
 /*
  * report_status()
index 3ec4b59a05e17a24b19ecbbe786af7b12681529a..0a60eec926b54764c76dbc68be64cb7d2908c25c 100644 (file)
@@ -288,7 +288,7 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
 
                                /* Rebuild all tsvector columns with one ALTER TABLE command */
                                if (strcmp(PQgetvalue(res, rowno, i_nspname), nspname) != 0 ||
-                                strcmp(PQgetvalue(res, rowno, i_relname), relname) != 0)
+                                       strcmp(PQgetvalue(res, rowno, i_relname), relname) != 0)
                                {
                                        if (strlen(nspname) != 0 || strlen(relname) != 0)
                                                fprintf(script, ";\n\n");
index 02d1512719ec2a7f11e938145b144df39be03f0f..2c23cbab9df07b85583ff450632faaf44cd9f8df 100644 (file)
@@ -178,9 +178,9 @@ create_empty_extension(PG_FUNCTION_ARGS)
                                                  &textDatums, NULL, &ndatums);
                for (i = 0; i < ndatums; i++)
                {
-                       text   *txtname = DatumGetTextPP(textDatums[i]);
-                       char   *extName = text_to_cstring(txtname);
-                       Oid             extOid = get_extension_oid(extName, false);
+                       text       *txtname = DatumGetTextPP(textDatums[i]);
+                       char       *extName = text_to_cstring(txtname);
+                       Oid                     extOid = get_extension_oid(extName, false);
 
                        requiredExtensions = lappend_oid(requiredExtensions, extOid);
                }
@@ -188,7 +188,7 @@ create_empty_extension(PG_FUNCTION_ARGS)
 
        InsertExtensionTuple(text_to_cstring(extName),
                                                 GetUserId(),
-                                                get_namespace_oid(text_to_cstring(schemaName), false),
+                                          get_namespace_oid(text_to_cstring(schemaName), false),
                                                 relocatable,
                                                 text_to_cstring(extVersion),
                                                 extConfig,
index 7c2ca6e84d361f07908c86c5f33ee7fc9c9bccce..0a3e5fd9283ac92cfe761ea567b0bbdfea1eb56e 100644 (file)
@@ -69,7 +69,7 @@
 typedef struct win32_pthread *pthread_t;
 typedef int pthread_attr_t;
 
-static int     pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
+static int     pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
 static int     pthread_join(pthread_t th, void **thread_return);
 #elif defined(ENABLE_THREAD_SAFETY)
 /* Use platform-dependent pthread capability */
@@ -87,7 +87,7 @@ static int    pthread_join(pthread_t th, void **thread_return);
 typedef struct fork_pthread *pthread_t;
 typedef int pthread_attr_t;
 
-static int     pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
+static int     pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
 static int     pthread_join(pthread_t th, void **thread_return);
 #endif
 
@@ -817,7 +817,7 @@ top:
 
                        INSTR_TIME_SET_CURRENT(now);
                        INSTR_TIME_ACCUM_DIFF(thread->exec_elapsed[cnum],
-                                             now, st->stmt_begin);
+                                                                 now, st->stmt_begin);
                        thread->exec_count[cnum]++;
                }
 
@@ -850,8 +850,8 @@ top:
                if (commands[st->state]->type == SQL_COMMAND)
                {
                        /*
-                        * Read and discard the query result; note this is not included
-                        * in the statement latency numbers.
+                        * Read and discard the query result; note this is not included in
+                        * the statement latency numbers.
                         */
                        res = PQgetResult(st->con);
                        switch (PQresultStatus(res))
@@ -1716,16 +1716,16 @@ printResults(int ttype, int normal_xacts, int nclients,
 
                for (i = 0; i < num_files; i++)
                {
-                       Command   **commands;
+                       Command   **commands;
 
                        if (num_files > 1)
-                               printf("statement latencies in milliseconds, file %d:\n", i+1);
+                               printf("statement latencies in milliseconds, file %d:\n", i + 1);
                        else
                                printf("statement latencies in milliseconds:\n");
 
                        for (commands = sql_files[i]; *commands != NULL; commands++)
                        {
-                               Command    *command = *commands;
+                               Command    *command = *commands;
                                int                     cnum = command->command_num;
                                double          total_time;
                                instr_time      total_exec_elapsed;
@@ -1737,7 +1737,7 @@ printResults(int ttype, int normal_xacts, int nclients,
                                total_exec_count = 0;
                                for (t = 0; t < nthreads; t++)
                                {
-                                       TState *thread = &threads[t];
+                                       TState     *thread = &threads[t];
 
                                        INSTR_TIME_ADD(total_exec_elapsed,
                                                                   thread->exec_elapsed[cnum]);
@@ -2014,9 +2014,9 @@ main(int argc, char **argv)
         * is_latencies only works with multiple threads in thread-based
         * implementations, not fork-based ones, because it supposes that the
         * parent can see changes made to the per-thread execution stats by child
-        * threads.  It seems useful enough to accept despite this limitation,
-        * but perhaps we should FIXME someday (by passing the stats data back
-        * up through the parent-to-child pipes).
+        * threads.  It seems useful enough to accept despite this limitation, but
+        * perhaps we should FIXME someday (by passing the stats data back up
+        * through the parent-to-child pipes).
         */
 #ifndef ENABLE_THREAD_SAFETY
        if (is_latencies && nthreads > 1)
@@ -2161,7 +2161,7 @@ main(int argc, char **argv)
        threads = (TState *) xmalloc(sizeof(TState) * nthreads);
        for (i = 0; i < nthreads; i++)
        {
-               TState *thread = &threads[i];
+               TState     *thread = &threads[i];
 
                thread->tid = i;
                thread->state = &state[nclients / nthreads * i];
@@ -2170,7 +2170,7 @@ main(int argc, char **argv)
                if (is_latencies)
                {
                        /* Reserve memory for the thread to store per-command latencies */
-                       int             t;
+                       int                     t;
 
                        thread->exec_elapsed = (instr_time *)
                                xmalloc(sizeof(instr_time) * num_commands);
@@ -2200,7 +2200,7 @@ main(int argc, char **argv)
        /* start threads */
        for (i = 0; i < nthreads; i++)
        {
-               TState *thread = &threads[i];
+               TState     *thread = &threads[i];
 
                INSTR_TIME_SET_CURRENT(thread->start_time);
 
@@ -2472,7 +2472,7 @@ typedef struct fork_pthread
 
 static int
 pthread_create(pthread_t *thread,
-                          pthread_attr_t *attr,
+                          pthread_attr_t * attr,
                           void *(*start_routine) (void *),
                           void *arg)
 {
@@ -2586,7 +2586,7 @@ typedef struct win32_pthread
        void       *(*routine) (void *);
        void       *arg;
        void       *result;
-}      win32_pthread;
+} win32_pthread;
 
 static unsigned __stdcall
 win32_pthread_run(void *arg)
@@ -2600,7 +2600,7 @@ win32_pthread_run(void *arg)
 
 static int
 pthread_create(pthread_t *thread,
-                          pthread_attr_t *attr,
+                          pthread_attr_t * attr,
                           void *(*start_routine) (void *),
                           void *arg)
 {
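The pgbench hunks above only reindent the declarations of the emulated pthread_create()/pthread_join() shims used by the fork-based and Win32 fallbacks (pgindent prints a pointer declarator as "type * var" when the type name is not in its typedefs list, which is presumably why "pthread_attr_t * attr" picked up the extra space). The calling convention the shims preserve is the ordinary POSIX one; a minimal, self-contained sketch of that convention, using plain pthreads rather than pgbench code:

#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
    int         id = *(int *) arg;

    printf("thread %d running\n", id);
    return NULL;
}

int
main(void)
{
    pthread_t   threads[4];
    int         ids[4];
    int         i;

    /* start the workers, one slot per thread */
    for (i = 0; i < 4; i++)
    {
        ids[i] = i;
        if (pthread_create(&threads[i], NULL, worker, &ids[i]) != 0)
            return 1;
    }

    /* wait for all of them, as pgbench's main thread does */
    for (i = 0; i < 4; i++)
        pthread_join(threads[i], NULL);
    return 0;
}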
index afada2a0aad0f04f75c74c8309ecfd3f9c97b41c..fd284e0c071350918dcaf2283d4967ae1d30d76a 100644 (file)
@@ -356,7 +356,7 @@ gseg_picksplit(GistEntryVector *entryvec,
        {
                seg = (SEG *) DatumGetPointer(entryvec->vector[i].key);
                /* center calculation is done this way to avoid possible overflow */
-               sort_items[i - 1].center = seg->lower*0.5f + seg->upper*0.5f;
+               sort_items[i - 1].center = seg->lower * 0.5f + seg->upper * 0.5f;
                sort_items[i - 1].index = i;
                sort_items[i - 1].data = seg;
        }
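The seg.c hunk is also whitespace-only, but the line it touches deserves a word: the picksplit code computes the segment center as lower * 0.5f + upper * 0.5f rather than (lower + upper) * 0.5f, because halving each endpoint before adding keeps the intermediate result representable even when both endpoints are near FLT_MAX. A standalone illustration (not PostgreSQL code):

#include <float.h>
#include <stdio.h>

int
main(void)
{
    float       lower = 0.9f * FLT_MAX;
    float       upper = 0.9f * FLT_MAX;
    float       naive = (lower + upper) * 0.5f;         /* sum overflows to +inf */
    float       safe = lower * 0.5f + upper * 0.5f;     /* stays finite */

    printf("naive = %g, safe = %g\n", naive, safe);
    return 0;
}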
index 358a2643ca469891954644fd886974f666c4667c..22666b708e96912b2701cb895c21db1de07d1c86 100644 (file)
@@ -59,7 +59,7 @@ fixup_whole_row_references(Oid relOid, Bitmapset *columns)
        result = bms_copy(columns);
        result = bms_del_member(result, index);
 
-       for (attno=1; attno <= natts; attno++)
+       for (attno = 1; attno <= natts; attno++)
        {
                tuple = SearchSysCache2(ATTNUM,
                                                                ObjectIdGetDatum(relOid),
@@ -108,6 +108,7 @@ fixup_inherited_columns(Oid parentId, Oid childId, Bitmapset *columns)
        while ((index = bms_first_member(tmpset)) > 0)
        {
                attno = index + FirstLowInvalidHeapAttributeNumber;
+
                /*
                 * whole-row-reference shall be fixed-up later
                 */
@@ -158,14 +159,13 @@ check_relation_privileges(Oid relOid,
        bool            result = true;
 
        /*
-        * Hardwired Policies:
-        * SE-PostgreSQL enforces
-        * - clients cannot modify system catalogs using DMLs
-        * - clients cannot reference/modify toast relations using DMLs
+        * Hardwired Policies: SE-PostgreSQL enforces - clients cannot modify
+        * system catalogs using DMLs - clients cannot reference/modify toast
+        * relations using DMLs
         */
        if (sepgsql_getenforce() > 0)
        {
-               Oid             relnamespace = get_rel_namespace(relOid);
+               Oid                     relnamespace = get_rel_namespace(relOid);
 
                if (IsSystemNamespace(relnamespace) &&
                        (required & (SEPG_DB_TABLE__UPDATE |
@@ -242,7 +242,7 @@ check_relation_privileges(Oid relOid,
        {
                AttrNumber      attnum;
                uint32          column_perms = 0;
-               ObjectAddress   object;
+               ObjectAddress object;
 
                if (bms_is_member(index, selected))
                        column_perms |= SEPG_DB_COLUMN__SELECT;
@@ -290,12 +290,12 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort)
 {
        ListCell   *lr;
 
-       foreach (lr, rangeTabls)
+       foreach(lr, rangeTabls)
        {
-               RangeTblEntry  *rte = lfirst(lr);
-               uint32                  required = 0;
-               List               *tableIds;
-               ListCell           *li;
+               RangeTblEntry *rte = lfirst(lr);
+               uint32          required = 0;
+               List       *tableIds;
+               ListCell   *li;
 
                /*
                 * Only regular relations shall be checked
@@ -328,25 +328,24 @@ sepgsql_dml_privileges(List *rangeTabls, bool abort)
 
                /*
                 * If this RangeTblEntry is also supposed to reference inherited
-                * tables, we need to check security label of the child tables.
-                * So, we expand rte->relid into list of OIDs of inheritance
-                * hierarchy, then checker routine will be invoked for each
-                * relations.
+                * tables, we need to check security label of the child tables. So, we
+                * expand rte->relid into list of OIDs of inheritance hierarchy, then
+                * checker routine will be invoked for each relations.
                 */
                if (!rte->inh)
                        tableIds = list_make1_oid(rte->relid);
                else
                        tableIds = find_all_inheritors(rte->relid, NoLock, NULL);
 
-               foreach (li, tableIds)
+               foreach(li, tableIds)
                {
                        Oid                     tableOid = lfirst_oid(li);
                        Bitmapset  *selectedCols;
                        Bitmapset  *modifiedCols;
 
                        /*
-                        * child table has different attribute numbers, so we need
-                        * to fix up them.
+                        * child table has different attribute numbers, so we need to fix
+                        * up them.
                         */
                        selectedCols = fixup_inherited_columns(rte->relid, tableOid,
                                                                                                   rte->selectedCols);
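The dml.c hunks are indentation fixes, but the code they run through relies on a convention worth spelling out: column numbers stored in the selectedCols/modifiedCols bitmapsets are offset by FirstLowInvalidHeapAttributeNumber, so that system attributes and the whole-row reference (attno 0) land on non-negative bit positions; that is why the loop above converts with attno = index + FirstLowInvalidHeapAttributeNumber. A tiny sketch of just that offset arithmetic; the value -8 is an assumption matching the macro's definition around this release:

#include <stdio.h>

#define FirstLowInvalidHeapAttributeNumber (-8)     /* assumed value; see sysattr.h */

static int
attno_to_bit(int attno)
{
    /* whole-row reference (attno 0) becomes bit 8, user column 1 becomes bit 9, ... */
    return attno - FirstLowInvalidHeapAttributeNumber;
}

static int
bit_to_attno(int bit)
{
    return bit + FirstLowInvalidHeapAttributeNumber;
}

int
main(void)
{
    printf("whole-row bit = %d, column 1 bit = %d, bit 9 -> attno %d\n",
           attno_to_bit(0), attno_to_bit(1), bit_to_attno(9));
    return 0;
}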
index 5dc8a3ecaa8b704a158409d6be46d8cd755253c5..7797ccb199f088ed101b0c77b41d0b9f98a63d2d 100644 (file)
@@ -29,17 +29,17 @@ PG_MODULE_MAGIC;
 /*
  * Declarations
  */
-void _PG_init(void);
+void           _PG_init(void);
 
 /*
  * Saved hook entries (if stacked)
  */
-static object_access_hook_type                 next_object_access_hook = NULL;
-static ClientAuthentication_hook_type  next_client_auth_hook = NULL;
-static ExecutorCheckPerms_hook_type            next_exec_check_perms_hook = NULL;
-static needs_fmgr_hook_type                            next_needs_fmgr_hook = NULL;
-static fmgr_hook_type                                  next_fmgr_hook = NULL;
-static ProcessUtility_hook_type                        next_ProcessUtility_hook = NULL;
+static object_access_hook_type next_object_access_hook = NULL;
+static ClientAuthentication_hook_type next_client_auth_hook = NULL;
+static ExecutorCheckPerms_hook_type next_exec_check_perms_hook = NULL;
+static needs_fmgr_hook_type next_needs_fmgr_hook = NULL;
+static fmgr_hook_type next_fmgr_hook = NULL;
+static ProcessUtility_hook_type next_ProcessUtility_hook = NULL;
 
 /*
  * GUC: sepgsql.permissive = (on|off)
@@ -73,14 +73,14 @@ sepgsql_get_debug_audit(void)
 static void
 sepgsql_client_auth(Port *port, int status)
 {
-       char   *context;
+       char       *context;
 
        if (next_client_auth_hook)
-               (*next_client_auth_hook)(port, status);
+               (*next_client_auth_hook) (port, status);
 
        /*
-        * In the case when authentication failed, the supplied socket
-        * shall be closed soon, so we don't need to do anything here.
+        * In the case when authentication failed, the supplied socket shall be
+        * closed soon, so we don't need to do anything here.
         */
        if (status != STATUS_OK)
                return;
@@ -96,8 +96,8 @@ sepgsql_client_auth(Port *port, int status)
        sepgsql_set_client_label(context);
 
        /*
-        * Switch the current performing mode from INTERNAL to either
-        * DEFAULT or PERMISSIVE.
+        * Switch the current performing mode from INTERNAL to either DEFAULT or
+        * PERMISSIVE.
         */
        if (sepgsql_permissive)
                sepgsql_set_mode(SEPGSQL_MODE_PERMISSIVE);
@@ -113,12 +113,12 @@ sepgsql_client_auth(Port *port, int status)
  */
 static void
 sepgsql_object_access(ObjectAccessType access,
-                      Oid classId,
-                      Oid objectId,
-                      int subId)
+                                         Oid classId,
+                                         Oid objectId,
+                                         int subId)
 {
        if (next_object_access_hook)
-               (*next_object_access_hook)(access, classId, objectId, subId);
+               (*next_object_access_hook) (access, classId, objectId, subId);
 
        switch (access)
        {
@@ -147,7 +147,7 @@ sepgsql_object_access(ObjectAccessType access,
                        break;
 
                default:
-                       elog(ERROR, "unexpected object access type: %d", (int)access);
+                       elog(ERROR, "unexpected object access type: %d", (int) access);
                        break;
        }
 }
@@ -161,11 +161,11 @@ static bool
 sepgsql_exec_check_perms(List *rangeTabls, bool abort)
 {
        /*
-        * If security provider is stacking and one of them replied 'false'
-        * at least, we don't need to check any more.
+        * If security provider is stacking and one of them replied 'false' at
+        * least, we don't need to check any more.
         */
        if (next_exec_check_perms_hook &&
-               !(*next_exec_check_perms_hook)(rangeTabls, abort))
+               !(*next_exec_check_perms_hook) (rangeTabls, abort))
                return false;
 
        if (!sepgsql_dml_privileges(rangeTabls, abort))
@@ -184,20 +184,19 @@ sepgsql_exec_check_perms(List *rangeTabls, bool abort)
 static bool
 sepgsql_needs_fmgr_hook(Oid functionId)
 {
-       char   *old_label;
-       char   *new_label;
-       char   *function_label;
+       char       *old_label;
+       char       *new_label;
+       char       *function_label;
 
        if (next_needs_fmgr_hook &&
-               (*next_needs_fmgr_hook)(functionId))
+               (*next_needs_fmgr_hook) (functionId))
                return true;
 
        /*
-        * SELinux needs the function to be called via security_definer
-        * wrapper, if this invocation will take a domain-transition.
-        * We call these functions as trusted-procedure, if the security
-        * policy has a rule that switches security label of the client
-        * on execution.
+        * SELinux needs the function to be called via security_definer wrapper,
+        * if this invocation will take a domain-transition. We call these
+        * functions as trusted-procedure, if the security policy has a rule that
+        * switches security label of the client on execution.
         */
        old_label = sepgsql_get_client_label();
        new_label = sepgsql_proc_get_domtrans(functionId);
@@ -210,9 +209,9 @@ sepgsql_needs_fmgr_hook(Oid functionId)
 
        /*
         * Even if not a trusted-procedure, this function should not be inlined
-        * unless the client has db_procedure:{execute} permission.
-        * Please note that it shall be actually failed later because of same
-        * reason with ACL_EXECUTE.
+        * unless the client has db_procedure:{execute} permission. Please note
+        * that it shall be actually failed later because of same reason with
+        * ACL_EXECUTE.
         */
        function_label = sepgsql_get_label(ProcedureRelationId, functionId, 0);
        if (sepgsql_check_perms(sepgsql_get_client_label(),
@@ -238,20 +237,21 @@ static void
 sepgsql_fmgr_hook(FmgrHookEventType event,
                                  FmgrInfo *flinfo, Datum *private)
 {
-       struct {
-               char   *old_label;
-               char   *new_label;
-               Datum   next_private;
-       } *stack;
+       struct
+       {
+               char       *old_label;
+               char       *new_label;
+               Datum           next_private;
+       }                  *stack;
 
        switch (event)
        {
                case FHET_START:
-                       stack = (void *)DatumGetPointer(*private);
+                       stack = (void *) DatumGetPointer(*private);
                        if (!stack)
                        {
-                               MemoryContext   oldcxt;
-                               const char         *cur_label = sepgsql_get_client_label();
+                               MemoryContext oldcxt;
+                               const char *cur_label = sepgsql_get_client_label();
 
                                oldcxt = MemoryContextSwitchTo(flinfo->fn_mcxt);
                                stack = palloc(sizeof(*stack));
@@ -265,8 +265,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
                                {
                                        /*
                                         * process:transition permission between old and new
-                                        * label, when user tries to switch security label of
-                                        * the client on execution of trusted procedure.
+                                        * label, when user tries to switch security label of the
+                                        * client on execution of trusted procedure.
                                         */
                                        sepgsql_check_perms(cur_label, stack->new_label,
                                                                                SEPG_CLASS_PROCESS,
@@ -280,22 +280,22 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
                        stack->old_label = sepgsql_set_client_label(stack->new_label);
 
                        if (next_fmgr_hook)
-                               (*next_fmgr_hook)(event, flinfo, &stack->next_private);
+                               (*next_fmgr_hook) (event, flinfo, &stack->next_private);
                        break;
 
                case FHET_END:
                case FHET_ABORT:
-                       stack = (void *)DatumGetPointer(*private);
+                       stack = (void *) DatumGetPointer(*private);
 
                        if (next_fmgr_hook)
-                               (*next_fmgr_hook)(event, flinfo, &stack->next_private);
+                               (*next_fmgr_hook) (event, flinfo, &stack->next_private);
 
                        sepgsql_set_client_label(stack->old_label);
                        stack->old_label = NULL;
                        break;
 
                default:
-                       elog(ERROR, "unexpected event type: %d", (int)event);
+                       elog(ERROR, "unexpected event type: %d", (int) event);
                        break;
        }
 }
@@ -315,8 +315,8 @@ sepgsql_utility_command(Node *parsetree,
                                                char *completionTag)
 {
        if (next_ProcessUtility_hook)
-               (*next_ProcessUtility_hook)(parsetree, queryString, params,
-                                                                       isTopLevel, dest, completionTag);
+               (*next_ProcessUtility_hook) (parsetree, queryString, params,
+                                                                        isTopLevel, dest, completionTag);
 
        /*
         * Check command tag to avoid nefarious operations
@@ -324,6 +324,7 @@ sepgsql_utility_command(Node *parsetree,
        switch (nodeTag(parsetree))
        {
                case T_LoadStmt:
+
                        /*
                         * We reject LOAD command across the board on enforcing mode,
                         * because a binary module can arbitrarily override hooks.
@@ -336,11 +337,12 @@ sepgsql_utility_command(Node *parsetree,
                        }
                        break;
                default:
+
                        /*
-                        * Right now we don't check any other utility commands,
-                        * because it needs more detailed information to make
-                        * access control decision here, but we don't want to
-                        * have two parse and analyze routines individually.
+                        * Right now we don't check any other utility commands, because it
+                        * needs more detailed information to make access control decision
+                        * here, but we don't want to have two parse and analyze routines
+                        * individually.
                         */
                        break;
        }
@@ -358,7 +360,7 @@ sepgsql_utility_command(Node *parsetree,
 void
 _PG_init(void)
 {
-       char   *context;
+       char       *context;
 
        /*
         * We allow to load the SE-PostgreSQL module on single-user-mode or
@@ -367,12 +369,12 @@ _PG_init(void)
        if (IsUnderPostmaster)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                errmsg("sepgsql must be loaded via shared_preload_libraries")));
+                        errmsg("sepgsql must be loaded via shared_preload_libraries")));
 
        /*
-        * Check availability of SELinux on the platform.
-        * If disabled, we cannot activate any SE-PostgreSQL features,
-        * and we have to skip rest of initialization.
+        * Check availability of SELinux on the platform. If disabled, we cannot
+        * activate any SE-PostgreSQL features, and we have to skip rest of
+        * initialization.
         */
        if (is_selinux_enabled() < 1)
        {
@@ -383,8 +385,8 @@ _PG_init(void)
        /*
         * sepgsql.permissive = (on|off)
         *
-        * This variable controls performing mode of SE-PostgreSQL
-        * on user's session.
+        * This variable controls performing mode of SE-PostgreSQL on user's
+        * session.
         */
        DefineCustomBoolVariable("sepgsql.permissive",
                                                         "Turn on/off permissive mode in SE-PostgreSQL",
@@ -400,10 +402,9 @@ _PG_init(void)
        /*
         * sepgsql.debug_audit = (on|off)
         *
-        * This variable allows users to turn on/off audit logs on access
-        * control decisions, independent from auditallow/auditdeny setting
-        * in the security policy.
-        * We intend to use this option for debugging purpose.
+        * This variable allows users to turn on/off audit logs on access control
+        * decisions, independent from auditallow/auditdeny setting in the
+        * security policy. We intend to use this option for debugging purpose.
         */
        DefineCustomBoolVariable("sepgsql.debug_audit",
                                                         "Turn on/off debug audit messages",
@@ -419,13 +420,12 @@ _PG_init(void)
        /*
         * Set up dummy client label.
         *
-        * XXX - note that PostgreSQL launches background worker process
-        * like autovacuum without authentication steps. So, we initialize
-        * sepgsql_mode with SEPGSQL_MODE_INTERNAL, and client_label with
-        * the security context of server process.
-        * Later, it also launches background of user session. In this case,
-        * the process is always hooked on post-authentication, and we can
-        * initialize the sepgsql_mode and client_label correctly.
+        * XXX - note that PostgreSQL launches background worker process like
+        * autovacuum without authentication steps. So, we initialize sepgsql_mode
+        * with SEPGSQL_MODE_INTERNAL, and client_label with the security context
+        * of server process. Later, it also launches background of user session.
+        * In this case, the process is always hooked on post-authentication, and
+        * we can initialize the sepgsql_mode and client_label correctly.
         */
        if (getcon_raw(&context) < 0)
                ereport(ERROR,
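The hooks.c changes are again purely mechanical, but they touch every instance of the hook-stacking convention this module follows: at load time each hook remembers whatever pointer was installed before it and invokes that saved pointer from inside its own implementation, so stacked providers all run, and for the permission hooks a "false" from an earlier provider short-circuits the check. A generic sketch of the pattern, using hypothetical names rather than PostgreSQL's hook typedefs:

#include <stdio.h>

typedef int (*check_perms_hook_type) (int arg);

/* the pointer a (hypothetical) core would consult */
static check_perms_hook_type check_perms_hook = NULL;

/* whatever hook was installed before this module loaded */
static check_perms_hook_type next_check_perms_hook = NULL;

static int
my_check_perms(int arg)
{
    /* if a previously stacked provider already said no, stop here */
    if (next_check_perms_hook && !next_check_perms_hook(arg))
        return 0;

    /* ... this module's own check would go here ... */
    return arg >= 0;
}

static void
module_init(void)
{
    next_check_perms_hook = check_perms_hook;
    check_perms_hook = my_check_perms;
}

int
main(void)
{
    module_init();
    printf("allowed: %d\n", check_perms_hook(1));
    return 0;
}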
index 828512a961aa7f00f94a0f32760b350f4d72e8fb..669ee35ac3e4a6d3515364e662568d865af1dbb2 100644 (file)
@@ -38,7 +38,7 @@
  *
  * security label of the client process
  */
-static char       *client_label = NULL;
+static char *client_label = NULL;
 
 char *
 sepgsql_get_client_label(void)
@@ -49,7 +49,7 @@ sepgsql_get_client_label(void)
 char *
 sepgsql_set_client_label(char *new_label)
 {
-       char   *old_label = client_label;
+       char       *old_label = client_label;
 
        client_label = new_label;
 
@@ -66,22 +66,22 @@ sepgsql_set_client_label(char *new_label)
 char *
 sepgsql_get_label(Oid classId, Oid objectId, int32 subId)
 {
-       ObjectAddress   object;
-       char               *label;
+       ObjectAddress object;
+       char       *label;
 
-       object.classId          = classId;
-       object.objectId         = objectId;
-       object.objectSubId      = subId;
+       object.classId = classId;
+       object.objectId = objectId;
+       object.objectSubId = subId;
 
        label = GetSecurityLabel(&object, SEPGSQL_LABEL_TAG);
-       if (!label || security_check_context_raw((security_context_t)label))
+       if (!label || security_check_context_raw((security_context_t) label))
        {
-               security_context_t      unlabeled;
+               security_context_t unlabeled;
 
                if (security_get_initial_context_raw("unlabeled", &unlabeled) < 0)
                        ereport(ERROR,
                                        (errcode(ERRCODE_INTERNAL_ERROR),
-                                        errmsg("SELinux: failed to get initial security label: %m")));
+                          errmsg("SELinux: failed to get initial security label: %m")));
                PG_TRY();
                {
                        label = pstrdup(unlabeled);
@@ -107,21 +107,22 @@ void
 sepgsql_object_relabel(const ObjectAddress *object, const char *seclabel)
 {
        /*
-        * validate format of the supplied security label,
-        * if it is security context of selinux.
+        * validate format of the supplied security label, if it is security
+        * context of selinux.
         */
        if (seclabel &&
                security_check_context_raw((security_context_t) seclabel) < 0)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_NAME),
-                                errmsg("SELinux: invalid security label: \"%s\"", seclabel)));
+                          errmsg("SELinux: invalid security label: \"%s\"", seclabel)));
+
        /*
         * Do actual permission checks for each object classes
         */
        switch (object->classId)
        {
                case NamespaceRelationId:
-                   sepgsql_schema_relabel(object->objectId, seclabel);
+                       sepgsql_schema_relabel(object->objectId, seclabel);
                        break;
                case RelationRelationId:
                        if (object->objectSubId == 0)
@@ -151,7 +152,7 @@ PG_FUNCTION_INFO_V1(sepgsql_getcon);
 Datum
 sepgsql_getcon(PG_FUNCTION_ARGS)
 {
-       char   *client_label;
+       char       *client_label;
 
        if (!sepgsql_is_enabled())
                PG_RETURN_NULL();
@@ -171,9 +172,9 @@ PG_FUNCTION_INFO_V1(sepgsql_mcstrans_in);
 Datum
 sepgsql_mcstrans_in(PG_FUNCTION_ARGS)
 {
-       text   *label = PG_GETARG_TEXT_P(0);
-       char   *raw_label;
-       char   *result;
+       text       *label = PG_GETARG_TEXT_P(0);
+       char       *raw_label;
+       char       *result;
 
        if (!sepgsql_is_enabled())
                ereport(ERROR,
@@ -211,9 +212,9 @@ PG_FUNCTION_INFO_V1(sepgsql_mcstrans_out);
 Datum
 sepgsql_mcstrans_out(PG_FUNCTION_ARGS)
 {
-       text   *label = PG_GETARG_TEXT_P(0);
-       char   *qual_label;
-       char   *result;
+       text       *label = PG_GETARG_TEXT_P(0);
+       char       *qual_label;
+       char       *result;
 
        if (!sepgsql_is_enabled())
                ereport(ERROR,
@@ -250,8 +251,8 @@ static char *
 quote_object_name(const char *src1, const char *src2,
                                  const char *src3, const char *src4)
 {
-       StringInfoData  result;
-       const char         *temp;
+       StringInfoData result;
+       const char *temp;
 
        initStringInfo(&result);
 
@@ -260,28 +261,28 @@ quote_object_name(const char *src1, const char *src2,
                temp = quote_identifier(src1);
                appendStringInfo(&result, "%s", temp);
                if (src1 != temp)
-                       pfree((void *)temp);
+                       pfree((void *) temp);
        }
        if (src2)
        {
                temp = quote_identifier(src2);
                appendStringInfo(&result, ".%s", temp);
                if (src2 != temp)
-                       pfree((void *)temp);
+                       pfree((void *) temp);
        }
        if (src3)
        {
                temp = quote_identifier(src3);
                appendStringInfo(&result, ".%s", temp);
                if (src3 != temp)
-                       pfree((void *)temp);
+                       pfree((void *) temp);
        }
        if (src4)
        {
                temp = quote_identifier(src4);
                appendStringInfo(&result, ".%s", temp);
                if (src4 != temp)
-                       pfree((void *)temp);
+                       pfree((void *) temp);
        }
        return result.data;
 }
@@ -294,19 +295,19 @@ quote_object_name(const char *src1, const char *src2,
  * catalog OID.
  */
 static void
-exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
+exec_object_restorecon(struct selabel_handle * sehnd, Oid catalogId)
 {
-       Relation                rel;
-       SysScanDesc             sscan;
-       HeapTuple               tuple;
-       char               *database_name = get_database_name(MyDatabaseId);
-       char               *namespace_name;
-       Oid                             namespace_id;
-       char               *relation_name;
+       Relation        rel;
+       SysScanDesc sscan;
+       HeapTuple       tuple;
+       char       *database_name = get_database_name(MyDatabaseId);
+       char       *namespace_name;
+       Oid                     namespace_id;
+       char       *relation_name;
 
        /*
-        * Open the target catalog. We don't want to allow writable
-        * accesses by other session during initial labeling.
+        * Open the target catalog. We don't want to allow writable accesses by
+        * other session during initial labeling.
         */
        rel = heap_open(catalogId, AccessShareLock);
 
@@ -314,18 +315,18 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
                                                           SnapshotNow, 0, NULL);
        while (HeapTupleIsValid(tuple = systable_getnext(sscan)))
        {
-               Form_pg_namespace       nspForm;
-               Form_pg_class           relForm;
-               Form_pg_attribute       attForm;
-               Form_pg_proc            proForm;
-               char                       *objname;
-               int                                     objtype = 1234;
-               ObjectAddress           object;
-               security_context_t      context;
+               Form_pg_namespace nspForm;
+               Form_pg_class relForm;
+               Form_pg_attribute attForm;
+               Form_pg_proc proForm;
+               char       *objname;
+               int                     objtype = 1234;
+               ObjectAddress object;
+               security_context_t context;
 
                /*
-                * The way to determine object name depends on object classes.
-                * So, any branches set up `objtype', `objname' and `object' here.
+                * The way to determine object name depends on object classes. So, any
+                * branches set up `objtype', `objname' and `object' here.
                 */
                switch (catalogId)
                {
@@ -409,7 +410,7 @@ exec_object_restorecon(struct selabel_handle *sehnd, Oid catalogId)
 
                        default:
                                elog(ERROR, "unexpected catalog id: %u", catalogId);
-                               objname = NULL;         /* for compiler quiet */
+                               objname = NULL; /* for compiler quiet */
                                break;
                }
 
@@ -464,8 +465,8 @@ PG_FUNCTION_INFO_V1(sepgsql_restorecon);
 Datum
 sepgsql_restorecon(PG_FUNCTION_ARGS)
 {
-       struct selabel_handle  *sehnd;
-       struct selinux_opt              seopts;
+       struct selabel_handle *sehnd;
+       struct selinux_opt seopts;
 
        /*
         * SELinux has to be enabled on the running platform.
@@ -474,19 +475,19 @@ sepgsql_restorecon(PG_FUNCTION_ARGS)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("sepgsql is not currently enabled")));
+
        /*
-        * Check DAC permission. Only superuser can set up initial
-        * security labels, like root-user in filesystems
+        * Check DAC permission. Only superuser can set up initial security
+        * labels, like root-user in filesystems
         */
        if (!superuser())
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                errmsg("SELinux: must be superuser to restore initial contexts")));
+                 errmsg("SELinux: must be superuser to restore initial contexts")));
 
        /*
-        * Open selabel_lookup(3) stuff. It provides a set of mapping
-        * between an initial security label and object class/name due
-        * to the system setting.
+        * Open selabel_lookup(3) stuff. It provides a set of mapping between an
+        * initial security label and object class/name due to the system setting.
         */
        if (PG_ARGISNULL(0))
        {
@@ -502,12 +503,12 @@ sepgsql_restorecon(PG_FUNCTION_ARGS)
        if (!sehnd)
                ereport(ERROR,
                                (errcode(ERRCODE_INTERNAL_ERROR),
-                                errmsg("SELinux: failed to initialize labeling handle: %m")));
+                          errmsg("SELinux: failed to initialize labeling handle: %m")));
        PG_TRY();
        {
                /*
-                * Right now, we have no support labeling on the shared
-                * database objects, such as database, role, or tablespace.
+                * Right now, we have no support labeling on the shared database
+                * objects, such as database, role, or tablespace.
                 */
                exec_object_restorecon(sehnd, NamespaceRelationId);
                exec_object_restorecon(sehnd, RelationRelationId);
@@ -519,7 +520,7 @@ sepgsql_restorecon(PG_FUNCTION_ARGS)
                selabel_close(sehnd);
                PG_RE_THROW();
        }
-       PG_END_TRY();   
+       PG_END_TRY();
 
        selabel_close(sehnd);
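One idiom that the quote_object_name() hunk above preserves is easy to miss: quote_identifier() returns its argument unchanged when no quoting is needed and a newly allocated string otherwise, so the caller frees the result only when the returned pointer differs from the input. A self-contained sketch of that ownership rule, with maybe_quote() as a deliberately simplified, hypothetical stand-in for quote_identifier() (malloc instead of palloc, and a toy quoting rule):

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *
maybe_quote(const char *ident)
{
    const char *p;

    /* toy rule: quote only if an upper-case letter appears */
    for (p = ident; *p; p++)
    {
        if (isupper((unsigned char) *p))
        {
            char       *quoted = malloc(strlen(ident) + 3);

            sprintf(quoted, "\"%s\"", ident);
            return quoted;          /* caller must free this copy */
        }
    }
    return ident;                   /* caller must not free this */
}

int
main(void)
{
    const char *src = "MySchema";
    const char *tmp = maybe_quote(src);

    printf("%s\n", tmp);
    if (tmp != src)
        free((void *) tmp);
    return 0;
}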
 
index 5a0c4947f726b3722e229e601d3b47e31ba10530..3b8bf23ba393cfe77bb956b2662c6698a9598aa3 100644 (file)
 void
 sepgsql_proc_post_create(Oid functionId)
 {
-       Relation                rel;
-       ScanKeyData             skey;
-       SysScanDesc             sscan;
-       HeapTuple               tuple;
-       Oid                             namespaceId;
-       ObjectAddress   object;
-       char               *scontext;
-       char               *tcontext;
-       char               *ncontext;
+       Relation        rel;
+       ScanKeyData skey;
+       SysScanDesc sscan;
+       HeapTuple       tuple;
+       Oid                     namespaceId;
+       ObjectAddress object;
+       char       *scontext;
+       char       *tcontext;
+       char       *ncontext;
 
        /*
         * Fetch namespace of the new procedure. Because pg_proc entry is not
@@ -67,8 +67,8 @@ sepgsql_proc_post_create(Oid functionId)
        heap_close(rel, AccessShareLock);
 
        /*
-        * Compute a default security label when we create a new procedure
-        * object under the specified namespace.
+        * Compute a default security label when we create a new procedure object
+        * under the specified namespace.
         */
        scontext = sepgsql_get_client_label();
        tcontext = sepgsql_get_label(NamespaceRelationId, namespaceId, 0);
@@ -144,9 +144,9 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
 char *
 sepgsql_proc_get_domtrans(Oid functionId)
 {
-       char   *scontext = sepgsql_get_client_label();
-       char   *tcontext;
-       char   *ncontext;
+       char       *scontext = sepgsql_get_client_label();
+       char       *tcontext;
+       char       *ncontext;
 
        tcontext = sepgsql_get_label(ProcedureRelationId, functionId, 0);
 
index ed5e3adc0e8cc1d4377ccd4e25d3f96b1e08d8b9..963cfdf9f10c7ba9845f7a1f3a0eb0538791b910 100644 (file)
 void
 sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
 {
-       char               *scontext = sepgsql_get_client_label();
-       char               *tcontext;
-       char               *ncontext;
-       ObjectAddress   object;
+       char       *scontext = sepgsql_get_client_label();
+       char       *tcontext;
+       char       *ncontext;
+       ObjectAddress object;
 
        /*
-        * Only attributes within regular relation have individual
-        * security labels.
+        * Only attributes within regular relation have individual security
+        * labels.
         */
        if (get_rel_relkind(relOid) != RELKIND_RELATION)
                return;
 
        /*
-        * Compute a default security label when we create a new procedure
-        * object under the specified namespace.
+        * Compute a default security label when we create a new procedure object
+        * under the specified namespace.
         */
        scontext = sepgsql_get_client_label();
        tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
        ncontext = sepgsql_compute_create(scontext, tcontext,
                                                                          SEPG_CLASS_DB_COLUMN);
+
        /*
         * Assign the default security label on a new procedure
         */
@@ -81,7 +82,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
        char       *scontext = sepgsql_get_client_label();
        char       *tcontext;
        char       *audit_name;
-       ObjectAddress   object;
+       ObjectAddress object;
 
        if (get_rel_relkind(relOid) != RELKIND_RELATION)
                ereport(ERROR,
@@ -127,21 +128,21 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
 void
 sepgsql_relation_post_create(Oid relOid)
 {
-       Relation                rel;
-       ScanKeyData             skey;
-       SysScanDesc             sscan;
-       HeapTuple               tuple;
-       Form_pg_class   classForm;
-       ObjectAddress   object;
-       uint16                  tclass;
-       char               *scontext;   /* subject */
-       char               *tcontext;   /* schema */
-       char               *rcontext;   /* relation */
-       char               *ccontext;   /* column */
+       Relation        rel;
+       ScanKeyData skey;
+       SysScanDesc sscan;
+       HeapTuple       tuple;
+       Form_pg_class classForm;
+       ObjectAddress object;
+       uint16          tclass;
+       char       *scontext;           /* subject */
+       char       *tcontext;           /* schema */
+       char       *rcontext;           /* relation */
+       char       *ccontext;           /* column */
 
        /*
-        * Fetch catalog record of the new relation. Because pg_class entry is
-        * not visible right now, we need to scan the catalog using SnapshotSelf.
+        * Fetch catalog record of the new relation. Because pg_class entry is not
+        * visible right now, we need to scan the catalog using SnapshotSelf.
         */
        rel = heap_open(RelationRelationId, AccessShareLock);
 
@@ -166,11 +167,11 @@ sepgsql_relation_post_create(Oid relOid)
        else if (classForm->relkind == RELKIND_VIEW)
                tclass = SEPG_CLASS_DB_VIEW;
        else
-               goto out;       /* No need to assign individual labels */
+               goto out;                               /* No need to assign individual labels */
 
        /*
-        * Compute a default security label when we create a new relation
-        * object under the specified namespace.
+        * Compute a default security label when we create a new relation object
+        * under the specified namespace.
         */
        scontext = sepgsql_get_client_label();
        tcontext = sepgsql_get_label(NamespaceRelationId,
@@ -186,8 +187,8 @@ sepgsql_relation_post_create(Oid relOid)
        SetSecurityLabel(&object, SEPGSQL_LABEL_TAG, rcontext);
 
        /*
-        * We also assigns a default security label on columns of the new
-        * regular tables.
+        * We also assigns a default security label on columns of the new regular
+        * tables.
         */
        if (classForm->relkind == RELKIND_RELATION)
        {
index 8538d18ac9e266d1a061cbc8bf6df4a31e518d56..0de89971fbc15c879e7bbac765ebd26dea62460b 100644 (file)
 void
 sepgsql_schema_post_create(Oid namespaceId)
 {
-       char               *scontext = sepgsql_get_client_label();
-       char               *tcontext;
-       char               *ncontext;
-       ObjectAddress   object;
+       char       *scontext = sepgsql_get_client_label();
+       char       *tcontext;
+       char       *ncontext;
+       ObjectAddress object;
 
        /*
-        * FIXME: Right now, we assume pg_database object has a fixed
-        * security label, because pg_seclabel does not support to store
-        * label of shared database objects.
+        * FIXME: Right now, we assume pg_database object has a fixed security
+        * label, because pg_seclabel does not support to store label of shared
+        * database objects.
         */
        tcontext = "system_u:object_r:sepgsql_db_t:s0";
 
        /*
-        * Compute a default security label when we create a new schema
-        * object under the working database.
+        * Compute a default security label when we create a new schema object
+        * under the working database.
         */
        ncontext = sepgsql_compute_create(scontext, tcontext,
                                                                          SEPG_CLASS_DB_SCHEMA);
index 03ba25cef082780f84ff526fc9111a0f67e70667..1f5a97e878ab1d67bd66a5a02500a6c1ecb3c989 100644 (file)
  */
 static struct
 {
-       const char                 *class_name;
-       uint16                          class_code;
+       const char *class_name;
+       uint16          class_code;
        struct
        {
-               const char         *av_name;
-               uint32                  av_code;
-       } av[32];
-} selinux_catalog[] = {
+               const char *av_name;
+               uint32          av_code;
+       }                       av[32];
+}      selinux_catalog[] =
+
+{
        {
-               "process",                              SEPG_CLASS_PROCESS,
+               "process", SEPG_CLASS_PROCESS,
                {
-                       { "transition",         SEPG_PROCESS__TRANSITION },
-                       { NULL, 0UL }
+                       {
+                               "transition", SEPG_PROCESS__TRANSITION
+                       },
+                       {
+                               NULL, 0UL
+                       }
                }
        },
        {
-               "file",                                 SEPG_CLASS_FILE,
+               "file", SEPG_CLASS_FILE,
                {
-                       { "read",                       SEPG_FILE__READ },
-                       { "write",                      SEPG_FILE__WRITE },
-                       { "create",                     SEPG_FILE__CREATE },
-                       { "getattr",            SEPG_FILE__GETATTR },
-                       { "unlink",                     SEPG_FILE__UNLINK },
-                       { "rename",                     SEPG_FILE__RENAME },
-                       { "append",                     SEPG_FILE__APPEND },
-                       { NULL, 0UL }
+                       {
+                               "read", SEPG_FILE__READ
+                       },
+                       {
+                               "write", SEPG_FILE__WRITE
+                       },
+                       {
+                               "create", SEPG_FILE__CREATE
+                       },
+                       {
+                               "getattr", SEPG_FILE__GETATTR
+                       },
+                       {
+                               "unlink", SEPG_FILE__UNLINK
+                       },
+                       {
+                               "rename", SEPG_FILE__RENAME
+                       },
+                       {
+                               "append", SEPG_FILE__APPEND
+                       },
+                       {
+                               NULL, 0UL
+                       }
                }
        },
        {
-               "dir",                                  SEPG_CLASS_DIR,
+               "dir", SEPG_CLASS_DIR,
                {
-                       { "read",                       SEPG_DIR__READ },
-                       { "write",                      SEPG_DIR__WRITE },
-                       { "create",                     SEPG_DIR__CREATE },
-                       { "getattr",            SEPG_DIR__GETATTR },
-                       { "unlink",                     SEPG_DIR__UNLINK },
-                       { "rename",                     SEPG_DIR__RENAME },
-                       { "search",                     SEPG_DIR__SEARCH },
-                       { "add_name",           SEPG_DIR__ADD_NAME },
-                       { "remove_name",        SEPG_DIR__REMOVE_NAME },
-                       { "rmdir",                      SEPG_DIR__RMDIR },
-                       { "reparent",           SEPG_DIR__REPARENT },
-                       { NULL, 0UL }
+                       {
+                               "read", SEPG_DIR__READ
+                       },
+                       {
+                               "write", SEPG_DIR__WRITE
+                       },
+                       {
+                               "create", SEPG_DIR__CREATE
+                       },
+                       {
+                               "getattr", SEPG_DIR__GETATTR
+                       },
+                       {
+                               "unlink", SEPG_DIR__UNLINK
+                       },
+                       {
+                               "rename", SEPG_DIR__RENAME
+                       },
+                       {
+                               "search", SEPG_DIR__SEARCH
+                       },
+                       {
+                               "add_name", SEPG_DIR__ADD_NAME
+                       },
+                       {
+                               "remove_name", SEPG_DIR__REMOVE_NAME
+                       },
+                       {
+                               "rmdir", SEPG_DIR__RMDIR
+                       },
+                       {
+                               "reparent", SEPG_DIR__REPARENT
+                       },
+                       {
+                               NULL, 0UL
+                       }
                }
        },
        {
-               "lnk_file",                             SEPG_CLASS_LNK_FILE,
+               "lnk_file", SEPG_CLASS_LNK_FILE,
                {
-                       { "read",                       SEPG_LNK_FILE__READ },
-                       { "write",                      SEPG_LNK_FILE__WRITE },
-                       { "create",                     SEPG_LNK_FILE__CREATE },
-                       { "getattr",            SEPG_LNK_FILE__GETATTR },
-                       { "unlink",                     SEPG_LNK_FILE__UNLINK },
-                       { "rename",                     SEPG_LNK_FILE__RENAME },
-                       { NULL, 0UL }
+                       {
+                               "read", SEPG_LNK_FILE__READ
+                       },
+                       {
+                               "write", SEPG_LNK_FILE__WRITE
+                       },
+                       {
+                               "create", SEPG_LNK_FILE__CREATE
+                       },
+                       {
+                               "getattr", SEPG_LNK_FILE__GETATTR
+                       },
+                       {
+                               "unlink", SEPG_LNK_FILE__UNLINK
+                       },
+                       {
+                               "rename", SEPG_LNK_FILE__RENAME
+                       },
+                       {
+                               NULL, 0UL
+                       }
                }
        },
        {
-               "chr_file",                             SEPG_CLASS_CHR_FILE,
+               "chr_file", SEPG_CLASS_CHR_FILE,
                {
-                       { "read",                       SEPG_CHR_FILE__READ },
-                       { "write",                      SEPG_CHR_FILE__WRITE },
-                       { "create",                     SEPG_CHR_FILE__CREATE },
-                       { "getattr",            SEPG_CHR_FILE__GETATTR },
-                       { "unlink",                     SEPG_CHR_FILE__UNLINK },
-                       { "rename",                     SEPG_CHR_FILE__RENAME },
-                       { NULL, 0UL }
+                       {
+                               "read", SEPG_CHR_FILE__READ
+                       },
+                       {
+                               "write", SEPG_CHR_FILE__WRITE
+                       },
+                       {
+                               "create", SEPG_CHR_FILE__CREATE
+                       },
+                       {
+                               "getattr", SEPG_CHR_FILE__GETATTR
+                       },
+                       {
+                               "unlink", SEPG_CHR_FILE__UNLINK
+                       },
+                       {
+                               "rename", SEPG_CHR_FILE__RENAME
+                       },
+                       {
+                               NULL, 0UL
+                       }
                }
        },
        {
-               "blk_file",                             SEPG_CLASS_BLK_FILE,
+               "blk_file", SEPG_CLASS_BLK_FILE,
                {
-                       { "read",                       SEPG_BLK_FILE__READ },
-                       { "write",                      SEPG_BLK_FILE__WRITE },
-                       { "create",                     SEPG_BLK_FILE__CREATE },
-                       { "getattr",            SEPG_BLK_FILE__GETATTR },
-                       { "unlink",                     SEPG_BLK_FILE__UNLINK },
-                       { "rename",                     SEPG_BLK_FILE__RENAME },
-                       { NULL, 0UL }
+                       {
+                               "read", SEPG_BLK_FILE__READ
+                       },
+                       {
+                               "write", SEPG_BLK_FILE__WRITE
+                       },
+                       {
+                               "create", SEPG_BLK_FILE__CREATE
+                       },
+                       {
+                               "getattr", SEPG_BLK_FILE__GETATTR
+                       },
+                       {
+                               "unlink", SEPG_BLK_FILE__UNLINK
+                       },
+                       {
+                               "rename", SEPG_BLK_FILE__RENAME
+                       },
+                       {
+                               NULL, 0UL
+                       }
                }
        },
        {
-               "sock_file",                    SEPG_CLASS_SOCK_FILE,
+               "sock_file", SEPG_CLASS_SOCK_FILE,
                {
-                       { "read",                       SEPG_SOCK_FILE__READ },
-                       { "write",                      SEPG_SOCK_FILE__WRITE },
-                       { "create",                     SEPG_SOCK_FILE__CREATE },
-                       { "getattr",            SEPG_SOCK_FILE__GETATTR },
-                       { "unlink",                     SEPG_SOCK_FILE__UNLINK },
-                       { "rename",                     SEPG_SOCK_FILE__RENAME },
-                       { NULL, 0UL }
+                       {
+                               "read", SEPG_SOCK_FILE__READ
+                       },
+                       {
+                               "write", SEPG_SOCK_FILE__WRITE
+                       },
+                       {
+                               "create", SEPG_SOCK_FILE__CREATE
+                       },
+                       {
+                               "getattr", SEPG_SOCK_FILE__GETATTR
+                       },
+                       {
+                               "unlink", SEPG_SOCK_FILE__UNLINK
+                       },
+                       {
+                               "rename", SEPG_SOCK_FILE__RENAME
+                       },
+                       {
+                               NULL, 0UL
+                       }
                }
        },
        {
-               "fifo_file",                    SEPG_CLASS_FIFO_FILE,
+               "fifo_file", SEPG_CLASS_FIFO_FILE,
                {
-                       { "read",                       SEPG_FIFO_FILE__READ },
-                       { "write",                      SEPG_FIFO_FILE__WRITE },
-                       { "create",                     SEPG_FIFO_FILE__CREATE },
-                       { "getattr",            SEPG_FIFO_FILE__GETATTR },
-                       { "unlink",                     SEPG_FIFO_FILE__UNLINK },
-                       { "rename",                     SEPG_FIFO_FILE__RENAME },
-                       { NULL, 0UL }
+                       {
+                               "read", SEPG_FIFO_FILE__READ
+                       },
+                       {
+                               "write", SEPG_FIFO_FILE__WRITE
+                       },
+                       {
+                               "create", SEPG_FIFO_FILE__CREATE
+                       },
+                       {
+                               "getattr", SEPG_FIFO_FILE__GETATTR
+                       },
+                       {
+                               "unlink", SEPG_FIFO_FILE__UNLINK
+                       },
+                       {
+                               "rename", SEPG_FIFO_FILE__RENAME
+                       },
+                       {
+                               NULL, 0UL
+                       }
                }
        },
        {
-               "db_database",                  SEPG_CLASS_DB_DATABASE,
+               "db_database", SEPG_CLASS_DB_DATABASE,
                {
-                       { "create",                     SEPG_DB_DATABASE__CREATE },
-                       { "drop",                       SEPG_DB_DATABASE__DROP },
-                       { "getattr",            SEPG_DB_DATABASE__GETATTR },
-                       { "setattr",            SEPG_DB_DATABASE__SETATTR },
-                       { "relabelfrom",        SEPG_DB_DATABASE__RELABELFROM },
-                       { "relabelto",          SEPG_DB_DATABASE__RELABELTO },
-                       { "access",                     SEPG_DB_DATABASE__ACCESS },
-                       { "load_module",        SEPG_DB_DATABASE__LOAD_MODULE },
-                       { NULL, 0UL },
+                       {
+                               "create", SEPG_DB_DATABASE__CREATE
+                       },
+                       {
+                               "drop", SEPG_DB_DATABASE__DROP
+                       },
+                       {
+                               "getattr", SEPG_DB_DATABASE__GETATTR
+                       },
+                       {
+                               "setattr", SEPG_DB_DATABASE__SETATTR
+                       },
+                       {
+                               "relabelfrom", SEPG_DB_DATABASE__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_DATABASE__RELABELTO
+                       },
+                       {
+                               "access", SEPG_DB_DATABASE__ACCESS
+                       },
+                       {
+                               "load_module", SEPG_DB_DATABASE__LOAD_MODULE
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
        {
-               "db_schema",                    SEPG_CLASS_DB_SCHEMA,
+               "db_schema", SEPG_CLASS_DB_SCHEMA,
                {
-                       { "create",                     SEPG_DB_SCHEMA__CREATE },
-                       { "drop",                       SEPG_DB_SCHEMA__DROP },
-                       { "getattr",            SEPG_DB_SCHEMA__GETATTR },
-                       { "setattr",            SEPG_DB_SCHEMA__SETATTR },
-                       { "relabelfrom",        SEPG_DB_SCHEMA__RELABELFROM },
-                       { "relabelto",          SEPG_DB_SCHEMA__RELABELTO },
-                       { "search",                     SEPG_DB_SCHEMA__SEARCH },
-                       { "add_name",           SEPG_DB_SCHEMA__ADD_NAME },
-                       { "remove_name",        SEPG_DB_SCHEMA__REMOVE_NAME },
-                       { NULL, 0UL },
+                       {
+                               "create", SEPG_DB_SCHEMA__CREATE
+                       },
+                       {
+                               "drop", SEPG_DB_SCHEMA__DROP
+                       },
+                       {
+                               "getattr", SEPG_DB_SCHEMA__GETATTR
+                       },
+                       {
+                               "setattr", SEPG_DB_SCHEMA__SETATTR
+                       },
+                       {
+                               "relabelfrom", SEPG_DB_SCHEMA__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_SCHEMA__RELABELTO
+                       },
+                       {
+                               "search", SEPG_DB_SCHEMA__SEARCH
+                       },
+                       {
+                               "add_name", SEPG_DB_SCHEMA__ADD_NAME
+                       },
+                       {
+                               "remove_name", SEPG_DB_SCHEMA__REMOVE_NAME
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
        {
-               "db_table",                             SEPG_CLASS_DB_TABLE,
+               "db_table", SEPG_CLASS_DB_TABLE,
                {
-                       { "create",                     SEPG_DB_TABLE__CREATE },
-                       { "drop",                       SEPG_DB_TABLE__DROP },
-                       { "getattr",            SEPG_DB_TABLE__GETATTR },
-                       { "setattr",            SEPG_DB_TABLE__SETATTR },
-                       { "relabelfrom",        SEPG_DB_TABLE__RELABELFROM },
-                       { "relabelto",          SEPG_DB_TABLE__RELABELTO },
-                       { "select",                     SEPG_DB_TABLE__SELECT },
-                       { "update",                     SEPG_DB_TABLE__UPDATE },
-                       { "insert",                     SEPG_DB_TABLE__INSERT },
-                       { "delete",                     SEPG_DB_TABLE__DELETE },
-                       { "lock",                       SEPG_DB_TABLE__LOCK },
-                       { NULL, 0UL },
+                       {
+                               "create", SEPG_DB_TABLE__CREATE
+                       },
+                       {
+                               "drop", SEPG_DB_TABLE__DROP
+                       },
+                       {
+                               "getattr", SEPG_DB_TABLE__GETATTR
+                       },
+                       {
+                               "setattr", SEPG_DB_TABLE__SETATTR
+                       },
+                       {
+                               "relabelfrom", SEPG_DB_TABLE__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_TABLE__RELABELTO
+                       },
+                       {
+                               "select", SEPG_DB_TABLE__SELECT
+                       },
+                       {
+                               "update", SEPG_DB_TABLE__UPDATE
+                       },
+                       {
+                               "insert", SEPG_DB_TABLE__INSERT
+                       },
+                       {
+                               "delete", SEPG_DB_TABLE__DELETE
+                       },
+                       {
+                               "lock", SEPG_DB_TABLE__LOCK
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
        {
-               "db_sequence",                  SEPG_CLASS_DB_SEQUENCE,
+               "db_sequence", SEPG_CLASS_DB_SEQUENCE,
                {
-                       { "create",                     SEPG_DB_SEQUENCE__CREATE },
-                       { "drop",                       SEPG_DB_SEQUENCE__DROP },
-                       { "getattr",            SEPG_DB_SEQUENCE__GETATTR },
-                       { "setattr",            SEPG_DB_SEQUENCE__SETATTR },
-                       { "relabelfrom",        SEPG_DB_SEQUENCE__RELABELFROM },
-                       { "relabelto",          SEPG_DB_SEQUENCE__RELABELTO },
-                       { "get_value",          SEPG_DB_SEQUENCE__GET_VALUE },
-                       { "next_value",         SEPG_DB_SEQUENCE__NEXT_VALUE },
-                       { "set_value",          SEPG_DB_SEQUENCE__SET_VALUE },
-                       { NULL, 0UL },
+                       {
+                               "create", SEPG_DB_SEQUENCE__CREATE
+                       },
+                       {
+                               "drop", SEPG_DB_SEQUENCE__DROP
+                       },
+                       {
+                               "getattr", SEPG_DB_SEQUENCE__GETATTR
+                       },
+                       {
+                               "setattr", SEPG_DB_SEQUENCE__SETATTR
+                       },
+                       {
+                               "relabelfrom", SEPG_DB_SEQUENCE__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_SEQUENCE__RELABELTO
+                       },
+                       {
+                               "get_value", SEPG_DB_SEQUENCE__GET_VALUE
+                       },
+                       {
+                               "next_value", SEPG_DB_SEQUENCE__NEXT_VALUE
+                       },
+                       {
+                               "set_value", SEPG_DB_SEQUENCE__SET_VALUE
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
        {
-               "db_procedure",                 SEPG_CLASS_DB_PROCEDURE,
+               "db_procedure", SEPG_CLASS_DB_PROCEDURE,
                {
-                       { "create",                     SEPG_DB_PROCEDURE__CREATE },
-                       { "drop",                       SEPG_DB_PROCEDURE__DROP },
-                       { "getattr",            SEPG_DB_PROCEDURE__GETATTR },
-                       { "setattr",            SEPG_DB_PROCEDURE__SETATTR },
-                       { "relabelfrom",        SEPG_DB_PROCEDURE__RELABELFROM },
-                       { "relabelto",          SEPG_DB_PROCEDURE__RELABELTO },
-                       { "execute",            SEPG_DB_PROCEDURE__EXECUTE },
-                       { "entrypoint",         SEPG_DB_PROCEDURE__ENTRYPOINT },
-                       { "install",            SEPG_DB_PROCEDURE__INSTALL },
-                       { NULL, 0UL },
+                       {
+                               "create", SEPG_DB_PROCEDURE__CREATE
+                       },
+                       {
+                               "drop", SEPG_DB_PROCEDURE__DROP
+                       },
+                       {
+                               "getattr", SEPG_DB_PROCEDURE__GETATTR
+                       },
+                       {
+                               "setattr", SEPG_DB_PROCEDURE__SETATTR
+                       },
+                       {
+                               "relabelfrom", SEPG_DB_PROCEDURE__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_PROCEDURE__RELABELTO
+                       },
+                       {
+                               "execute", SEPG_DB_PROCEDURE__EXECUTE
+                       },
+                       {
+                               "entrypoint", SEPG_DB_PROCEDURE__ENTRYPOINT
+                       },
+                       {
+                               "install", SEPG_DB_PROCEDURE__INSTALL
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
        {
-               "db_column",                    SEPG_CLASS_DB_COLUMN,
+               "db_column", SEPG_CLASS_DB_COLUMN,
                {
-                       { "create",                     SEPG_DB_COLUMN__CREATE },
-                       { "drop",                       SEPG_DB_COLUMN__DROP },
-                       { "getattr",            SEPG_DB_COLUMN__GETATTR },
-                       { "setattr",            SEPG_DB_COLUMN__SETATTR },
-                       { "relabelfrom",        SEPG_DB_COLUMN__RELABELFROM },
-                       { "relabelto",          SEPG_DB_COLUMN__RELABELTO },
-                       { "select",                     SEPG_DB_COLUMN__SELECT },
-                       { "update",                     SEPG_DB_COLUMN__UPDATE },
-                       { "insert",                     SEPG_DB_COLUMN__INSERT },
-                       { NULL, 0UL },
+                       {
+                               "create", SEPG_DB_COLUMN__CREATE
+                       },
+                       {
+                               "drop", SEPG_DB_COLUMN__DROP
+                       },
+                       {
+                               "getattr", SEPG_DB_COLUMN__GETATTR
+                       },
+                       {
+                               "setattr", SEPG_DB_COLUMN__SETATTR
+                       },
+                       {
+                               "relabelfrom", SEPG_DB_COLUMN__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_COLUMN__RELABELTO
+                       },
+                       {
+                               "select", SEPG_DB_COLUMN__SELECT
+                       },
+                       {
+                               "update", SEPG_DB_COLUMN__UPDATE
+                       },
+                       {
+                               "insert", SEPG_DB_COLUMN__INSERT
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
        {
-               "db_tuple",                             SEPG_CLASS_DB_TUPLE,
+               "db_tuple", SEPG_CLASS_DB_TUPLE,
                {
-                       { "relabelfrom",        SEPG_DB_TUPLE__RELABELFROM },
-                       { "relabelto",          SEPG_DB_TUPLE__RELABELTO },
-                       { "select",                     SEPG_DB_TUPLE__SELECT },
-                       { "update",                     SEPG_DB_TUPLE__UPDATE },
-                       { "insert",                     SEPG_DB_TUPLE__INSERT },
-                       { "delete",                     SEPG_DB_TUPLE__DELETE },
-                       { NULL, 0UL },
+                       {
+                               "relabelfrom", SEPG_DB_TUPLE__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_TUPLE__RELABELTO
+                       },
+                       {
+                               "select", SEPG_DB_TUPLE__SELECT
+                       },
+                       {
+                               "update", SEPG_DB_TUPLE__UPDATE
+                       },
+                       {
+                               "insert", SEPG_DB_TUPLE__INSERT
+                       },
+                       {
+                               "delete", SEPG_DB_TUPLE__DELETE
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
        {
-               "db_blob",                              SEPG_CLASS_DB_BLOB,
+               "db_blob", SEPG_CLASS_DB_BLOB,
                {
-                       { "create",                     SEPG_DB_BLOB__CREATE },
-                       { "drop",                       SEPG_DB_BLOB__DROP },
-                       { "getattr",            SEPG_DB_BLOB__GETATTR },
-                       { "setattr",            SEPG_DB_BLOB__SETATTR },
-                       { "relabelfrom",        SEPG_DB_BLOB__RELABELFROM },
-                       { "relabelto",          SEPG_DB_BLOB__RELABELTO },
-                       { "read",                       SEPG_DB_BLOB__READ },
-                       { "write",                      SEPG_DB_BLOB__WRITE },
-                       { "import",                     SEPG_DB_BLOB__IMPORT },
-                       { "export",                     SEPG_DB_BLOB__EXPORT },
-                       { NULL, 0UL },
+                       {
+                               "create", SEPG_DB_BLOB__CREATE
+                       },
+                       {
+                               "drop", SEPG_DB_BLOB__DROP
+                       },
+                       {
+                               "getattr", SEPG_DB_BLOB__GETATTR
+                       },
+                       {
+                               "setattr", SEPG_DB_BLOB__SETATTR
+                       },
+                       {
+                               "relabelfrom", SEPG_DB_BLOB__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_BLOB__RELABELTO
+                       },
+                       {
+                               "read", SEPG_DB_BLOB__READ
+                       },
+                       {
+                               "write", SEPG_DB_BLOB__WRITE
+                       },
+                       {
+                               "import", SEPG_DB_BLOB__IMPORT
+                       },
+                       {
+                               "export", SEPG_DB_BLOB__EXPORT
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
        {
-               "db_language",                  SEPG_CLASS_DB_LANGUAGE,
+               "db_language", SEPG_CLASS_DB_LANGUAGE,
                {
-                       { "create",                     SEPG_DB_LANGUAGE__CREATE },
-                       { "drop",                       SEPG_DB_LANGUAGE__DROP },
-                       { "getattr",            SEPG_DB_LANGUAGE__GETATTR },
-                       { "setattr",            SEPG_DB_LANGUAGE__SETATTR },
-                       { "relabelfrom",        SEPG_DB_LANGUAGE__RELABELFROM },
-                       { "relabelto",          SEPG_DB_LANGUAGE__RELABELTO },
-                       { "implement",          SEPG_DB_LANGUAGE__IMPLEMENT },
-                       { "execute",            SEPG_DB_LANGUAGE__EXECUTE },
-                       { NULL, 0UL },
+                       {
+                               "create", SEPG_DB_LANGUAGE__CREATE
+                       },
+                       {
+                               "drop", SEPG_DB_LANGUAGE__DROP
+                       },
+                       {
+                               "getattr", SEPG_DB_LANGUAGE__GETATTR
+                       },
+                       {
+                               "setattr", SEPG_DB_LANGUAGE__SETATTR
+                       },
+                       {
+                               "relabelfrom", SEPG_DB_LANGUAGE__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_LANGUAGE__RELABELTO
+                       },
+                       {
+                               "implement", SEPG_DB_LANGUAGE__IMPLEMENT
+                       },
+                       {
+                               "execute", SEPG_DB_LANGUAGE__EXECUTE
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
        {
-               "db_view",                              SEPG_CLASS_DB_VIEW,
+               "db_view", SEPG_CLASS_DB_VIEW,
                {
-                       { "create",                     SEPG_DB_VIEW__CREATE },
-                       { "drop",                       SEPG_DB_VIEW__DROP },
-                       { "getattr",            SEPG_DB_VIEW__GETATTR },
-                       { "setattr",            SEPG_DB_VIEW__SETATTR },
-                       { "relabelfrom",        SEPG_DB_VIEW__RELABELFROM },
-                       { "relabelto",          SEPG_DB_VIEW__RELABELTO },
-                       { "expand",                     SEPG_DB_VIEW__EXPAND },
-                       { NULL, 0UL },
+                       {
+                               "create", SEPG_DB_VIEW__CREATE
+                       },
+                       {
+                               "drop", SEPG_DB_VIEW__DROP
+                       },
+                       {
+                               "getattr", SEPG_DB_VIEW__GETATTR
+                       },
+                       {
+                               "setattr", SEPG_DB_VIEW__SETATTR
+                       },
+                       {
+                               "relabelfrom", SEPG_DB_VIEW__RELABELFROM
+                       },
+                       {
+                               "relabelto", SEPG_DB_VIEW__RELABELTO
+                       },
+                       {
+                               "expand", SEPG_DB_VIEW__EXPAND
+                       },
+                       {
+                               NULL, 0UL
+                       },
                }
        },
 };
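
The array re-wrapped above is the sepgsql permission catalog: each object class pairs its class name with a list of (permission name, internal bit code) entries terminated by a NULL-named sentinel, and pgindent has simply expanded the brace style. A minimal, self-contained sketch of that table-driven pattern is below; the types, names, and bit values are illustrative only, not the real sepgsql definitions.

	/*
	 * Sketch of the catalog pattern shown above: permission names map to
	 * internal bit codes, and a NULL name ends the list.  Illustrative only.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	typedef struct
	{
		const char *av_name;		/* permission name; NULL terminates list */
		uint32_t	av_code;		/* internal bit for that permission */
	} av_entry;

	static const av_entry db_table_av[] = {
		{"select", 1u << 0},
		{"update", 1u << 1},
		{"insert", 1u << 2},
		{"delete", 1u << 3},
		{NULL, 0}
	};

	/* Look up one permission name; 0 means "not found". */
	static uint32_t
	av_name_to_code(const av_entry *av, const char *name)
	{
		int			i;

		for (i = 0; av[i].av_name; i++)
			if (strcmp(av[i].av_name, name) == 0)
				return av[i].av_code;
		return 0;
	}

	int
	main(void)
	{
		printf("select -> 0x%x\n", av_name_to_code(db_table_av, "select"));
		return 0;
	}
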
@@ -316,7 +624,7 @@ sepgsql_get_mode(void)
 int
 sepgsql_set_mode(int new_mode)
 {
-       int             old_mode = sepgsql_mode;
+       int                     old_mode = sepgsql_mode;
 
        sepgsql_mode = new_mode;
 
@@ -367,10 +675,10 @@ sepgsql_audit_log(bool denied,
                                  uint32 audited,
                                  const char *audit_name)
 {
-       StringInfoData  buf;
-       const char         *class_name;
-       const char         *av_name;
-       int                             i;
+       StringInfoData buf;
+       const char *class_name;
+       const char *av_name;
+       int                     i;
 
        /* lookup name of the object class */
        Assert(tclass < SEPG_CLASS_MAX);
@@ -380,7 +688,7 @@ sepgsql_audit_log(bool denied,
        initStringInfo(&buf);
        appendStringInfo(&buf, "%s {",
                                         (denied ? "denied" : "allowed"));
-       for (i=0; selinux_catalog[tclass].av[i].av_name; i++)
+       for (i = 0; selinux_catalog[tclass].av[i].av_name; i++)
        {
                if (audited & (1UL << i))
                {
@@ -418,14 +726,15 @@ void
 sepgsql_compute_avd(const char *scontext,
                                        const char *tcontext,
                                        uint16 tclass,
-                                       struct av_decision *avd)
+                                       struct av_decision * avd)
 {
-       const char                 *tclass_name;
-       security_class_t        tclass_ex;
-       struct av_decision      avd_ex;
-       int                                     i, deny_unknown = security_deny_unknown();
+       const char *tclass_name;
+       security_class_t tclass_ex;
+       struct av_decision avd_ex;
+       int                     i,
+                               deny_unknown = security_deny_unknown();
 
-       /* Get external code of the object class*/
+       /* Get external code of the object class */
        Assert(tclass < SEPG_CLASS_MAX);
        Assert(tclass == selinux_catalog[tclass].class_code);
 
@@ -436,14 +745,13 @@ sepgsql_compute_avd(const char *scontext,
        {
                /*
                 * If the current security policy does not support permissions
-                * corresponding to database objects, we fill up them with dummy
-                * data.
+                * corresponding to database objects, we fill up them with dummy data.
                 * If security_deny_unknown() returns positive value, undefined
                 * permissions should be denied. Otherwise, allowed
                 */
                avd->allowed = (security_deny_unknown() > 0 ? 0 : ~0);
                avd->auditallow = 0U;
-               avd->auditdeny =  ~0U;
+               avd->auditdeny = ~0U;
                avd->flags = 0;
 
                return;
@@ -453,8 +761,8 @@ sepgsql_compute_avd(const char *scontext,
         * Ask SELinux what is allowed set of permissions on a pair of the
         * security contexts and the given object class.
         */
-       if (security_compute_av_flags_raw((security_context_t)scontext,
-                                                                         (security_context_t)tcontext,
+       if (security_compute_av_flags_raw((security_context_t) scontext,
+                                                                         (security_context_t) tcontext,
                                                                          tclass_ex, 0, &avd_ex) < 0)
                ereport(ERROR,
                                (errcode(ERRCODE_INTERNAL_ERROR),
@@ -464,17 +772,17 @@ sepgsql_compute_avd(const char *scontext,
 
        /*
         * SELinux returns its access control decision as a set of permissions
-        * represented in external code which depends on run-time environment.
-        * So, we need to translate it to the internal representation before
-        * returning results for the caller.
+        * represented in external code which depends on run-time environment. So,
+        * we need to translate it to the internal representation before returning
+        * results for the caller.
         */
        memset(avd, 0, sizeof(struct av_decision));
 
-       for (i=0; selinux_catalog[tclass].av[i].av_name; i++)
+       for (i = 0; selinux_catalog[tclass].av[i].av_name; i++)
        {
-               access_vector_t av_code_ex;
-               const char         *av_name = selinux_catalog[tclass].av[i].av_name;
-               uint32                  av_code = selinux_catalog[tclass].av[i].av_code;
+               access_vector_t av_code_ex;
+               const char *av_name = selinux_catalog[tclass].av[i].av_name;
+               uint32          av_code = selinux_catalog[tclass].av[i].av_code;
 
                av_code_ex = string_to_av_perm(tclass_ex, av_name);
                if (av_code_ex == 0)
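
The loop re-indented above walks the internal permission names of the class and asks the loaded policy for each name's run-time ("external") bit, so SELinux's decision can be folded back into the module's fixed internal bit layout. A hedged standalone sketch of that remapping follows; ext_code_for_name() merely stands in for libselinux's string_to_av_perm(), and all names and bit positions are invented for illustration.

	/*
	 * Sketch of remapping an allowed-permission mask from a run-time
	 * ("external") bit layout to a fixed internal one by permission name.
	 * Illustrative only; not the sepgsql implementation.
	 */
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	typedef struct
	{
		const char *av_name;
		uint32_t	av_code;		/* internal bit */
	} av_entry;

	static const av_entry table_av[] = {
		{"select", 1u << 0},
		{"update", 1u << 1},
		{NULL, 0}
	};

	/* stand-in for string_to_av_perm(): 0 means the policy lacks the name */
	static uint32_t
	ext_code_for_name(const char *name)
	{
		if (strcmp(name, "select") == 0)
			return 1u << 5;			/* policy-dependent position */
		if (strcmp(name, "update") == 0)
			return 1u << 9;
		return 0;
	}

	static uint32_t
	translate_allowed(const av_entry *av, uint32_t ext_allowed)
	{
		uint32_t	allowed = 0;
		int			i;

		for (i = 0; av[i].av_name; i++)
		{
			uint32_t	ext = ext_code_for_name(av[i].av_name);

			if (ext != 0 && (ext_allowed & ext))
				allowed |= av[i].av_code;
		}
		return allowed;
	}

	int
	main(void)
	{
		/* external mask has only the "update" bit set */
		printf("0x%x\n", translate_allowed(table_av, 1u << 9));	/* prints 0x2 */
		return 0;
	}
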
@@ -524,23 +832,23 @@ sepgsql_compute_create(const char *scontext,
                                           const char *tcontext,
                                           uint16 tclass)
 {
-       security_context_t      ncontext;
-       security_class_t        tclass_ex;
-       const char                 *tclass_name;
-       char                       *result;
+       security_context_t ncontext;
+       security_class_t tclass_ex;
+       const char *tclass_name;
+       char       *result;
 
-       /* Get external code of the object class*/
+       /* Get external code of the object class */
        Assert(tclass < SEPG_CLASS_MAX);
 
        tclass_name = selinux_catalog[tclass].class_name;
        tclass_ex = string_to_security_class(tclass_name);
 
        /*
-        * Ask SELinux what is the default context for the given object class
-        * on a pair of security contexts
+        * Ask SELinux what is the default context for the given object class on a
+        * pair of security contexts
         */
-       if (security_compute_create_raw((security_context_t)scontext,
-                                                                       (security_context_t)tcontext,
+       if (security_compute_create_raw((security_context_t) scontext,
+                                                                       (security_context_t) tcontext,
                                                                        tclass_ex, &ncontext) < 0)
                ereport(ERROR,
                                (errcode(ERRCODE_INTERNAL_ERROR),
@@ -549,8 +857,8 @@ sepgsql_compute_create(const char *scontext,
                                                scontext, tcontext, tclass_name)));
 
        /*
-        * libselinux returns malloc()'ed string, so we need to copy it
-        * on the palloc()'ed region.
+        * libselinux returns malloc()'ed string, so we need to copy it on the
+        * palloc()'ed region.
         */
        PG_TRY();
        {
@@ -589,7 +897,7 @@ sepgsql_check_perms(const char *scontext,
                                        const char *audit_name,
                                        bool abort)
 {
-       struct av_decision      avd;
+       struct av_decision avd;
        uint32          denied;
        uint32          audited;
        bool            result = true;
@@ -602,7 +910,7 @@ sepgsql_check_perms(const char *scontext,
                audited = (denied ? denied : required);
        else
                audited = (denied ? (denied & avd.auditdeny)
-                                                 : (required & avd.auditallow));
+                                  : (required & avd.auditallow));
 
        if (denied &&
                sepgsql_getenforce() > 0 &&
@@ -610,8 +918,8 @@ sepgsql_check_perms(const char *scontext,
                result = false;
 
        /*
-        * It records a security audit for the request, if needed.
-        * But, when SE-PgSQL performs 'internal' mode, it needs to keep silent.
+        * It records a security audit for the request, if needed. But, when
+        * SE-PgSQL performs 'internal' mode, it needs to keep silent.
         */
        if (audited && sepgsql_mode != SEPGSQL_MODE_INTERNAL)
        {
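
The hunk above also re-wraps the audit-mask selection in sepgsql_check_perms: when some required bit was denied, only the denied bits the policy marks audit-on-deny are logged; otherwise the required bits marked audit-on-allow are. A tiny sketch of that mask arithmetic, with simplified names rather than the module's API:

	/*
	 * Sketch of the audit-mask selection shown above.  Illustrative only.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t
	audited_mask(uint32_t required, uint32_t allowed,
				 uint32_t auditallow, uint32_t auditdeny)
	{
		uint32_t	denied = required & ~allowed;

		return denied ? (denied & auditdeny)
					  : (required & auditallow);
	}

	int
	main(void)
	{
		/* require bits 0 and 1, but only bit 0 is allowed */
		printf("audited = 0x%x\n",
			   audited_mask(0x3, 0x1, 0x0, 0x2));	/* prints 0x2 */
		return 0;
	}
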
index ba7b2d15975f7f95b0d31fed0ac17f01d08edbe5..71688ab784f7fba46c882b5a3f701937eb6b7d1e 100644 (file)
@@ -218,33 +218,34 @@ extern bool sepgsql_get_debug_audit(void);
 /*
  * selinux.c
  */
-extern bool    sepgsql_is_enabled(void);
+extern bool sepgsql_is_enabled(void);
 extern int     sepgsql_get_mode(void);
 extern int     sepgsql_set_mode(int new_mode);
 extern bool sepgsql_getenforce(void);
 
 extern void sepgsql_audit_log(bool denied,
-                                                         const char *scontext,
-                                                         const char *tcontext,
-                                                         uint16 tclass,
-                                                         uint32 audited,
-                                                         const char *audit_name);
+                                 const char *scontext,
+                                 const char *tcontext,
+                                 uint16 tclass,
+                                 uint32 audited,
+                                 const char *audit_name);
 
 extern void sepgsql_compute_avd(const char *scontext,
-                                                               const char *tcontext,
-                                                               uint16 tclass,
-                                                               struct av_decision *avd);
+                                       const char *tcontext,
+                                       uint16 tclass,
+                                       struct av_decision * avd);
 
 extern char *sepgsql_compute_create(const char *scontext,
-                                                                       const char *tcontext,
-                                                                       uint16 tclass);
+                                          const char *tcontext,
+                                          uint16 tclass);
 
 extern bool sepgsql_check_perms(const char *scontext,
-                                                               const char *tcontext,
-                                                               uint16 tclass,
-                                                               uint32 required,
-                                                               const char *audit_name,
-                                                               bool abort);
+                                       const char *tcontext,
+                                       uint16 tclass,
+                                       uint32 required,
+                                       const char *audit_name,
+                                       bool abort);
+
 /*
  * label.c
  */
@@ -252,8 +253,8 @@ extern char *sepgsql_get_client_label(void);
 extern char *sepgsql_set_client_label(char *new_label);
 extern char *sepgsql_get_label(Oid relOid, Oid objOid, int32 subId);
 
-extern void     sepgsql_object_relabel(const ObjectAddress *object,
-                                                                       const char *seclabel);
+extern void sepgsql_object_relabel(const ObjectAddress *object,
+                                          const char *seclabel);
 
 extern Datum sepgsql_getcon(PG_FUNCTION_ARGS);
 extern Datum sepgsql_mcstrans_in(PG_FUNCTION_ARGS);
@@ -276,7 +277,7 @@ extern void sepgsql_schema_relabel(Oid namespaceId, const char *seclabel);
  */
 extern void sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum);
 extern void sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
-                                                                         const char *seclabel);
+                                                 const char *seclabel);
 extern void sepgsql_relation_post_create(Oid relOid);
 extern void sepgsql_relation_relabel(Oid relOid, const char *seclabel);
 
@@ -287,4 +288,4 @@ extern void sepgsql_proc_post_create(Oid functionId);
 extern void sepgsql_proc_relabel(Oid functionId, const char *seclabel);
 extern char *sepgsql_proc_get_domtrans(Oid functionId);
 
-#endif /* SEPGSQL_H */
+#endif   /* SEPGSQL_H */
index f5a0d93ef5fb7a37bc0384d04ff7a5e9929da173..d02560c2981d43b575dc6cbddd35f5dc63b1d952 100644 (file)
@@ -84,7 +84,7 @@ moddatetime(PG_FUNCTION_ARGS)
 
        /*
         * This is where we check to see if the field we are supposed to update
-        * even exists. The above function must return -1 if name not found?
+        * even exists. The above function must return -1 if name not found?
         */
        if (attnum < 0)
                ereport(ERROR,
index e92ab66491f4cc4d9b56cc181d7aa35897655df7..44c600e1348d1227ae50ba8e6ed0a11dd47a8330 100644 (file)
@@ -61,7 +61,7 @@ static text *pgxml_result_to_text(xmlXPathObjectPtr res, xmlChar *toptag,
 static xmlChar *pgxml_texttoxmlchar(text *textstring);
 
 static xmlXPathObjectPtr pgxml_xpath(text *document, xmlChar *xpath,
-                                                                        xpath_workspace *workspace);
+                       xpath_workspace *workspace);
 
 static void cleanup_workspace(xpath_workspace *workspace);
 
@@ -234,7 +234,7 @@ Datum
 xpath_nodeset(PG_FUNCTION_ARGS)
 {
        text       *document = PG_GETARG_TEXT_P(0);
-       text       *xpathsupp = PG_GETARG_TEXT_P(1);    /* XPath expression */
+       text       *xpathsupp = PG_GETARG_TEXT_P(1);            /* XPath expression */
        xmlChar    *toptag = pgxml_texttoxmlchar(PG_GETARG_TEXT_P(2));
        xmlChar    *septag = pgxml_texttoxmlchar(PG_GETARG_TEXT_P(3));
        xmlChar    *xpath;
@@ -267,7 +267,7 @@ Datum
 xpath_list(PG_FUNCTION_ARGS)
 {
        text       *document = PG_GETARG_TEXT_P(0);
-       text       *xpathsupp = PG_GETARG_TEXT_P(1);    /* XPath expression */
+       text       *xpathsupp = PG_GETARG_TEXT_P(1);            /* XPath expression */
        xmlChar    *plainsep = pgxml_texttoxmlchar(PG_GETARG_TEXT_P(2));
        xmlChar    *xpath;
        text       *xpres;
@@ -296,7 +296,7 @@ Datum
 xpath_string(PG_FUNCTION_ARGS)
 {
        text       *document = PG_GETARG_TEXT_P(0);
-       text       *xpathsupp = PG_GETARG_TEXT_P(1);    /* XPath expression */
+       text       *xpathsupp = PG_GETARG_TEXT_P(1);            /* XPath expression */
        xmlChar    *xpath;
        int32           pathsize;
        text       *xpres;
@@ -337,7 +337,7 @@ Datum
 xpath_number(PG_FUNCTION_ARGS)
 {
        text       *document = PG_GETARG_TEXT_P(0);
-       text       *xpathsupp = PG_GETARG_TEXT_P(1);    /* XPath expression */
+       text       *xpathsupp = PG_GETARG_TEXT_P(1);            /* XPath expression */
        xmlChar    *xpath;
        float4          fRes;
        xmlXPathObjectPtr res;
@@ -369,7 +369,7 @@ Datum
 xpath_bool(PG_FUNCTION_ARGS)
 {
        text       *document = PG_GETARG_TEXT_P(0);
-       text       *xpathsupp = PG_GETARG_TEXT_P(1);    /* XPath expression */
+       text       *xpathsupp = PG_GETARG_TEXT_P(1);            /* XPath expression */
        xmlChar    *xpath;
        int                     bRes;
        xmlXPathObjectPtr res;
index a90104d17a2dc14aad83a113c49b107babfe81af..f8f7d7263f989d0348735b66d5594cc3506f8103 100644 (file)
@@ -42,7 +42,6 @@ extern void pgxml_parser_init(void);
 
 /* local defs */
 static const char **parse_params(text *paramstr);
-
 #endif   /* USE_LIBXSLT */
 
 
@@ -166,7 +165,7 @@ parse_params(text *paramstr)
                {
                        max_params *= 2;
                        params = (const char **) repalloc(params,
-                                                                                         (max_params + 1) * sizeof(char *));
+                                                                                 (max_params + 1) * sizeof(char *));
                }
                params[nparams++] = pos;
                pos = strstr(pos, nvsep);
index 6d608fed895162c74eb30c0c8f90db3797c776df..175e6ea2f2e6657e347daabf552fb7d701d02c3c 100644 (file)
@@ -350,7 +350,7 @@ nocachegetattr(HeapTuple tuple,
                 *
                 * check to see if any preceding bits are null...
                 */
-               int byte = attnum >> 3;
+               int                     byte = attnum >> 3;
                int                     finalbit = attnum & 0x07;
 
                /* check for nulls "before" final bit of last byte */
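
The declarations being re-aligned here split a zero-based attribute number into a byte index (attnum >> 3) and a bit position within that byte (attnum & 0x07), so the null bitmap can be scanned whole bytes at a time before the final partial byte. A standalone sketch of that check follows; the helper and the 0-means-null convention are illustrative, not the backend's actual macros.

	/*
	 * Sketch of the byte/bit split shown above: report whether any of bits
	 * 0..attnum-1 in a bitmap is zero (i.e. some preceding column is null).
	 * Illustrative only.
	 */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool
	any_preceding_null(const uint8_t *bits, int attnum)
	{
		int			byte = attnum >> 3;
		int			finalbit = attnum & 0x07;
		int			i;

		/* whole bytes before the last one must be all ones */
		for (i = 0; i < byte; i++)
			if (bits[i] != 0xFF)
				return true;

		/* then check the low finalbit bits of the last byte */
		return (bits[byte] & ((1 << finalbit) - 1)) != (uint8_t) ((1 << finalbit) - 1);
	}

	int
	main(void)
	{
		uint8_t		bitmap[2] = {0xFF, 0x05};	/* bit 9 is clear */

		printf("%d\n", any_preceding_null(bitmap, 10));	/* prints 1 */
		return 0;
	}
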
index 9ea87360f91c764663ad081b953ab69c2a4395d5..85c43199aa78e81ba2334828b0294a42dfda2584 100644 (file)
@@ -237,7 +237,7 @@ nocache_index_getattr(IndexTuple tup,
                 * Now check to see if any preceding bits are null...
                 */
                {
-                       int byte = attnum >> 3;
+                       int                     byte = attnum >> 3;
                        int                     finalbit = attnum & 0x07;
 
                        /* check for nulls "before" final bit of last byte */
index ce9abae6aa4ac3f740f9e6980391a7cbdf972c98..2de58604eee7810648cfa3b426457e44ba6ce252 100644 (file)
@@ -82,7 +82,8 @@ ginqueryarrayextract(PG_FUNCTION_ARGS)
        ArrayType  *array = PG_GETARG_ARRAYTYPE_P_COPY(0);
        int32      *nkeys = (int32 *) PG_GETARG_POINTER(1);
        StrategyNumber strategy = PG_GETARG_UINT16(2);
-       /* bool   **pmatch = (bool **) PG_GETARG_POINTER(3); */
+
+       /* bool   **pmatch = (bool **) PG_GETARG_POINTER(3); */
        /* Pointer         *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
        bool      **nullFlags = (bool **) PG_GETARG_POINTER(5);
        int32      *searchMode = (int32 *) PG_GETARG_POINTER(6);
@@ -112,7 +113,7 @@ ginqueryarrayextract(PG_FUNCTION_ARGS)
                case GinContainsStrategy:
                        if (nelems > 0)
                                *searchMode = GIN_SEARCH_MODE_DEFAULT;
-                       else                            /* everything contains the empty set */
+                       else    /* everything contains the empty set */
                                *searchMode = GIN_SEARCH_MODE_ALL;
                        break;
                case GinContainedStrategy:
@@ -142,10 +143,13 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
 {
        bool       *check = (bool *) PG_GETARG_POINTER(0);
        StrategyNumber strategy = PG_GETARG_UINT16(1);
+
        /* ArrayType  *query = PG_GETARG_ARRAYTYPE_P(2); */
        int32           nkeys = PG_GETARG_INT32(3);
+
        /* Pointer         *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
        bool       *recheck = (bool *) PG_GETARG_POINTER(5);
+
        /* Datum           *queryKeys = (Datum *) PG_GETARG_POINTER(6); */
        bool       *nullFlags = (bool *) PG_GETARG_POINTER(7);
        bool            res;
@@ -190,10 +194,11 @@ ginarrayconsistent(PG_FUNCTION_ARGS)
                case GinEqualStrategy:
                        /* we will need recheck */
                        *recheck = true;
+
                        /*
                         * Must have all elements in check[] true; no discrimination
-                        * against nulls here.  This is because array_contain_compare
-                        * and array_eq handle nulls differently ...
+                        * against nulls here.  This is because array_contain_compare and
+                        * array_eq handle nulls differently ...
                         */
                        res = true;
                        for (i = 0; i < nkeys; i++)
index f0c8c8e37f6b2a826e8192eb5f81b8400671f5f5..9e5bab194de3bbfbbb2ce1122c555f40518e021a 100644 (file)
@@ -80,8 +80,8 @@ ginAllocEntryAccumulator(void *arg)
        GinEntryAccumulator *ea;
 
        /*
-        * Allocate memory by rather big chunks to decrease overhead.  We have
-        * no need to reclaim RBNodes individually, so this costs nothing.
+        * Allocate memory by rather big chunks to decrease overhead.  We have no
+        * need to reclaim RBNodes individually, so this costs nothing.
         */
        if (accum->entryallocator == NULL || accum->eas_used >= DEF_NENTRY)
        {
@@ -108,7 +108,7 @@ ginInitBA(BuildAccumulator *accum)
                                                        cmpEntryAccumulator,
                                                        ginCombineData,
                                                        ginAllocEntryAccumulator,
-                                                       NULL,                           /* no freefunc needed */
+                                                       NULL,           /* no freefunc needed */
                                                        (void *) accum);
 }
 
@@ -145,8 +145,8 @@ ginInsertBAEntry(BuildAccumulator *accum,
        bool            isNew;
 
        /*
-        * For the moment, fill only the fields of eatmp that will be looked at
-        * by cmpEntryAccumulator or ginCombineData.
+        * For the moment, fill only the fields of eatmp that will be looked at by
+        * cmpEntryAccumulator or ginCombineData.
         */
        eatmp.attnum = attnum;
        eatmp.key = key;
index 4a1e75480089f8e0249068b0eea9a53177d30a86..41dbe9fd11ed20894b6821445d226eb7982fd70e 100644 (file)
 int
 ginCompareItemPointers(ItemPointer a, ItemPointer b)
 {
-       BlockNumber     ba = GinItemPointerGetBlockNumber(a);
-       BlockNumber     bb = GinItemPointerGetBlockNumber(b);
+       BlockNumber ba = GinItemPointerGetBlockNumber(a);
+       BlockNumber bb = GinItemPointerGetBlockNumber(b);
 
        if (ba == bb)
        {
-               OffsetNumber    oa = GinItemPointerGetOffsetNumber(a);
-               OffsetNumber    ob = GinItemPointerGetOffsetNumber(b);
+               OffsetNumber oa = GinItemPointerGetOffsetNumber(a);
+               OffsetNumber ob = GinItemPointerGetOffsetNumber(b);
 
                if (oa == ob)
                        return 0;
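
ginCompareItemPointers, whose declarations pgindent re-aligns above, orders TIDs by block number first and by offset within equal blocks. A compact standalone sketch of that two-level comparison, using a plain struct in place of the backend's ItemPointerData:

	/*
	 * Sketch of the two-level TID comparison shown above.  Illustrative only.
	 */
	#include <stdint.h>
	#include <stdio.h>

	typedef struct
	{
		uint32_t	block;
		uint16_t	offset;
	} tid;

	static int
	tid_compare(tid a, tid b)
	{
		if (a.block == b.block)
		{
			if (a.offset == b.offset)
				return 0;
			return (a.offset > b.offset) ? 1 : -1;
		}
		return (a.block > b.block) ? 1 : -1;
	}

	int
	main(void)
	{
		tid			x = {10, 3};
		tid			y = {10, 7};

		printf("%d\n", tid_compare(x, y));	/* prints -1 */
		return 0;
	}
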
@@ -383,6 +383,7 @@ dataPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prda
        Page            page = BufferGetPage(buf);
        int                     sizeofitem = GinSizeOfDataPageItem(page);
        int                     cnt = 0;
+
        /* these must be static so they can be returned to caller */
        static XLogRecData rdata[3];
        static ginxlogInsert data;
@@ -474,6 +475,7 @@ dataSplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogRe
        Size            pageSize = PageGetPageSize(lpage);
        Size            freeSpace;
        uint32          nCopied = 1;
+
        /* these must be static so they can be returned to caller */
        static ginxlogSplit data;
        static XLogRecData rdata[4];
index 9749a1be78669a15b259ea083211014b75b883c6..fa134f9fc3f24841ea5b179a7b4e5b59059ab1e2 100644 (file)
@@ -98,11 +98,11 @@ GinFormTuple(GinState *ginstate,
                if (errorTooBig)
                        ereport(ERROR,
                                        (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
-                                        errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
-                                                       (unsigned long) newsize,
-                                                       (unsigned long) Min(INDEX_SIZE_MASK,
-                                                                                               GinMaxItemSize),
-                                                       RelationGetRelationName(ginstate->index))));
+                       errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
+                                  (unsigned long) newsize,
+                                  (unsigned long) Min(INDEX_SIZE_MASK,
+                                                                          GinMaxItemSize),
+                                  RelationGetRelationName(ginstate->index))));
                pfree(itup);
                return NULL;
        }
@@ -164,7 +164,7 @@ GinShortenTuple(IndexTuple itup, uint32 nipd)
  * Form a non-leaf entry tuple by copying the key data from the given tuple,
  * which can be either a leaf or non-leaf entry tuple.
  *
- * Any posting list in the source tuple is not copied.  The specified child
+ * Any posting list in the source tuple is not copied. The specified child
  * block number is inserted into t_tid.
  */
 static IndexTuple
@@ -225,7 +225,7 @@ entryIsMoveRight(GinBtree btree, Page page)
        key = gintuple_get_key(btree->ginstate, itup, &category);
 
        if (ginCompareAttEntries(btree->ginstate,
-                                                        btree->entryAttnum, btree->entryKey, btree->entryCategory,
+                                  btree->entryAttnum, btree->entryKey, btree->entryCategory,
                                                         attnum, key, category) > 0)
                return TRUE;
 
@@ -488,6 +488,7 @@ entryPlaceToPage(GinBtree btree, Buffer buf, OffsetNumber off, XLogRecData **prd
        Page            page = BufferGetPage(buf);
        OffsetNumber placed;
        int                     cnt = 0;
+
        /* these must be static so they can be returned to caller */
        static XLogRecData rdata[3];
        static ginxlogInsert data;
@@ -561,6 +562,7 @@ entrySplitPage(GinBtree btree, Buffer lbuf, Buffer rbuf, OffsetNumber off, XLogR
        Page            lpage = PageGetTempPageCopy(BufferGetPage(lbuf));
        Page            rpage = BufferGetPage(rbuf);
        Size            pageSize = PageGetPageSize(lpage);
+
        /* these must be static so they can be returned to caller */
        static XLogRecData rdata[2];
        static ginxlogSplit data;
index 9960c786c94d3e2fc4806c3c5993281650244aee..82419e37acb278eea849e5fcf12b2e0c90abc0bf 100644 (file)
@@ -88,9 +88,9 @@ writeListPage(Relation index, Buffer buffer,
        GinPageGetOpaque(page)->rightlink = rightlink;
 
        /*
-        * tail page may contain only whole row(s) or final part of row placed
-        * on previous pages (a "row" here meaning all the index tuples generated
-        * for one heap tuple)
+        * tail page may contain only whole row(s) or final part of row placed on
+        * previous pages (a "row" here meaning all the index tuples generated for
+        * one heap tuple)
         */
        if (rightlink == InvalidBlockNumber)
        {
@@ -437,7 +437,7 @@ ginHeapTupleFastInsert(GinState *ginstate, GinTupleCollector *collector)
  * Create temporary index tuples for a single indexable item (one index column
  * for the heap tuple specified by ht_ctid), and append them to the array
  * in *collector.  They will subsequently be written out using
- * ginHeapTupleFastInsert.  Note that to guarantee consistent state, all
+ * ginHeapTupleFastInsert.     Note that to guarantee consistent state, all
  * temp tuples for a given heap tuple must be written in one call to
  * ginHeapTupleFastInsert.
  */
@@ -475,8 +475,8 @@ ginHeapTupleFastCollect(GinState *ginstate,
        }
 
        /*
-        * Build an index tuple for each key value, and add to array.  In
-        * pending tuples we just stick the heap TID into t_tid.
+        * Build an index tuple for each key value, and add to array.  In pending
+        * tuples we just stick the heap TID into t_tid.
         */
        for (i = 0; i < nentries; i++)
        {
@@ -665,7 +665,7 @@ processPendingPage(BuildAccumulator *accum, KeyArray *ka,
        {
                IndexTuple      itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
                OffsetNumber curattnum;
-               Datum   curkey;
+               Datum           curkey;
                GinNullCategory curcategory;
 
                /* Check for change of heap TID or attnum */
@@ -830,7 +830,7 @@ ginInsertCleanup(GinState *ginstate,
                         */
                        ginBeginBAScan(&accum);
                        while ((list = ginGetBAEntry(&accum,
-                                                                                &attnum, &key, &category, &nlist)) != NULL)
+                                                                 &attnum, &key, &category, &nlist)) != NULL)
                        {
                                ginEntryInsert(ginstate, attnum, key, category,
                                                           list, nlist, NULL);
@@ -867,7 +867,7 @@ ginInsertCleanup(GinState *ginstate,
 
                                ginBeginBAScan(&accum);
                                while ((list = ginGetBAEntry(&accum,
-                                                                                        &attnum, &key, &category, &nlist)) != NULL)
+                                                                 &attnum, &key, &category, &nlist)) != NULL)
                                        ginEntryInsert(ginstate, attnum, key, category,
                                                                   list, nlist, NULL);
                        }
index e07dc0a6ce06454215cd7d6f6b585e968ace03fa..a4771654a6d129439f2bf2a9842de30b7cfbae85 100644 (file)
@@ -40,8 +40,8 @@ static bool
 callConsistentFn(GinState *ginstate, GinScanKey key)
 {
        /*
-        * If we're dealing with a dummy EVERYTHING key, we don't want to call
-        * the consistentFn; just claim it matches.
+        * If we're dealing with a dummy EVERYTHING key, we don't want to call the
+        * consistentFn; just claim it matches.
         */
        if (key->searchMode == GIN_SEARCH_MODE_EVERYTHING)
        {
@@ -174,14 +174,14 @@ scanPostingTree(Relation index, GinScanEntry scanEntry,
 
 /*
  * Collects TIDs into scanEntry->matchBitmap for all heap tuples that
- * match the search entry.  This supports three different match modes:
+ * match the search entry.     This supports three different match modes:
  *
  * 1. Partial-match support: scan from current point until the
- *    comparePartialFn says we're done.
+ *       comparePartialFn says we're done.
  * 2. SEARCH_MODE_ALL: scan from current point (which should be first
- *    key for the current attnum) until we hit null items or end of attnum
+ *       key for the current attnum) until we hit null items or end of attnum
  * 3. SEARCH_MODE_EVERYTHING: scan from current point (which should be first
- *    key for the current attnum) until we hit end of attnum
+ *       key for the current attnum) until we hit end of attnum
  *
  * Returns true if done, false if it's necessary to restart scan from scratch
  */
@@ -189,7 +189,7 @@ static bool
 collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
                                   GinScanEntry scanEntry)
 {
-       OffsetNumber    attnum;
+       OffsetNumber attnum;
        Form_pg_attribute attr;
 
        /* Initialize empty bitmap result */
@@ -253,8 +253,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
                        cmp = DatumGetInt32(FunctionCall4(&btree->ginstate->comparePartialFn[attnum - 1],
                                                                                          scanEntry->queryKey,
                                                                                          idatum,
-                                                                                         UInt16GetDatum(scanEntry->strategy),
-                                                                                         PointerGetDatum(scanEntry->extra_data)));
+                                                                                UInt16GetDatum(scanEntry->strategy),
+                                                                       PointerGetDatum(scanEntry->extra_data)));
 
                        if (cmp > 0)
                                return true;
@@ -269,7 +269,7 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
                        /*
                         * In ALL mode, we are not interested in null items, so we can
                         * stop if we get to a null-item placeholder (which will be the
-                        * last entry for a given attnum).  We do want to include NULL_KEY
+                        * last entry for a given attnum).      We do want to include NULL_KEY
                         * and EMPTY_ITEM entries, though.
                         */
                        if (icategory == GIN_CAT_NULL_ITEM)
@@ -287,8 +287,8 @@ collectMatchBitmap(GinBtreeData *btree, GinBtreeStack *stack,
                         * We should unlock current page (but not unpin) during tree scan
                         * to prevent deadlock with vacuum processes.
                         *
-                        * We save current entry value (idatum) to be able to re-find
-                        * our tuple after re-locking
+                        * We save current entry value (idatum) to be able to re-find our
+                        * tuple after re-locking
                         */
                        if (icategory == GIN_CAT_NORM_KEY)
                                idatum = datumCopy(idatum, attr->attbyval, attr->attlen);
@@ -442,11 +442,11 @@ restartScanEntry:
                        Page            page;
 
                        /*
-                        * We should unlock entry page before touching posting tree
-                        * to prevent deadlocks with vacuum processes. Because entry is
-                        * never deleted from page and posting tree is never reduced to
-                        * the posting list, we can unlock page after getting BlockNumber
-                        * of root of posting tree.
+                        * We should unlock entry page before touching posting tree to
+                        * prevent deadlocks with vacuum processes. Because entry is never
+                        * deleted from page and posting tree is never reduced to the
+                        * posting list, we can unlock page after getting BlockNumber of
+                        * root of posting tree.
                         */
                        LockBuffer(stackEntry->buffer, GIN_UNLOCK);
                        needUnlock = FALSE;
@@ -596,7 +596,7 @@ entryGetNextItem(GinState *ginstate, GinScanEntry entry)
 
                                if (!ItemPointerIsValid(&entry->curItem) ||
                                        ginCompareItemPointers(&entry->curItem,
-                                                                                  entry->list + entry->offset - 1) == 0)
+                                                                          entry->list + entry->offset - 1) == 0)
                                {
                                        /*
                                         * First pages are deleted or empty, or we found exact
@@ -656,10 +656,10 @@ entryGetItem(GinState *ginstate, GinScanEntry entry)
                                }
 
                                /*
-                                * Reset counter to the beginning of entry->matchResult.
-                                * Note: entry->offset is still greater than
-                                * matchResult->ntuples if matchResult is lossy.  So, on next
-                                * call we will get next result from TIDBitmap.
+                                * Reset counter to the beginning of entry->matchResult. Note:
+                                * entry->offset is still greater than matchResult->ntuples if
+                                * matchResult is lossy.  So, on next call we will get next
+                                * result from TIDBitmap.
                                 */
                                entry->offset = 0;
                        }
@@ -745,10 +745,10 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
        /*
         * Find the minimum of the active entry curItems.
         *
-        * Note: a lossy-page entry is encoded by a ItemPointer with max value
-        * for offset (0xffff), so that it will sort after any exact entries
-        * for the same page.  So we'll prefer to return exact pointers not
-        * lossy pointers, which is good.
+        * Note: a lossy-page entry is encoded by a ItemPointer with max value for
+        * offset (0xffff), so that it will sort after any exact entries for the
+        * same page.  So we'll prefer to return exact pointers not lossy
+        * pointers, which is good.
         */
        ItemPointerSetMax(&minItem);
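
The encoding described above (a lossy pointer carries the maximum offset, 0xffff) is what makes exact pointers sort first for a given page. A tiny self-contained illustration with mock types, not the actual ItemPointerData macros:

/* Standalone sketch of the "lossy entry sorts after exact entries" property.
 * The struct and helpers below are simplified stand-ins, not PostgreSQL's
 * ItemPointerData / ItemPointerSetLossyPage. */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t block; uint16_t offset; } TidSketch;

#define LOSSY_OFFSET 0xffff     /* max offset marks "whole page is a match" */

static int
tid_compare(const TidSketch *a, const TidSketch *b)
{
    if (a->block != b->block)
        return (a->block < b->block) ? -1 : 1;
    if (a->offset != b->offset)
        return (a->offset < b->offset) ? -1 : 1;
    return 0;
}

int
main(void)
{
    TidSketch exact = {42, 7};                 /* exact item on page 42 */
    TidSketch lossy = {42, LOSSY_OFFSET};      /* lossy pointer to page 42 */

    /* The lossy pointer compares greater, so a min-scan over the entry
     * streams prefers exact pointers for the same page. */
    printf("exact < lossy: %d\n", tid_compare(&exact, &lossy) < 0);
    return 0;
}
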
 
@@ -782,28 +782,27 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
 
        /*
         * Lossy-page entries pose a problem, since we don't know the correct
-        * entryRes state to pass to the consistentFn, and we also don't know
-        * what its combining logic will be (could be AND, OR, or even NOT).
-        * If the logic is OR then the consistentFn might succeed for all
-        * items in the lossy page even when none of the other entries match.
+        * entryRes state to pass to the consistentFn, and we also don't know what
+        * its combining logic will be (could be AND, OR, or even NOT). If the
+        * logic is OR then the consistentFn might succeed for all items in the
+        * lossy page even when none of the other entries match.
         *
         * If we have a single lossy-page entry then we check to see if the
-        * consistentFn will succeed with only that entry TRUE.  If so,
-        * we return a lossy-page pointer to indicate that the whole heap
-        * page must be checked.  (On subsequent calls, we'll do nothing until
-        * minItem is past the page altogether, thus ensuring that we never return
-        * both regular and lossy pointers for the same page.)
+        * consistentFn will succeed with only that entry TRUE.  If so, we return
+        * a lossy-page pointer to indicate that the whole heap page must be
+        * checked.  (On subsequent calls, we'll do nothing until minItem is past
+        * the page altogether, thus ensuring that we never return both regular
+        * and lossy pointers for the same page.)
         *
-        * This idea could be generalized to more than one lossy-page entry,
-        * but ideally lossy-page entries should be infrequent so it would
-        * seldom be the case that we have more than one at once.  So it
-        * doesn't seem worth the extra complexity to optimize that case.
-        * If we do find more than one, we just punt and return a lossy-page
-        * pointer always.
+        * This idea could be generalized to more than one lossy-page entry, but
+        * ideally lossy-page entries should be infrequent so it would seldom be
+        * the case that we have more than one at once.  So it doesn't seem worth
+        * the extra complexity to optimize that case. If we do find more than
+        * one, we just punt and return a lossy-page pointer always.
         *
-        * Note that only lossy-page entries pointing to the current item's
-        * page should trigger this processing; we might have future lossy
-        * pages in the entry array, but they aren't relevant yet.
+        * Note that only lossy-page entries pointing to the current item's page
+        * should trigger this processing; we might have future lossy pages in the
+        * entry array, but they aren't relevant yet.
         */
        ItemPointerSetLossyPage(&curPageLossy,
                                                        GinItemPointerGetBlockNumber(&key->curItem));
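
A compressed sketch of the single-lossy-entry shortcut described in this comment; the callback type, enum, and function names are hypothetical stand-ins for the consistentFn machinery rather than the real GIN interfaces:

/* Hypothetical sketch: with exactly one lossy-page entry, ask the consistent
 * callback whether that entry alone being TRUE is enough; if so, the whole
 * heap page has to be rechecked. */
#include <stdbool.h>

typedef bool (*consistent_cb) (const bool *entryRes, int nentries);

typedef enum { MATCH_NONE, MATCH_EXACT, MATCH_LOSSY_PAGE } MatchKindSketch;

static MatchKindSketch
check_single_lossy(consistent_cb consistent, bool *entryRes, int nentries,
                   int lossy_index)
{
    int         i;

    /* Pretend only the lossy entry matched; everything else FALSE. */
    for (i = 0; i < nentries; i++)
        entryRes[i] = (i == lossy_index);

    if (consistent(entryRes, nentries))
        return MATCH_LOSSY_PAGE;    /* whole heap page must be rechecked */
    return MATCH_NONE;              /* fall back to exact-pointer handling */
}

/* e.g. an OR-style consistent function succeeds if any entry is true */
static bool
or_consistent(const bool *entryRes, int nentries)
{
    int         i;

    for (i = 0; i < nentries; i++)
        if (entryRes[i])
            return true;
    return false;
}

int
main(void)
{
    bool        entryRes[3];

    return check_single_lossy(or_consistent, entryRes, 3, 1) == MATCH_LOSSY_PAGE ? 0 : 1;
}
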
@@ -853,15 +852,14 @@ keyGetItem(GinState *ginstate, MemoryContext tempCtx, GinScanKey key)
        }
 
        /*
-        * At this point we know that we don't need to return a lossy
-        * whole-page pointer, but we might have matches for individual exact
-        * item pointers, possibly in combination with a lossy pointer.  Our
-        * strategy if there's a lossy pointer is to try the consistentFn both
-        * ways and return a hit if it accepts either one (forcing the hit to
-        * be marked lossy so it will be rechecked).  An exception is that
-        * we don't need to try it both ways if the lossy pointer is in a
-        * "hidden" entry, because the consistentFn's result can't depend on
-        * that.
+        * At this point we know that we don't need to return a lossy whole-page
+        * pointer, but we might have matches for individual exact item pointers,
+        * possibly in combination with a lossy pointer.  Our strategy if there's
+        * a lossy pointer is to try the consistentFn both ways and return a hit
+        * if it accepts either one (forcing the hit to be marked lossy so it will
+        * be rechecked).  An exception is that we don't need to try it both ways
+        * if the lossy pointer is in a "hidden" entry, because the consistentFn's
+        * result can't depend on that.
         *
         * Prepare entryRes array to be passed to consistentFn.
         */
@@ -960,7 +958,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
                        keyGetItem(&so->ginstate, so->tempCtx, key);
 
                        if (key->isFinished)
-                               return false;           /* finished one of keys */
+                               return false;   /* finished one of keys */
 
                        if (ginCompareItemPointers(&key->curItem, item) < 0)
                                *item = key->curItem;
@@ -975,7 +973,7 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
                 * that exact TID, or a lossy reference to the same page.
                 *
                 * This logic works only if a keyGetItem stream can never contain both
-                * exact and lossy pointers for the same page.  Else we could have a
+                * exact and lossy pointers for the same page.  Else we could have a
                 * case like
                 *
                 *              stream 1                stream 2
@@ -1011,8 +1009,8 @@ scanGetItem(IndexScanDesc scan, ItemPointer advancePast,
                        break;
 
                /*
-                * No hit.  Update myAdvancePast to this TID, so that on the next
-                * pass we'll move to the next possible entry.
+                * No hit.      Update myAdvancePast to this TID, so that on the next pass
+                * we'll move to the next possible entry.
                 */
                myAdvancePast = *item;
        }
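
As a rough model of the loop these comments describe (require every key stream to contain the candidate TID, and on a miss advance past it), here is a toy intersection of plain sorted arrays; it ignores lossy pointers and everything else GIN-specific:

/* Toy model of intersecting several sorted TID streams: every stream must
 * contain the candidate item, and after each candidate we only look at
 * items strictly greater than it ("advancePast"). */
#include <stdint.h>
#include <stdio.h>

typedef struct { const uint64_t *items; int n; int pos; } Stream;

/* Return the first item > advancePast in this stream, or 0 when finished. */
static uint64_t
stream_next(Stream *s, uint64_t advancePast)
{
    while (s->pos < s->n && s->items[s->pos] <= advancePast)
        s->pos++;
    return (s->pos < s->n) ? s->items[s->pos] : 0;
}

int
main(void)
{
    const uint64_t a[] = {3, 5, 8, 11};
    const uint64_t b[] = {5, 8, 9, 11};
    Stream      streams[2] = {{a, 4, 0}, {b, 4, 0}};
    uint64_t    advancePast = 0;

    for (;;)
    {
        uint64_t    candidate = 0;
        int         i, hit = 1;

        /* Take the maximum of each stream's next item as the candidate... */
        for (i = 0; i < 2; i++)
        {
            uint64_t    next = stream_next(&streams[i], advancePast);

            if (next == 0)
                return 0;               /* one stream finished: all done */
            if (next > candidate)
                candidate = next;
        }
        /* ...and it is a hit only if every stream actually contains it. */
        for (i = 0; i < 2; i++)
            if (stream_next(&streams[i], candidate - 1) != candidate)
                hit = 0;
        if (hit)
            printf("match: %llu\n", (unsigned long long) candidate);
        advancePast = candidate;        /* next pass skips past this TID */
    }
}
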
@@ -1118,8 +1116,8 @@ scanGetCandidate(IndexScanDesc scan, pendingPosition *pos)
 
                        /*
                         * Now pos->firstOffset points to the first tuple of current heap
-                        * row, pos->lastOffset points to the first tuple of next heap
-                        * row (or to the end of page)
+                        * row, pos->lastOffset points to the first tuple of next heap row
+                        * (or to the end of page)
                         */
                        break;
                }
@@ -1181,7 +1179,7 @@ matchPartialInPendingList(GinState *ginstate, Page page,
                                                                                  entry->queryKey,
                                                                                  datum[off - 1],
                                                                                  UInt16GetDatum(entry->strategy),
-                                                                                 PointerGetDatum(entry->extra_data)));
+                                                                               PointerGetDatum(entry->extra_data)));
                if (cmp == 0)
                        return true;
                else if (cmp > 0)
@@ -1227,8 +1225,8 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
        memset(pos->hasMatchKey, FALSE, so->nkeys);
 
        /*
-        * Outer loop iterates over multiple pending-list pages when a single
-        * heap row has entries spanning those pages.
+        * Outer loop iterates over multiple pending-list pages when a single heap
+        * row has entries spanning those pages.
         */
        for (;;)
        {
@@ -1322,11 +1320,11 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
                                        if (res == 0)
                                        {
                                                /*
-                                                * Found exact match (there can be only one, except
-                                                * in EMPTY_QUERY mode).
+                                                * Found exact match (there can be only one, except in
+                                                * EMPTY_QUERY mode).
                                                 *
-                                                * If doing partial match, scan forward from
-                                                * here to end of page to check for matches.
+                                                * If doing partial match, scan forward from here to
+                                                * end of page to check for matches.
                                                 *
                                                 * See comment above about tuple's ordering.
                                                 */
@@ -1355,13 +1353,12 @@ collectMatchesForHeapRow(IndexScanDesc scan, pendingPosition *pos)
                                if (StopLow >= StopHigh && entry->isPartialMatch)
                                {
                                        /*
-                                        * No exact match on this page.  If doing partial
-                                        * match, scan from the first tuple greater than
-                                        * target value to end of page.  Note that since we
-                                        * don't remember whether the comparePartialFn told us
-                                        * to stop early on a previous page, we will uselessly
-                                        * apply comparePartialFn to the first tuple on each
-                                        * subsequent page.
+                                        * No exact match on this page.  If doing partial match,
+                                        * scan from the first tuple greater than target value to
+                                        * end of page.  Note that since we don't remember whether
+                                        * the comparePartialFn told us to stop early on a
+                                        * previous page, we will uselessly apply comparePartialFn
+                                        * to the first tuple on each subsequent page.
                                         */
                                        key->entryRes[j] =
                                                matchPartialInPendingList(&so->ginstate,
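
A small standalone analogue of the search strategy described above: binary-search for an exact key, and if there is none but the scan is a partial match, continue scanning forward from the first greater key. Integer keys and a crude "same decade" test stand in for the real compare and comparePartial support functions:

/* Sketch: binary-search the page for an exact key; with no exact match and
 * partial matching enabled, scan forward from the first key greater than
 * the query value until the keys can no longer match. */
#include <stdio.h>

static int
first_greater_or_equal(const int *keys, int n, int target)
{
    int         lo = 0, hi = n;     /* StopLow / StopHigh style bounds */

    while (lo < hi)
    {
        int         mid = lo + (hi - lo) / 2;

        if (keys[mid] < target)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}

int
main(void)
{
    /* keys on one "page", sorted; query 40 has no exact match */
    int         keys[] = {10, 20, 41, 42, 43, 90};
    int         n = 6, target = 40, i;

    i = first_greater_or_equal(keys, n, target);
    if (i < n && keys[i] == target)
        printf("exact match at %d\n", i);
    else
    {
        /* partial match: scan forward while keys still "match the prefix"
         * (here: same decade), stopping as soon as they cannot */
        for (; i < n && keys[i] / 10 == target / 10; i++)
            printf("partial match: %d\n", keys[i]);
    }
    return 0;
}
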
index af5068906fb89c4e3ed40187bab121079e45ece3..3e32af94a96d34369487f014df68559a08c299d0 100644 (file)
@@ -97,7 +97,7 @@ createPostingTree(Relation index, ItemPointerData *items, uint32 nitems)
  * Adds array of item pointers to tuple's posting list, or
  * creates posting tree and tuple pointing to tree in case
  * of not enough space.  Max size of tuple is defined in
- * GinFormTuple().  Returns a new, modified index tuple.
+ * GinFormTuple().     Returns a new, modified index tuple.
  * items[] must be in sorted order with no duplicates.
  */
 static IndexTuple
@@ -195,14 +195,14 @@ buildFreshLeafTuple(GinState *ginstate,
                BlockNumber postingRoot;
 
                /*
-                * Build posting-tree-only result tuple.  We do this first so as
-                * to fail quickly if the key is too big.
+                * Build posting-tree-only result tuple.  We do this first so as to
+                * fail quickly if the key is too big.
                 */
                res = GinFormTuple(ginstate, attnum, key, category, NULL, 0, true);
 
                /*
-                * Initialize posting tree with as many TIDs as will fit on the
-                * first page.
+                * Initialize posting tree with as many TIDs as will fit on the first
+                * page.
                 */
                postingRoot = createPostingTree(ginstate->index,
                                                                                items,
@@ -361,7 +361,7 @@ ginBuildCallback(Relation index, HeapTuple htup, Datum *values,
 
                ginBeginBAScan(&buildstate->accum);
                while ((list = ginGetBAEntry(&buildstate->accum,
-                                                                        &attnum, &key, &category, &nlist)) != NULL)
+                                                                 &attnum, &key, &category, &nlist)) != NULL)
                {
                        /* there could be many entries, so be willing to abort here */
                        CHECK_FOR_INTERRUPTS();
index 25f60e15a0d438a55eea244ba145cd17b044b54e..37b08c0df62a388c87b83bcafa4b342828f4f5b7 100644 (file)
@@ -199,7 +199,7 @@ ginFillScanKey(GinScanOpaque so, OffsetNumber attnum,
                                        break;
                                default:
                                        elog(ERROR, "unexpected searchMode: %d", searchMode);
-                                       queryCategory = 0;              /* keep compiler quiet */
+                                       queryCategory = 0;      /* keep compiler quiet */
                                        break;
                        }
                        isPartialMatch = false;
@@ -294,8 +294,8 @@ ginNewScanKey(IndexScanDesc scan)
                int32           searchMode = GIN_SEARCH_MODE_DEFAULT;
 
                /*
-                * We assume that GIN-indexable operators are strict, so a null
-                * query argument means an unsatisfiable query.
+                * We assume that GIN-indexable operators are strict, so a null query
+                * argument means an unsatisfiable query.
                 */
                if (skey->sk_flags & SK_ISNULL)
                {
@@ -315,8 +315,8 @@ ginNewScanKey(IndexScanDesc scan)
                                                                                  PointerGetDatum(&searchMode)));
 
                /*
-                * If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL;
-                * note in particular we don't allow extractQueryFn to select
+                * If bogus searchMode is returned, treat as GIN_SEARCH_MODE_ALL; note
+                * in particular we don't allow extractQueryFn to select
                 * GIN_SEARCH_MODE_EVERYTHING.
                 */
                if (searchMode < GIN_SEARCH_MODE_DEFAULT ||
@@ -344,20 +344,20 @@ ginNewScanKey(IndexScanDesc scan)
                 * If the extractQueryFn didn't create a nullFlags array, create one,
                 * assuming that everything's non-null.  Otherwise, run through the
                 * array and make sure each value is exactly 0 or 1; this ensures
-                * binary compatibility with the GinNullCategory representation.
-                * While at it, detect whether any null keys are present.
+                * binary compatibility with the GinNullCategory representation. While
+                * at it, detect whether any null keys are present.
                 */
                if (nullFlags == NULL)
                        nullFlags = (bool *) palloc0(nQueryValues * sizeof(bool));
                else
                {
-                       int32 j;
+                       int32           j;
 
                        for (j = 0; j < nQueryValues; j++)
                        {
                                if (nullFlags[j])
                                {
-                                       nullFlags[j] = true;    /* not any other nonzero value */
+                                       nullFlags[j] = true;            /* not any other nonzero value */
                                        hasNullQuery = true;
                                }
                        }
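
A minimal sketch of why the flags are squashed to exactly 0 or 1 here, using a char-sized stand-in for bool; the category codes themselves are not modelled:

/* Sketch: when bool is really a char, a caller may hand back any nonzero
 * byte for "true"; squashing each flag to exactly 0 or 1 makes the array
 * binary-compatible with a one-byte category code. */
#include <stdio.h>

typedef char boolch;            /* stand-in for a char-sized bool */

int
main(void)
{
    boolch      nullFlags[4] = {0, (boolch) 0xff, 0, 7};
    int         i;
    int         hasNull = 0;

    for (i = 0; i < 4; i++)
    {
        if (nullFlags[i])
        {
            nullFlags[i] = 1;   /* not any other nonzero value */
            hasNull = 1;
        }
    }

    for (i = 0; i < 4; i++)
        printf("key %d -> category byte %d\n", i, (int) nullFlags[i]);
    printf("hasNull = %d\n", hasNull);
    return 0;
}
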
@@ -387,11 +387,11 @@ ginNewScanKey(IndexScanDesc scan)
        /*
         * If the index is version 0, it may be missing null and placeholder
         * entries, which would render searches for nulls and full-index scans
-        * unreliable.  Throw an error if so.
+        * unreliable.  Throw an error if so.
         */
        if (hasNullQuery && !so->isVoidRes)
        {
-               GinStatsData   ginStats;
+               GinStatsData ginStats;
 
                ginGetStats(scan->indexRelation, &ginStats);
                if (ginStats.ginVersion < 1)
@@ -410,6 +410,7 @@ ginrescan(PG_FUNCTION_ARGS)
 {
        IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
        ScanKey         scankey = (ScanKey) PG_GETARG_POINTER(1);
+
        /* remaining arguments are ignored */
        GinScanOpaque so = (GinScanOpaque) scan->opaque;
 
index 392c12d47abfa740cbfe6436f5bc652cd0c49fcb..716cf3a734865d7ee3502f6377c12a5f50c9c1c3 100644 (file)
@@ -70,7 +70,7 @@ initGinState(GinState *state, Relation index)
                 * However, we may have a collatable storage type for a noncollatable
                 * indexed data type (for instance, hstore uses text index entries).
                 * If there's no index collation then specify default collation in
-                * case the comparison function needs one.  This is harmless if the
+                * case the comparison function needs one.      This is harmless if the
                 * comparison function doesn't care about collation, so we just do it
                 * unconditionally.  (We could alternatively call get_typcollation,
                 * but that seems like expensive overkill --- there aren't going to be
@@ -359,9 +359,9 @@ cmpEntries(const void *a, const void *b, void *arg)
                                                                                  aa->datum, bb->datum));
 
        /*
-        * Detect if we have any duplicates.  If there are equal keys, qsort
-        * must compare them at some point, else it wouldn't know whether one
-        * should go before or after the other.
+        * Detect if we have any duplicates.  If there are equal keys, qsort must
+        * compare them at some point, else it wouldn't know whether one should go
+        * before or after the other.
         */
        if (res == 0)
                data->haveDups = true;
@@ -422,9 +422,9 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
 
        /*
         * If the extractValueFn didn't create a nullFlags array, create one,
-        * assuming that everything's non-null.  Otherwise, run through the
-        * array and make sure each value is exactly 0 or 1; this ensures
-        * binary compatibility with the GinNullCategory representation.
+        * assuming that everything's non-null.  Otherwise, run through the array
+        * and make sure each value is exactly 0 or 1; this ensures binary
+        * compatibility with the GinNullCategory representation.
         */
        if (nullFlags == NULL)
                nullFlags = (bool *) palloc0(*nentries * sizeof(bool));
@@ -440,8 +440,8 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
         * If there's more than one key, sort and unique-ify.
         *
         * XXX Using qsort here is notationally painful, and the overhead is
-        * pretty bad too.  For small numbers of keys it'd likely be better to
-        * use a simple insertion sort.
+        * pretty bad too.      For small numbers of keys it'd likely be better to use
+        * a simple insertion sort.
         */
        if (*nentries > 1)
        {
@@ -470,7 +470,7 @@ ginExtractEntries(GinState *ginstate, OffsetNumber attnum,
                        j = 1;
                        for (i = 1; i < *nentries; i++)
                        {
-                               if (cmpEntries(&keydata[i-1], &keydata[i], &arg) != 0)
+                               if (cmpEntries(&keydata[i - 1], &keydata[i], &arg) != 0)
                                {
                                        entries[j] = keydata[i].datum;
                                        nullFlags[j] = keydata[i].isnull;
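
The dedup pass above (the j = 1 compaction over a sorted array) follows the usual sort-then-unique pattern; a plain-C version over integers, with qsort standing in for qsort_arg and the cmpEntries comparator:

/* Sketch: sort, then keep only the first of each run of equal keys,
 * mirroring the "j = 1; for (i = 1; ...)" compaction loop. */
#include <stdio.h>
#include <stdlib.h>

static int
cmp_int(const void *a, const void *b)
{
    int         ia = *(const int *) a;
    int         ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

int
main(void)
{
    int         keys[] = {7, 3, 7, 1, 3, 9};
    int         n = 6;
    int         i, j;

    qsort(keys, n, sizeof(int), cmp_int);

    j = 1;
    for (i = 1; i < n; i++)
    {
        if (cmp_int(&keys[i - 1], &keys[i]) != 0)
            keys[j++] = keys[i];
    }
    n = j;

    for (i = 0; i < n; i++)
        printf("%d ", keys[i]);
    printf("\n");               /* prints: 1 3 7 9 */
    return 0;
}
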
@@ -533,9 +533,9 @@ ginoptions(PG_FUNCTION_ARGS)
 void
 ginGetStats(Relation index, GinStatsData *stats)
 {
-       Buffer                  metabuffer;
-       Page                    metapage;
-       GinMetaPageData *metadata;
+       Buffer          metabuffer;
+       Page            metapage;
+       GinMetaPageData *metadata;
 
        metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
        LockBuffer(metabuffer, GIN_SHARE);
@@ -560,9 +560,9 @@ ginGetStats(Relation index, GinStatsData *stats)
 void
 ginUpdateStats(Relation index, const GinStatsData *stats)
 {
-       Buffer                  metabuffer;
-       Page                    metapage;
-       GinMetaPageData *metadata;
+       Buffer          metabuffer;
+       Page            metapage;
+       GinMetaPageData *metadata;
 
        metabuffer = ReadBuffer(index, GIN_METAPAGE_BLKNO);
        LockBuffer(metabuffer, GIN_EXCLUSIVE);
@@ -580,9 +580,9 @@ ginUpdateStats(Relation index, const GinStatsData *stats)
 
        if (RelationNeedsWAL(index))
        {
-               XLogRecPtr                      recptr;
-               ginxlogUpdateMeta       data;
-               XLogRecData                     rdata;
+               XLogRecPtr      recptr;
+               ginxlogUpdateMeta data;
+               XLogRecData rdata;
 
                data.node = index->rd_node;
                data.ntuples = 0;
index 41ad382df0cd60362e7b7d5d1ba4bff3fa91c7a9..79c54f16b8dce67b3fd23462052b37d6562aea9b 100644 (file)
@@ -783,7 +783,7 @@ ginvacuumcleanup(PG_FUNCTION_ARGS)
                {
                        idxStat.nEntryPages++;
 
-                       if ( GinPageIsLeaf(page) )
+                       if (GinPageIsLeaf(page))
                                idxStat.nEntries += PageGetMaxOffsetNumber(page);
                }
 
index e410959b851bd8ac528253c077f104e1d82d3d0f..c954bcb12fc8d82bc0ecbf9dd3a09b64b95dc14f 100644 (file)
@@ -388,7 +388,7 @@ ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record)
                else
                {
                        OffsetNumber i,
-                               *tod;
+                                          *tod;
                        IndexTuple      itup = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogVacuumPage));
 
                        tod = (OffsetNumber *) palloc(sizeof(OffsetNumber) * PageGetMaxOffsetNumber(page));
@@ -513,10 +513,10 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
                                if (!XLByteLE(lsn, PageGetLSN(page)))
                                {
                                        OffsetNumber l,
-                                               off = (PageIsEmpty(page)) ? FirstOffsetNumber :
-                                               OffsetNumberNext(PageGetMaxOffsetNumber(page));
+                                                               off = (PageIsEmpty(page)) ? FirstOffsetNumber :
+                                       OffsetNumberNext(PageGetMaxOffsetNumber(page));
                                        int                     i,
-                                               tupsize;
+                                                               tupsize;
                                        IndexTuple      tuples = (IndexTuple) (XLogRecGetData(record) + sizeof(ginxlogUpdateMeta));
 
                                        for (i = 0; i < data->ntuples; i++)
index 9529413e80e970078796564b8d9b413dbac16213..fae3464600a5a87dd31c7c3dcf44b3bdcd0276b1 100644 (file)
@@ -34,8 +34,8 @@ typedef struct
 /* A List of these is used represent a split-in-progress. */
 typedef struct
 {
-       Buffer          buf;            /* the split page "half" */
-       IndexTuple      downlink;       /* downlink for this half. */
+       Buffer          buf;                    /* the split page "half" */
+       IndexTuple      downlink;               /* downlink for this half. */
 } GISTPageSplitInfo;
 
 /* non-export function prototypes */
@@ -306,13 +306,13 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
        bool            is_split;
 
        /*
-        * Refuse to modify a page that's incompletely split. This should
-        * not happen because we finish any incomplete splits while we walk
-        * down the tree. However, it's remotely possible that another
-        * concurrent inserter splits a parent page, and errors out before
-        * completing the split. We will just throw an error in that case,
-        * and leave any split we had in progress unfinished too. The next
-        * insert that comes along will clean up the mess.
+        * Refuse to modify a page that's incompletely split. This should not
+        * happen because we finish any incomplete splits while we walk down the
+        * tree. However, it's remotely possible that another concurrent inserter
+        * splits a parent page, and errors out before completing the split. We
+        * will just throw an error in that case, and leave any split we had in
+        * progress unfinished too. The next insert that comes along will clean up
+        * the mess.
         */
        if (GistFollowRight(page))
                elog(ERROR, "concurrent GiST page split was incomplete");
@@ -338,7 +338,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
                SplitedPageLayout *dist = NULL,
                                   *ptr;
                BlockNumber oldrlink = InvalidBlockNumber;
-               GistNSN         oldnsn = { 0, 0 };
+               GistNSN         oldnsn = {0, 0};
                SplitedPageLayout rootpg;
                BlockNumber blkno = BufferGetBlockNumber(buffer);
                bool            is_rootsplit;
@@ -364,8 +364,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
 
                /*
                 * Set up pages to work with. Allocate new buffers for all but the
-                * leftmost page. The original page becomes the new leftmost page,
-                * and is just replaced with the new contents.
+                * leftmost page. The original page becomes the new leftmost page, and
+                * is just replaced with the new contents.
                 *
                 * For a root-split, allocate new buffers for all child pages, the
                 * original page is overwritten with new root page containing
@@ -414,8 +414,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
                if (is_rootsplit)
                {
                        IndexTuple *downlinks;
-                       int ndownlinks = 0;
-                       int i;
+                       int                     ndownlinks = 0;
+                       int                     i;
 
                        rootpg.buffer = buffer;
                        rootpg.page = PageGetTempPageCopySpecial(BufferGetPage(rootpg.buffer));
@@ -443,6 +443,7 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
                        for (ptr = dist; ptr; ptr = ptr->next)
                        {
                                GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
+
                                si->buf = ptr->buffer;
                                si->downlink = ptr->itup;
                                *splitinfo = lappend(*splitinfo, si);
@@ -455,7 +456,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
                 */
                for (ptr = dist; ptr; ptr = ptr->next)
                {
-                       char *data = (char *) (ptr->list);
+                       char       *data = (char *) (ptr->list);
+
                        for (i = 0; i < ptr->block.num; i++)
                        {
                                if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, false, false) == InvalidOffsetNumber)
@@ -495,8 +497,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
                        MarkBufferDirty(leftchildbuf);
 
                /*
-                * The first page in the chain was a temporary working copy meant
-                * to replace the old page. Copy it over the old page.
+                * The first page in the chain was a temporary working copy meant to
+                * replace the old page. Copy it over the old page.
                 */
                PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
                dist->page = BufferGetPage(dist->buffer);
@@ -518,8 +520,8 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
                 * Return the new child buffers to the caller.
                 *
                 * If this was a root split, we've already inserted the downlink
-                * pointers, in the form of a new root page. Therefore we can
-                * release all the new buffers, and keep just the root page locked.
+                * pointers, in the form of a new root page. Therefore we can release
+                * all the new buffers, and keep just the root page locked.
                 */
                if (is_rootsplit)
                {
@@ -572,20 +574,20 @@ gistplacetopage(GISTInsertState *state, GISTSTATE *giststate,
 
        /*
         * If we inserted the downlink for a child page, set NSN and clear
-        * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know
-        * to follow the rightlink if and only if they looked at the parent page
+        * F_FOLLOW_RIGHT flag on the left child, so that concurrent scans know to
+        * follow the rightlink if and only if they looked at the parent page
         * before we inserted the downlink.
         *
         * Note that we do this *after* writing the WAL record. That means that
-        * the possible full page image in the WAL record does not include
-        * these changes, and they must be replayed even if the page is restored
-        * from the full page image. There's a chicken-and-egg problem: if we
-        * updated the child pages first, we wouldn't know the recptr of the WAL
-        * record we're about to write.
+        * the possible full page image in the WAL record does not include these
+        * changes, and they must be replayed even if the page is restored from
+        * the full page image. There's a chicken-and-egg problem: if we updated
+        * the child pages first, we wouldn't know the recptr of the WAL record
+        * we're about to write.
         */
        if (BufferIsValid(leftchildbuf))
        {
-               Page leftpg = BufferGetPage(leftchildbuf);
+               Page            leftpg = BufferGetPage(leftchildbuf);
 
                GistPageGetOpaque(leftpg)->nsn = recptr;
                GistClearFollowRight(leftpg);
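
A mock sketch of the ordering constraint spelled out in this comment: the WAL record has to be written first, because the child's NSN is that record's LSN, which does not exist until then. All types and helpers below are simplified stand-ins, not the real gistplacetopage/XLogInsert code:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef uint64_t LsnSketch;

typedef struct
{
    LsnSketch   lsn;            /* page LSN */
    LsnSketch   nsn;            /* sequence number of the split that made it */
    bool        follow_right;   /* set while the parent downlink is missing */
} PageSketch;

static LsnSketch
fake_wal_insert(void)
{
    static LsnSketch next = 100;

    return next++;              /* the record's LSN only exists after logging */
}

int
main(void)
{
    PageSketch  leftchild = {0, 0, true};
    LsnSketch   recptr;

    /*
     * 1. Write the WAL record for the parent update (downlink insertion).
     *    Only now is recptr known, so a full-page image taken in that record
     *    cannot include the child-side changes below; redo must reapply them.
     */
    recptr = fake_wal_insert();

    /*
     * 2. Only afterwards stamp the left child: scans that saw the parent
     *    before this point still follow the rightlink; later ones can rely
     *    on the downlink instead.
     */
    leftchild.nsn = recptr;
    leftchild.follow_right = false;
    leftchild.lsn = recptr;

    printf("child nsn=%llu follow_right=%d\n",
           (unsigned long long) leftchild.nsn, (int) leftchild.follow_right);
    return 0;
}
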
@@ -636,8 +638,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                        stack->buffer = ReadBuffer(state.r, stack->blkno);
 
                /*
-                * Be optimistic and grab shared lock first. Swap it for an
-                * exclusive lock later if we need to update the page.
+                * Be optimistic and grab shared lock first. Swap it for an exclusive
+                * lock later if we need to update the page.
                 */
                if (!xlocked)
                {
@@ -650,9 +652,9 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                Assert(!RelationNeedsWAL(state.r) || !XLogRecPtrIsInvalid(stack->lsn));
 
                /*
-                * If this page was split but the downlink was never inserted to
-                * the parent because the inserting backend crashed before doing
-                * that, fix that now.
+                * If this page was split but the downlink was never inserted to the
+                * parent because the inserting backend crashed before doing that, fix
+                * that now.
                 */
                if (GistFollowRight(stack->page))
                {
@@ -680,8 +682,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                        /*
                         * Concurrent split detected. There's no guarantee that the
                         * downlink for this page is consistent with the tuple we're
-                        * inserting anymore, so go back to parent and rechoose the
-                        * best child.
+                        * inserting anymore, so go back to parent and rechoose the best
+                        * child.
                         */
                        UnlockReleaseBuffer(stack->buffer);
                        xlocked = false;
@@ -696,7 +698,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                         * Find the child node that has the minimum insertion penalty.
                         */
                        BlockNumber childblkno;
-                       IndexTuple newtup;
+                       IndexTuple      newtup;
                        GISTInsertStack *item;
 
                        stack->childoffnum = gistchoose(state.r, stack->page, itup, giststate);
@@ -722,8 +724,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                        if (newtup)
                        {
                                /*
-                                * Swap shared lock for an exclusive one. Beware, the page
-                                * may change while we unlock/lock the page...
+                                * Swap shared lock for an exclusive one. Beware, the page may
+                                * change while we unlock/lock the page...
                                 */
                                if (!xlocked)
                                {
@@ -738,6 +740,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                                                continue;
                                        }
                                }
+
                                /*
                                 * Update the tuple.
                                 *
@@ -752,8 +755,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                                                                         stack->childoffnum, InvalidBuffer))
                                {
                                        /*
-                                        * If this was a root split, the root page continues to
-                                        * be the parent and the updated tuple went to one of the
+                                        * If this was a root split, the root page continues to be
+                                        * the parent and the updated tuple went to one of the
                                         * child pages, so we just need to retry from the root
                                         * page.
                                         */
@@ -779,13 +782,13 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                {
                        /*
                         * Leaf page. Insert the new key. We've already updated all the
-                        * parents on the way down, but we might have to split the page
-                        * if it doesn't fit. gistinserthere() will take care of that.
+                        * parents on the way down, but we might have to split the page if
+                        * it doesn't fit. gistinserthere() will take care of that.
                         */
 
                        /*
-                        * Swap shared lock for an exclusive one. Be careful, the page
-                        * may change while we unlock/lock the page...
+                        * Swap shared lock for an exclusive one. Be careful, the page may
+                        * change while we unlock/lock the page...
                         */
                        if (!xlocked)
                        {
@@ -798,8 +801,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
                                if (stack->blkno == GIST_ROOT_BLKNO)
                                {
                                        /*
-                                        * the only page that can become inner instead of leaf
-                                        * is the root page, so for root we should recheck it
+                                        * the only page that can become inner instead of leaf is
+                                        * the root page, so for root we should recheck it
                                         */
                                        if (!GistPageIsLeaf(stack->page))
                                        {
@@ -1059,21 +1062,23 @@ static IndexTuple
 gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
                                 GISTInsertStack *stack)
 {
-       Page page = BufferGetPage(buf);
+       Page            page = BufferGetPage(buf);
        OffsetNumber maxoff;
        OffsetNumber offset;
-       IndexTuple downlink = NULL;
+       IndexTuple      downlink = NULL;
 
        maxoff = PageGetMaxOffsetNumber(page);
        for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
        {
                IndexTuple      ituple = (IndexTuple)
-                       PageGetItem(page, PageGetItemId(page, offset));
+               PageGetItem(page, PageGetItemId(page, offset));
+
                if (downlink == NULL)
                        downlink = CopyIndexTuple(ituple);
                else
                {
-                       IndexTuple newdownlink;
+                       IndexTuple      newdownlink;
+
                        newdownlink = gistgetadjusted(rel, downlink, ituple,
                                                                                  giststate);
                        if (newdownlink)
@@ -1082,19 +1087,18 @@ gistformdownlink(Relation rel, Buffer buf, GISTSTATE *giststate,
        }
 
        /*
-        * If the page is completely empty, we can't form a meaningful
-        * downlink for it. But we have to insert a downlink for the page.
-        * Any key will do, as long as its consistent with the downlink of
-        * parent page, so that we can legally insert it to the parent.
-        * A minimal one that matches as few scans as possible would be best,
-        * to keep scans from doing useless work, but we don't know how to
-        * construct that. So we just use the downlink of the original page
-        * that was split - that's as far from optimal as it can get but will
-        * do..
+        * If the page is completely empty, we can't form a meaningful downlink
+        * for it. But we have to insert a downlink for the page. Any key will do,
+        * as long as its consistent with the downlink of parent page, so that we
+        * can legally insert it to the parent. A minimal one that matches as few
+        * scans as possible would be best, to keep scans from doing useless work,
+        * but we don't know how to construct that. So we just use the downlink of
+        * the original page that was split - that's as far from optimal as it can
+        * get but will do..
         */
        if (!downlink)
        {
-               ItemId iid;
+               ItemId          iid;
 
                LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
                gistFindCorrectParent(rel, stack);
@@ -1131,13 +1135,13 @@ gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
        buf = stack->buffer;
 
        /*
-        * Read the chain of split pages, following the rightlinks. Construct
-        * downlink tuple for each page.
+        * Read the chain of split pages, following the rightlinks. Construct a
+        * downlink tuple for each page.
         */
        for (;;)
        {
                GISTPageSplitInfo *si = palloc(sizeof(GISTPageSplitInfo));
-               IndexTuple downlink;
+               IndexTuple      downlink;
 
                page = BufferGetPage(buf);
 
@@ -1182,8 +1186,8 @@ gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
                                 IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
                                 Buffer leftchild)
 {
-       List *splitinfo;
-       bool is_split;
+       List       *splitinfo;
+       bool            is_split;
 
        is_split = gistplacetopage(state, giststate, stack->buffer,
                                                           tuples, ntup, oldoffnum,
@@ -1204,21 +1208,21 @@ static void
 gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
                                GISTSTATE *giststate, List *splitinfo)
 {
-       ListCell *lc;
-       List *reversed;
+       ListCell   *lc;
+       List       *reversed;
        GISTPageSplitInfo *right;
        GISTPageSplitInfo *left;
-       IndexTuple tuples[2];
+       IndexTuple      tuples[2];
 
        /* A split always contains at least two halves */
        Assert(list_length(splitinfo) >= 2);
 
        /*
-        * We need to insert downlinks for each new page, and update the
-        * downlink for the original (leftmost) page in the split. Begin at
-        * the rightmost page, inserting one downlink at a time until there's
-        * only two pages left. Finally insert the downlink for the last new
-        * page and update the downlink for the original page as one operation.
+        * We need to insert downlinks for each new page, and update the downlink
+        * for the original (leftmost) page in the split. Begin at the rightmost
+        * page, inserting one downlink at a time until there's only two pages
+        * left. Finally insert the downlink for the last new page and update the
+        * downlink for the original page as one operation.
         */
 
        /* for convenience, create a copy of the list in reverse order */
@@ -1231,7 +1235,7 @@ gistfinishsplit(GISTInsertState *state, GISTInsertStack *stack,
        LockBuffer(stack->parent->buffer, GIST_EXCLUSIVE);
        gistFindCorrectParent(state->r, stack);
 
-       while(list_length(reversed) > 2)
+       while (list_length(reversed) > 2)
        {
                right = (GISTPageSplitInfo *) linitial(reversed);
                left = (GISTPageSplitInfo *) lsecond(reversed);
@@ -1386,7 +1390,7 @@ initGISTstate(GISTSTATE *giststate, Relation index)
                /* opclasses are not required to provide a Distance method */
                if (OidIsValid(index_getprocid(index, i + 1, GIST_DISTANCE_PROC)))
                        fmgr_info_copy(&(giststate->distanceFn[i]),
-                                                  index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC),
+                                                index_getprocinfo(index, i + 1, GIST_DISTANCE_PROC),
                                                   CurrentMemoryContext);
                else
                        giststate->distanceFn[i].fn_oid = InvalidOid;
index 8355081553d740410fca58c7c154f77436b8cc5c..e4488a925de687e153677be332fd217d522c3415 100644 (file)
@@ -32,7 +32,7 @@
  *
  * On success return for a heap tuple, *recheck_p is set to indicate
  * whether recheck is needed.  We recheck if any of the consistent() functions
- * request it.  recheck is not interesting when examining a non-leaf entry,
+ * request it. recheck is not interesting when examining a non-leaf entry,
  * since we must visit the lower index page if there's any doubt.
  *
  * If we are doing an ordered scan, so->distances[] is filled with distance
@@ -62,15 +62,15 @@ gistindex_keytest(IndexScanDesc scan,
        *recheck_p = false;
 
        /*
-        * If it's a leftover invalid tuple from pre-9.1, treat it as a match
-        * with minimum possible distances.  This means we'll always follow it
-        * to the referenced page.
+        * If it's a leftover invalid tuple from pre-9.1, treat it as a match with
+        * minimum possible distances.  This means we'll always follow it to the
+        * referenced page.
         */
        if (GistTupleIsInvalid(tuple))
        {
-               int             i;
+               int                     i;
 
-               if (GistPageIsLeaf(page))                       /* shouldn't happen */
+               if (GistPageIsLeaf(page))               /* shouldn't happen */
                        elog(ERROR, "invalid GIST tuple found on leaf page");
                for (i = 0; i < scan->numberOfOrderBys; i++)
                        so->distances[i] = -get_float8_infinity();
@@ -191,8 +191,8 @@ gistindex_keytest(IndexScanDesc scan,
                         * always be zero, but might as well pass it for possible future
                         * use.)
                         *
-                        * Note that Distance functions don't get a recheck argument.
-                        * We can't tolerate lossy distance calculations on leaf tuples;
+                        * Note that Distance functions don't get a recheck argument. We
+                        * can't tolerate lossy distance calculations on leaf tuples;
                         * there is no opportunity to re-sort the tuples afterwards.
                         */
                        dist = FunctionCall4(&key->sk_func,
@@ -223,7 +223,7 @@ gistindex_keytest(IndexScanDesc scan,
  * ntids: if not NULL, gistgetbitmap's output tuple counter
  *
  * If tbm/ntids aren't NULL, we are doing an amgetbitmap scan, and heap
- * tuples should be reported directly into the bitmap.  If they are NULL,
+ * tuples should be reported directly into the bitmap. If they are NULL,
  * we're doing a plain or ordered indexscan.  For a plain indexscan, heap
  * tuple TIDs are returned into so->pageData[].  For an ordered indexscan,
  * heap tuple TIDs are pushed into individual search queue items.
@@ -525,8 +525,8 @@ gistgettuple(PG_FUNCTION_ARGS)
                                /*
                                 * While scanning a leaf page, ItemPointers of matching heap
                                 * tuples are stored in so->pageData.  If there are any on
-                                * this page, we fall out of the inner "do" and loop around
-                                * to return them.
+                                * this page, we fall out of the inner "do" and loop around to
+                                * return them.
                                 */
                                gistScanPage(scan, item, so->curTreeItem->distances, NULL, NULL);
 
index 86a5d90f95528ef5a094873de447b8f4f84d927f..43c4b1251b1301b0da1eec863fcc7b4160653179 100644 (file)
@@ -904,7 +904,7 @@ gist_point_compress(PG_FUNCTION_ARGS)
        PG_RETURN_POINTER(entry);
 }
 
-#define        point_point_distance(p1,p2)     \
+#define point_point_distance(p1,p2) \
        DatumGetFloat8(DirectFunctionCall2(point_distance, \
                                                                           PointPGetDatum(p1), PointPGetDatum(p2)))
 
@@ -949,8 +949,8 @@ computeDistance(bool isLeaf, BOX *box, Point *point)
        else
        {
                /* closest point will be a vertex */
-               Point   p;
-               double  subresult;
+               Point           p;
+               double          subresult;
 
                result = point_point_distance(point, &box->low);
 
index 0a125e772d077cbc9531f2711a89c2ebb9f725e7..67308ed37e5cb3370da15a89b001c7e032f35bfb 100644 (file)
@@ -57,9 +57,9 @@ GISTSearchTreeItemCombiner(RBNode *existing, const RBNode *newrb, void *arg)
 
        /*
         * If new item is heap tuple, it goes to front of chain; otherwise insert
-        * it before the first index-page item, so that index pages are visited
-        * in LIFO order, ensuring depth-first search of index pages.  See
-        * comments in gist_private.h.
+        * it before the first index-page item, so that index pages are visited in
+        * LIFO order, ensuring depth-first search of index pages.      See comments
+        * in gist_private.h.
         */
        if (GISTSearchItemIsHeap(*newitem))
        {
@@ -136,6 +136,7 @@ gistrescan(PG_FUNCTION_ARGS)
        IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
        ScanKey         key = (ScanKey) PG_GETARG_POINTER(1);
        ScanKey         orderbys = (ScanKey) PG_GETARG_POINTER(3);
+
        /* nkeys and norderbys arguments are ignored */
        GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
        int                     i;
@@ -164,8 +165,8 @@ gistrescan(PG_FUNCTION_ARGS)
                                scan->numberOfKeys * sizeof(ScanKeyData));
 
                /*
-                * Modify the scan key so that the Consistent method is called for
-                * all comparisons. The original operator is passed to the Consistent
+                * Modify the scan key so that the Consistent method is called for all
+                * comparisons. The original operator is passed to the Consistent
                 * function in the form of its strategy number, which is available
                 * from the sk_strategy field, and its subtype from the sk_subtype
                 * field.  Also, preserve sk_func.fn_collation which is the input
index 6736fd166c3650e9fafc8436e0dddb414154da13..e8bbd564c714257260234a787b46bcd118e29404 100644 (file)
@@ -503,11 +503,12 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
        }
 
        res = index_form_tuple(giststate->tupdesc, compatt, isnull);
+
        /*
         * The offset number on tuples on internal pages is unused. For historical
         * reasons, it is set 0xffff.
         */
-       ItemPointerSetOffsetNumber( &(res->t_tid), 0xffff);
+       ItemPointerSetOffsetNumber(&(res->t_tid), 0xffff);
        return res;
 }
 
index 0f406e16c4eb56f9251e4c8d78a4900ddb930c3c..51354c1c185f98ed3282c17aacece1fb30c3c8a0 100644 (file)
@@ -41,12 +41,12 @@ static void
 gistRedoClearFollowRight(RelFileNode node, XLogRecPtr lsn,
                                                 BlockNumber leftblkno)
 {
-       Buffer buffer;
+       Buffer          buffer;
 
        buffer = XLogReadBuffer(node, leftblkno, false);
        if (BufferIsValid(buffer))
        {
-               Page page = (Page) BufferGetPage(buffer);
+               Page            page = (Page) BufferGetPage(buffer);
 
                /*
                 * Note that we still update the page even if page LSN is equal to the
@@ -103,6 +103,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
        {
                int                     i;
                OffsetNumber *todelete = (OffsetNumber *) data;
+
                data += sizeof(OffsetNumber) * xldata->ntodelete;
 
                for (i = 0; i < xldata->ntodelete; i++)
@@ -115,12 +116,14 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
        if (data - begin < record->xl_len)
        {
                OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
-                       OffsetNumberNext(PageGetMaxOffsetNumber(page));
+               OffsetNumberNext(PageGetMaxOffsetNumber(page));
+
                while (data - begin < record->xl_len)
                {
-                       IndexTuple itup = (IndexTuple) data;
+                       IndexTuple      itup = (IndexTuple) data;
                        Size            sz = IndexTupleSize(itup);
                        OffsetNumber l;
+
                        data += sz;
 
                        l = PageAddItem(page, (Item) itup, sz, off, false, false);
@@ -418,7 +421,7 @@ gistXLogSplit(RelFileNode node, BlockNumber blkno, bool page_is_leaf,
        SplitedPageLayout *ptr;
        int                     npage = 0,
                                cur;
-       XLogRecPtr recptr;
+       XLogRecPtr      recptr;
 
        for (ptr = dist; ptr; ptr = ptr->next)
                npage++;
@@ -540,8 +543,8 @@ gistXLogUpdate(RelFileNode node, Buffer buffer,
        }
 
        /*
-        * Include a full page image of the child buf. (only necessary if
-        * checkpoint happened since the child page was split)
+        * Include a full page image of the child buf. (only necessary if a
+        * checkpoint happened since the child page was split)
         */
        if (BufferIsValid(leftchildbuf))
        {
index f19e5627f83a87202f1a6a51fee36a33200f430c..4cb29b2bb45159991551d7fc14671dacf6bacfcc 100644 (file)
@@ -413,6 +413,7 @@ hashrescan(PG_FUNCTION_ARGS)
 {
        IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
        ScanKey         scankey = (ScanKey) PG_GETARG_POINTER(1);
+
        /* remaining arguments are ignored */
        HashScanOpaque so = (HashScanOpaque) scan->opaque;
        Relation        rel = scan->indexRelation;
index 89697f6ff5e6d8c5cfe05fa4865564e70d5c9761..1fbd8b39b4a73f6aee337fccf2dcce548d0850bb 100644 (file)
@@ -1070,7 +1070,7 @@ relation_close(Relation relation, LOCKMODE lockmode)
  *             This is essentially relation_open plus check that the relation
  *             is not an index nor a composite type.  (The caller should also
  *             check that it's not a view or foreign table before assuming it has
- *      storage.)
+ *             storage.)
  * ----------------
  */
 Relation
@@ -1922,8 +1922,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 
        /*
         * We're about to do the actual insert -- check for conflict at the
-        * relation or buffer level first, to avoid possibly having to roll
-        * back work we've just done.
+        * relation or buffer level first, to avoid possibly having to roll back
+        * work we've just done.
         */
        CheckForSerializableConflictIn(relation, NULL, buffer);
 
@@ -2228,8 +2228,8 @@ l1:
        }
 
        /*
-        * We're about to do the actual delete -- check for conflict first,
-        * to avoid possibly having to roll back work we've just done.
+        * We're about to do the actual delete -- check for conflict first, to
+        * avoid possibly having to roll back work we've just done.
         */
        CheckForSerializableConflictIn(relation, &tp, buffer);
 
@@ -2587,8 +2587,8 @@ l2:
        }
 
        /*
-        * We're about to do the actual update -- check for conflict first,
-        * to avoid possibly having to roll back work we've just done.
+        * We're about to do the actual update -- check for conflict first, to
+        * avoid possibly having to roll back work we've just done.
         */
        CheckForSerializableConflictIn(relation, &oldtup, buffer);
 
@@ -2737,8 +2737,8 @@ l2:
        }
 
        /*
-        * We're about to create the new tuple -- check for conflict first,
-        * to avoid possibly having to roll back work we've just done.
+        * We're about to create the new tuple -- check for conflict first, to
+        * avoid possibly having to roll back work we've just done.
         *
         * NOTE: For a tuple insert, we only need to check for table locks, since
         * predicate locking at the index level will cover ranges for anything
@@ -3860,12 +3860,12 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
        }
 
        /*
-        * Ignore tuples inserted by an aborted transaction or
-        * if the tuple was updated/deleted by the inserting transaction.
+        * Ignore tuples inserted by an aborted transaction or if the tuple was
+        * updated/deleted by the inserting transaction.
         *
         * Look for a committed hint bit, or if no xmin bit is set, check clog.
-        * This needs to work on both master and standby, where it is used
-        * to assess btree delete records.
+        * This needs to work on both master and standby, where it is used to
+        * assess btree delete records.
         */
        if ((tuple->t_infomask & HEAP_XMIN_COMMITTED) ||
                (!(tuple->t_infomask & HEAP_XMIN_COMMITTED) &&
@@ -3874,7 +3874,7 @@ HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple,
        {
                if (xmax != xmin &&
                        TransactionIdFollows(xmax, *latestRemovedXid))
-                               *latestRemovedXid = xmax;
+                       *latestRemovedXid = xmax;
        }
 
        /* *latestRemovedXid may still be invalid at end */
@@ -4158,8 +4158,8 @@ log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
        recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_NEWPAGE, rdata);
 
        /*
-        * The page may be uninitialized. If so, we can't set the LSN
-        * and TLI because that would corrupt the page.
+        * The page may be uninitialized. If so, we can't set the LSN and TLI
+        * because that would corrupt the page.
         */
        if (!PageIsNew(page))
        {
@@ -4352,8 +4352,8 @@ heap_xlog_newpage(XLogRecPtr lsn, XLogRecord *record)
        memcpy(page, (char *) xlrec + SizeOfHeapNewpage, BLCKSZ);
 
        /*
-        * The page may be uninitialized. If so, we can't set the LSN
-        * and TLI because that would corrupt the page.
+        * The page may be uninitialized. If so, we can't set the LSN and TLI
+        * because that would corrupt the page.
         */
        if (!PageIsNew(page))
        {
index 28499925281ceec223186efb257186523d6c601e..72a69e52b02878aba076a8b9f15d70149505aed0 100644
@@ -150,7 +150,7 @@ ReadBufferBI(Relation relation, BlockNumber targetBlock,
 Buffer
 RelationGetBufferForTuple(Relation relation, Size len,
                                                  Buffer otherBuffer, int options,
-                                                 struct BulkInsertStateData *bistate)
+                                                 struct BulkInsertStateData * bistate)
 {
        bool            use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
        Buffer          buffer = InvalidBuffer;
index c710f1d316e759e6bcbf091f44e89233799a817b..e56140950afc96f3b90843ec19ae2f7231ce36d1 100644
@@ -131,7 +131,7 @@ typedef struct RewriteStateData
                                                                 * them */
        HTAB       *rs_unresolved_tups;         /* unmatched A tuples */
        HTAB       *rs_old_new_tid_map;         /* unmatched B tuples */
-} RewriteStateData;
+}      RewriteStateData;
 
 /*
  * The lookup keys for the hash tables are tuple TID and xmin (we must check
@@ -277,7 +277,7 @@ end_heap_rewrite(RewriteState state)
        }
 
        /*
-        * If the rel is WAL-logged, must fsync before commit.  We use heap_sync
+        * If the rel is WAL-logged, must fsync before commit.  We use heap_sync
         * to ensure that the toast table gets fsync'd too.
         *
         * It's obvious that we must do this when not WAL-logging. It's less
index 88f73e8241e5c8a521e9f9209c49f8041f9336d4..66af2c37c54f8f0d33145db2d5b84bb67e17b4a8 100644
@@ -872,7 +872,7 @@ index_getprocinfo(Relation irel,
                                 procnum, attnum, RelationGetRelationName(irel));
 
                fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt);
-               fmgr_info_set_collation(irel->rd_indcollation[attnum-1], locinfo);
+               fmgr_info_set_collation(irel->rd_indcollation[attnum - 1], locinfo);
        }
 
        return locinfo;
index 0dd745f19a4c7cfeff3ffb7faba277a1ea635ad0..219f94fd0dd92c43a48b255da8deeb915000506d 100644
@@ -179,8 +179,8 @@ top:
                 * The only conflict predicate locking cares about for indexes is when
                 * an index tuple insert conflicts with an existing lock.  Since the
                 * actual location of the insert is hard to predict because of the
-                * random search used to prevent O(N^2) performance when there are many
-                * duplicate entries, we can just use the "first valid" page.
+                * random search used to prevent O(N^2) performance when there are
+                * many duplicate entries, we can just use the "first valid" page.
                 */
                CheckForSerializableConflictIn(rel, NULL, buf);
                /* do the insertion */
@@ -915,13 +915,13 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
        /*
         * origpage is the original page to be split.  leftpage is a temporary
         * buffer that receives the left-sibling data, which will be copied back
-        * into origpage on success.  rightpage is the new page that receives
-        * the right-sibling data.  If we fail before reaching the critical
-        * section, origpage hasn't been modified and leftpage is only workspace.
-        * In principle we shouldn't need to worry about rightpage either,
-        * because it hasn't been linked into the btree page structure; but to
-        * avoid leaving possibly-confusing junk behind, we are careful to rewrite
-        * rightpage as zeroes before throwing any error.
+        * into origpage on success.  rightpage is the new page that receives the
+        * right-sibling data.  If we fail before reaching the critical section,
+        * origpage hasn't been modified and leftpage is only workspace. In
+        * principle we shouldn't need to worry about rightpage either, because it
+        * hasn't been linked into the btree page structure; but to avoid leaving
+        * possibly-confusing junk behind, we are careful to rewrite rightpage as
+        * zeroes before throwing any error.
         */
        origpage = BufferGetPage(buf);
        leftpage = PageGetTempPage(origpage);
@@ -1118,7 +1118,7 @@ _bt_split(Relation rel, Buffer buf, OffsetNumber firstright,
                {
                        memset(rightpage, 0, BufferGetPageSize(rbuf));
                        elog(ERROR, "right sibling's left-link doesn't match: "
-                                "block %u links to %u instead of expected %u in index \"%s\"",
+                          "block %u links to %u instead of expected %u in index \"%s\"",
                                 oopaque->btpo_next, sopaque->btpo_prev, origpagenumber,
                                 RelationGetRelationName(rel));
                }
index 27964455f7cac5fd402a1fb57e0d8d1b3c2bc3f4..2477736281bcefb1702f84b878b7619edb2ab57f 100644
@@ -1268,9 +1268,9 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
 
        /*
         * Check that the parent-page index items we're about to delete/overwrite
-        * contain what we expect.  This can fail if the index has become
-        * corrupt for some reason.  We want to throw any error before entering
-        * the critical section --- otherwise it'd be a PANIC.
+        * contain what we expect.      This can fail if the index has become corrupt
+        * for some reason.  We want to throw any error before entering the
+        * critical section --- otherwise it'd be a PANIC.
         *
         * The test on the target item is just an Assert because _bt_getstackbuf
         * should have guaranteed it has the expected contents.  The test on the
index 7a0e1a9c25ea2423f1322a174b7cb6e7dd1bbb69..6a7ddd7db4de414f011f7b855fd3f47a6bf97b45 100644
@@ -220,7 +220,7 @@ btbuildempty(PG_FUNCTION_ARGS)
        metapage = (Page) palloc(BLCKSZ);
        _bt_initmetapage(metapage, P_NONE, 0);
 
-       /* Write the page.  If archiving/streaming, XLOG it. */
+       /* Write the page.      If archiving/streaming, XLOG it. */
        smgrwrite(index->rd_smgr, INIT_FORKNUM, BTREE_METAPAGE,
                          (char *) metapage, true);
        if (XLogIsNeeded())
@@ -403,6 +403,7 @@ btrescan(PG_FUNCTION_ARGS)
 {
        IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
        ScanKey         scankey = (ScanKey) PG_GETARG_POINTER(1);
+
        /* remaining arguments are ignored */
        BTScanOpaque so = (BTScanOpaque) scan->opaque;
 
index cb78a1bae16d75253feaea855c836459abfe4a18..91f8cadea52b1bca1474a05f02f17655bb1685a9 100644
@@ -65,7 +65,7 @@ _bt_search(Relation rel, int keysz, ScanKey scankey, bool nextkey,
        /* If index is empty and access = BT_READ, no root page is created. */
        if (!BufferIsValid(*bufP))
        {
-               PredicateLockRelation(rel);  /* Nothing finer to lock exists. */
+               PredicateLockRelation(rel);             /* Nothing finer to lock exists. */
                return (BTStack) NULL;
        }
 
@@ -1364,7 +1364,7 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
        if (!BufferIsValid(buf))
        {
                /* empty index... */
-               PredicateLockRelation(rel);  /* Nothing finer to lock exists. */
+               PredicateLockRelation(rel);             /* Nothing finer to lock exists. */
                return InvalidBuffer;
        }
 
@@ -1444,7 +1444,7 @@ _bt_endpoint(IndexScanDesc scan, ScanDirection dir)
        if (!BufferIsValid(buf))
        {
                /* empty index... */
-               PredicateLockRelation(rel);  /* Nothing finer to lock exists. */
+               PredicateLockRelation(rel);             /* Nothing finer to lock exists. */
                so->currPos.buf = InvalidBuffer;
                return false;
        }
index fd0e86a6aa3a19df28d523c3cacf8640763fb3bd..256a7f9f98f9aae57b69b61c717ca4e404af788b 100644
@@ -799,7 +799,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
 
        /*
         * If the index is WAL-logged, we must fsync it down to disk before it's
-        * safe to commit the transaction.  (For a non-WAL-logged index we don't
+        * safe to commit the transaction.      (For a non-WAL-logged index we don't
         * care since the index will be uninteresting after a crash anyway.)
         *
         * It's obvious that we must do this when not WAL-logging the build. It's
index add932d9428180d68a0962ab1005d8a3788df9d7..d448ba6a502845e1f9d4d9ff75e1f0eb86768781 100644
@@ -70,8 +70,8 @@ _bt_mkscankey(Relation rel, IndexTuple itup)
 
                /*
                 * We can use the cached (default) support procs since no cross-type
-                * comparison can be needed.  The cached support proc entries have
-                * the right collation for the index, too.
+                * comparison can be needed.  The cached support proc entries have the
+                * right collation for the index, too.
                 */
                procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
                arg = index_getattr(itup, i + 1, itupdesc, &null);
@@ -120,8 +120,8 @@ _bt_mkscankey_nodata(Relation rel)
 
                /*
                 * We can use the cached (default) support procs since no cross-type
-                * comparison can be needed.  The cached support proc entries have
-                * the right collation for the index, too.
+                * comparison can be needed.  The cached support proc entries have the
+                * right collation for the index, too.
                 */
                procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
                flags = SK_ISNULL | (indoption[i] << SK_BT_INDOPTION_SHIFT);
index 729c7b72e0f110fbeffa4cd5169f61c617c4d121..281268120ef254a0a925763cea9da48ddb9f598c 100644
@@ -120,7 +120,7 @@ typedef struct GlobalTransactionData
        TransactionId locking_xid;      /* top-level XID of backend working on xact */
        bool            valid;                  /* TRUE if fully prepared */
        char            gid[GIDSIZE];   /* The GID assigned to the prepared xact */
-} GlobalTransactionData;
+}      GlobalTransactionData;
 
 /*
  * Two Phase Commit shared state.  Access to this struct is protected
@@ -1029,8 +1029,8 @@ EndPrepare(GlobalTransaction gxact)
        /* If we crash now, we have prepared: WAL replay will fix things */
 
        /*
-        * Wake up all walsenders to send WAL up to the PREPARE record
-        * immediately if replication is enabled
+        * Wake up all walsenders to send WAL up to the PREPARE record immediately
+        * if replication is enabled
         */
        if (max_wal_senders > 0)
                WalSndWakeup();
@@ -2043,8 +2043,8 @@ RecordTransactionCommitPrepared(TransactionId xid,
        /*
         * Wait for synchronous replication, if required.
         *
-        * Note that at this stage we have marked clog, but still show as
-        * running in the procarray and continue to hold locks.
+        * Note that at this stage we have marked clog, but still show as running
+        * in the procarray and continue to hold locks.
         */
        SyncRepWaitForLSN(recptr);
 }
@@ -2130,8 +2130,8 @@ RecordTransactionAbortPrepared(TransactionId xid,
        /*
         * Wait for synchronous replication, if required.
         *
-        * Note that at this stage we have marked clog, but still show as
-        * running in the procarray and continue to hold locks.
+        * Note that at this stage we have marked clog, but still show as running
+        * in the procarray and continue to hold locks.
         */
        SyncRepWaitForLSN(recptr);
 }
index a828b3de48fb523c4f774929c417853597eb4758..500335bd6ffb398e4db82807a1a7b76d7cde260a 100644
@@ -355,9 +355,9 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
                char       *oldest_datname;
 
                /*
-                * We can be called when not inside a transaction, for example
-                * during StartupXLOG().  In such a case we cannot do database
-                * access, so we must just report the oldest DB's OID.
+                * We can be called when not inside a transaction, for example during
+                * StartupXLOG().  In such a case we cannot do database access, so we
+                * must just report the oldest DB's OID.
                 *
                 * Note: it's also possible that get_database_name fails and returns
                 * NULL, for example because the database just got dropped.  We'll
index 55aee8791014e343e7ddf459e386399cee070ef3..8a4c4eccd7398f8e833a81f437a9f8fb09937127 100644
@@ -420,11 +420,11 @@ AssignTransactionId(TransactionState s)
         */
        if (isSubXact && !TransactionIdIsValid(s->parent->transactionId))
        {
-               TransactionState        p = s->parent;
-               TransactionState   *parents;
-               size_t  parentOffset = 0;
+               TransactionState p = s->parent;
+               TransactionState *parents;
+               size_t          parentOffset = 0;
 
-               parents = palloc(sizeof(TransactionState) *  s->nestingLevel);
+               parents = palloc(sizeof(TransactionState) * s->nestingLevel);
                while (p != NULL && !TransactionIdIsValid(p->transactionId))
                {
                        parents[parentOffset++] = p;
@@ -432,8 +432,8 @@ AssignTransactionId(TransactionState s)
                }
 
                /*
-                * This is technically a recursive call, but the recursion will
-                * never be more than one layer deep.
+                * This is technically a recursive call, but the recursion will never
+                * be more than one layer deep.
                 */
                while (parentOffset != 0)
                        AssignTransactionId(parents[--parentOffset]);
@@ -1037,16 +1037,17 @@ RecordTransactionCommit(void)
        /*
         * Check if we want to commit asynchronously.  We can allow the XLOG flush
         * to happen asynchronously if synchronous_commit=off, or if the current
-        * transaction has not performed any WAL-logged operation.  The latter case
-        * can arise if the current transaction wrote only to temporary and/or
-        * unlogged tables.  In case of a crash, the loss of such a transaction
-        * will be irrelevant since temp tables will be lost anyway, and unlogged
-        * tables will be truncated.  (Given the foregoing, you might think that it
-        * would be unnecessary to emit the XLOG record at all in this case, but we
-        * don't currently try to do that.  It would certainly cause problems at
-        * least in Hot Standby mode, where the KnownAssignedXids machinery
-        * requires tracking every XID assignment.  It might be OK to skip it only
-        * when wal_level < hot_standby, but for now we don't.)
+        * transaction has not performed any WAL-logged operation.      The latter
+        * case can arise if the current transaction wrote only to temporary
+        * and/or unlogged tables.      In case of a crash, the loss of such a
+        * transaction will be irrelevant since temp tables will be lost anyway,
+        * and unlogged tables will be truncated.  (Given the foregoing, you might
+        * think that it would be unnecessary to emit the XLOG record at all in
+        * this case, but we don't currently try to do that.  It would certainly
+        * cause problems at least in Hot Standby mode, where the
+        * KnownAssignedXids machinery requires tracking every XID assignment.  It
+        * might be OK to skip it only when wal_level < hot_standby, but for now
+        * we don't.)
         *
         * However, if we're doing cleanup of any non-temp rels or committing any
         * command that wanted to force sync commit, then we must flush XLOG
@@ -1130,8 +1131,8 @@ RecordTransactionCommit(void)
        /*
         * Wait for synchronous replication, if required.
         *
-        * Note that at this stage we have marked clog, but still show as
-        * running in the procarray and continue to hold locks.
+        * Note that at this stage we have marked clog, but still show as running
+        * in the procarray and continue to hold locks.
         */
        SyncRepWaitForLSN(XactLastRecEnd);
 
@@ -1785,10 +1786,10 @@ CommitTransaction(void)
        }
 
        /*
-        * The remaining actions cannot call any user-defined code, so it's
-        * safe to start shutting down within-transaction services.  But note
-        * that most of this stuff could still throw an error, which would
-        * switch us into the transaction-abort path.
+        * The remaining actions cannot call any user-defined code, so it's safe
+        * to start shutting down within-transaction services.  But note that most
+        * of this stuff could still throw an error, which would switch us into
+        * the transaction-abort path.
         */
 
        /* Shut down the deferred-trigger manager */
@@ -1805,8 +1806,8 @@ CommitTransaction(void)
 
        /*
         * Mark serializable transaction as complete for predicate locking
-        * purposes.  This should be done as late as we can put it and still
-        * allow errors to be raised for failure patterns found at commit.
+        * purposes.  This should be done as late as we can put it and still allow
+        * errors to be raised for failure patterns found at commit.
         */
        PreCommit_CheckForSerializationFailure();
 
@@ -1988,10 +1989,10 @@ PrepareTransaction(void)
        }
 
        /*
-        * The remaining actions cannot call any user-defined code, so it's
-        * safe to start shutting down within-transaction services.  But note
-        * that most of this stuff could still throw an error, which would
-        * switch us into the transaction-abort path.
+        * The remaining actions cannot call any user-defined code, so it's safe
+        * to start shutting down within-transaction services.  But note that most
+        * of this stuff could still throw an error, which would switch us into
+        * the transaction-abort path.
         */
 
        /* Shut down the deferred-trigger manager */
@@ -2008,8 +2009,8 @@ PrepareTransaction(void)
 
        /*
         * Mark serializable transaction as complete for predicate locking
-        * purposes.  This should be done as late as we can put it and still
-        * allow errors to be raised for failure patterns found at commit.
+        * purposes.  This should be done as late as we can put it and still allow
+        * errors to be raised for failure patterns found at commit.
         */
        PreCommit_CheckForSerializationFailure();
 
index b31c79ebbdcb03660c81e5f83c55e107ee2b6fb2..9c45759661c4a3220e1e0bbb9929786c3b7cfd31 100644
@@ -64,7 +64,7 @@
 /* File path names (all relative to $PGDATA) */
 #define RECOVERY_COMMAND_FILE  "recovery.conf"
 #define RECOVERY_COMMAND_DONE  "recovery.done"
-#define PROMOTE_SIGNAL_FILE    "promote"
+#define PROMOTE_SIGNAL_FILE "promote"
 
 
 /* User-settable parameters */
@@ -160,6 +160,7 @@ static XLogRecPtr LastRec;
  * known, need to check the shared state".
  */
 static bool LocalRecoveryInProgress = true;
+
 /*
  * Local copy of SharedHotStandbyActive variable. False actually means "not
  * known, need to check the shared state".
@@ -355,10 +356,9 @@ typedef struct XLogCtlInsert
        /*
         * exclusiveBackup is true if a backup started with pg_start_backup() is
         * in progress, and nonExclusiveBackups is a counter indicating the number
-        * of streaming base backups currently in progress. forcePageWrites is
-        * set to true when either of these is non-zero. lastBackupStart is the
-        * latest checkpoint redo location used as a starting point for an online
-        * backup.
+        * of streaming base backups currently in progress. forcePageWrites is set
+        * to true when either of these is non-zero. lastBackupStart is the latest
+        * checkpoint redo location used as a starting point for an online backup.
         */
        bool            exclusiveBackup;
        int                     nonExclusiveBackups;
@@ -388,7 +388,7 @@ typedef struct XLogCtlData
        XLogwrtResult LogwrtResult;
        uint32          ckptXidEpoch;   /* nextXID & epoch of latest checkpoint */
        TransactionId ckptXid;
-       XLogRecPtr      asyncXactLSN; /* LSN of newest async commit/abort */
+       XLogRecPtr      asyncXactLSN;   /* LSN of newest async commit/abort */
        uint32          lastRemovedLog; /* latest removed/recycled XLOG segment */
        uint32          lastRemovedSeg;
 
@@ -425,9 +425,9 @@ typedef struct XLogCtlData
        bool            SharedHotStandbyActive;
 
        /*
-        * recoveryWakeupLatch is used to wake up the startup process to
-        * continue WAL replay, if it is waiting for WAL to arrive or failover
-        * trigger file to appear.
+        * recoveryWakeupLatch is used to wake up the startup process to continue
+        * WAL replay, if it is waiting for WAL to arrive or failover trigger file
+        * to appear.
         */
        Latch           recoveryWakeupLatch;
 
@@ -576,7 +576,7 @@ typedef struct xl_parameter_change
 /* logs restore point */
 typedef struct xl_restore_point
 {
-       TimestampTz     rp_time;
+       TimestampTz rp_time;
        char            rp_name[MAXFNAMELEN];
 } xl_restore_point;
 
@@ -4272,27 +4272,29 @@ existsTimeLineHistory(TimeLineID probeTLI)
 static bool
 rescanLatestTimeLine(void)
 {
-       TimeLineID newtarget;
+       TimeLineID      newtarget;
+
        newtarget = findNewestTimeLine(recoveryTargetTLI);
        if (newtarget != recoveryTargetTLI)
        {
                /*
                 * Determine the list of expected TLIs for the new TLI
                 */
-               List *newExpectedTLIs;
+               List       *newExpectedTLIs;
+
                newExpectedTLIs = readTimeLineHistory(newtarget);
 
                /*
-                * If the current timeline is not part of the history of the
-                * new timeline, we cannot proceed to it.
+                * If the current timeline is not part of the history of the new
+                * timeline, we cannot proceed to it.
                 *
                 * XXX This isn't foolproof: The new timeline might have forked from
                 * the current one, but before the current recovery location. In that
                 * case we will still switch to the new timeline and proceed replaying
                 * from it even though the history doesn't match what we already
                 * replayed. That's not good. We will likely notice at the next online
-                * checkpoint, as the TLI won't match what we expected, but it's
-                * not guaranteed. The admin needs to make sure that doesn't happen.
+                * checkpoint, as the TLI won't match what we expected, but it's not
+                * guaranteed. The admin needs to make sure that doesn't happen.
                 */
                if (!list_member_int(newExpectedTLIs,
                                                         (int) recoveryTargetTLI))
@@ -4480,7 +4482,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
                                 timestamptz_to_str(recoveryStopTime));
        else if (recoveryTarget == RECOVERY_TARGET_NAME)
                snprintf(buffer, sizeof(buffer),
-                               "%s%u\t%s\tat restore point \"%s\"\n",
+                                "%s%u\t%s\tat restore point \"%s\"\n",
                                 (srcfd < 0) ? "" : "\n",
                                 parentTLI,
                                 xlogfname,
@@ -4921,7 +4923,7 @@ check_wal_buffers(int *newval, void **extra, GucSource source)
        {
                /*
                 * If we haven't yet changed the boot_val default of -1, just let it
-                * be.  We'll fix it when XLOGShmemSize is called.
+                * be.  We'll fix it when XLOGShmemSize is called.
                 */
                if (XLOGbuffers == -1)
                        return true;
@@ -4954,8 +4956,8 @@ XLOGShmemSize(void)
        /*
         * If the value of wal_buffers is -1, use the preferred auto-tune value.
         * This isn't an amazingly clean place to do this, but we must wait till
-        * NBuffers has received its final value, and must do it before using
-        * the value of XLOGbuffers to do anything important.
+        * NBuffers has received its final value, and must do it before using the
+        * value of XLOGbuffers to do anything important.
         */
        if (XLOGbuffers == -1)
        {
@@ -5086,9 +5088,9 @@ BootStrapXLOG(void)
        /*
         * Set up information for the initial checkpoint record
         *
-        * The initial checkpoint record is written to the beginning of the
-        * WAL segment with logid=0 logseg=1. The very first WAL segment, 0/0, is
-        * not used, so that we can use 0/0 to mean "before any valid WAL segment".
+        * The initial checkpoint record is written to the beginning of the WAL
+        * segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not
+        * used, so that we can use 0/0 to mean "before any valid WAL segment".
         */
        checkPoint.redo.xlogid = 0;
        checkPoint.redo.xrecoff = XLogSegSize + SizeOfXLogLongPHD;
@@ -5219,8 +5221,8 @@ readRecoveryCommandFile(void)
        TimeLineID      rtli = 0;
        bool            rtliGiven = false;
        ConfigVariable *item,
-                                  *head = NULL,
-                                  *tail = NULL;
+                          *head = NULL,
+                          *tail = NULL;
 
        fd = AllocateFile(RECOVERY_COMMAND_FILE, "r");
        if (fd == NULL)
@@ -5236,7 +5238,7 @@ readRecoveryCommandFile(void)
        /*
         * Since we're asking ParseConfigFp() to error out at FATAL, there's no
         * need to check the return value.
-        */ 
+        */
        ParseConfigFp(fd, RECOVERY_COMMAND_FILE, 0, FATAL, &head, &tail);
 
        for (item = head; item; item = item->next)
@@ -5312,7 +5314,7 @@ readRecoveryCommandFile(void)
                         * this overrides recovery_target_time
                         */
                        if (recoveryTarget == RECOVERY_TARGET_XID ||
-                                       recoveryTarget == RECOVERY_TARGET_NAME)
+                               recoveryTarget == RECOVERY_TARGET_NAME)
                                continue;
                        recoveryTarget = RECOVERY_TARGET_TIME;
 
@@ -5321,7 +5323,7 @@ readRecoveryCommandFile(void)
                         */
                        recoveryTargetTime =
                                DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in,
-                                                                                                               CStringGetDatum(item->value),
+                                                                                               CStringGetDatum(item->value),
                                                                                                ObjectIdGetDatum(InvalidOid),
                                                                                                                Int32GetDatum(-1)));
                        ereport(DEBUG2,
@@ -5610,8 +5612,8 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
        if (recoveryTarget == RECOVERY_TARGET_UNSET)
        {
                /*
-                * Save timestamp of latest transaction commit/abort if this is
-                * transaction record
+                * Save timestamp of latest transaction commit/abort if this is a
+                * transaction record
                 */
                if (record->xl_rmid == RM_XACT_ID)
                        SetLatestXTime(recordXtime);
@@ -5636,8 +5638,8 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
        else if (recoveryTarget == RECOVERY_TARGET_NAME)
        {
                /*
-                * There can be many restore points that share the same name, so we stop
-                * at the first one
+                * There can be many restore points that share the same name, so we
+                * stop at the first one
                 */
                stopsHere = (strcmp(recordRPName, recoveryTargetName) == 0);
 
@@ -5699,14 +5701,14 @@ recoveryStopsHere(XLogRecord *record, bool *includeThis)
                        strncpy(recoveryStopName, recordRPName, MAXFNAMELEN);
 
                        ereport(LOG,
-                                       (errmsg("recovery stopping at restore point \"%s\", time %s",
-                                                               recoveryStopName,
-                                                               timestamptz_to_str(recoveryStopTime))));
+                               (errmsg("recovery stopping at restore point \"%s\", time %s",
+                                               recoveryStopName,
+                                               timestamptz_to_str(recoveryStopTime))));
                }
 
                /*
-                * Note that if we use a RECOVERY_TARGET_TIME then we can stop
-                * at a restore point since they are timestamped, though the latest
+                * Note that if we use a RECOVERY_TARGET_TIME then we can stop at a
+                * restore point since they are timestamped, though the latest
                 * transaction time is not updated.
                 */
                if (record->xl_rmid == RM_XACT_ID && recoveryStopAfter)
@@ -5732,7 +5734,7 @@ recoveryPausesHere(void)
 
        while (RecoveryIsPaused())
        {
-               pg_usleep(1000000L);            /* 1000 ms */
+               pg_usleep(1000000L);    /* 1000 ms */
                HandleStartupProcInterrupts();
        }
 }
@@ -5742,7 +5744,7 @@ RecoveryIsPaused(void)
 {
        /* use volatile pointer to prevent code rearrangement */
        volatile XLogCtlData *xlogctl = XLogCtl;
-       bool recoveryPause;
+       bool            recoveryPause;
 
        SpinLockAcquire(&xlogctl->info_lck);
        recoveryPause = xlogctl->recoveryPause;
@@ -5771,7 +5773,7 @@ pg_xlog_replay_pause(PG_FUNCTION_ARGS)
        if (!superuser())
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                        (errmsg("must be superuser to control recovery"))));
+                                (errmsg("must be superuser to control recovery"))));
 
        if (!RecoveryInProgress())
                ereport(ERROR,
@@ -5793,7 +5795,7 @@ pg_xlog_replay_resume(PG_FUNCTION_ARGS)
        if (!superuser())
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                        (errmsg("must be superuser to control recovery"))));
+                                (errmsg("must be superuser to control recovery"))));
 
        if (!RecoveryInProgress())
                ereport(ERROR,
@@ -5815,7 +5817,7 @@ pg_is_xlog_replay_paused(PG_FUNCTION_ARGS)
        if (!superuser())
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                        (errmsg("must be superuser to control recovery"))));
+                                (errmsg("must be superuser to control recovery"))));
 
        if (!RecoveryInProgress())
                ereport(ERROR,
@@ -5870,7 +5872,7 @@ GetLatestXTime(void)
 Datum
 pg_last_xact_replay_timestamp(PG_FUNCTION_ARGS)
 {
-       TimestampTz     xtime;
+       TimestampTz xtime;
 
        xtime = GetLatestXTime();
        if (xtime == 0)
@@ -6132,10 +6134,10 @@ StartupXLOG(void)
                        InRecovery = true;      /* force recovery even if SHUTDOWNED */
 
                        /*
-                        * Make sure that REDO location exists. This may not be
-                        * the case if there was a crash during an online backup,
-                        * which left a backup_label around that references a WAL
-                        * segment that's already been archived.
+                        * Make sure that REDO location exists. This may not be the case
+                        * if there was a crash during an online backup, which left a
+                        * backup_label around that references a WAL segment that's
+                        * already been archived.
                         */
                        if (XLByteLT(checkPoint.redo, checkPointLoc))
                        {
@@ -6150,7 +6152,7 @@ StartupXLOG(void)
                        ereport(FATAL,
                                        (errmsg("could not locate required checkpoint record"),
                                         errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir)));
-                       wasShutdown = false; /* keep compiler quiet */
+                       wasShutdown = false;    /* keep compiler quiet */
                }
                /* set flag to delete it later */
                haveBackupLabel = true;
@@ -6330,9 +6332,9 @@ StartupXLOG(void)
 
                /*
                 * We're in recovery, so unlogged relations relations may be trashed
-                * and must be reset.  This should be done BEFORE allowing Hot
-                * Standby connections, so that read-only backends don't try to
-                * read whatever garbage is left over from before.
+                * and must be reset.  This should be done BEFORE allowing Hot Standby
+                * connections, so that read-only backends don't try to read whatever
+                * garbage is left over from before.
                 */
                ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
 
@@ -6517,7 +6519,8 @@ StartupXLOG(void)
                                if (recoveryStopsHere(record, &recoveryApply))
                                {
                                        /*
-                                        * Pause only if users can connect to send a resume message
+                                        * Pause only if users can connect to send a resume
+                                        * message
                                         */
                                        if (recoveryPauseAtTarget && standbyState == STANDBY_SNAPSHOT_READY)
                                        {
@@ -7003,8 +7006,8 @@ HotStandbyActive(void)
 {
        /*
         * We check shared state each time only until Hot Standby is active. We
-        * can't de-activate Hot Standby, so there's no need to keep checking after
-        * the shared variable has once been seen true.
+        * can't de-activate Hot Standby, so there's no need to keep checking
+        * after the shared variable has once been seen true.
         */
        if (LocalHotStandbyActive)
                return true;
@@ -7429,14 +7432,14 @@ LogCheckpointEnd(bool restartpoint)
         */
        longest_secs = (long) (CheckpointStats.ckpt_longest_sync / 1000000);
        longest_usecs = CheckpointStats.ckpt_longest_sync -
-               (uint64) longest_secs * 1000000;
+               (uint64) longest_secs *1000000;
 
        average_sync_time = 0;
-       if (CheckpointStats.ckpt_sync_rels > 0) 
+       if (CheckpointStats.ckpt_sync_rels > 0)
                average_sync_time = CheckpointStats.ckpt_agg_sync_time /
                        CheckpointStats.ckpt_sync_rels;
        average_secs = (long) (average_sync_time / 1000000);
-       average_usecs = average_sync_time - (uint64) average_secs * 1000000;
+       average_usecs = average_sync_time - (uint64) average_secs *1000000;
 
        if (restartpoint)
                elog(LOG, "restartpoint complete: wrote %d buffers (%.1f%%); "
@@ -8241,9 +8244,9 @@ RequestXLogSwitch(void)
 XLogRecPtr
 XLogRestorePoint(const char *rpName)
 {
-       XLogRecPtr                              RecPtr;
-       XLogRecData                             rdata;
-       xl_restore_point                xlrec;
+       XLogRecPtr      RecPtr;
+       XLogRecData rdata;
+       xl_restore_point xlrec;
 
        xlrec.rp_time = GetCurrentTimestamp();
        strncpy(xlrec.rp_name, rpName, MAXFNAMELEN);
@@ -8257,7 +8260,7 @@ XLogRestorePoint(const char *rpName)
 
        ereport(LOG,
                        (errmsg("restore point \"%s\" created at %X/%X",
-                                       rpName, RecPtr.xlogid, RecPtr.xrecoff)));
+                                       rpName, RecPtr.xlogid, RecPtr.xrecoff)));
 
        return RecPtr;
 }
@@ -8643,7 +8646,7 @@ get_sync_bit(int method)
 
        /*
         * Optimize writes by bypassing kernel cache with O_DIRECT when using
-        * O_SYNC/O_FSYNC and O_DSYNC.  But only if archiving and streaming are
+        * O_SYNC/O_FSYNC and O_DSYNC.  But only if archiving and streaming are
         * disabled, otherwise the archive command or walsender process will read
         * the WAL soon after writing it, which is guaranteed to cause a physical
         * read if we bypassed the kernel cache. We also skip the
@@ -8775,7 +8778,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
        text       *backupid = PG_GETARG_TEXT_P(0);
        bool            fast = PG_GETARG_BOOL(1);
        char       *backupidstr;
-       XLogRecPtr  startpoint;
+       XLogRecPtr      startpoint;
        char            startxlogstr[MAXFNAMELEN];
 
        backupidstr = text_to_cstring(backupid);
@@ -8791,7 +8794,7 @@ pg_start_backup(PG_FUNCTION_ARGS)
  * do_pg_start_backup is the workhorse of the user-visible pg_start_backup()
  * function. It creates the necessary starting checkpoint and constructs the
  * backup label file.
- * 
+ *
  * There are two kind of backups: exclusive and non-exclusive. An exclusive
  * backup is started with pg_start_backup(), and there can be only one active
  * at a time. The backup label file of an exclusive backup is written to
@@ -8826,7 +8829,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
        if (!superuser() && !is_authenticated_user_replication_role())
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                errmsg("must be superuser or replication role to run a backup")));
+                  errmsg("must be superuser or replication role to run a backup")));
 
        if (RecoveryInProgress())
                ereport(ERROR,
@@ -8897,25 +8900,27 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
        /* Ensure we release forcePageWrites if fail below */
        PG_ENSURE_ERROR_CLEANUP(pg_start_backup_callback, (Datum) BoolGetDatum(exclusive));
        {
-               bool gotUniqueStartpoint = false;
+               bool            gotUniqueStartpoint = false;
+
                do
                {
                        /*
                         * Force a CHECKPOINT.  Aside from being necessary to prevent torn
-                        * page problems, this guarantees that two successive backup runs will
-                        * have different checkpoint positions and hence different history
-                        * file names, even if nothing happened in between.
+                        * page problems, this guarantees that two successive backup runs
+                        * will have different checkpoint positions and hence different
+                        * history file names, even if nothing happened in between.
                         *
-                        * We use CHECKPOINT_IMMEDIATE only if requested by user (via passing
-                        * fast = true).  Otherwise this can take awhile.
+                        * We use CHECKPOINT_IMMEDIATE only if requested by user (via
+                        * passing fast = true).  Otherwise this can take awhile.
                         */
                        RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT |
                                                          (fast ? CHECKPOINT_IMMEDIATE : 0));
 
                        /*
-                        * Now we need to fetch the checkpoint record location, and also its
-                        * REDO pointer.  The oldest point in WAL that would be needed to
-                        * restore starting from the checkpoint is precisely the REDO pointer.
+                        * Now we need to fetch the checkpoint record location, and also
+                        * its REDO pointer.  The oldest point in WAL that would be needed
+                        * to restore starting from the checkpoint is precisely the REDO
+                        * pointer.
                         */
                        LWLockAcquire(ControlFileLock, LW_SHARED);
                        checkpointloc = ControlFile->checkPoint;
@@ -8923,16 +8928,15 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
                        LWLockRelease(ControlFileLock);
 
                        /*
-                        * If two base backups are started at the same time (in WAL
-                        * sender processes), we need to make sure that they use
-                        * different checkpoints as starting locations, because we use
-                        * the starting WAL location as a unique identifier for the base
-                        * backup in the end-of-backup WAL record and when we write the
-                        * backup history file. Perhaps it would be better generate a
-                        * separate unique ID for each backup instead of forcing another
-                        * checkpoint, but taking a checkpoint right after another is
-                        * not that expensive either because only few buffers have been
-                        * dirtied yet.
+                        * If two base backups are started at the same time (in WAL sender
+                        * processes), we need to make sure that they use different
+                        * checkpoints as starting locations, because we use the starting
+                        * WAL location as a unique identifier for the base backup in the
+                        * end-of-backup WAL record and when we write the backup history
+                        * file. Perhaps it would be better generate a separate unique ID
+                        * for each backup instead of forcing another checkpoint, but
+                        * taking a checkpoint right after another is not that expensive
+                        * either because only few buffers have been dirtied yet.
                         */
                        LWLockAcquire(WALInsertLock, LW_SHARED);
                        if (XLByteLT(XLogCtl->Insert.lastBackupStart, startpoint))
@@ -8941,13 +8945,13 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
                                gotUniqueStartpoint = true;
                        }
                        LWLockRelease(WALInsertLock);
-               } while(!gotUniqueStartpoint);
+               } while (!gotUniqueStartpoint);
 
                XLByteToSeg(startpoint, _logId, _logSeg);
                XLogFileName(xlogfilename, ThisTimeLineID, _logId, _logSeg);
 
                /*
-                * Construct backup label file 
+                * Construct backup label file
                 */
                initStringInfo(&labelfbuf);
 
@@ -8970,8 +8974,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
                {
                        /*
                         * Check for existing backup label --- implies a backup is already
-                        * running.  (XXX given that we checked exclusiveBackup above, maybe
-                        * it would be OK to just unlink any such label file?)
+                        * running.  (XXX given that we checked exclusiveBackup above,
+                        * maybe it would be OK to just unlink any such label file?)
                         */
                        if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
                        {
@@ -9018,7 +9022,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
 static void
 pg_start_backup_callback(int code, Datum arg)
 {
-       bool exclusive = DatumGetBool(arg);
+       bool            exclusive = DatumGetBool(arg);
 
        /* Update backup counters and forcePageWrites on failure */
        LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
@@ -9101,7 +9105,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
        if (!superuser() && !is_authenticated_user_replication_role())
                ereport(ERROR,
                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                (errmsg("must be superuser or replication role to run a backup"))));
+                (errmsg("must be superuser or replication role to run a backup"))));
 
        if (RecoveryInProgress())
                ereport(ERROR,
@@ -9145,8 +9149,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
                /*
                 * Read the existing label file into memory.
                 */
-               struct  stat statbuf;
-               int             r;
+               struct stat statbuf;
+               int                     r;
 
                if (stat(BACKUP_LABEL_FILE, &statbuf))
                {
@@ -9197,7 +9201,7 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
-       remaining = strchr(labelfile, '\n') + 1; /* %n is not portable enough */
+       remaining = strchr(labelfile, '\n') + 1;        /* %n is not portable enough */
 
        /*
         * Write the backup-end xlog record
@@ -9388,8 +9392,8 @@ pg_switch_xlog(PG_FUNCTION_ARGS)
 Datum
 pg_create_restore_point(PG_FUNCTION_ARGS)
 {
-       text            *restore_name = PG_GETARG_TEXT_P(0);
-       char            *restore_name_str;
+       text       *restore_name = PG_GETARG_TEXT_P(0);
+       char       *restore_name_str;
        XLogRecPtr      restorepoint;
        char            location[MAXFNAMELEN];
 
@@ -9407,7 +9411,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS)
        if (!XLogIsNeeded())
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                         errmsg("WAL level not sufficient for creating a restore point"),
+                        errmsg("WAL level not sufficient for creating a restore point"),
                                 errhint("wal_level must be set to \"archive\" or \"hot_standby\" at server start.")));
 
        restore_name_str = text_to_cstring(restore_name);
@@ -9423,7 +9427,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS)
         * As a convenience, return the WAL location of the restore point record
         */
        snprintf(location, sizeof(location), "%X/%X",
-                       restorepoint.xlogid, restorepoint.xrecoff);
+                        restorepoint.xlogid, restorepoint.xrecoff);
        PG_RETURN_TEXT_P(cstring_to_text(location));
 }
 
@@ -10177,8 +10181,8 @@ retry:
                                                }
 
                                                /*
-                                                * If it hasn't been long since last attempt, sleep
-                                                * to avoid busy-waiting.
+                                                * If it hasn't been long since last attempt, sleep to
+                                                * avoid busy-waiting.
                                                 */
                                                now = (pg_time_t) time(NULL);
                                                if ((now - last_fail_time) < 5)
@@ -10404,7 +10408,7 @@ static bool
 CheckForStandbyTrigger(void)
 {
        struct stat stat_buf;
-       static bool     triggered = false;
+       static bool triggered = false;
 
        if (triggered)
                return true;
@@ -10446,8 +10450,8 @@ CheckPromoteSignal(void)
        if (stat(PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
        {
                /*
-                * Since we are in a signal handler, it's not safe
-                * to elog. We silently ignore any error from unlink.
+                * Since we are in a signal handler, it's not safe to elog. We
+                * silently ignore any error from unlink.
                 */
                unlink(PROMOTE_SIGNAL_FILE);
                return true;
index aa3d59d4c9a31fbdebd07e1e62785f9f8d22d30d..693b6343984bbe5f5573f2c6e9e9c28a30c5210b 100644
@@ -1011,8 +1011,8 @@ SetDefaultACLsInSchemas(InternalDefaultACL *iacls, List *nspnames)
 
                        /*
                         * Note that we must do the permissions check against the target
-                        * role not the calling user.  We require CREATE privileges,
-                        * since without CREATE you won't be able to do anything using the
+                        * role not the calling user.  We require CREATE privileges, since
+                        * without CREATE you won't be able to do anything using the
                         * default privs anyway.
                         */
                        iacls->nspid = get_namespace_oid(nspname, false);
@@ -1707,7 +1707,7 @@ ExecGrant_Relation(InternalGrant *istmt)
                        pg_class_tuple->relkind != RELKIND_FOREIGN_TABLE)
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                       errmsg("\"%s\" is not a foreign table",
+                                        errmsg("\"%s\" is not a foreign table",
                                                        NameStr(pg_class_tuple->relname))));
 
                /* Adjust the default permissions based on object type */
@@ -1964,13 +1964,13 @@ ExecGrant_Relation(InternalGrant *istmt)
                                this_privileges &= (AclMode) ACL_SELECT;
                        }
                        else if (pg_class_tuple->relkind == RELKIND_FOREIGN_TABLE &&
-                               this_privileges & ~((AclMode) ACL_SELECT))
+                                        this_privileges & ~((AclMode) ACL_SELECT))
                        {
                                /* Foreign tables have the same restriction as sequences. */
                                ereport(WARNING,
-                                       (errcode(ERRCODE_INVALID_GRANT_OPERATION),
-                                        errmsg("foreign table \"%s\" only supports SELECT column privileges",
-                                                       NameStr(pg_class_tuple->relname))));
+                                               (errcode(ERRCODE_INVALID_GRANT_OPERATION),
+                                                errmsg("foreign table \"%s\" only supports SELECT column privileges",
+                                                               NameStr(pg_class_tuple->relname))));
                                this_privileges &= (AclMode) ACL_SELECT;
                        }
 
@@ -4768,7 +4768,7 @@ pg_extension_ownercheck(Oid ext_oid, Oid roleid)
  * Note: roles do not have owners per se; instead we use this test in
  * places where an ownership-like permissions test is needed for a role.
  * Be sure to apply it to the role trying to do the operation, not the
- * role being operated on!  Also note that this generally should not be
+ * role being operated on!     Also note that this generally should not be
  * considered enough privilege if the target role is a superuser.
  * (We don't handle that consideration here because we want to give a
  * separate error message for such cases, so the caller has to deal with it.)
index 12935754bc191f33d6219e213f2e19c1af04b0af..cbce0072de190e5c79d101cd1f1b1995b9ee2220 100644
@@ -80,11 +80,11 @@ forkname_to_number(char *forkName)
 
 /*
  * forkname_chars
- *             We use this to figure out whether a filename could be a relation
- *             fork (as opposed to an oddly named stray file that somehow ended
- *             up in the database directory).  If the passed string begins with
- *             a fork name (other than the main fork name), we return its length,
- *         and set *fork (if not NULL) to the fork number.  If not, we return 0.
+ *             We use this to figure out whether a filename could be a relation
+ *             fork (as opposed to an oddly named stray file that somehow ended
+ *             up in the database directory).  If the passed string begins with
+ *             a fork name (other than the main fork name), we return its length,
+ *             and set *fork (if not NULL) to the fork number.  If not, we return 0.
  *
  * Note that the present coding assumes that there are no fork names which
  * are prefixes of other fork names.
@@ -96,7 +96,8 @@ forkname_chars(const char *str, ForkNumber *fork)
 
        for (forkNum = 1; forkNum <= MAX_FORKNUM; forkNum++)
        {
-               int len = strlen(forkNames[forkNum]);
+               int                     len = strlen(forkNames[forkNum]);
+
                if (strncmp(forkNames[forkNum], str, len) == 0)
                {
                        if (fork)
@@ -150,7 +151,7 @@ relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum)
                {
                        /* OIDCHARS will suffice for an integer, too */
                        pathlen = 5 + OIDCHARS + 2 + OIDCHARS + 1 + OIDCHARS + 1
-                                       + FORKNAMECHARS + 1;
+                               + FORKNAMECHARS + 1;
                        path = (char *) palloc(pathlen);
                        if (forknum != MAIN_FORKNUM)
                                snprintf(path, pathlen, "base/%u/t%d_%u_%s",
@@ -167,8 +168,8 @@ relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum)
                if (backend == InvalidBackendId)
                {
                        pathlen = 9 + 1 + OIDCHARS + 1
-                                       + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 1
-                                       + OIDCHARS + 1 + FORKNAMECHARS + 1;
+                               + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 1
+                               + OIDCHARS + 1 + FORKNAMECHARS + 1;
                        path = (char *) palloc(pathlen);
                        if (forknum != MAIN_FORKNUM)
                                snprintf(path, pathlen, "pg_tblspc/%u/%s/%u/%u_%s",
@@ -184,8 +185,8 @@ relpathbackend(RelFileNode rnode, BackendId backend, ForkNumber forknum)
                {
                        /* OIDCHARS will suffice for an integer, too */
                        pathlen = 9 + 1 + OIDCHARS + 1
-                                       + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 2
-                                       + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1;
+                               + strlen(TABLESPACE_VERSION_DIRECTORY) + 1 + OIDCHARS + 2
+                               + OIDCHARS + 1 + OIDCHARS + 1 + FORKNAMECHARS + 1;
                        path = (char *) palloc(pathlen);
                        if (forknum != MAIN_FORKNUM)
                                snprintf(path, pathlen, "pg_tblspc/%u/%s/%u/t%d_%u_%s",
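
The pathlen expressions being re-indented above budget one term per component of the corresponding format string. As a worked illustration for the temp-relation case under "base/" (format "base/%u/t%d_%u_%s"), with OIDCHARS = 10 and FORKNAMECHARS = 4 assumed as stand-ins for the real header constants:

    #include <stdio.h>
    #include <string.h>

    #define OIDCHARS        10      /* assumed: digits in the largest 32-bit OID */
    #define FORKNAMECHARS   4       /* assumed: length of the longest fork name */

    int
    main(void)
    {
        /* "base/" + db OID + "/t" + backend id + "_" + relfilenode + "_" + fork + NUL */
        int         pathlen = 5 + OIDCHARS + 2 + OIDCHARS + 1 + OIDCHARS + 1
        + FORKNAMECHARS + 1;
        char        path[64];

        snprintf(path, sizeof(path), "base/%u/t%d_%u_%s", 16384u, 7, 24576u, "vm");
        printf("budget %d bytes, actual \"%s\" (%zu bytes)\n",
               pathlen, path, strlen(path) + 1);
        return 0;
    }
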
index de24ef7a094c66f4cdd1e1549ec6f5006ca5b39e..ec9bb48c638946775a5d9fe9c6ca44997cc43c2d 100644 (file)
@@ -160,7 +160,7 @@ static const Oid object_classes[MAX_OCLASS] = {
        ForeignServerRelationId,        /* OCLASS_FOREIGN_SERVER */
        UserMappingRelationId,          /* OCLASS_USER_MAPPING */
        DefaultAclRelationId,           /* OCLASS_DEFACL */
-       ExtensionRelationId             /* OCLASS_EXTENSION */
+       ExtensionRelationId                     /* OCLASS_EXTENSION */
 };
 
 
@@ -1021,8 +1021,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel)
 
        /*
         * Delete any comments or security labels associated with this object.
-        * (This is a convenient place to do these things, rather than having every
-        * object type know to do it.)
+        * (This is a convenient place to do these things, rather than having
+        * every object type know to do it.)
         */
        DeleteComments(object->objectId, object->classId, object->objectSubId);
        DeleteSecurityLabel(object);
@@ -1263,7 +1263,7 @@ recordDependencyOnExpr(const ObjectAddress *depender,
  * whereas 'behavior' is used for everything else.
  *
  * NOTE: the caller should ensure that a whole-table dependency on the
- * specified relation is created separately, if one is needed.  In particular,
+ * specified relation is created separately, if one is needed. In particular,
  * a whole-row Var "relation.*" will not cause this routine to emit any
  * dependency item.  This is appropriate behavior for subexpressions of an
  * ordinary query, so other cases need to cope as necessary.
@@ -1383,7 +1383,7 @@ find_expr_references_walker(Node *node,
 
                /*
                 * A whole-row Var references no specific columns, so adds no new
-                * dependency.  (We assume that there is a whole-table dependency
+                * dependency.  (We assume that there is a whole-table dependency
                 * arising from each underlying rangetable entry.  While we could
                 * record such a dependency when finding a whole-row Var that
                 * references a relation directly, it's quite unclear how to extend
@@ -1431,8 +1431,8 @@ find_expr_references_walker(Node *node,
 
                /*
                 * We must also depend on the constant's collation: it could be
-                * different from the datatype's, if a CollateExpr was const-folded
-                * to a simple constant.  However we can save work in the most common
+                * different from the datatype's, if a CollateExpr was const-folded to
+                * a simple constant.  However we can save work in the most common
                 * case where the collation is "default", since we know that's pinned.
                 */
                if (OidIsValid(con->constcollid) &&
@@ -1695,7 +1695,7 @@ find_expr_references_walker(Node *node,
                                        }
                                        foreach(ct, rte->funccolcollations)
                                        {
-                                               Oid collid = lfirst_oid(ct);
+                                               Oid                     collid = lfirst_oid(ct);
 
                                                if (OidIsValid(collid) &&
                                                        collid != DEFAULT_COLLATION_OID)
@@ -2224,12 +2224,12 @@ getObjectDescription(const ObjectAddress *object)
                                HeapTuple       collTup;
 
                                collTup = SearchSysCache1(COLLOID,
-                                                                                ObjectIdGetDatum(object->objectId));
+                                                                                 ObjectIdGetDatum(object->objectId));
                                if (!HeapTupleIsValid(collTup))
                                        elog(ERROR, "cache lookup failed for collation %u",
                                                 object->objectId);
                                appendStringInfo(&buffer, _("collation %s"),
-                                NameStr(((Form_pg_collation) GETSTRUCT(collTup))->collname));
+                               NameStr(((Form_pg_collation) GETSTRUCT(collTup))->collname));
                                ReleaseSysCache(collTup);
                                break;
                        }
@@ -2796,7 +2796,7 @@ getObjectDescription(const ObjectAddress *object)
 char *
 getObjectDescriptionOids(Oid classid, Oid objid)
 {
-       ObjectAddress   address;
+       ObjectAddress address;
 
        address.classId = classid;
        address.objectId = objid;
index 5d25ce9ec88f82e9c6da5e6e3b13f5b16773ad1c..09b26a5c724066e6c2c39f686953b771e22f7741 100644 (file)
@@ -431,7 +431,7 @@ CheckAttributeNamesTypes(TupleDesc tupdesc, char relkind,
                CheckAttributeType(NameStr(tupdesc->attrs[i]->attname),
                                                   tupdesc->attrs[i]->atttypid,
                                                   tupdesc->attrs[i]->attcollation,
-                                                  NIL, /* assume we're creating a new rowtype */
+                                                  NIL, /* assume we're creating a new rowtype */
                                                   allow_system_table_mods);
        }
 }
@@ -497,7 +497,7 @@ CheckAttributeType(const char *attname,
                int                     i;
 
                /*
-                * Check for self-containment.  Eventually we might be able to allow
+                * Check for self-containment.  Eventually we might be able to allow
                 * this (just return without complaint, if so) but it's not clear how
                 * many other places would require anti-recursion defenses before it
                 * would be safe to allow tables to contain their own rowtype.
@@ -505,8 +505,8 @@ CheckAttributeType(const char *attname,
                if (list_member_oid(containing_rowtypes, atttypid))
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
-                                        errmsg("composite type %s cannot be made a member of itself",
-                                                       format_type_be(atttypid))));
+                               errmsg("composite type %s cannot be made a member of itself",
+                                          format_type_be(atttypid))));
 
                containing_rowtypes = lcons_oid(atttypid, containing_rowtypes);
 
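
The comment re-wrapped above describes the anti-recursion defense in CheckAttributeType(): each composite type currently being descended into is pushed onto containing_rowtypes via lcons_oid(), as shown just above, and meeting one of those types again raises the self-containment error. A toy sketch of that pattern, using an invented two-type containment cycle instead of real catalog lookups:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int Oid;

    /* Toy "catalog": type 100 contains a column of type 200 and vice versa. */
    static Oid
    member_rowtype(Oid typid)
    {
        if (typid == 100)
            return 200;
        if (typid == 200)
            return 100;
        return 0;               /* no composite member */
    }

    static bool
    check_rowtype(Oid typid, Oid *open_types, int depth)
    {
        int         i;
        Oid         member;

        for (i = 0; i < depth; i++)
            if (open_types[i] == typid)
            {
                printf("composite type %u cannot be made a member of itself\n", typid);
                return false;
            }
        open_types[depth] = typid;      /* plays the role of lcons_oid() */
        member = member_rowtype(typid);
        return member == 0 || check_rowtype(member, open_types, depth + 1);
    }

    int
    main(void)
    {
        Oid         open_types[16];

        return check_rowtype(100, open_types, 0) ? 0 : 1;
    }
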
@@ -541,15 +541,15 @@ CheckAttributeType(const char *attname,
        }
 
        /*
-        * This might not be strictly invalid per SQL standard, but it is
-        * pretty useless, and it cannot be dumped, so we must disallow it.
+        * This might not be strictly invalid per SQL standard, but it is pretty
+        * useless, and it cannot be dumped, so we must disallow it.
         */
        if (!OidIsValid(attcollation) && type_is_collatable(atttypid))
-                       ereport(ERROR,
-                                       (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
-                                        errmsg("no collation was derived for column \"%s\" with collatable type %s",
-                                                       attname, format_type_be(atttypid)),
-                                        errhint("Use the COLLATE clause to set the collation explicitly.")));
+               ereport(ERROR,
+                               (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
+                                errmsg("no collation was derived for column \"%s\" with collatable type %s",
+                                               attname, format_type_be(atttypid)),
+               errhint("Use the COLLATE clause to set the collation explicitly.")));
 }
 
 /*
@@ -921,7 +921,7 @@ AddNewRelationType(const char *typeName,
                                   -1,                  /* typmod */
                                   0,                   /* array dimensions for typBaseType */
                                   false,               /* Type NOT NULL */
-                                  InvalidOid); /* typcollation */
+                                  InvalidOid); /* typcollation */
 }
 
 /* --------------------------------
@@ -992,9 +992,9 @@ heap_create_with_catalog(const char *relname,
        CheckAttributeNamesTypes(tupdesc, relkind, allow_system_table_mods);
 
        /*
-        * If the relation already exists, it's an error, unless the user specifies
-        * "IF NOT EXISTS".  In that case, we just print a notice and do nothing
-        * further.
+        * If the relation already exists, it's an error, unless the user
+        * specifies "IF NOT EXISTS".  In that case, we just print a notice and do
+        * nothing further.
         */
        existing_relid = get_relname_relid(relname, relnamespace);
        if (existing_relid != InvalidOid)
@@ -1004,7 +1004,7 @@ heap_create_with_catalog(const char *relname,
                        ereport(NOTICE,
                                        (errcode(ERRCODE_DUPLICATE_TABLE),
                                         errmsg("relation \"%s\" already exists, skipping",
-                                        relname)));
+                                                       relname)));
                        heap_close(pg_class_desc, RowExclusiveLock);
                        return InvalidOid;
                }
@@ -1048,8 +1048,8 @@ heap_create_with_catalog(const char *relname,
        if (!OidIsValid(relid))
        {
                /*
-                *      Use binary-upgrade override for pg_class.oid/relfilenode,
-                *      if supplied.
+                * Use binary-upgrade override for pg_class.oid/relfilenode, if
+                * supplied.
                 */
                if (OidIsValid(binary_upgrade_next_heap_pg_class_oid) &&
                        (relkind == RELKIND_RELATION || relkind == RELKIND_SEQUENCE ||
@@ -1183,7 +1183,7 @@ heap_create_with_catalog(const char *relname,
                                   -1,                  /* typmod */
                                   0,                   /* array dimensions for typBaseType */
                                   false,               /* Type NOT NULL */
-                                  InvalidOid); /* typcollation */
+                                  InvalidOid); /* typcollation */
 
                pfree(relarrayname);
        }
@@ -1285,12 +1285,12 @@ heap_create_with_catalog(const char *relname,
                register_on_commit_action(relid, oncommit);
 
        /*
-        * If this is an unlogged relation, it needs an init fork so that it
-        * can be correctly reinitialized on restart.  Since we're going to
-        * do an immediate sync, we ony need to xlog this if archiving or
-        * streaming is enabled.  And the immediate sync is required, because
-        * otherwise there's no guarantee that this will hit the disk before
-        * the next checkpoint moves the redo pointer.
+        * If this is an unlogged relation, it needs an init fork so that it can
+        * be correctly reinitialized on restart.  Since we're going to do an
+        * immediate sync, we ony need to xlog this if archiving or streaming is
+        * enabled.  And the immediate sync is required, because otherwise there's
+        * no guarantee that this will hit the disk before the next checkpoint
+        * moves the redo pointer.
         */
        if (relpersistence == RELPERSISTENCE_UNLOGGED)
        {
@@ -1654,8 +1654,8 @@ heap_drop_with_catalog(Oid relid)
 
        /*
         * There can no longer be anyone *else* touching the relation, but we
-        * might still have open queries or cursors, or pending trigger events,
-        * in our own session.
+        * might still have open queries or cursors, or pending trigger events, in
+        * our own session.
         */
        CheckTableNotInUse(rel, "DROP TABLE");
 
@@ -1664,8 +1664,8 @@ heap_drop_with_catalog(Oid relid)
         */
        if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
        {
-               Relation    rel;
-               HeapTuple   tuple;
+               Relation        rel;
+               HeapTuple       tuple;
 
                rel = heap_open(ForeignTableRelationId, RowExclusiveLock);
 
@@ -1899,7 +1899,7 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr,
                                                  CONSTRAINT_CHECK,             /* Constraint Type */
                                                  false,        /* Is Deferrable */
                                                  false,        /* Is Deferred */
-                                                 true,         /* Is Validated */
+                                                 true, /* Is Validated */
                                                  RelationGetRelid(rel),                /* relation */
                                                  attNos,               /* attrs in the constraint */
                                                  keycount,             /* # attrs in the constraint */
index 679255a199bfc1e13b4bc46068cd0ad8d77b9462..1bf74b3d4fa664abf012917d66fa68af3d0fa70a 100644 (file)
@@ -187,18 +187,18 @@ index_check_primary_key(Relation heapRel,
        int                     i;
 
        /*
-        * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In
-        * CREATE TABLE, we have faith that the parser rejected multiple pkey
-        * clauses; and CREATE INDEX doesn't have a way to say PRIMARY KEY, so
-        * it's no problem either.
+        * If ALTER TABLE, check that there isn't already a PRIMARY KEY. In CREATE
+        * TABLE, we have faith that the parser rejected multiple pkey clauses;
+        * and CREATE INDEX doesn't have a way to say PRIMARY KEY, so it's no
+        * problem either.
         */
        if (is_alter_table &&
                relationHasPrimaryKey(heapRel))
        {
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
-                                errmsg("multiple primary keys for table \"%s\" are not allowed",
-                                               RelationGetRelationName(heapRel))));
+                        errmsg("multiple primary keys for table \"%s\" are not allowed",
+                                       RelationGetRelationName(heapRel))));
        }
 
        /*
@@ -222,7 +222,7 @@ index_check_primary_key(Relation heapRel,
                        continue;
 
                atttuple = SearchSysCache2(ATTNUM,
-                                                                  ObjectIdGetDatum(RelationGetRelid(heapRel)),
+                                                                ObjectIdGetDatum(RelationGetRelid(heapRel)),
                                                                   Int16GetDatum(attnum));
                if (!HeapTupleIsValid(atttuple))
                        elog(ERROR, "cache lookup failed for attribute %d of relation %u",
@@ -243,15 +243,14 @@ index_check_primary_key(Relation heapRel,
        }
 
        /*
-        * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child
-        * tables?      Currently, since the PRIMARY KEY itself doesn't cascade,
-        * we don't cascade the notnull constraint(s) either; but this is
-        * pretty debatable.
+        * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child tables?
+        * Currently, since the PRIMARY KEY itself doesn't cascade, we don't
+        * cascade the notnull constraint(s) either; but this is pretty debatable.
         *
-        * XXX: possible future improvement: when being called from ALTER
-        * TABLE, it would be more efficient to merge this with the outer
-        * ALTER TABLE, so as to avoid two scans.  But that seems to
-        * complicate DefineIndex's API unduly.
+        * XXX: possible future improvement: when being called from ALTER TABLE,
+        * it would be more efficient to merge this with the outer ALTER TABLE, so
+        * as to avoid two scans.  But that seems to complicate DefineIndex's API
+        * unduly.
         */
        if (cmds)
                AlterTableInternal(RelationGetRelid(heapRel), cmds, false);
@@ -788,8 +787,8 @@ index_create(Relation heapRelation,
        if (!OidIsValid(indexRelationId))
        {
                /*
-                *      Use binary-upgrade override for pg_class.oid/relfilenode,
-                *      if supplied.
+                * Use binary-upgrade override for pg_class.oid/relfilenode, if
+                * supplied.
                 */
                if (OidIsValid(binary_upgrade_next_index_pg_class_oid))
                {
@@ -872,7 +871,7 @@ index_create(Relation heapRelation,
         * ----------------
         */
        UpdateIndexRelation(indexRelationId, heapRelationId, indexInfo,
-                                               collationObjectId, classObjectId, coloptions, isprimary, is_exclusion,
+          collationObjectId, classObjectId, coloptions, isprimary, is_exclusion,
                                                !deferrable,
                                                !concurrent);
 
@@ -947,7 +946,7 @@ index_create(Relation heapRelation,
 
                        /*
                         * If there are no simply-referenced columns, give the index an
-                        * auto dependency on the whole table.  In most cases, this will
+                        * auto dependency on the whole table.  In most cases, this will
                         * be redundant, but it might not be if the index expressions and
                         * predicate contain no Vars or only whole-row Vars.
                         */
@@ -1067,7 +1066,7 @@ index_create(Relation heapRelation,
 
        /*
         * Close the index; but we keep the lock that we acquired above until end
-        * of transaction.  Closing the heap is caller's responsibility.
+        * of transaction.      Closing the heap is caller's responsibility.
         */
        index_close(indexRelation, NoLock);
 
@@ -1176,8 +1175,8 @@ index_constraint_create(Relation heapRelation,
 
        /*
         * If the constraint is deferrable, create the deferred uniqueness
-        * checking trigger.  (The trigger will be given an internal
-        * dependency on the constraint by CreateTrigger.)
+        * checking trigger.  (The trigger will be given an internal dependency on
+        * the constraint by CreateTrigger.)
         */
        if (deferrable)
        {
@@ -1213,7 +1212,7 @@ index_constraint_create(Relation heapRelation,
         * have been so marked already, so no need to clear the flag in the other
         * case.
         *
-        * Note: this might better be done by callers.  We do it here to avoid
+        * Note: this might better be done by callers.  We do it here to avoid
         * exposing index_update_stats() globally, but that wouldn't be necessary
         * if relhaspkey went away.
         */
@@ -1235,10 +1234,10 @@ index_constraint_create(Relation heapRelation,
         */
        if (update_pgindex && (mark_as_primary || deferrable))
        {
-               Relation                pg_index;
-               HeapTuple               indexTuple;
-               Form_pg_index   indexForm;
-               bool                    dirty = false;
+               Relation        pg_index;
+               HeapTuple       indexTuple;
+               Form_pg_index indexForm;
+               bool            dirty = false;
 
                pg_index = heap_open(IndexRelationId, RowExclusiveLock);
 
@@ -1303,8 +1302,8 @@ index_drop(Oid indexId)
        userIndexRelation = index_open(indexId, AccessExclusiveLock);
 
        /*
-        * There can no longer be anyone *else* touching the index, but we
-        * might still have open queries using it in our own session.
+        * There can no longer be anyone *else* touching the index, but we might
+        * still have open queries using it in our own session.
         */
        CheckTableNotInUse(userIndexRelation, "DROP INDEX");
 
@@ -1739,7 +1738,8 @@ index_build(Relation heapRelation,
         */
        if (heapRelation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED)
        {
-               RegProcedure    ambuildempty = indexRelation->rd_am->ambuildempty;
+               RegProcedure ambuildempty = indexRelation->rd_am->ambuildempty;
+
                RelationOpenSmgr(indexRelation);
                smgrcreate(indexRelation->rd_smgr, INIT_FORKNUM, false);
                OidFunctionCall1(ambuildempty, PointerGetDatum(indexRelation));
@@ -2410,7 +2410,7 @@ validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
        ivinfo.strategy = NULL;
 
        state.tuplesort = tuplesort_begin_datum(TIDOID,
-                                                                                       TIDLessOperator, InvalidOid, false,
+                                                                                 TIDLessOperator, InvalidOid, false,
                                                                                        maintenance_work_mem,
                                                                                        false);
        state.htups = state.itups = state.tups_inserted = 0;
@@ -2834,7 +2834,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks)
  * use catalog indexes while collecting the list.)
  *
  * To avoid deadlocks, VACUUM FULL or CLUSTER on a system catalog must omit the
- * REINDEX_CHECK_CONSTRAINTS flag.  REINDEX should be used to rebuild an index
+ * REINDEX_CHECK_CONSTRAINTS flag.     REINDEX should be used to rebuild an index
  * if constraint inconsistency is suspected.  For optimal performance, other
  * callers should include the flag only after transforming the data in a manner
  * that risks a change in constraint validity.
index 734581e48557dcd72e8e7796729f344d00afd9ce..f8fd8276936a687e97655147845e4d4f9fa2f495 100644 (file)
@@ -2446,10 +2446,10 @@ CheckSetNamespace(Oid oldNspOid, Oid nspOid, Oid classid, Oid objid)
        if (oldNspOid == nspOid)
                ereport(ERROR,
                                (classid == RelationRelationId ?
-                                       errcode(ERRCODE_DUPLICATE_TABLE) :
+                                errcode(ERRCODE_DUPLICATE_TABLE) :
                                 classid == ProcedureRelationId ?
-                                       errcode(ERRCODE_DUPLICATE_FUNCTION) :
-                                       errcode(ERRCODE_DUPLICATE_OBJECT),
+                                errcode(ERRCODE_DUPLICATE_FUNCTION) :
+                                errcode(ERRCODE_DUPLICATE_OBJECT),
                                 errmsg("%s is already in schema \"%s\"",
                                                getObjectDescriptionOids(classid, objid),
                                                get_namespace_name(nspOid))));
@@ -2458,7 +2458,7 @@ CheckSetNamespace(Oid oldNspOid, Oid nspOid, Oid classid, Oid objid)
        if (isAnyTempNamespace(nspOid) || isAnyTempNamespace(oldNspOid))
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("cannot move objects into or out of temporary schemas")));
+                       errmsg("cannot move objects into or out of temporary schemas")));
 
        /* same for TOAST schema */
        if (nspOid == PG_TOAST_NAMESPACE || oldNspOid == PG_TOAST_NAMESPACE)
@@ -2525,7 +2525,7 @@ QualifiedNameGetCreationNamespace(List *names, char **objname_p)
 /*
  * get_namespace_oid - given a namespace name, look up the OID
  *
- * If missing_ok is false, throw an error if namespace name not found.  If
+ * If missing_ok is false, throw an error if namespace name not found. If
  * true, just return InvalidOid.
  */
 Oid
@@ -2535,9 +2535,9 @@ get_namespace_oid(const char *nspname, bool missing_ok)
 
        oid = GetSysCacheOid1(NAMESPACENAME, CStringGetDatum(nspname));
        if (!OidIsValid(oid) && !missing_ok)
-        ereport(ERROR,
-                (errcode(ERRCODE_UNDEFINED_SCHEMA),
-                 errmsg("schema \"%s\" does not exist", nspname)));
+               ereport(ERROR,
+                               (errcode(ERRCODE_UNDEFINED_SCHEMA),
+                                errmsg("schema \"%s\" does not exist", nspname)));
 
        return oid;
 }
@@ -2727,7 +2727,7 @@ GetTempNamespaceBackendId(Oid namespaceId)
        /* See if the namespace name starts with "pg_temp_" or "pg_toast_temp_" */
        nspname = get_namespace_name(namespaceId);
        if (!nspname)
-               return InvalidBackendId;                                /* no such namespace? */
+               return InvalidBackendId;        /* no such namespace? */
        if (strncmp(nspname, "pg_temp_", 8) == 0)
                result = atoi(nspname + 8);
        else if (strncmp(nspname, "pg_toast_temp_", 14) == 0)
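
The hunk above is the prefix-and-atoi parse in GetTempNamespaceBackendId(); the magic numbers 8 and 14 are simply strlen("pg_temp_") and strlen("pg_toast_temp_"). A standalone restatement of that parse, with -1 standing in for InvalidBackendId:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int
    temp_namespace_backend_id(const char *nspname)
    {
        if (strncmp(nspname, "pg_temp_", 8) == 0)
            return atoi(nspname + 8);
        if (strncmp(nspname, "pg_toast_temp_", 14) == 0)
            return atoi(nspname + 14);
        return -1;              /* not a backend-private namespace */
    }

    int
    main(void)
    {
        printf("%d %d %d\n",
               temp_namespace_backend_id("pg_temp_3"),
               temp_namespace_backend_id("pg_toast_temp_12"),
               temp_namespace_backend_id("public"));   /* 3 12 -1 */
        return 0;
    }
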
@@ -2798,8 +2798,8 @@ GetOverrideSearchPath(MemoryContext context)
  *
  * It's possible that newpath->useTemp is set but there is no longer any
  * active temp namespace, if the path was saved during a transaction that
- * created a temp namespace and was later rolled back.  In that case we just
- * ignore useTemp.  A plausible alternative would be to create a new temp
+ * created a temp namespace and was later rolled back. In that case we just
+ * ignore useTemp.     A plausible alternative would be to create a new temp
  * namespace, but for existing callers that's not necessary because an empty
  * temp namespace wouldn't affect their results anyway.
  *
@@ -3522,7 +3522,7 @@ check_search_path(char **newval, void **extra, GucSource source)
                                if (source == PGC_S_TEST)
                                        ereport(NOTICE,
                                                        (errcode(ERRCODE_UNDEFINED_SCHEMA),
-                                                        errmsg("schema \"%s\" does not exist", curname)));
+                                                  errmsg("schema \"%s\" does not exist", curname)));
                                else
                                {
                                        GUC_check_errdetail("schema \"%s\" does not exist", curname);
index 0d21d310a650a3b1f01cc4c554e4e6ac06b63fba..bf25091582ff2be7bb6239c150c495065c00ceed 100644 (file)
@@ -78,7 +78,7 @@ static Relation get_relation_by_qualified_name(ObjectType objtype,
 static ObjectAddress get_object_address_relobject(ObjectType objtype,
                                                         List *objname, Relation *relp);
 static ObjectAddress get_object_address_attribute(ObjectType objtype,
-                                                        List *objname, Relation *relp, LOCKMODE lockmode);
+                                                  List *objname, Relation *relp, LOCKMODE lockmode);
 static ObjectAddress get_object_address_opcf(ObjectType objtype, List *objname,
                                                List *objargs);
 static bool object_exists(ObjectAddress address);
@@ -108,8 +108,8 @@ ObjectAddress
 get_object_address(ObjectType objtype, List *objname, List *objargs,
                                   Relation *relp, LOCKMODE lockmode)
 {
-       ObjectAddress   address;
-       Relation                relation = NULL;
+       ObjectAddress address;
+       Relation        relation = NULL;
 
        /* Some kind of lock must be taken. */
        Assert(lockmode != NoLock);
@@ -130,7 +130,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
                case OBJECT_COLUMN:
                        address =
                                get_object_address_attribute(objtype, objname, &relation,
-                                       lockmode);
+                                                                                        lockmode);
                        break;
                case OBJECT_RULE:
                case OBJECT_TRIGGER:
@@ -201,10 +201,10 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
                        break;
                case OBJECT_CAST:
                        {
-                               TypeName *sourcetype = (TypeName *) linitial(objname);
-                               TypeName *targettype = (TypeName *) linitial(objargs);
-                               Oid sourcetypeid = typenameTypeId(NULL, sourcetype);
-                               Oid targettypeid = typenameTypeId(NULL, targettype);
+                               TypeName   *sourcetype = (TypeName *) linitial(objname);
+                               TypeName   *targettype = (TypeName *) linitial(objargs);
+                               Oid                     sourcetypeid = typenameTypeId(NULL, sourcetype);
+                               Oid                     targettypeid = typenameTypeId(NULL, targettype);
 
                                address.classId = CastRelationId;
                                address.objectId =
@@ -242,8 +242,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
 
        /*
         * If we're dealing with a relation or attribute, then the relation is
-        * already locked.  If we're dealing with any other type of object, we need
-        * to lock it and then verify that it still exists.
+        * already locked.      If we're dealing with any other type of object, we
+        * need to lock it and then verify that it still exists.
         */
        if (address.classId != RelationRelationId)
        {
@@ -308,7 +308,7 @@ get_object_address_unqualified(ObjectType objtype, List *qualname)
                                break;
                        default:
                                elog(ERROR, "unrecognized objtype: %d", (int) objtype);
-                               msg = NULL;                     /* placate compiler */
+                               msg = NULL;             /* placate compiler */
                }
                ereport(ERROR,
                                (errcode(ERRCODE_SYNTAX_ERROR),
@@ -379,7 +379,7 @@ static Relation
 get_relation_by_qualified_name(ObjectType objtype, List *objname,
                                                           LOCKMODE lockmode)
 {
-       Relation relation;
+       Relation        relation;
 
        relation = relation_openrv(makeRangeVarFromNameList(objname), lockmode);
        switch (objtype)
@@ -449,7 +449,7 @@ get_object_address_relobject(ObjectType objtype, List *objname, Relation *relp)
        nnames = list_length(objname);
        if (nnames < 2)
        {
-               Oid             reloid;
+               Oid                     reloid;
 
                /*
                 * For compatibility with very old releases, we sometimes allow users
@@ -514,7 +514,7 @@ static ObjectAddress
 get_object_address_attribute(ObjectType objtype, List *objname,
                                                         Relation *relp, LOCKMODE lockmode)
 {
-       ObjectAddress   address;
+       ObjectAddress address;
        List       *relname;
        Oid                     reloid;
        Relation        relation;
@@ -534,7 +534,7 @@ get_object_address_attribute(ObjectType objtype, List *objname,
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_COLUMN),
                                 errmsg("column \"%s\" of relation \"%s\" does not exist",
-                                attname, RelationGetRelationName(relation))));
+                                               attname, RelationGetRelationName(relation))));
 
        *relp = relation;
        return address;
@@ -584,8 +584,8 @@ object_exists(ObjectAddress address)
        int                     cache = -1;
        Oid                     indexoid = InvalidOid;
        Relation        rel;
-       ScanKeyData     skey[1];
-       SysScanDesc     sd;
+       ScanKeyData skey[1];
+       SysScanDesc sd;
        bool            found;
 
        /* Sub-objects require special treatment. */
@@ -609,9 +609,9 @@ object_exists(ObjectAddress address)
 
        /*
         * For object types that have a relevant syscache, we use it; for
-        * everything else, we'll have to do an index-scan.  This switch
-        * sets either the cache to be used for the syscache lookup, or the
-        * index to be used for the index scan.
+        * everything else, we'll have to do an index-scan.  This switch sets
+        * either the cache to be used for the syscache lookup, or the index to be
+        * used for the index scan.
         */
        switch (address.classId)
        {
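
The comment re-wrapped above describes a two-way dispatch in object_exists(): the switch either picks a syscache to probe or an index to scan, and the code after the switch follows whichever was set. A schematic sketch of that shape follows; the class IDs, cache numbers, and probe functions are all invented for illustration and are not PostgreSQL APIs.

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned int Oid;

    /* Stubs standing in for a syscache probe and a catalog index scan. */
    static bool
    syscache_probe(int cache, Oid objid)
    {
        return (objid % 2) == 0;    /* pretend even OIDs exist */
    }

    static bool
    index_scan_probe(Oid indexoid, Oid objid)
    {
        return objid > 1000;        /* pretend large OIDs exist */
    }

    static bool
    object_exists_sketch(Oid classid, Oid objid)
    {
        int         cache = -1;
        Oid         indexoid = 0;

        switch (classid)
        {
            case 1:
                cache = 42;         /* this class has a usable syscache */
                break;
            case 2:
                indexoid = 7777;    /* this one only has an OID index */
                break;
        }

        if (cache != -1)
            return syscache_probe(cache, objid);
        return index_scan_probe(indexoid, objid);
    }

    int
    main(void)
    {
        printf("%d %d\n", object_exists_sketch(1, 10), object_exists_sketch(2, 10));
        return 0;
    }
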
@@ -664,6 +664,7 @@ object_exists(ObjectAddress address)
                        cache = OPFAMILYOID;
                        break;
                case LargeObjectRelationId:
+
                        /*
                         * Weird backward compatibility hack: ObjectAddress notation uses
                         * LargeObjectRelationId for large objects, but since PostgreSQL
@@ -816,15 +817,15 @@ check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address,
                                ereport(ERROR,
                                                (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
                                                 errmsg("must be owner of large object %u",
-                                                       address.objectId)));
+                                                               address.objectId)));
                        break;
                case OBJECT_CAST:
                        {
                                /* We can only check permissions on the source/target types */
-                               TypeName *sourcetype = (TypeName *) linitial(objname);
-                               TypeName *targettype = (TypeName *) linitial(objargs);
-                               Oid sourcetypeid = typenameTypeId(NULL, sourcetype);
-                               Oid targettypeid = typenameTypeId(NULL, targettype);
+                               TypeName   *sourcetype = (TypeName *) linitial(objname);
+                               TypeName   *targettype = (TypeName *) linitial(objargs);
+                               Oid                     sourcetypeid = typenameTypeId(NULL, sourcetype);
+                               Oid                     targettypeid = typenameTypeId(NULL, targettype);
 
                                if (!pg_type_ownercheck(sourcetypeid, roleid)
                                        && !pg_type_ownercheck(targettypeid, roleid))
@@ -851,6 +852,7 @@ check_object_ownership(Oid roleid, ObjectType objtype, ObjectAddress address,
                                                           NameListToString(objname));
                        break;
                case OBJECT_ROLE:
+
                        /*
                         * We treat roles as being "owned" by those with CREATEROLE priv,
                         * except that superusers are only owned by superusers.
index 708078463ba79dbbd374da6b2a99b249fd75c169..5b92a4c0c2816d29d6e3379e6422b20a29b8743d 100644 (file)
@@ -46,7 +46,9 @@ CollationCreate(const char *collname, Oid collnamespace,
        HeapTuple       tup;
        Datum           values[Natts_pg_collation];
        bool            nulls[Natts_pg_collation];
-       NameData        name_name, name_collate, name_ctype;
+       NameData        name_name,
+                               name_collate,
+                               name_ctype;
        Oid                     oid;
        ObjectAddress myself,
                                referenced;
@@ -60,9 +62,9 @@ CollationCreate(const char *collname, Oid collnamespace,
        /*
         * Make sure there is no existing collation of same name & encoding.
         *
-        * This would be caught by the unique index anyway; we're just giving
-        * a friendlier error message.  The unique index provides a backstop
-        * against race conditions.
+        * This would be caught by the unique index anyway; we're just giving a
+        * friendlier error message.  The unique index provides a backstop against
+        * race conditions.
         */
        if (SearchSysCacheExists3(COLLNAMEENCNSP,
                                                          PointerGetDatum(collname),
@@ -74,9 +76,9 @@ CollationCreate(const char *collname, Oid collnamespace,
                                                collname, pg_encoding_to_char(collencoding))));
 
        /*
-        * Also forbid matching an any-encoding entry.  This test of course is
-        * not backed up by the unique index, but it's not a problem since we
-        * don't support adding any-encoding entries after initdb.
+        * Also forbid matching an any-encoding entry.  This test of course is not
+        * backed up by the unique index, but it's not a problem since we don't
+        * support adding any-encoding entries after initdb.
         */
        if (SearchSysCacheExists3(COLLNAMEENCNSP,
                                                          PointerGetDatum(collname),
index 6619eed431c937caa7c0eae2fac188468fb5e2bb..69979942af42a00922fb4bce3ada1fa622dd2212 100644 (file)
@@ -799,10 +799,10 @@ get_constraint_oid(Oid relid, const char *conname, bool missing_ok)
  * the rel of interest are Vars with the indicated varno/varlevelsup.
  *
  * Currently we only check to see if the rel has a primary key that is a
- * subset of the grouping_columns.  We could also use plain unique constraints
+ * subset of the grouping_columns.     We could also use plain unique constraints
  * if all their columns are known not null, but there's a problem: we need
  * to be able to represent the not-null-ness as part of the constraints added
- * to *constraintDeps.  FIXME whenever not-null constraints get represented
+ * to *constraintDeps. FIXME whenever not-null constraints get represented
  * in pg_constraint.
  */
 bool
@@ -852,7 +852,7 @@ check_functional_grouping(Oid relid,
                if (isNull)
                        elog(ERROR, "null conkey for constraint %u",
                                 HeapTupleGetOid(tuple));
-               arr = DatumGetArrayTypeP(adatum);       /* ensure not toasted */
+               arr = DatumGetArrayTypeP(adatum);               /* ensure not toasted */
                numkeys = ARR_DIMS(arr)[0];
                if (ARR_NDIM(arr) != 1 ||
                        numkeys < 0 ||
index 2bb7bb3d5fa920b7690959d37126589b0ef9147d..67aad86d4e705aa398c0f7a1f709aebef4a7da43 100644 (file)
@@ -126,7 +126,7 @@ recordMultipleDependencies(const ObjectAddress *depender,
 
 /*
  * If we are executing a CREATE EXTENSION operation, mark the given object
- * as being a member of the extension.  Otherwise, do nothing.
+ * as being a member of the extension. Otherwise, do nothing.
  *
  * This must be called during creation of any user-definable object type
  * that could be a member of an extension.
@@ -136,7 +136,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object)
 {
        if (creating_extension)
        {
-               ObjectAddress   extension;
+               ObjectAddress extension;
 
                extension.classId = ExtensionRelationId;
                extension.objectId = CurrentExtensionObject;
@@ -155,7 +155,7 @@ recordDependencyOnCurrentExtension(const ObjectAddress *object)
  * (possibly with some differences from before).
  *
  * If skipExtensionDeps is true, we do not delete any dependencies that
- * show that the given object is a member of an extension.  This avoids
+ * show that the given object is a member of an extension.     This avoids
  * needing a lot of extra logic to fetch and recreate that dependency.
  */
 long
@@ -185,7 +185,7 @@ deleteDependencyRecordsFor(Oid classId, Oid objectId,
        while (HeapTupleIsValid(tup = systable_getnext(scan)))
        {
                if (skipExtensionDeps &&
-                       ((Form_pg_depend) GETSTRUCT(tup))->deptype == DEPENDENCY_EXTENSION)
+                 ((Form_pg_depend) GETSTRUCT(tup))->deptype == DEPENDENCY_EXTENSION)
                        continue;
 
                simple_heap_delete(depRel, &tup->t_self);
index e87a9311bdd2dec32bac08239e5da9dd502ccf35..08d8aa13f332d6058572a51b4eff1d9871afed68 100644 (file)
@@ -29,7 +29,7 @@
 
 
 /* Potentially set by contrib/pg_upgrade_support functions */
-Oid      binary_upgrade_next_pg_enum_oid = InvalidOid;
+Oid                    binary_upgrade_next_pg_enum_oid = InvalidOid;
 
 static void RenumberEnumType(Relation pg_enum, HeapTuple *existing, int nelems);
 static int     oid_cmp(const void *p1, const void *p2);
@@ -58,9 +58,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
        num_elems = list_length(vals);
 
        /*
-        * We do not bother to check the list of values for duplicates --- if
-        * you have any, you'll get a less-than-friendly unique-index violation.
-        * It is probably not worth trying harder.
+        * We do not bother to check the list of values for duplicates --- if you
+        * have any, you'll get a less-than-friendly unique-index violation. It is
+        * probably not worth trying harder.
         */
 
        pg_enum = heap_open(EnumRelationId, RowExclusiveLock);
@@ -69,10 +69,9 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
         * Allocate OIDs for the enum's members.
         *
         * While this method does not absolutely guarantee that we generate no
-        * duplicate OIDs (since we haven't entered each oid into the table
-        * before allocating the next), trouble could only occur if the OID
-        * counter wraps all the way around before we finish. Which seems
-        * unlikely.
+        * duplicate OIDs (since we haven't entered each oid into the table before
+        * allocating the next), trouble could only occur if the OID counter wraps
+        * all the way around before we finish. Which seems unlikely.
         */
        oids = (Oid *) palloc(num_elems * sizeof(Oid));
 
@@ -83,9 +82,10 @@ EnumValuesCreate(Oid enumTypeOid, List *vals)
                 * tells the comparison functions the OIDs are in the correct sort
                 * order and can be compared directly.
                 */
-               Oid             new_oid;
+               Oid                     new_oid;
 
-               do {
+               do
+               {
                        new_oid = GetNewOid(pg_enum);
                } while (new_oid & 1);
                oids[elemno] = new_oid;
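
The do/while being re-braced above keeps drawing OIDs until it gets an even one; per the surrounding comment, even member OIDs tell the enum comparison functions that the values sort correctly by OID and can be compared directly. A minimal sketch of the retry loop, with an invented stand-in for GetNewOid():

    #include <stdio.h>

    typedef unsigned int Oid;

    static Oid
    fake_get_new_oid(void)
    {
        static Oid  counter = 16385;    /* start odd to force at least one retry */

        return counter++;
    }

    int
    main(void)
    {
        Oid         new_oid;

        do
        {
            new_oid = fake_get_new_oid();
        } while (new_oid & 1);          /* low bit set: odd, try again */

        printf("assigned even OID %u\n", new_oid);      /* 16386 */
        return 0;
    }
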
@@ -202,9 +202,9 @@ AddEnumLabel(Oid enumTypeOid,
        /*
         * Acquire a lock on the enum type, which we won't release until commit.
         * This ensures that two backends aren't concurrently modifying the same
-        * enum type.  Without that, we couldn't be sure to get a consistent
-        * view of the enum members via the syscache.  Note that this does not
-        * block other backends from inspecting the type; see comments for
+        * enum type.  Without that, we couldn't be sure to get a consistent view
+        * of the enum members via the syscache.  Note that this does not block
+        * other backends from inspecting the type; see comments for
         * RenumberEnumType.
         */
        LockDatabaseObject(TypeRelationId, enumTypeOid, 0, ExclusiveLock);
@@ -217,7 +217,7 @@ restart:
        /* Get the list of existing members of the enum */
        list = SearchSysCacheList1(ENUMTYPOIDNAME,
                                                           ObjectIdGetDatum(enumTypeOid));
-       nelems =  list->n_members;
+       nelems = list->n_members;
 
        /* Sort the existing members by enumsortorder */
        existing = (HeapTuple *) palloc(nelems * sizeof(HeapTuple));
@@ -229,8 +229,8 @@ restart:
        if (neighbor == NULL)
        {
                /*
-                * Put the new label at the end of the list.
-                * No change to existing tuples is required.
+                * Put the new label at the end of the list. No change to existing
+                * tuples is required.
                 */
                if (nelems > 0)
                {
@@ -244,10 +244,10 @@ restart:
        else
        {
                /* BEFORE or AFTER was specified */
-               int                             nbr_index;
-               int                             other_nbr_index;
-               Form_pg_enum    nbr_en;
-               Form_pg_enum    other_nbr_en;
+               int                     nbr_index;
+               int                     other_nbr_index;
+               Form_pg_enum nbr_en;
+               Form_pg_enum other_nbr_en;
 
                /* Locate the neighbor element */
                for (nbr_index = 0; nbr_index < nelems; nbr_index++)
@@ -265,14 +265,14 @@ restart:
                nbr_en = (Form_pg_enum) GETSTRUCT(existing[nbr_index]);
 
                /*
-                * Attempt to assign an appropriate enumsortorder value: one less
-                * than the smallest member, one more than the largest member,
-                * or halfway between two existing members.
+                * Attempt to assign an appropriate enumsortorder value: one less than
+                * the smallest member, one more than the largest member, or halfway
+                * between two existing members.
                 *
                 * In the "halfway" case, because of the finite precision of float4,
-                * we might compute a value that's actually equal to one or the
-                * other of its neighbors.  In that case we renumber the existing
-                * members and try again.
+                * we might compute a value that's actually equal to one or the other
+                * of its neighbors.  In that case we renumber the existing members
+                * and try again.
                 */
                if (newValIsAfter)
                        other_nbr_index = nbr_index + 1;
@@ -291,10 +291,10 @@ restart:
 
                        /*
                         * On some machines, newelemorder may be in a register that's
-                        * wider than float4.  We need to force it to be rounded to
-                        * float4 precision before making the following comparisons,
-                        * or we'll get wrong results.  (Such behavior violates the C
-                        * standard, but fixing the compilers is out of our reach.)
+                        * wider than float4.  We need to force it to be rounded to float4
+                        * precision before making the following comparisons, or we'll get
+                        * wrong results.  (Such behavior violates the C standard, but
+                        * fixing the compilers is out of our reach.)
                         */
                        newelemorder = DatumGetFloat4(Float4GetDatum(newelemorder));
 
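
The comment above warns that newelemorder may be held in a register wider than float4, so the halfway value between two adjacent sort orders has to be forced back to float4 precision before it is compared with its neighbors. Storing the midpoint into a plain float shows the collision the retry logic guards against:

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        float       lo = 1.0f;
        float       hi = nextafterf(lo, 2.0f);  /* the very next representable float4 */
        float       mid = (lo + hi) / 2.0f;     /* storing into "mid" rounds to float4 */

        if (mid == lo || mid == hi)
            printf("midpoint %.9g collides with a neighbor; renumber and retry\n", mid);
        else
            printf("midpoint %.9g is distinct\n", mid);
        return 0;
    }
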
@@ -314,9 +314,9 @@ restart:
        if (OidIsValid(binary_upgrade_next_pg_enum_oid))
        {
                /*
-                *      Use binary-upgrade override for pg_enum.oid, if supplied.
-                *      During binary upgrade, all pg_enum.oid's are set this way
-                *      so they are guaranteed to be consistent.
+                * Use binary-upgrade override for pg_enum.oid, if supplied. During
+                * binary upgrade, all pg_enum.oid's are set this way so they are
+                * guaranteed to be consistent.
                 */
                if (neighbor != NULL)
                        ereport(ERROR,
@@ -337,7 +337,7 @@ restart:
                 */
                for (;;)
                {
-                       bool    sorts_ok;
+                       bool            sorts_ok;
 
                        /* Get a new OID (different from all existing pg_enum tuples) */
                        newOid = GetNewOid(pg_enum);
@@ -345,8 +345,8 @@ restart:
                        /*
                         * Detect whether it sorts correctly relative to existing
                         * even-numbered labels of the enum.  We can ignore existing
-                        * labels with odd Oids, since a comparison involving one of
-                        * those will not take the fast path anyway.
+                        * labels with odd Oids, since a comparison involving one of those
+                        * will not take the fast path anyway.
                         */
                        sorts_ok = true;
                        for (i = 0; i < nelems; i++)
@@ -385,9 +385,9 @@ restart:
                                        break;
 
                                /*
-                                * If it's odd, and sorts OK, loop back to get another OID
-                                * and try again.  Probably, the next available even OID
-                                * will sort correctly too, so it's worth trying.
+                                * If it's odd, and sorts OK, loop back to get another OID and
+                                * try again.  Probably, the next available even OID will sort
+                                * correctly too, so it's worth trying.
                                 */
                        }
                        else
@@ -435,7 +435,7 @@ restart:
  * We avoid doing this unless absolutely necessary; in most installations
  * it will never happen.  The reason is that updating existing pg_enum
  * entries creates hazards for other backends that are concurrently reading
- * pg_enum with SnapshotNow semantics.  A concurrent SnapshotNow scan could
+ * pg_enum with SnapshotNow semantics. A concurrent SnapshotNow scan could
  * see both old and new versions of an updated row as valid, or neither of
  * them, if the commit happens between scanning the two versions.  It's
  * also quite likely for a concurrent scan to see an inconsistent set of
@@ -510,10 +510,10 @@ oid_cmp(const void *p1, const void *p2)
 static int
 sort_order_cmp(const void *p1, const void *p2)
 {
-       HeapTuple               v1 = *((const HeapTuple *) p1);
-       HeapTuple               v2 = *((const HeapTuple *) p2);
-       Form_pg_enum    en1 = (Form_pg_enum) GETSTRUCT(v1);
-       Form_pg_enum    en2 = (Form_pg_enum) GETSTRUCT(v2);
+       HeapTuple       v1 = *((const HeapTuple *) p1);
+       HeapTuple       v2 = *((const HeapTuple *) p2);
+       Form_pg_enum en1 = (Form_pg_enum) GETSTRUCT(v1);
+       Form_pg_enum en2 = (Form_pg_enum) GETSTRUCT(v2);
 
        if (en1->enumsortorder < en2->enumsortorder)
                return -1;
index 6138165cc376e759a2697ed821d0dd60e88b5f4a..47a8ff4d989675381524d3ff596ee3bd86bb6449 100644 (file)
@@ -842,8 +842,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
                if (!haspolyarg)
                {
                        /*
-                        * OK to do full precheck: analyze and rewrite the queries,
-                        * then verify the result type.
+                        * OK to do full precheck: analyze and rewrite the queries, then
+                        * verify the result type.
                         */
                        SQLFunctionParseInfoPtr pinfo;
 
@@ -858,7 +858,7 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
 
                                querytree_sublist = pg_analyze_and_rewrite_params(parsetree,
                                                                                                                                  prosrc,
-                                                                                                                                 (ParserSetupHook) sql_fn_parser_setup,
+                                                                          (ParserSetupHook) sql_fn_parser_setup,
                                                                                                                                  pinfo);
                                querytree_list = list_concat(querytree_list,
                                                                                         querytree_sublist);
index 06301c075bbfc85bee0ade4ccbab6ee9b1c1584c..9e35e73f9cf9349565f206b67d2c8bb27130df8a 100644 (file)
@@ -115,7 +115,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId)
        values[i++] = ObjectIdGetDatum(InvalidOid); /* typbasetype */
        values[i++] = Int32GetDatum(-1);        /* typtypmod */
        values[i++] = Int32GetDatum(0);         /* typndims */
-       values[i++] = ObjectIdGetDatum(InvalidOid);     /* typcollation */
+       values[i++] = ObjectIdGetDatum(InvalidOid); /* typcollation */
        nulls[i++] = true;                      /* typdefaultbin */
        nulls[i++] = true;                      /* typdefault */
 
@@ -352,7 +352,7 @@ TypeCreate(Oid newTypeOid,
        values[i++] = ObjectIdGetDatum(baseType);       /* typbasetype */
        values[i++] = Int32GetDatum(typeMod);           /* typtypmod */
        values[i++] = Int32GetDatum(typNDims);          /* typndims */
-       values[i++] = ObjectIdGetDatum(typeCollation);  /* typcollation */
+       values[i++] = ObjectIdGetDatum(typeCollation);          /* typcollation */
 
        /*
         * initialize the default binary value for this type.  Check for nulls of
index 221f9f5c12ce7f293174eb923458b669f682fa67..57987be2c0aa922e964ae8c2add3035c092400c2 100644 (file)
@@ -119,7 +119,7 @@ RelationCreateStorage(RelFileNode rnode, char relpersistence)
                        break;
                default:
                        elog(ERROR, "invalid relpersistence: %c", relpersistence);
-                       return;                 /* placate compiler */
+                       return;                         /* placate compiler */
        }
 
        srel = smgropen(rnode, backend);
@@ -379,7 +379,7 @@ smgrDoPendingDeletes(bool isCommit)
  * *ptr is set to point to a freshly-palloc'd array of RelFileNodes.
  * If there are no relations to be deleted, *ptr is set to NULL.
  *
- * Only non-temporary relations are included in the returned list.  This is OK
+ * Only non-temporary relations are included in the returned list.     This is OK
  * because the list is used only in contexts where temporary relations don't
  * matter: we're either writing to the two-phase state file (and transactions
  * that have touched temp tables can't be prepared) or we're writing to xlog
index 5d5496df98963ccb0e808234f9e6667acafd65dd..452ca9bef02a31fa1a652879970f394a4aae002a 100644 (file)
@@ -279,7 +279,7 @@ create_toast_table(Relation rel, Oid toastOid, Oid toastIndexOid, Datum reloptio
                                                           list_make2("chunk_id", "chunk_seq"),
                                                           BTREE_AM_OID,
                                                           rel->rd_rel->reltablespace,
-                                                          collationObjectId, classObjectId, coloptions, (Datum) 0,
+                                        collationObjectId, classObjectId, coloptions, (Datum) 0,
                                                           true, false, false, false,
                                                           true, false, false);
 
index 99fdd7dba30aa2c08249b0cfcfbbceee303a346b..215e21cae0826b870fce4856e7ed366f03c82a2b 100644 (file)
@@ -282,26 +282,26 @@ AlterObjectNamespace_oid(Oid classId, Oid objid, Oid nspOid)
        switch (getObjectClass(&dep))
        {
                case OCLASS_CLASS:
-               {
-                       Relation rel;
-                       Relation classRel;
+                       {
+                               Relation        rel;
+                               Relation        classRel;
 
-                       rel = relation_open(objid, AccessExclusiveLock);
-                       oldNspOid = RelationGetNamespace(rel);
+                               rel = relation_open(objid, AccessExclusiveLock);
+                               oldNspOid = RelationGetNamespace(rel);
 
-                       classRel = heap_open(RelationRelationId, RowExclusiveLock);
+                               classRel = heap_open(RelationRelationId, RowExclusiveLock);
 
-                       AlterRelationNamespaceInternal(classRel,
-                                                                                  objid,
-                                                                                  oldNspOid,
-                                                                                  nspOid,
-                                                                                  true);
+                               AlterRelationNamespaceInternal(classRel,
+                                                                                          objid,
+                                                                                          oldNspOid,
+                                                                                          nspOid,
+                                                                                          true);
 
-                       heap_close(classRel, RowExclusiveLock);
+                               heap_close(classRel, RowExclusiveLock);
 
-                       relation_close(rel, NoLock);
-                       break;
-               }
+                               relation_close(rel, NoLock);
+                               break;
+                       }
 
                case OCLASS_PROC:
                        oldNspOid = AlterFunctionNamespace_oid(objid, nspOid);
@@ -386,9 +386,11 @@ AlterObjectNamespace(Relation rel, int oidCacheId, int nameCacheId,
 {
        Oid                     classId = RelationGetRelid(rel);
        Oid                     oldNspOid;
-       Datum       name, namespace;
-       bool        isnull;
-       HeapTuple       tup, newtup;
+       Datum           name,
+                               namespace;
+       bool            isnull;
+       HeapTuple       tup,
+                               newtup;
        Datum      *values;
        bool       *nulls;
        bool       *replaces;
@@ -410,7 +412,7 @@ AlterObjectNamespace(Relation rel, int oidCacheId, int nameCacheId,
        /* Permission checks ... superusers can always do it */
        if (!superuser())
        {
-               Datum       owner;
+               Datum           owner;
                Oid                     ownerId;
                AclResult       aclresult;
 
index 774bb044715123b7f7f120e2a91eedc3687d7975..dde301b89aa2b73254dde0a9dc4dc3c81819799d 100644 (file)
@@ -95,7 +95,7 @@ static void compute_index_stats(Relation onerel, double totalrows,
                                        HeapTuple *rows, int numrows,
                                        MemoryContext col_context);
 static VacAttrStats *examine_attribute(Relation onerel, int attnum,
-                                                                          Node *index_expr);
+                                 Node *index_expr);
 static int acquire_sample_rows(Relation onerel, HeapTuple *rows,
                                        int targrows, double *totalrows, double *totaldeadrows);
 static double random_fract(void);
@@ -160,8 +160,8 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt,
                if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
                        ereport(LOG,
                                        (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-                                        errmsg("skipping analyze of \"%s\" --- lock not available",
-                                               vacstmt->relation->relname)));
+                                 errmsg("skipping analyze of \"%s\" --- lock not available",
+                                                vacstmt->relation->relname)));
        }
        if (!onerel)
                return;
@@ -853,10 +853,10 @@ examine_attribute(Relation onerel, int attnum, Node *index_expr)
        /*
         * When analyzing an expression index, believe the expression tree's type
         * not the column datatype --- the latter might be the opckeytype storage
-        * type of the opclass, which is not interesting for our purposes.  (Note:
+        * type of the opclass, which is not interesting for our purposes.      (Note:
         * if we did anything with non-expression index columns, we'd need to
         * figure out where to get the correct type info from, but for now that's
-        * not a problem.)  It's not clear whether anyone will care about the
+        * not a problem.)      It's not clear whether anyone will care about the
         * typmod, but we store that too just in case.
         */
        if (index_expr)
index 4c4f356e79086699518651e6fb8fb52ca96be130..2cc2aaa8f64052b0044823624fbc3a0545e9fb65 100644 (file)
@@ -718,7 +718,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
        TransactionId OldestXmin;
        TransactionId FreezeXid;
        RewriteState rwstate;
-       bool             use_sort;
+       bool            use_sort;
        Tuplesortstate *tuplesort;
        double          num_tuples = 0,
                                tups_vacuumed = 0,
@@ -813,11 +813,11 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
        rwstate = begin_heap_rewrite(NewHeap, OldestXmin, FreezeXid, use_wal);
 
        /*
-        * Decide whether to use an indexscan or seqscan-and-optional-sort to
-        * scan the OldHeap.  We know how to use a sort to duplicate the ordering
-        * of a btree index, and will use seqscan-and-sort for that case if the
-        * planner tells us it's cheaper.  Otherwise, always indexscan if an
-        * index is provided, else plain seqscan.
+        * Decide whether to use an indexscan or seqscan-and-optional-sort to scan
+        * the OldHeap.  We know how to use a sort to duplicate the ordering of a
+        * btree index, and will use seqscan-and-sort for that case if the planner
+        * tells us it's cheaper.  Otherwise, always indexscan if an index is
+        * provided, else plain seqscan.
         */
        if (OldIndex != NULL && OldIndex->rd_rel->relam == BTREE_AM_OID)
                use_sort = plan_cluster_use_sort(OIDOldHeap, OIDOldIndex);
@@ -869,8 +869,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
        /*
         * Scan through the OldHeap, either in OldIndex order or sequentially;
         * copy each tuple into the NewHeap, or transiently to the tuplesort
-        * module.  Note that we don't bother sorting dead tuples (they won't
-        * get to the new table anyway).
+        * module.      Note that we don't bother sorting dead tuples (they won't get
+        * to the new table anyway).
         */
        for (;;)
        {
@@ -984,8 +984,8 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
                heap_endscan(heapScan);
 
        /*
-        * In scan-and-sort mode, complete the sort, then read out all live
-        * tuples from the tuplestore and write them to the new relation.
+        * In scan-and-sort mode, complete the sort, then read out all live tuples
+        * from the tuplestore and write them to the new relation.
         */
        if (tuplesort != NULL)
        {
@@ -1554,7 +1554,7 @@ reform_and_rewrite_tuple(HeapTuple tuple,
                                                 bool newRelHasOids, RewriteState rwstate)
 {
        HeapTuple       copiedTuple;
-       int             i;
+       int                     i;
 
        heap_deform_tuple(tuple, oldTupDesc, values, isnull);
 
index 2a6938fd04bcd1f757e4d6562fc3389093155022..7f8a108374ec8a307607aa1b20b2240c821b7868 100644 (file)
@@ -34,7 +34,7 @@
 #include "utils/syscache.h"
 
 static void AlterCollationOwner_internal(Relation rel, Oid collationOid,
-                                                         Oid newOwnerId);
+                                                        Oid newOwnerId);
 
 /*
  * CREATE COLLATION
@@ -46,10 +46,10 @@ DefineCollation(List *names, List *parameters)
        Oid                     collNamespace;
        AclResult       aclresult;
        ListCell   *pl;
-       DefElem    *fromEl = NULL;
-       DefElem    *localeEl = NULL;
-       DefElem    *lccollateEl = NULL;
-       DefElem    *lcctypeEl = NULL;
+       DefElem    *fromEl = NULL;
+       DefElem    *localeEl = NULL;
+       DefElem    *lccollateEl = NULL;
+       DefElem    *lcctypeEl = NULL;
        char       *collcollate = NULL;
        char       *collctype = NULL;
        Oid                     newoid;
@@ -63,7 +63,7 @@ DefineCollation(List *names, List *parameters)
 
        foreach(pl, parameters)
        {
-               DefElem    *defel = (DefElem *) lfirst(pl);
+               DefElem    *defel = (DefElem *) lfirst(pl);
                DefElem   **defelp;
 
                if (pg_strcasecmp(defel->defname, "from") == 0)
@@ -97,7 +97,7 @@ DefineCollation(List *names, List *parameters)
                Oid                     collid;
                HeapTuple       tp;
 
-               collid =  get_collation_oid(defGetQualifiedName(fromEl), false);
+               collid = get_collation_oid(defGetQualifiedName(fromEl), false);
                tp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
                if (!HeapTupleIsValid(tp))
                        elog(ERROR, "cache lookup failed for collation %u", collid);
@@ -123,7 +123,7 @@ DefineCollation(List *names, List *parameters)
        if (!collcollate)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                                errmsg("parameter \"lc_collate\" parameter must be specified")));
+                       errmsg("parameter \"lc_collate\" parameter must be specified")));
 
        if (!collctype)
                ereport(ERROR,
@@ -391,7 +391,7 @@ AlterCollationNamespace(List *name, const char *newschema)
 Oid
 AlterCollationNamespace_oid(Oid collOid, Oid newNspOid)
 {
-       Oid         oldNspOid;
+       Oid                     oldNspOid;
        Relation        rel;
        char       *collation_name;
 
index 3fbeefa018b828b7d47f23acb63d1bf357f5b1cb..d09bef06824611c55a12289c923bcf461904bbf4 100644 (file)
@@ -37,8 +37,8 @@
 void
 CommentObject(CommentStmt *stmt)
 {
-       ObjectAddress   address;
-       Relation                relation;
+       ObjectAddress address;
+       Relation        relation;
 
        /*
         * When loading a dump, we may see a COMMENT ON DATABASE for the old name
@@ -46,12 +46,13 @@ CommentObject(CommentStmt *stmt)
         * (which is really pg_restore's fault, but for now we will work around
         * the problem here).  Consensus is that the best fix is to treat wrong
         * database name as a WARNING not an ERROR; hence, the following special
-        * case.  (If the length of stmt->objname is not 1, get_object_address will
-        * throw an error below; that's OK.)
+        * case.  (If the length of stmt->objname is not 1, get_object_address
+        * will throw an error below; that's OK.)
         */
        if (stmt->objtype == OBJECT_DATABASE && list_length(stmt->objname) == 1)
        {
-               char   *database = strVal(linitial(stmt->objname));
+               char       *database = strVal(linitial(stmt->objname));
+
                if (!OidIsValid(get_database_oid(database, true)))
                {
                        ereport(WARNING,
@@ -62,10 +63,10 @@ CommentObject(CommentStmt *stmt)
        }
 
        /*
-        * Translate the parser representation that identifies this object into
-        * an ObjectAddress.  get_object_address() will throw an error if the
-        * object does not exist, and will also acquire a lock on the target
-     * to guard against concurrent DROP operations.
+        * Translate the parser representation that identifies this object into an
+        * ObjectAddress.  get_object_address() will throw an error if the object
+        * does not exist, and will also acquire a lock on the target to guard
+        * against concurrent DROP operations.
         */
        address = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
                                                                 &relation, ShareUpdateExclusiveLock);
@@ -78,6 +79,7 @@ CommentObject(CommentStmt *stmt)
        switch (stmt->objtype)
        {
                case OBJECT_COLUMN:
+
                        /*
                         * Allow comments only on columns of tables, views, composite
                         * types, and foreign tables (which are the only relkinds for
index b5e4420ca8d983464a695acfcd87b11cc709b3e5..2c1c6da90092787b8187bdc26286b1c0a0cccd0c 100644 (file)
@@ -335,7 +335,8 @@ AlterConversionOwner_internal(Relation rel, Oid conversionOid, Oid newOwnerId)
 void
 AlterConversionNamespace(List *name, const char *newschema)
 {
-       Oid                     convOid, nspOid;
+       Oid                     convOid,
+                               nspOid;
        Relation        rel;
 
        rel = heap_open(ConversionRelationId, RowExclusiveLock);
@@ -361,7 +362,7 @@ AlterConversionNamespace(List *name, const char *newschema)
 Oid
 AlterConversionNamespace_oid(Oid convOid, Oid newNspOid)
 {
-       Oid         oldNspOid;
+       Oid                     oldNspOid;
        Relation        rel;
 
        rel = heap_open(ConversionRelationId, RowExclusiveLock);
index 3af0b097198df0939e56d1bdf8b5e0073eb27215..57429035e895e5b74f158e005b726ea3d40915ce 100644 (file)
@@ -115,7 +115,7 @@ typedef struct CopyStateData
        char       *quote;                      /* CSV quote char (must be 1 byte) */
        char       *escape;                     /* CSV escape char (must be 1 byte) */
        List       *force_quote;        /* list of column names */
-       bool            force_quote_all; /* FORCE QUOTE *? */
+       bool            force_quote_all;        /* FORCE QUOTE *? */
        bool       *force_quote_flags;          /* per-column CSV FQ flags */
        List       *force_notnull;      /* list of column names */
        bool       *force_notnull_flags;        /* per-column CSV FNN flags */
@@ -161,8 +161,8 @@ typedef struct CopyStateData
 
        /* field raw data pointers found by COPY FROM */
 
-       int max_fields;
-       char ** raw_fields;
+       int                     max_fields;
+       char      **raw_fields;
 
        /*
         * Similarly, line_buf holds the whole input line being processed. The
@@ -266,10 +266,10 @@ static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0";
 
 /* non-export function prototypes */
 static CopyState BeginCopy(bool is_from, Relation rel, Node *raw_query,
-                               const char *queryString, List *attnamelist, List *options);
+                 const char *queryString, List *attnamelist, List *options);
 static void EndCopy(CopyState cstate);
 static CopyState BeginCopyTo(Relation rel, Node *query, const char *queryString,
-                               const char *filename, List *attnamelist, List *options);
+                       const char *filename, List *attnamelist, List *options);
 static void EndCopyTo(CopyState cstate);
 static uint64 DoCopyTo(CopyState cstate);
 static uint64 CopyTo(CopyState cstate);
@@ -278,8 +278,8 @@ static void CopyOneRowTo(CopyState cstate, Oid tupleOid,
 static uint64 CopyFrom(CopyState cstate);
 static bool CopyReadLine(CopyState cstate);
 static bool CopyReadLineText(CopyState cstate);
-static int CopyReadAttributesText(CopyState cstate);
-static int CopyReadAttributesCSV(CopyState cstate);
+static int     CopyReadAttributesText(CopyState cstate);
+static int     CopyReadAttributesCSV(CopyState cstate);
 static Datum CopyReadBinaryAttribute(CopyState cstate,
                                                int column_no, FmgrInfo *flinfo,
                                                Oid typioparam, int32 typmod,
@@ -748,17 +748,17 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
 
        if (stmt->relation)
        {
-               TupleDesc               tupDesc;
-               AclMode                 required_access = (is_from ? ACL_INSERT : ACL_SELECT);
-               RangeTblEntry  *rte;
-               List               *attnums;
-               ListCell           *cur;
+               TupleDesc       tupDesc;
+               AclMode         required_access = (is_from ? ACL_INSERT : ACL_SELECT);
+               RangeTblEntry *rte;
+               List       *attnums;
+               ListCell   *cur;
 
                Assert(!stmt->query);
 
                /* Open and lock the relation, using the appropriate lock type. */
                rel = heap_openrv(stmt->relation,
-                                                        (is_from ? RowExclusiveLock : AccessShareLock));
+                                                 (is_from ? RowExclusiveLock : AccessShareLock));
 
                rte = makeNode(RangeTblEntry);
                rte->rtekind = RTE_RELATION;
@@ -770,8 +770,8 @@ DoCopy(const CopyStmt *stmt, const char *queryString)
                attnums = CopyGetAttnums(tupDesc, rel, stmt->attlist);
                foreach(cur, attnums)
                {
-                       int             attno = lfirst_int(cur) -
-                                                       FirstLowInvalidHeapAttributeNumber;
+                       int                     attno = lfirst_int(cur) -
+                       FirstLowInvalidHeapAttributeNumber;
 
                        if (is_from)
                                rte->modifiedCols = bms_add_member(rte->modifiedCols, attno);
@@ -1136,8 +1136,8 @@ BeginCopy(bool is_from,
        cstate = (CopyStateData *) palloc0(sizeof(CopyStateData));
 
        /*
-        * We allocate everything used by a cstate in a new memory context.
-        * This avoids memory leaks during repeated use of COPY in a query.
+        * We allocate everything used by a cstate in a new memory context. This
+        * avoids memory leaks during repeated use of COPY in a query.
         */
        cstate->copycontext = AllocSetContextCreate(CurrentMemoryContext,
                                                                                                "COPY",
@@ -1300,9 +1300,9 @@ BeginCopy(bool is_from,
                cstate->file_encoding = pg_get_client_encoding();
 
        /*
-        * Set up encoding conversion info.  Even if the file and server
-        * encodings are the same, we must apply pg_any_to_server() to validate
-        * data in multibyte encodings.
+        * Set up encoding conversion info.  Even if the file and server encodings
+        * are the same, we must apply pg_any_to_server() to validate data in
+        * multibyte encodings.
         */
        cstate->need_transcoding =
                (cstate->file_encoding != GetDatabaseEncoding() ||
@@ -1552,8 +1552,8 @@ CopyTo(CopyState cstate)
                 */
                if (cstate->need_transcoding)
                        cstate->null_print_client = pg_server_to_any(cstate->null_print,
-                                                                                                                cstate->null_print_len,
-                                                                                                                cstate->file_encoding);
+                                                                                                         cstate->null_print_len,
+                                                                                                         cstate->file_encoding);
 
                /* if a header has been requested send the line */
                if (cstate->header_line)
@@ -2001,9 +2001,9 @@ CopyFrom(CopyState cstate)
                {
                        slot = ExecBRInsertTriggers(estate, resultRelInfo, slot);
 
-                       if (slot == NULL)               /* "do nothing" */
+                       if (slot == NULL)       /* "do nothing" */
                                skip_tuple = true;
-                       else                                    /* trigger might have changed tuple */
+                       else    /* trigger might have changed tuple */
                                tuple = ExecMaterializeSlot(slot);
                }
 
@@ -2159,7 +2159,7 @@ BeginCopyFrom(Relation rel,
                        {
                                /* Initialize expressions in copycontext. */
                                defexprs[num_defaults] = ExecInitExpr(
-                                                               expression_planner((Expr *) defexpr), NULL);
+                                                                expression_planner((Expr *) defexpr), NULL);
                                defmap[num_defaults] = attnum - 1;
                                num_defaults++;
                        }
@@ -2255,7 +2255,7 @@ BeginCopyFrom(Relation rel,
        if (!cstate->binary)
        {
                AttrNumber      attr_count = list_length(cstate->attnumlist);
-               int     nfields = cstate->file_has_oids ? (attr_count + 1) : attr_count;
+               int                     nfields = cstate->file_has_oids ? (attr_count + 1) : attr_count;
 
                cstate->max_fields = nfields;
                cstate->raw_fields = (char **) palloc(nfields * sizeof(char *));
@@ -2291,7 +2291,7 @@ NextCopyFromRawFields(CopyState cstate, char ***fields, int *nfields)
        {
                cstate->cur_lineno++;
                if (CopyReadLine(cstate))
-                       return false;   /* done */
+                       return false;           /* done */
        }
 
        cstate->cur_lineno++;
@@ -2300,9 +2300,9 @@ NextCopyFromRawFields(CopyState cstate, char ***fields, int *nfields)
        done = CopyReadLine(cstate);
 
        /*
-        * EOF at start of line means we're done.  If we see EOF after
-        * some characters, we act as though it was newline followed by
-        * EOF, ie, process the line and then exit loop on next iteration.
+        * EOF at start of line means we're done.  If we see EOF after some
+        * characters, we act as though it was newline followed by EOF, ie,
+        * process the line and then exit loop on next iteration.
         */
        if (done && cstate->line_buf.len == 0)
                return false;
@@ -2341,7 +2341,7 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
        FmgrInfo   *in_functions = cstate->in_functions;
        Oid                *typioparams = cstate->typioparams;
        int                     i;
-       int         nfields;
+       int                     nfields;
        bool            isnull;
        bool            file_has_oids = cstate->file_has_oids;
        int                *defmap = cstate->defmap;
@@ -2456,18 +2456,18 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
                if (fld_count == -1)
                {
                        /*
-                        * Received EOF marker.  In a V3-protocol copy, wait for
-                        * the protocol-level EOF, and complain if it doesn't come
-                        * immediately.  This ensures that we correctly handle
-                        * CopyFail, if client chooses to send that now.
+                        * Received EOF marker.  In a V3-protocol copy, wait for the
+                        * protocol-level EOF, and complain if it doesn't come
+                        * immediately.  This ensures that we correctly handle CopyFail,
+                        * if client chooses to send that now.
                         *
-                        * Note that we MUST NOT try to read more data in an
-                        * old-protocol copy, since there is no protocol-level EOF
-                        * marker then.  We could go either way for copy from file,
-                        * but choose to throw error if there's data after the EOF
-                        * marker, for consistency with the new-protocol case.
+                        * Note that we MUST NOT try to read more data in an old-protocol
+                        * copy, since there is no protocol-level EOF marker then.      We
+                        * could go either way for copy from file, but choose to throw
+                        * error if there's data after the EOF marker, for consistency
+                        * with the new-protocol case.
                         */
-                       char    dummy;
+                       char            dummy;
 
                        if (cstate->copy_dest != COPY_OLD_FE &&
                                CopyGetData(cstate, &dummy, 1, 1) > 0)
@@ -2485,14 +2485,14 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
 
                if (file_has_oids)
                {
-                       Oid             loaded_oid;
+                       Oid                     loaded_oid;
 
                        cstate->cur_attname = "oid";
                        loaded_oid =
                                DatumGetObjectId(CopyReadBinaryAttribute(cstate,
                                                                                                                 0,
-                                                                                                                &cstate->oid_in_function,
-                                                                                                                cstate->oid_typioparam,
+                                                                                                       &cstate->oid_in_function,
+                                                                                                         cstate->oid_typioparam,
                                                                                                                 -1,
                                                                                                                 &isnull));
                        if (isnull || loaded_oid == InvalidOid)
@@ -2524,8 +2524,8 @@ NextCopyFrom(CopyState cstate, ExprContext *econtext,
 
        /*
         * Now compute and insert any defaults available for the columns not
-        * provided by the input data.  Anything not processed here or above
-        * will remain NULL.
+        * provided by the input data.  Anything not processed here or above will
+        * remain NULL.
         */
        for (i = 0; i < num_defaults; i++)
        {
@@ -3023,12 +3023,12 @@ GetDecimalFromHex(char hex)
  * performing de-escaping as needed.
  *
  * The input is in line_buf.  We use attribute_buf to hold the result
- * strings.  cstate->raw_fields[k] is set to point to the k'th attribute 
- * string, or NULL when the input matches the null marker string.  
+ * strings.  cstate->raw_fields[k] is set to point to the k'th attribute
+ * string, or NULL when the input matches the null marker string.
  * This array is expanded as necessary.
  *
- * (Note that the caller cannot check for nulls since the returned 
- * string would be the post-de-escaping equivalent, which may look 
+ * (Note that the caller cannot check for nulls since the returned
+ * string would be the post-de-escaping equivalent, which may look
  * the same as some valid data string.)
  *
  * delim is the column delimiter string (must be just one byte for now).
@@ -3090,8 +3090,8 @@ CopyReadAttributesText(CopyState cstate)
                if (fieldno >= cstate->max_fields)
                {
                        cstate->max_fields *= 2;
-                       cstate->raw_fields = 
-                               repalloc(cstate->raw_fields, cstate->max_fields*sizeof(char *));
+                       cstate->raw_fields =
+                               repalloc(cstate->raw_fields, cstate->max_fields * sizeof(char *));
                }
 
                /* Remember start of field on both input and output sides */
@@ -3307,8 +3307,8 @@ CopyReadAttributesCSV(CopyState cstate)
                if (fieldno >= cstate->max_fields)
                {
                        cstate->max_fields *= 2;
-                       cstate->raw_fields = 
-                               repalloc(cstate->raw_fields, cstate->max_fields*sizeof(char *));
+                       cstate->raw_fields =
+                               repalloc(cstate->raw_fields, cstate->max_fields * sizeof(char *));
                }
 
                /* Remember start of field on both input and output sides */
index 87d9e545b4fb698424967021036d9168c9e83507..f319eb539c319a386f589b370b6a8d9ccbfe216d 100644 (file)
@@ -680,8 +680,8 @@ createdb(const CreatedbStmt *stmt)
 void
 check_encoding_locale_matches(int encoding, const char *collate, const char *ctype)
 {
-       int ctype_encoding = pg_get_encoding_from_locale(ctype, true);
-       int collate_encoding = pg_get_encoding_from_locale(collate, true);
+       int                     ctype_encoding = pg_get_encoding_from_locale(ctype, true);
+       int                     collate_encoding = pg_get_encoding_from_locale(collate, true);
 
        if (!(ctype_encoding == encoding ||
                  ctype_encoding == PG_SQL_ASCII ||
@@ -1849,10 +1849,10 @@ get_database_oid(const char *dbname, bool missing_ok)
        heap_close(pg_database, AccessShareLock);
 
        if (!OidIsValid(oid) && !missing_ok)
-        ereport(ERROR,
-                (errcode(ERRCODE_UNDEFINED_DATABASE),
-                 errmsg("database \"%s\" does not exist",
-                        dbname)));
+               ereport(ERROR,
+                               (errcode(ERRCODE_UNDEFINED_DATABASE),
+                                errmsg("database \"%s\" does not exist",
+                                               dbname)));
 
        return oid;
 }
index 1d9586f07d44d1a780f990e3cbe192c5b09a4d16..7a361585bddf5a210ba2d7b33309d0fa20fd4a6c 100644 (file)
@@ -59,26 +59,26 @@ static void ExplainNode(PlanState *planstate, List *ancestors,
                        const char *relationship, const char *plan_name,
                        ExplainState *es);
 static void show_plan_tlist(PlanState *planstate, List *ancestors,
-                                                       ExplainState *es);
+                               ExplainState *es);
 static void show_expression(Node *node, const char *qlabel,
                                PlanState *planstate, List *ancestors,
                                bool useprefix, ExplainState *es);
 static void show_qual(List *qual, const char *qlabel,
-                                         PlanState *planstate, List *ancestors,
-                                         bool useprefix, ExplainState *es);
+                 PlanState *planstate, List *ancestors,
+                 bool useprefix, ExplainState *es);
 static void show_scan_qual(List *qual, const char *qlabel,
-                                                  PlanState *planstate, List *ancestors,
-                                                  ExplainState *es);
+                          PlanState *planstate, List *ancestors,
+                          ExplainState *es);
 static void show_upper_qual(List *qual, const char *qlabel,
-                                                       PlanState *planstate, List *ancestors,
-                                                       ExplainState *es);
+                               PlanState *planstate, List *ancestors,
+                               ExplainState *es);
 static void show_sort_keys(SortState *sortstate, List *ancestors,
-                                                  ExplainState *es);
+                          ExplainState *es);
 static void show_merge_append_keys(MergeAppendState *mstate, List *ancestors,
-                                                                  ExplainState *es);
+                                          ExplainState *es);
 static void show_sort_keys_common(PlanState *planstate,
-                                                                 int nkeys, AttrNumber *keycols,
-                                                                 List *ancestors, ExplainState *es);
+                                         int nkeys, AttrNumber *keycols,
+                                         List *ancestors, ExplainState *es);
 static void show_sort_info(SortState *sortstate, ExplainState *es);
 static void show_hash_info(HashState *hashstate, ExplainState *es);
 static void show_foreignscan_info(ForeignScanState *fsstate, ExplainState *es);
@@ -89,7 +89,7 @@ static void ExplainTargetRel(Plan *plan, Index rti, ExplainState *es);
 static void ExplainMemberNodes(List *plans, PlanState **planstates,
                                   List *ancestors, ExplainState *es);
 static void ExplainSubPlans(List *plans, List *ancestors,
-                                                       const char *relationship, ExplainState *es);
+                               const char *relationship, ExplainState *es);
 static void ExplainProperty(const char *qlabel, const char *value,
                                bool numeric, ExplainState *es);
 static void ExplainOpenGroup(const char *objtype, const char *labelname,
@@ -1358,7 +1358,7 @@ show_scan_qual(List *qual, const char *qlabel,
 {
        bool            useprefix;
 
-       useprefix = (IsA(planstate->plan, SubqueryScan) || es->verbose);
+       useprefix = (IsA(planstate->plan, SubqueryScan) ||es->verbose);
        show_qual(qual, qlabel, planstate, ancestors, useprefix, es);
 }
 
index 7c3e8107deec84eb892b03626dd8c1d26f467bf0..d848926ae55ef8ec2b341bd55cea813fc487cdb8 100644 (file)
@@ -56,8 +56,8 @@
 
 
 /* Globally visible state variables */
-bool                   creating_extension = false;
-Oid                            CurrentExtensionObject = InvalidOid;
+bool           creating_extension = false;
+Oid                    CurrentExtensionObject = InvalidOid;
 
 /*
  * Internal data structure to hold the results of parsing a control file
@@ -66,8 +66,8 @@ typedef struct ExtensionControlFile
 {
        char       *name;                       /* name of the extension */
        char       *directory;          /* directory for script files */
-       char       *default_version; /* default install target version, if any */
-       char       *module_pathname; /* string to substitute for MODULE_PATHNAME */
+       char       *default_version;    /* default install target version, if any */
+       char       *module_pathname;    /* string to substitute for MODULE_PATHNAME */
        char       *comment;            /* comment, if any */
        char       *schema;                     /* target schema (allowed if !relocatable) */
        bool            relocatable;    /* is ALTER EXTENSION SET SCHEMA supported? */
@@ -85,9 +85,9 @@ typedef struct ExtensionVersionInfo
        List       *reachable;          /* List of ExtensionVersionInfo's */
        bool            installable;    /* does this version have an install script? */
        /* working state for Dijkstra's algorithm: */
-       bool            distance_known; /* is distance from start known yet? */
+       bool            distance_known; /* is distance from start known yet? */
        int                     distance;               /* current worst-case distance estimate */
-       struct ExtensionVersionInfo *previous; /* current best predecessor */
+       struct ExtensionVersionInfo *previous;          /* current best predecessor */
 } ExtensionVersionInfo;
 
 /* Local functions */
@@ -107,7 +107,7 @@ static void ApplyExtensionUpdates(Oid extensionOid,
 /*
  * get_extension_oid - given an extension name, look up the OID
  *
- * If missing_ok is false, throw an error if extension name not found.  If
+ * If missing_ok is false, throw an error if extension name not found. If
  * true, just return InvalidOid.
  */
 Oid
@@ -142,10 +142,10 @@ get_extension_oid(const char *extname, bool missing_ok)
        heap_close(rel, AccessShareLock);
 
        if (!OidIsValid(result) && !missing_ok)
-        ereport(ERROR,
-                (errcode(ERRCODE_UNDEFINED_OBJECT),
-                 errmsg("extension \"%s\" does not exist",
-                        extname)));
+               ereport(ERROR,
+                               (errcode(ERRCODE_UNDEFINED_OBJECT),
+                                errmsg("extension \"%s\" does not exist",
+                                               extname)));
 
        return result;
 }
@@ -237,8 +237,8 @@ check_valid_extension_name(const char *extensionname)
        int                     namelen = strlen(extensionname);
 
        /*
-        * Disallow empty names (the parser rejects empty identifiers anyway,
-        * but let's check).
+        * Disallow empty names (the parser rejects empty identifiers anyway, but
+        * let's check).
         */
        if (namelen == 0)
                ereport(ERROR,
@@ -256,16 +256,16 @@ check_valid_extension_name(const char *extensionname)
                                 errdetail("Extension names must not contain \"--\".")));
 
        /*
-        * No leading or trailing dash either.  (We could probably allow this,
-        * but it would require much care in filename parsing and would make
-        * filenames visually if not formally ambiguous.  Since there's no
-        * real-world use case, let's just forbid it.)
+        * No leading or trailing dash either.  (We could probably allow this, but
+        * it would require much care in filename parsing and would make filenames
+        * visually if not formally ambiguous.  Since there's no real-world use
+        * case, let's just forbid it.)
         */
        if (extensionname[0] == '-' || extensionname[namelen - 1] == '-')
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("invalid extension name: \"%s\"", extensionname),
-                                errdetail("Extension names must not begin or end with \"-\".")));
+                       errdetail("Extension names must not begin or end with \"-\".")));
 
        /*
         * No directory separators either (this is sufficient to prevent ".."
@@ -290,7 +290,7 @@ check_valid_version_name(const char *versionname)
        if (namelen == 0)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("invalid extension version name: \"%s\"", versionname),
+                          errmsg("invalid extension version name: \"%s\"", versionname),
                                 errdetail("Version names must not be empty.")));
 
        /*
@@ -299,7 +299,7 @@ check_valid_version_name(const char *versionname)
        if (strstr(versionname, "--"))
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("invalid extension version name: \"%s\"", versionname),
+                          errmsg("invalid extension version name: \"%s\"", versionname),
                                 errdetail("Version names must not contain \"--\".")));
 
        /*
@@ -308,8 +308,8 @@ check_valid_version_name(const char *versionname)
        if (versionname[0] == '-' || versionname[namelen - 1] == '-')
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("invalid extension version name: \"%s\"", versionname),
-                                errdetail("Version names must not begin or end with \"-\".")));
+                          errmsg("invalid extension version name: \"%s\"", versionname),
+                         errdetail("Version names must not begin or end with \"-\".")));
 
        /*
         * No directory separators either (this is sufficient to prevent ".."
@@ -318,7 +318,7 @@ check_valid_version_name(const char *versionname)
        if (first_dir_separator(versionname) != NULL)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("invalid extension version name: \"%s\"", versionname),
+                          errmsg("invalid extension version name: \"%s\"", versionname),
                                 errdetail("Version names must not contain directory separator characters.")));
 }
 
@@ -386,7 +386,7 @@ get_extension_script_directory(ExtensionControlFile *control)
 
        get_share_path(my_exec_path, sharepath);
        result = (char *) palloc(MAXPGPATH);
-    snprintf(result, MAXPGPATH, "%s/%s", sharepath, control->directory);
+       snprintf(result, MAXPGPATH, "%s/%s", sharepath, control->directory);
 
        return result;
 }
@@ -434,7 +434,7 @@ get_extension_script_filename(ExtensionControlFile *control,
 
 /*
  * Parse contents of primary or auxiliary control file, and fill in
- * fields of *control.  We parse primary file if version == NULL,
+ * fields of *control. We parse primary file if version == NULL,
  * else the optional auxiliary file for that version.
  *
  * Control files are supposed to be very short, half a dozen lines,
@@ -448,8 +448,8 @@ parse_extension_control_file(ExtensionControlFile *control,
        char       *filename;
        FILE       *file;
        ConfigVariable *item,
-                                  *head = NULL,
-                                  *tail = NULL;
+                          *head = NULL,
+                          *tail = NULL;
 
        /*
         * Locate the file to read.  Auxiliary files are optional.
@@ -553,8 +553,8 @@ parse_extension_control_file(ExtensionControlFile *control,
                                /* syntax error in name list */
                                ereport(ERROR,
                                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                                errmsg("parameter \"%s\" must be a list of extension names",
-                                                               item->name)));
+                                errmsg("parameter \"%s\" must be a list of extension names",
+                                               item->name)));
                        }
                }
                else
@@ -632,12 +632,12 @@ static char *
 read_extension_script_file(const ExtensionControlFile *control,
                                                   const char *filename)
 {
-       int         src_encoding;
-       int         dest_encoding = GetDatabaseEncoding();
-       bytea      *content;
+       int                     src_encoding;
+       int                     dest_encoding = GetDatabaseEncoding();
+       bytea      *content;
        char       *src_str;
-       char       *dest_str;
-       int         len;
+       char       *dest_str;
+       int                     len;
 
        content = read_binary_file(filename, 0, -1);
 
@@ -675,7 +675,7 @@ read_extension_script_file(const ExtensionControlFile *control,
  * filename is used only to report errors.
  *
  * Note: it's tempting to just use SPI to execute the string, but that does
- * not work very well.  The really serious problem is that SPI will parse,
+ * not work very well. The really serious problem is that SPI will parse,
  * analyze, and plan the whole string before executing any of it; of course
  * this fails if there are any plannable statements referring to objects
  * created earlier in the script.  A lesser annoyance is that SPI insists
@@ -774,7 +774,7 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
                                                 List *requiredSchemas,
                                                 const char *schemaName, Oid schemaOid)
 {
-       char       *filename;
+       char       *filename;
        char       *save_client_min_messages,
                           *save_log_min_messages,
                           *save_search_path;
@@ -809,8 +809,8 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
         * so that we won't spam the user with useless NOTICE messages from common
         * script actions like creating shell types.
         *
-        * We use the equivalent of SET LOCAL to ensure the setting is undone
-        * upon error.
+        * We use the equivalent of SET LOCAL to ensure the setting is undone upon
+        * error.
         */
        save_client_min_messages =
                pstrdup(GetConfigOption("client_min_messages", false));
@@ -832,8 +832,8 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
         * makes the target schema be the default creation target namespace.
         *
         * Note: it might look tempting to use PushOverrideSearchPath for this,
-        * but we cannot do that.  We have to actually set the search_path GUC
-        * in case the extension script examines or changes it.
+        * but we cannot do that.  We have to actually set the search_path GUC in
+        * case the extension script examines or changes it.
         */
        save_search_path = pstrdup(GetConfigOption("search_path", false));
 
@@ -855,32 +855,32 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
        /*
         * Set creating_extension and related variables so that
         * recordDependencyOnCurrentExtension and other functions do the right
-        * things.  On failure, ensure we reset these variables.
+        * things.      On failure, ensure we reset these variables.
         */
        creating_extension = true;
        CurrentExtensionObject = extensionOid;
        PG_TRY();
        {
-               char *sql = read_extension_script_file(control, filename);
+               char       *sql = read_extension_script_file(control, filename);
 
                /*
                 * If it's not relocatable, substitute the target schema name for
                 * occcurrences of @extschema@.
                 *
-                * For a relocatable extension, we just run the script as-is.
-                * There cannot be any need for @extschema@, else it wouldn't
-                * be relocatable.
+                * For a relocatable extension, we just run the script as-is. There
+                * cannot be any need for @extschema@, else it wouldn't be
+                * relocatable.
                 */
                if (!control->relocatable)
                {
-                       const char   *qSchemaName = quote_identifier(schemaName);
+                       const char *qSchemaName = quote_identifier(schemaName);
 
                        sql = text_to_cstring(
-                               DatumGetTextPP(
-                                       DirectFunctionCall3(replace_text,
-                                                                               CStringGetTextDatum(sql),
-                                                                               CStringGetTextDatum("@extschema@"),
-                                                                               CStringGetTextDatum(qSchemaName))));
+                                                                 DatumGetTextPP(
+                                                                                       DirectFunctionCall3(replace_text,
+                                                                                                       CStringGetTextDatum(sql),
+                                                                                 CStringGetTextDatum("@extschema@"),
+                                                                                CStringGetTextDatum(qSchemaName))));
                }
 
                /*
@@ -890,11 +890,11 @@ execute_extension_script(Oid extensionOid, ExtensionControlFile *control,
                if (control->module_pathname)
                {
                        sql = text_to_cstring(
-                               DatumGetTextPP(
-                                       DirectFunctionCall3(replace_text,
-                                                                               CStringGetTextDatum(sql),
-                                                                               CStringGetTextDatum("MODULE_PATHNAME"),
-                                                                               CStringGetTextDatum(control->module_pathname))));
+                                                                 DatumGetTextPP(
+                                                                                       DirectFunctionCall3(replace_text,
+                                                                                                       CStringGetTextDatum(sql),
+                                                                         CStringGetTextDatum("MODULE_PATHNAME"),
+                                                       CStringGetTextDatum(control->module_pathname))));
                }
 
                execute_sql_string(sql, filename);
@@ -1004,7 +1004,7 @@ get_ext_ver_list(ExtensionControlFile *control)
        struct dirent *de;
 
        location = get_extension_script_directory(control);
-       dir  = AllocateDir(location);
+       dir = AllocateDir(location);
        while ((de = ReadDir(dir, location)) != NULL)
        {
                char       *vername;
@@ -1094,7 +1094,7 @@ identify_update_path(ExtensionControlFile *control,
  * is still good.
  *
  * Result is a List of names of versions to transition through (the initial
- * version is *not* included).  Returns NIL if no such path.
+ * version is *not* included). Returns NIL if no such path.
  */
 static List *
 find_update_path(List *evi_list,
@@ -1132,7 +1132,7 @@ find_update_path(List *evi_list,
                foreach(lc, evi->reachable)
                {
                        ExtensionVersionInfo *evi2 = (ExtensionVersionInfo *) lfirst(lc);
-                       int             newdist;
+                       int                     newdist;
 
                        newdist = evi->distance + 1;
                        if (newdist < evi2->distance)
@@ -1178,10 +1178,10 @@ CreateExtension(CreateExtensionStmt *stmt)
        DefElem    *d_schema = NULL;
        DefElem    *d_new_version = NULL;
        DefElem    *d_old_version = NULL;
-       char       *schemaName;
+       char       *schemaName;
        Oid                     schemaOid;
-       char       *versionName;
-       char       *oldVersionName;
+       char       *versionName;
+       char       *oldVersionName;
        Oid                     extowner = GetUserId();
        ExtensionControlFile *pcontrol;
        ExtensionControlFile *control;
@@ -1195,10 +1195,10 @@ CreateExtension(CreateExtensionStmt *stmt)
        check_valid_extension_name(stmt->extname);
 
        /*
-        * Check for duplicate extension name.  The unique index on
+        * Check for duplicate extension name.  The unique index on
         * pg_extension.extname would catch this anyway, and serves as a backstop
-        * in case of race conditions; but this is a friendlier error message,
-        * and besides we need a check to support IF NOT EXISTS.
+        * in case of race conditions; but this is a friendlier error message, and
+        * besides we need a check to support IF NOT EXISTS.
         */
        if (get_extension_oid(stmt->extname, true) != InvalidOid)
        {
@@ -1218,8 +1218,8 @@ CreateExtension(CreateExtensionStmt *stmt)
        }
 
        /*
-        * We use global variables to track the extension being created, so we
-        * can create only one extension at the same time.
+        * We use global variables to track the extension being created, so we can
+        * create only one extension at the same time.
         */
        if (creating_extension)
                ereport(ERROR,
@@ -1306,8 +1306,8 @@ CreateExtension(CreateExtensionStmt *stmt)
                if (list_length(updateVersions) == 1)
                {
                        /*
-                        * Simple case where there's just one update script to run.
-                        * We will not need any follow-on update steps.
+                        * Simple case where there's just one update script to run. We
+                        * will not need any follow-on update steps.
                         */
                        Assert(strcmp((char *) linitial(updateVersions), versionName) == 0);
                        updateVersions = NIL;
@@ -1351,9 +1351,9 @@ CreateExtension(CreateExtensionStmt *stmt)
                        strcmp(control->schema, schemaName) != 0)
                        ereport(ERROR,
                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                        errmsg("extension \"%s\" must be installed in schema \"%s\"",
-                                                       control->name,
-                                                       control->schema)));
+                               errmsg("extension \"%s\" must be installed in schema \"%s\"",
+                                          control->name,
+                                          control->schema)));
 
                /* If the user is giving us the schema name, it must exist already */
                schemaOid = get_namespace_oid(schemaName, false);
@@ -1362,7 +1362,7 @@ CreateExtension(CreateExtensionStmt *stmt)
        {
                /*
                 * The extension is not relocatable and the author gave us a schema
-                * for it.  We create the schema here if it does not already exist.
+                * for it.      We create the schema here if it does not already exist.
                 */
                schemaName = control->schema;
                schemaOid = get_namespace_oid(schemaName, true);
@@ -1380,13 +1380,13 @@ CreateExtension(CreateExtensionStmt *stmt)
                 * Else, use the current default creation namespace, which is the
                 * first explicit entry in the search_path.
                 */
-               List *search_path = fetch_search_path(false);
+               List       *search_path = fetch_search_path(false);
 
-               if (search_path == NIL)                         /* probably can't happen */
+               if (search_path == NIL) /* probably can't happen */
                        elog(ERROR, "there is no default creation target");
                schemaOid = linitial_oid(search_path);
                schemaName = get_namespace_name(schemaOid);
-               if (schemaName == NULL)                         /* recently-deleted namespace? */
+               if (schemaName == NULL) /* recently-deleted namespace? */
                        elog(ERROR, "there is no default creation target");
 
                list_free(search_path);
@@ -1397,13 +1397,13 @@ CreateExtension(CreateExtensionStmt *stmt)
         * extension script actually creates any objects there, it will fail if
         * the user doesn't have such permissions.  But there are cases such as
         * procedural languages where it's convenient to set schema = pg_catalog
-        * yet we don't want to restrict the command to users with ACL_CREATE
-        * for pg_catalog.
+        * yet we don't want to restrict the command to users with ACL_CREATE for
+        * pg_catalog.
         */
 
        /*
-        * Look up the prerequisite extensions, and build lists of their OIDs
-        * and the OIDs of their target schemas.
+        * Look up the prerequisite extensions, and build lists of their OIDs and
+        * the OIDs of their target schemas.
         */
        requiredExtensions = NIL;
        requiredSchemas = NIL;
@@ -1453,8 +1453,8 @@ CreateExtension(CreateExtensionStmt *stmt)
                                                         schemaName, schemaOid);
 
        /*
-        * If additional update scripts have to be executed, apply the updates
-        * as though a series of ALTER EXTENSION UPDATE commands were given
+        * If additional update scripts have to be executed, apply the updates as
+        * though a series of ALTER EXTENSION UPDATE commands were given
         */
        ApplyExtensionUpdates(extensionOid, pcontrol,
                                                  versionName, updateVersions);
@@ -1653,7 +1653,7 @@ RemoveExtensionById(Oid extId)
 
 /*
  * This function lists the available extensions (one row per primary control
- * file in the control directory).  We parse each control file and report the
+ * file in the control directory).     We parse each control file and report the
  * interesting fields.
  *
  * The system view pg_available_extensions provides a user interface to this
@@ -1663,14 +1663,14 @@ RemoveExtensionById(Oid extId)
 Datum
 pg_available_extensions(PG_FUNCTION_ARGS)
 {
-       ReturnSetInfo      *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-       TupleDesc                       tupdesc;
-       Tuplestorestate    *tupstore;
-       MemoryContext           per_query_ctx;
-       MemoryContext           oldcontext;
-       char                       *location;
-       DIR                                *dir;
-       struct dirent      *de;
+       ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+       TupleDesc       tupdesc;
+       Tuplestorestate *tupstore;
+       MemoryContext per_query_ctx;
+       MemoryContext oldcontext;
+       char       *location;
+       DIR                *dir;
+       struct dirent *de;
 
        /* check to see if caller supports us returning a tuplestore */
        if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@@ -1699,11 +1699,11 @@ pg_available_extensions(PG_FUNCTION_ARGS)
        MemoryContextSwitchTo(oldcontext);
 
        location = get_extension_control_directory();
-       dir  = AllocateDir(location);
+       dir = AllocateDir(location);
 
        /*
-        * If the control directory doesn't exist, we want to silently return
-        * an empty set.  Any other error will be reported by ReadDir.
+        * If the control directory doesn't exist, we want to silently return an
+        * empty set.  Any other error will be reported by ReadDir.
         */
        if (dir == NULL && errno == ENOENT)
        {
@@ -1762,7 +1762,7 @@ pg_available_extensions(PG_FUNCTION_ARGS)
 
 /*
  * This function lists the available extension versions (one row per
- * extension installation script).  For each version, we parse the related
+ * extension installation script).     For each version, we parse the related
  * control file(s) and report the interesting fields.
  *
  * The system view pg_available_extension_versions provides a user interface
@@ -1772,14 +1772,14 @@ pg_available_extensions(PG_FUNCTION_ARGS)
 Datum
 pg_available_extension_versions(PG_FUNCTION_ARGS)
 {
-       ReturnSetInfo      *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-       TupleDesc                       tupdesc;
-       Tuplestorestate    *tupstore;
-       MemoryContext           per_query_ctx;
-       MemoryContext           oldcontext;
-       char                       *location;
-       DIR                                *dir;
-       struct dirent      *de;
+       ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+       TupleDesc       tupdesc;
+       Tuplestorestate *tupstore;
+       MemoryContext per_query_ctx;
+       MemoryContext oldcontext;
+       char       *location;
+       DIR                *dir;
+       struct dirent *de;
 
        /* check to see if caller supports us returning a tuplestore */
        if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@@ -1808,11 +1808,11 @@ pg_available_extension_versions(PG_FUNCTION_ARGS)
        MemoryContextSwitchTo(oldcontext);
 
        location = get_extension_control_directory();
-       dir  = AllocateDir(location);
+       dir = AllocateDir(location);
 
        /*
-        * If the control directory doesn't exist, we want to silently return
-        * an empty set.  Any other error will be reported by ReadDir.
+        * If the control directory doesn't exist, we want to silently return an
+        * empty set.  Any other error will be reported by ReadDir.
         */
        if (dir == NULL && errno == ENOENT)
        {
@@ -1867,7 +1867,7 @@ get_available_versions_for_extension(ExtensionControlFile *pcontrol,
        struct dirent *de;
 
        location = get_extension_script_directory(pcontrol);
-       dir  = AllocateDir(location);
+       dir = AllocateDir(location);
        /* Note this will fail if script directory doesn't exist */
        while ((de = ReadDir(dir, location)) != NULL)
        {
@@ -1962,11 +1962,11 @@ Datum
 pg_extension_update_paths(PG_FUNCTION_ARGS)
 {
        Name            extname = PG_GETARG_NAME(0);
-       ReturnSetInfo      *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
-       TupleDesc                       tupdesc;
-       Tuplestorestate    *tupstore;
-       MemoryContext           per_query_ctx;
-       MemoryContext           oldcontext;
+       ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+       TupleDesc       tupdesc;
+       Tuplestorestate *tupstore;
+       MemoryContext per_query_ctx;
+       MemoryContext oldcontext;
        List       *evi_list;
        ExtensionControlFile *control;
        ListCell   *lc1;
@@ -2079,8 +2079,8 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
        text       *wherecond = PG_GETARG_TEXT_P(1);
        char       *tablename;
        Relation        extRel;
-       ScanKeyData     key[1];
-       SysScanDesc     extScan;
+       ScanKeyData key[1];
+       SysScanDesc extScan;
        HeapTuple       extTup;
        Datum           arrayDatum;
        Datum           elementDatum;
@@ -2092,8 +2092,8 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
        ArrayType  *a;
 
        /*
-        * We only allow this to be called from an extension's SQL script.
-        * We shouldn't need any permissions check beyond that.
+        * We only allow this to be called from an extension's SQL script. We
+        * shouldn't need any permissions check beyond that.
         */
        if (!creating_extension)
                ereport(ERROR,
@@ -2103,8 +2103,8 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
 
        /*
         * Check that the table exists and is a member of the extension being
-        * created.  This ensures that we don't need to register a dependency
-        * to protect the extconfig entry.
+        * created.  This ensures that we don't need to register a dependency to
+        * protect the extconfig entry.
         */
        tablename = get_rel_name(tableoid);
        if (tablename == NULL)
@@ -2115,12 +2115,12 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
                CurrentExtensionObject)
                ereport(ERROR,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                errmsg("table \"%s\" is not a member of the extension being created",
-                                               tablename)));
+               errmsg("table \"%s\" is not a member of the extension being created",
+                          tablename)));
 
        /*
-        * Add the table OID and WHERE condition to the extension's extconfig
-        * and extcondition arrays.
+        * Add the table OID and WHERE condition to the extension's extconfig and
+        * extcondition arrays.
         */
 
        /* Find the pg_extension tuple */
@@ -2136,7 +2136,7 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
 
        extTup = systable_getnext(extScan);
 
-       if (!HeapTupleIsValid(extTup)) /* should not happen */
+       if (!HeapTupleIsValid(extTup))          /* should not happen */
                elog(ERROR, "extension with oid %u does not exist",
                         CurrentExtensionObject);
 
@@ -2162,7 +2162,7 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
                Assert(ARR_NDIM(a) == 1);
                Assert(ARR_LBOUND(a)[0] == 1);
 
-               arrayIndex = ARR_DIMS(a)[0] + 1; /* add after end */
+               arrayIndex = ARR_DIMS(a)[0] + 1;                /* add after end */
 
                a = array_set(a, 1, &arrayIndex,
                                          elementDatum,
@@ -2193,7 +2193,7 @@ pg_extension_config_dump(PG_FUNCTION_ARGS)
                Assert(ARR_NDIM(a) == 1);
                Assert(ARR_LBOUND(a)[0] == 1);
 
-               arrayIndex = ARR_DIMS(a)[0] + 1; /* add after end */
+               arrayIndex = ARR_DIMS(a)[0] + 1;                /* add after end */
 
                a = array_set(a, 1, &arrayIndex,
                                          elementDatum,
@@ -2231,12 +2231,12 @@ AlterExtensionNamespace(List *names, const char *newschema)
        Oid                     oldNspOid = InvalidOid;
        AclResult       aclresult;
        Relation        extRel;
-       ScanKeyData     key[2];
-       SysScanDesc     extScan;
+       ScanKeyData key[2];
+       SysScanDesc extScan;
        HeapTuple       extTup;
        Form_pg_extension extForm;
        Relation        depRel;
-       SysScanDesc     depScan;
+       SysScanDesc depScan;
        HeapTuple       depTup;
 
        if (list_length(names) != 1)
@@ -2275,7 +2275,7 @@ AlterExtensionNamespace(List *names, const char *newschema)
 
        extTup = systable_getnext(extScan);
 
-       if (!HeapTupleIsValid(extTup)) /* should not happen */
+       if (!HeapTupleIsValid(extTup))          /* should not happen */
                elog(ERROR, "extension with oid %u does not exist", extensionOid);
 
        /* Copy tuple so we can modify it below */
@@ -2285,8 +2285,8 @@ AlterExtensionNamespace(List *names, const char *newschema)
        systable_endscan(extScan);
 
        /*
-        * If the extension is already in the target schema, just silently
-        * do nothing.
+        * If the extension is already in the target schema, just silently do
+        * nothing.
         */
        if (extForm->extnamespace == nspOid)
        {
@@ -2323,10 +2323,10 @@ AlterExtensionNamespace(List *names, const char *newschema)
        {
                Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
                ObjectAddress dep;
-               Oid dep_oldNspOid;
+               Oid                     dep_oldNspOid;
 
                /*
-                * Ignore non-membership dependencies.  (Currently, the only other
+                * Ignore non-membership dependencies.  (Currently, the only other
                 * case we could see here is a normal dependency from another
                 * extension.)
                 */
@@ -2388,13 +2388,13 @@ void
 ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
 {
        DefElem    *d_new_version = NULL;
-       char       *versionName;
-       char       *oldVersionName;
+       char       *versionName;
+       char       *oldVersionName;
        ExtensionControlFile *control;
        Oid                     extensionOid;
        Relation        extRel;
-       ScanKeyData     key[1];
-       SysScanDesc     extScan;
+       ScanKeyData key[1];
+       SysScanDesc extScan;
        HeapTuple       extTup;
        List       *updateVersions;
        Datum           datum;
@@ -2402,8 +2402,8 @@ ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
        ListCell   *lc;
 
        /*
-        * We use global variables to track the extension being created, so we
-        * can create/update only one extension at the same time.
+        * We use global variables to track the extension being created, so we can
+        * create/update only one extension at the same time.
         */
        if (creating_extension)
                ereport(ERROR,
@@ -2426,10 +2426,10 @@ ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
        extTup = systable_getnext(extScan);
 
        if (!HeapTupleIsValid(extTup))
-        ereport(ERROR,
-                (errcode(ERRCODE_UNDEFINED_OBJECT),
-                 errmsg("extension \"%s\" does not exist",
-                        stmt->extname)));
+               ereport(ERROR,
+                               (errcode(ERRCODE_UNDEFINED_OBJECT),
+                                errmsg("extension \"%s\" does not exist",
+                                               stmt->extname)));
 
        extensionOid = HeapTupleGetOid(extTup);
 
@@ -2499,8 +2499,8 @@ ExecAlterExtensionStmt(AlterExtensionStmt *stmt)
        if (strcmp(oldVersionName, versionName) == 0)
        {
                ereport(NOTICE,
-                               (errmsg("version \"%s\" of extension \"%s\" is already installed",
-                                               versionName, stmt->extname)));
+                  (errmsg("version \"%s\" of extension \"%s\" is already installed",
+                                  versionName, stmt->extname)));
                return;
        }
 
@@ -2545,8 +2545,8 @@ ApplyExtensionUpdates(Oid extensionOid,
                List       *requiredExtensions;
                List       *requiredSchemas;
                Relation        extRel;
-               ScanKeyData     key[1];
-               SysScanDesc     extScan;
+               ScanKeyData key[1];
+               SysScanDesc extScan;
                HeapTuple       extTup;
                Form_pg_extension extForm;
                Datum           values[Natts_pg_extension];
@@ -2573,7 +2573,7 @@ ApplyExtensionUpdates(Oid extensionOid,
 
                extTup = systable_getnext(extScan);
 
-               if (!HeapTupleIsValid(extTup)) /* should not happen */
+               if (!HeapTupleIsValid(extTup))  /* should not happen */
                        elog(ERROR, "extension with oid %u does not exist",
                                 extensionOid);
 
@@ -2668,9 +2668,9 @@ ApplyExtensionUpdates(Oid extensionOid,
                                                                 schemaName, schemaOid);
 
                /*
-                * Update prior-version name and loop around.  Since execute_sql_string
-                * did a final CommandCounterIncrement, we can update the pg_extension
-                * row again.
+                * Update prior-version name and loop around.  Since
+                * execute_sql_string did a final CommandCounterIncrement, we can
+                * update the pg_extension row again.
                 */
                oldVersionName = versionName;
        }
@@ -2682,10 +2682,10 @@ ApplyExtensionUpdates(Oid extensionOid,
 void
 ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)
 {
-       ObjectAddress   extension;
-       ObjectAddress   object;
-       Relation                relation;
-       Oid                             oldExtension;
+       ObjectAddress extension;
+       ObjectAddress object;
+       Relation        relation;
+       Oid                     oldExtension;
 
        extension.classId = ExtensionRelationId;
        extension.objectId = get_extension_oid(stmt->extname, false);
@@ -2697,10 +2697,10 @@ ExecAlterExtensionContentsStmt(AlterExtensionContentsStmt *stmt)
                                           stmt->extname);
 
        /*
-        * Translate the parser representation that identifies the object into
-        * an ObjectAddress.  get_object_address() will throw an error if the
-        * object does not exist, and will also acquire a lock on the object to
-        * guard against concurrent DROP and ALTER EXTENSION ADD/DROP operations.
+        * Translate the parser representation that identifies the object into an
+        * ObjectAddress.  get_object_address() will throw an error if the object
+        * does not exist, and will also acquire a lock on the object to guard
+        * against concurrent DROP and ALTER EXTENSION ADD/DROP operations.
         */
        object = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
                                                                &relation, ShareUpdateExclusiveLock);
index 13d6d882f8852989163e04c759bf71410c4977a0..21d52e06ba035fa095ce029e0fc5f49970c2d3ff 100644
@@ -586,8 +586,8 @@ AlterForeignDataWrapper(AlterFdwStmt *stmt)
                 */
                if (OidIsValid(fdwvalidator))
                        ereport(WARNING,
-                                       (errmsg("changing the foreign-data wrapper validator can cause "
-                                                       "the options for dependent objects to become invalid")));
+                        (errmsg("changing the foreign-data wrapper validator can cause "
+                                        "the options for dependent objects to become invalid")));
        }
        else
        {
@@ -643,8 +643,8 @@ AlterForeignDataWrapper(AlterFdwStmt *stmt)
                ObjectAddress referenced;
 
                /*
-                * Flush all existing dependency records of this FDW on functions;
-                * we assume there can be none other than the ones we are fixing.
+                * Flush all existing dependency records of this FDW on functions; we
+                * assume there can be none other than the ones we are fixing.
                 */
                deleteDependencyRecordsForClass(ForeignDataWrapperRelationId,
                                                                                fdwId,
index c8cbe035f057e7103a1d0608b3a68c55f7f7fa66..03da168ff2c14d1ef1e414f7481ba7e72cd377b5 100644
@@ -1665,7 +1665,7 @@ CreateCast(CreateCastStmt *stmt)
                 * We also disallow creating binary-compatibility casts involving
                 * domains.  Casting from a domain to its base type is already
                 * allowed, and casting the other way ought to go through domain
-                * coercion to permit constraint checking.  Again, if you're intent on
+                * coercion to permit constraint checking.      Again, if you're intent on
                 * having your own semantics for that, create a no-op cast function.
                 *
                 * NOTE: if we were to relax this, the above checks for composites
@@ -1830,7 +1830,7 @@ DropCast(DropCastStmt *stmt)
 Oid
 get_cast_oid(Oid sourcetypeid, Oid targettypeid, bool missing_ok)
 {
-       Oid             oid;
+       Oid                     oid;
 
        oid = GetSysCacheOid2(CASTSOURCETARGET,
                                                  ObjectIdGetDatum(sourcetypeid),
index cfcce559675a177d9f9afb0f51c44797c506194e..05e8234a0f223c4616cdcd90a2d746c260d1d497 100644
@@ -395,7 +395,7 @@ DefineIndex(RangeVar *heapRelation,
        indexRelationId =
                index_create(rel, indexRelationName, indexRelationId,
                                         indexInfo, indexColNames,
-                                        accessMethodId, tablespaceId, collationObjectId, classObjectId,
+                         accessMethodId, tablespaceId, collationObjectId, classObjectId,
                                         coloptions, reloptions, primary,
                                         isconstraint, deferrable, initdeferred,
                                         allowSystemTableMods,
@@ -840,14 +840,14 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
                else
                {
                        /* Index expression */
-                       Node   *expr = attribute->expr;
+                       Node       *expr = attribute->expr;
 
                        Assert(expr != NULL);
                        atttype = exprType(expr);
                        attcollation = exprCollation(expr);
 
                        /*
-                        * Strip any top-level COLLATE clause.  This ensures that we treat
+                        * Strip any top-level COLLATE clause.  This ensures that we treat
                         * "x COLLATE y" and "(x COLLATE y)" alike.
                         */
                        while (IsA(expr, CollateExpr))
@@ -864,7 +864,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
                        }
                        else
                        {
-                               indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
+                               indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
                                indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
                                                                                                        expr);
 
@@ -876,7 +876,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
                                if (contain_subplans(expr))
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                        errmsg("cannot use subquery in index expression")));
+                                                errmsg("cannot use subquery in index expression")));
                                if (contain_agg_clause(expr))
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_GROUPING_ERROR),
@@ -904,8 +904,8 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
                /*
                 * Check we have a collation iff it's a collatable type.  The only
                 * expected failures here are (1) COLLATE applied to a noncollatable
-                * type, or (2) index expression had an unresolved collation.  But
-                * we might as well code this to be a complete consistency check.
+                * type, or (2) index expression had an unresolved collation.  But we
+                * might as well code this to be a complete consistency check.
                 */
                if (type_is_collatable(atttype))
                {
index 68072dd42184de43b18143d333cc6196eaf0fdf8..aff5ac6ec43ba15e02ac7450a8e76af3b5f25751 100644
@@ -126,7 +126,7 @@ OpFamilyCacheLookup(Oid amID, List *opfamilyname, bool missing_ok)
 
        if (!HeapTupleIsValid(htup) && !missing_ok)
        {
-               HeapTuple amtup;
+               HeapTuple       amtup;
 
                amtup = SearchSysCache1(AMOID, ObjectIdGetDatum(amID));
                if (!HeapTupleIsValid(amtup))
@@ -134,8 +134,8 @@ OpFamilyCacheLookup(Oid amID, List *opfamilyname, bool missing_ok)
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_OBJECT),
                                 errmsg("operator family \"%s\" does not exist for access method \"%s\"",
-                                  NameListToString(opfamilyname),
-                                  NameStr(((Form_pg_am) GETSTRUCT(amtup))->amname))));
+                                               NameListToString(opfamilyname),
+                                               NameStr(((Form_pg_am) GETSTRUCT(amtup))->amname))));
        }
 
        return htup;
@@ -143,7 +143,7 @@ OpFamilyCacheLookup(Oid amID, List *opfamilyname, bool missing_ok)
 
 /*
  * get_opfamily_oid
- *    find an opfamily OID by possibly qualified name
+ *       find an opfamily OID by possibly qualified name
  *
  * If not found, returns InvalidOid if missing_ok, else throws error.
  */
@@ -202,7 +202,7 @@ OpClassCacheLookup(Oid amID, List *opclassname, bool missing_ok)
 
        if (!HeapTupleIsValid(htup) && !missing_ok)
        {
-               HeapTuple amtup;
+               HeapTuple       amtup;
 
                amtup = SearchSysCache1(AMOID, ObjectIdGetDatum(amID));
                if (!HeapTupleIsValid(amtup))
@@ -219,7 +219,7 @@ OpClassCacheLookup(Oid amID, List *opclassname, bool missing_ok)
 
 /*
  * get_opclass_oid
- *    find an opclass OID by possibly qualified name
+ *       find an opclass OID by possibly qualified name
  *
  * If not found, returns InvalidOid if missing_ok, else throws error.
  */
@@ -1088,11 +1088,11 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
        if (OidIsValid(member->sortfamily))
        {
                /*
-                * Ordering op, check index supports that.  (We could perhaps also
+                * Ordering op, check index supports that.      (We could perhaps also
                 * check that the operator returns a type supported by the sortfamily,
                 * but that seems more trouble than it's worth here.  If it does not,
-                * the operator will never be matchable to any ORDER BY clause, but
-                * no worse consequences can ensue.  Also, trying to check that would
+                * the operator will never be matchable to any ORDER BY clause, but no
+                * worse consequences can ensue.  Also, trying to check that would
                 * create an ordering hazard during dump/reload: it's possible that
                 * the family has been created but not yet populated with the required
                 * operators.)
@@ -1108,8 +1108,8 @@ assignOperTypes(OpFamilyMember *member, Oid amoid, Oid typeoid)
                if (!pg_am->amcanorderbyop)
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
-                                        errmsg("access method \"%s\" does not support ordering operators",
-                                                       NameStr(pg_am->amname))));
+                       errmsg("access method \"%s\" does not support ordering operators",
+                                  NameStr(pg_am->amname))));
 
                ReleaseSysCache(amtup);
        }
@@ -1276,7 +1276,7 @@ storeOperators(List *opfamilyname, Oid amoid,
        foreach(l, operators)
        {
                OpFamilyMember *op = (OpFamilyMember *) lfirst(l);
-               char    oppurpose;
+               char            oppurpose;
 
                /*
                 * If adding to an existing family, check for conflict with an
@@ -1566,7 +1566,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
        {
                ereport(NOTICE,
                                (errmsg("operator class \"%s\" does not exist for access method \"%s\"",
-                                       NameListToString(stmt->opclassname), stmt->amname)));
+                                               NameListToString(stmt->opclassname), stmt->amname)));
                return;
        }
 
@@ -1617,7 +1617,7 @@ RemoveOpFamily(RemoveOpFamilyStmt *stmt)
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_OBJECT),
                                 errmsg("operator family \"%s\" does not exist for access method \"%s\"",
-                                  NameListToString(stmt->opfamilyname), stmt->amname)));
+                                               NameListToString(stmt->opfamilyname), stmt->amname)));
                return;
        }
 
@@ -2029,7 +2029,7 @@ AlterOpClassNamespace(List *name, char *access_method, const char *newschema)
 Oid
 AlterOpClassNamespace_oid(Oid opclassOid, Oid newNspOid)
 {
-       Oid         oldNspOid;
+       Oid                     oldNspOid;
        Relation        rel;
 
        rel = heap_open(OperatorClassRelationId, RowExclusiveLock);
@@ -2238,7 +2238,7 @@ AlterOpFamilyNamespace(List *name, char *access_method, const char *newschema)
 Oid
 AlterOpFamilyNamespace_oid(Oid opfamilyOid, Oid newNspOid)
 {
-       Oid         oldNspOid;
+       Oid                     oldNspOid;
        Relation        rel;
 
        rel = heap_open(OperatorFamilyRelationId, RowExclusiveLock);
index b4374a62f4f0830bfb7b91b8326b314584260884..c99de4b240f6a88de5a437e74161e3598d854138 100644
@@ -464,7 +464,8 @@ AlterOperatorNamespace(List *names, List *argtypes, const char *newschema)
        List       *operatorName = names;
        TypeName   *typeName1 = (TypeName *) linitial(argtypes);
        TypeName   *typeName2 = (TypeName *) lsecond(argtypes);
-       Oid                     operOid, nspOid;
+       Oid                     operOid,
+                               nspOid;
        Relation        rel;
 
        rel = heap_open(OperatorRelationId, RowExclusiveLock);
@@ -490,7 +491,7 @@ AlterOperatorNamespace(List *names, List *argtypes, const char *newschema)
 Oid
 AlterOperatorNamespace_oid(Oid operOid, Oid newNspOid)
 {
-       Oid         oldNspOid;
+       Oid                     oldNspOid;
        Relation        rel;
 
        rel = heap_open(OperatorRelationId, RowExclusiveLock);
index 60aca3ce8ec21999ced1b5605afcfe78516a355f..89086aa371738620f049993a328525bc88209a65 100644
@@ -255,10 +255,10 @@ PortalCleanup(Portal portal)
        if (queryDesc)
        {
                /*
-                * Reset the queryDesc before anything else.  This prevents us
-                * from trying to shut down the executor twice, in case of an
-                * error below.  The transaction abort mechanisms will take care
-                * of resource cleanup in such a case.
+                * Reset the queryDesc before anything else.  This prevents us from
+                * trying to shut down the executor twice, in case of an error below.
+                * The transaction abort mechanisms will take care of resource cleanup
+                * in such a case.
                 */
                portal->queryDesc = NULL;
 
index adbf5872f38dc32f4bd8540b1b3fa0a89c65c9d2..dfa2ab00262222a2ff9974c53bad751f5ca82959 100644
@@ -382,7 +382,7 @@ EvaluateParams(PreparedStatement *pstmt, List *params,
        /* sizeof(ParamListInfoData) includes the first array element */
        paramLI = (ParamListInfo)
                palloc(sizeof(ParamListInfoData) +
-                          (num_params - 1) *sizeof(ParamExternData));
+                          (num_params - 1) * sizeof(ParamExternData));
        /* we have static list of params, so no hooks needed */
        paramLI->paramFetch = NULL;
        paramLI->paramFetchArg = NULL;
index 1c96b005d7f5f507bccd748da96ab6911a8f532b..7afb7139a6315347a02dfe90d8eb97a1c9ba0d22 100644
@@ -1,7 +1,7 @@
 /* -------------------------------------------------------------------------
  *
  * seclabel.c
- *    routines to support security label feature.
+ *       routines to support security label feature.
  *
  * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -28,7 +28,7 @@
 typedef struct
 {
        const char *provider_name;
-       check_object_relabel_type       hook;
+       check_object_relabel_type hook;
 } LabelProvider;
 
 static List *label_provider_list = NIL;
@@ -42,9 +42,9 @@ void
 ExecSecLabelStmt(SecLabelStmt *stmt)
 {
        LabelProvider *provider = NULL;
-       ObjectAddress   address;
-       Relation                relation;
-       ListCell           *lc;
+       ObjectAddress address;
+       Relation        relation;
+       ListCell   *lc;
 
        /*
         * Find the named label provider, or if none specified, check whether
@@ -55,16 +55,16 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
                if (label_provider_list == NIL)
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("no security label providers have been loaded")));
+                                        errmsg("no security label providers have been loaded")));
                if (lnext(list_head(label_provider_list)) != NULL)
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                                errmsg("must specify provider when multiple security label providers have been loaded")));
+                                        errmsg("must specify provider when multiple security label providers have been loaded")));
                provider = (LabelProvider *) linitial(label_provider_list);
        }
        else
        {
-               foreach (lc, label_provider_list)
+               foreach(lc, label_provider_list)
                {
                        LabelProvider *lp = lfirst(lc);
 
@@ -82,10 +82,10 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
        }
 
        /*
-        * Translate the parser representation which identifies this object
-        * into an ObjectAddress. get_object_address() will throw an error if
-     * the object does not exist, and will also acquire a lock on the
-     * target to guard against concurrent modifications.
+        * Translate the parser representation which identifies this object into
+        * an ObjectAddress. get_object_address() will throw an error if the
+        * object does not exist, and will also acquire a lock on the target to
+        * guard against concurrent modifications.
         */
        address = get_object_address(stmt->objtype, stmt->objname, stmt->objargs,
                                                                 &relation, ShareUpdateExclusiveLock);
@@ -98,6 +98,7 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
        switch (stmt->objtype)
        {
                case OBJECT_COLUMN:
+
                        /*
                         * Allow security labels only on columns of tables, views,
                         * composite types, and foreign tables (which are the only
@@ -117,7 +118,7 @@ ExecSecLabelStmt(SecLabelStmt *stmt)
        }
 
        /* Provider gets control here, may throw ERROR to veto new label. */
-       (*provider->hook)(&address, stmt->label);
+       (*provider->hook) (&address, stmt->label);
 
        /* Apply new label. */
        SetSecurityLabel(&address, provider->provider_name, stmt->label);
@@ -140,8 +141,8 @@ char *
 GetSecurityLabel(const ObjectAddress *object, const char *provider)
 {
        Relation        pg_seclabel;
-       ScanKeyData     keys[4];
-       SysScanDesc     scan;
+       ScanKeyData keys[4];
+       SysScanDesc scan;
        HeapTuple       tuple;
        Datum           datum;
        bool            isnull;
@@ -196,8 +197,8 @@ SetSecurityLabel(const ObjectAddress *object,
                                 const char *provider, const char *label)
 {
        Relation        pg_seclabel;
-       ScanKeyData     keys[4];
-       SysScanDesc     scan;
+       ScanKeyData keys[4];
+       SysScanDesc scan;
        HeapTuple       oldtup;
        HeapTuple       newtup = NULL;
        Datum           values[Natts_pg_seclabel];
@@ -281,8 +282,8 @@ void
 DeleteSecurityLabel(const ObjectAddress *object)
 {
        Relation        pg_seclabel;
-       ScanKeyData     skey[3];
-       SysScanDesc     scan;
+       ScanKeyData skey[3];
+       SysScanDesc scan;
        HeapTuple       oldtup;
        int                     nkeys;
 
@@ -323,8 +324,8 @@ DeleteSecurityLabel(const ObjectAddress *object)
 void
 register_label_provider(const char *provider_name, check_object_relabel_type hook)
 {
-       LabelProvider  *provider;
-       MemoryContext   oldcxt;
+       LabelProvider *provider;
+       MemoryContext oldcxt;
 
        oldcxt = MemoryContextSwitchTo(TopMemoryContext);
        provider = palloc(sizeof(LabelProvider));
index bfa94a0c1149b740bfd648617fe69c37da9a1523..6a91a102dcd3f4e4d3ceca558c59ed1247bae831 100644
@@ -287,7 +287,7 @@ ResetSequence(Oid seq_relid)
        seq->log_cnt = 1;
 
        /*
-        * Create a new storage file for the sequence.  We want to keep the
+        * Create a new storage file for the sequence.  We want to keep the
         * sequence's relfrozenxid at 0, since it won't contain any unfrozen XIDs.
         */
        RelationSetNewRelfilenode(seq_rel, InvalidTransactionId);
@@ -1037,7 +1037,7 @@ init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
 
        /*
         * If the sequence has been transactionally replaced since we last saw it,
-        * discard any cached-but-unissued values.  We do not touch the currval()
+        * discard any cached-but-unissued values.      We do not touch the currval()
         * state, however.
         */
        if (seqrel->rd_rel->relfilenode != elm->filenode)
index 886b656b4376e51b920714b24ce983c9d4342a26..790bc2a521555627029d7250cca81db9aeb25e67 100644
@@ -286,9 +286,9 @@ static void ATWrongRelkindError(Relation rel, int allowed_targets);
 static void ATSimpleRecursion(List **wqueue, Relation rel,
                                  AlterTableCmd *cmd, bool recurse, LOCKMODE lockmode);
 static void ATTypedTableRecursion(List **wqueue, Relation rel, AlterTableCmd *cmd,
-                                                                 LOCKMODE lockmode);
+                                         LOCKMODE lockmode);
 static List *find_typed_table_dependencies(Oid typeOid, const char *typeName,
-                                                                                  DropBehavior behavior);
+                                                         DropBehavior behavior);
 static void ATPrepAddColumn(List **wqueue, Relation rel, bool recurse, bool recursing,
                                AlterTableCmd *cmd, LOCKMODE lockmode);
 static void ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
@@ -311,7 +311,7 @@ static void ATExecSetOptions(Relation rel, const char *colName,
 static void ATExecSetStorage(Relation rel, const char *colName,
                                 Node *newValue, LOCKMODE lockmode);
 static void ATPrepDropColumn(List **wqueue, Relation rel, bool recurse, bool recursing,
-                                                        AlterTableCmd *cmd, LOCKMODE lockmode);
+                                AlterTableCmd *cmd, LOCKMODE lockmode);
 static void ATExecDropColumn(List **wqueue, Relation rel, const char *colName,
                                 DropBehavior behavior,
                                 bool recurse, bool recursing,
@@ -320,9 +320,9 @@ static void ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
                           IndexStmt *stmt, bool is_rebuild, LOCKMODE lockmode);
 static void ATExecAddConstraint(List **wqueue,
                                        AlteredTableInfo *tab, Relation rel,
-                                       Constraint *newConstraint, bool recurse, LOCKMODE lockmode);
+                                Constraint *newConstraint, bool recurse, LOCKMODE lockmode);
 static void ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
-                          IndexStmt *stmt, LOCKMODE lockmode);
+                                                IndexStmt *stmt, LOCKMODE lockmode);
 static void ATAddCheckConstraint(List **wqueue,
                                         AlteredTableInfo *tab, Relation rel,
                                         Constraint *constr,
@@ -339,7 +339,7 @@ static void ATPrepAlterColumnType(List **wqueue,
                                          AlterTableCmd *cmd, LOCKMODE lockmode);
 static bool ATColumnChangeRequiresRewrite(Node *expr, AttrNumber varattno);
 static void ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
-                                                                 AlterTableCmd *cmd, LOCKMODE lockmode);
+                                         AlterTableCmd *cmd, LOCKMODE lockmode);
 static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode);
 static void ATPostAlterTypeParse(char *cmd, List **wqueue, LOCKMODE lockmode);
 static void change_owner_recurse_to_sequences(Oid relationOid,
@@ -351,7 +351,7 @@ static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel,
 static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace, LOCKMODE lockmode);
 static void ATExecSetRelOptions(Relation rel, List *defList, bool isReset, LOCKMODE lockmode);
 static void ATExecEnableDisableTrigger(Relation rel, char *trigname,
-                                                  char fires_when, bool skip_system, LOCKMODE lockmode);
+                                          char fires_when, bool skip_system, LOCKMODE lockmode);
 static void ATExecEnableDisableRule(Relation rel, char *rulename,
                                                char fires_when, LOCKMODE lockmode);
 static void ATPrepAddInherit(Relation child_rel);
@@ -412,7 +412,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
        /*
         * Check consistency of arguments
         */
-       if (stmt->oncommit != ONCOMMIT_NOOP 
+       if (stmt->oncommit != ONCOMMIT_NOOP
                && stmt->relation->relpersistence != RELPERSISTENCE_TEMP)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
@@ -547,7 +547,7 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId)
                        if (relkind == RELKIND_FOREIGN_TABLE)
                                ereport(ERROR,
                                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                        errmsg("default values on foreign tables are not supported")));
+                                                errmsg("default values on foreign tables are not supported")));
 
                        Assert(colDef->cooked_default == NULL);
 
@@ -706,7 +706,7 @@ DropErrorMsgWrongType(const char *relname, char wrongkind, char rightkind)
 /*
  * RemoveRelations
  *             Implements DROP TABLE, DROP INDEX, DROP SEQUENCE, DROP VIEW,
- *      DROP FOREIGN TABLE
+ *             DROP FOREIGN TABLE
  */
 void
 RemoveRelations(DropStmt *drop)
@@ -1454,11 +1454,11 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
                                if (defCollId != attribute->attcollation)
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_COLLATION_MISMATCH),
-                                                        errmsg("inherited column \"%s\" has a collation conflict",
-                                                                       attributeName),
+                                       errmsg("inherited column \"%s\" has a collation conflict",
+                                                  attributeName),
                                                         errdetail("\"%s\" versus \"%s\"",
                                                                           get_collation_name(defCollId),
-                                                                          get_collation_name(attribute->attcollation))));
+                                                         get_collation_name(attribute->attcollation))));
 
                                /* Copy storage parameter */
                                if (def->storage == 0)
@@ -2061,8 +2061,8 @@ renameatt_internal(Oid myrelid,
                relkind != RELKIND_FOREIGN_TABLE)
                ereport(ERROR,
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                          errmsg("\"%s\" is not a table, view, composite type, index or foreign table",
-                                         RelationGetRelationName(targetrelation))));
+                                errmsg("\"%s\" is not a table, view, composite type, index or foreign table",
+                                               RelationGetRelationName(targetrelation))));
 
        /*
         * permissions checking.  only the owner of a class can change its schema.
@@ -2138,7 +2138,7 @@ renameatt_internal(Oid myrelid,
                ListCell   *lo;
 
                child_oids = find_typed_table_dependencies(targetrelation->rd_rel->reltype,
-                                                                                                  RelationGetRelationName(targetrelation),
+                                                                        RelationGetRelationName(targetrelation),
                                                                                                   behavior);
 
                foreach(lo, child_oids)
@@ -2211,11 +2211,11 @@ void
 renameatt(Oid myrelid, RenameStmt *stmt)
 {
        renameatt_internal(myrelid,
-                                          stmt->subname,               /* old att name */
-                                          stmt->newname,               /* new att name */
-                                          interpretInhOption(stmt->relation->inhOpt),  /* recursive? */
-                                          false,  /* recursing? */
-                                          0,   /* expected inhcount */
+                                          stmt->subname,       /* old att name */
+                                          stmt->newname,       /* new att name */
+                                          interpretInhOption(stmt->relation->inhOpt),          /* recursive? */
+                                          false,       /* recursing? */
+                                          0,           /* expected inhcount */
                                           stmt->behavior);
 }
 
@@ -2460,7 +2460,7 @@ void
 AlterTable(AlterTableStmt *stmt)
 {
        Relation        rel;
-       LOCKMODE lockmode = AlterTableGetLockLevel(stmt->cmds);
+       LOCKMODE        lockmode = AlterTableGetLockLevel(stmt->cmds);
 
        /*
         * Acquire same level of lock as already acquired during parsing.
@@ -2531,7 +2531,7 @@ AlterTable(AlterTableStmt *stmt)
        }
 
        ATController(rel, stmt->cmds, interpretInhOption(stmt->relation->inhOpt),
-                                               lockmode);
+                                lockmode);
 }
 
 /*
@@ -2549,7 +2549,7 @@ void
 AlterTableInternal(Oid relid, List *cmds, bool recurse)
 {
        Relation        rel;
-       LOCKMODE lockmode = AlterTableGetLockLevel(cmds);
+       LOCKMODE        lockmode = AlterTableGetLockLevel(cmds);
 
        rel = relation_open(relid, lockmode);
 
@@ -2581,31 +2581,33 @@ LOCKMODE
 AlterTableGetLockLevel(List *cmds)
 {
        ListCell   *lcmd;
-       LOCKMODE lockmode = ShareUpdateExclusiveLock;
+       LOCKMODE        lockmode = ShareUpdateExclusiveLock;
 
        foreach(lcmd, cmds)
        {
                AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd);
-               LOCKMODE cmd_lockmode  = AccessExclusiveLock; /* default for compiler */
+               LOCKMODE        cmd_lockmode = AccessExclusiveLock; /* default for compiler */
 
                switch (cmd->subtype)
                {
-                       /*
-                        * Need AccessExclusiveLock for these subcommands because they
-                        * affect or potentially affect both read and write operations.
-                        *
-                        * New subcommand types should be added here by default.
-                        */
-                       case AT_AddColumn:                      /* may rewrite heap, in some cases and visible to SELECT */
-                       case AT_DropColumn:                     /* change visible to SELECT */
+                               /*
+                                * Need AccessExclusiveLock for these subcommands because they
+                                * affect or potentially affect both read and write
+                                * operations.
+                                *
+                                * New subcommand types should be added here by default.
+                                */
+                       case AT_AddColumn:      /* may rewrite heap, in some cases and visible
+                                                                * to SELECT */
+                       case AT_DropColumn:     /* change visible to SELECT */
                        case AT_AddColumnToView:        /* CREATE VIEW */
                        case AT_AlterColumnType:        /* must rewrite heap */
                        case AT_DropConstraint:         /* as DROP INDEX */
-                       case AT_AddOids:                        /* must rewrite heap */
-                       case AT_DropOids:                       /* calls AT_DropColumn */
+                       case AT_AddOids:        /* must rewrite heap */
+                       case AT_DropOids:       /* calls AT_DropColumn */
                        case AT_EnableAlwaysRule:       /* may change SELECT rules */
                        case AT_EnableReplicaRule:      /* may change SELECT rules */
-                       case AT_EnableRule:                     /* may change SELECT rules */
+                       case AT_EnableRule:     /* may change SELECT rules */
                        case AT_DisableRule:            /* may change SELECT rules */
                        case AT_ChangeOwner:            /* change visible to SELECT */
                        case AT_SetTableSpace:          /* must rewrite heap */
@@ -2615,12 +2617,12 @@ AlterTableGetLockLevel(List *cmds)
                                cmd_lockmode = AccessExclusiveLock;
                                break;
 
-                       /*
-                        * These subcommands affect write operations only.
-                        */
+                               /*
+                                * These subcommands affect write operations only.
+                                */
                        case AT_ColumnDefault:
-                       case AT_ProcessedConstraint:    /* becomes AT_AddConstraint */
-                       case AT_AddConstraintRecurse:   /* becomes AT_AddConstraint */
+                       case AT_ProcessedConstraint:            /* becomes AT_AddConstraint */
+                       case AT_AddConstraintRecurse:           /* becomes AT_AddConstraint */
                        case AT_EnableTrig:
                        case AT_EnableAlwaysTrig:
                        case AT_EnableReplicaTrig:
@@ -2629,7 +2631,7 @@ AlterTableGetLockLevel(List *cmds)
                        case AT_DisableTrig:
                        case AT_DisableTrigAll:
                        case AT_DisableTrigUser:
-                       case AT_AddIndex:                               /* from ADD CONSTRAINT */
+                       case AT_AddIndex:       /* from ADD CONSTRAINT */
                        case AT_AddIndexConstraint:
                                cmd_lockmode = ShareRowExclusiveLock;
                                break;
@@ -2644,14 +2646,17 @@ AlterTableGetLockLevel(List *cmds)
                                                case CONSTR_EXCLUSION:
                                                case CONSTR_PRIMARY:
                                                case CONSTR_UNIQUE:
+
                                                        /*
                                                         * Cases essentially the same as CREATE INDEX. We
-                                                        * could reduce the lock strength to ShareLock if we
-                                                        * can work out how to allow concurrent catalog updates.
+                                                        * could reduce the lock strength to ShareLock if
+                                                        * we can work out how to allow concurrent catalog
+                                                        * updates.
                                                         */
                                                        cmd_lockmode = ShareRowExclusiveLock;
                                                        break;
                                                case CONSTR_FOREIGN:
+
                                                        /*
                                                         * We add triggers to both tables when we add a
                                                         * Foreign Key, so the lock level must be at least
@@ -2666,26 +2671,29 @@ AlterTableGetLockLevel(List *cmds)
                                }
                                break;
 
-                       /*
-                        * These subcommands affect inheritance behaviour. Queries started before us
-                        * will continue to see the old inheritance behaviour, while queries started
-                        * after we commit will see new behaviour. No need to prevent reads or writes
-                        * to the subtable while we hook it up though. In both cases the parent table
-                        * is locked with AccessShareLock.
-                        */
+                               /*
+                                * These subcommands affect inheritance behaviour. Queries
+                                * started before us will continue to see the old inheritance
+                                * behaviour, while queries started after we commit will see
+                                * new behaviour. No need to prevent reads or writes to the
+                                * subtable while we hook it up though. In both cases the
+                                * parent table is locked with AccessShareLock.
+                                */
                        case AT_AddInherit:
                        case AT_DropInherit:
                                cmd_lockmode = ShareUpdateExclusiveLock;
                                break;
 
-                       /*
-                        * These subcommands affect general strategies for performance and maintenance,
-                        * though don't change the semantic results from normal data reads and writes.
-                        * Delaying an ALTER TABLE behind currently active writes only delays the point
-                        * where the new strategy begins to take effect, so there is no benefit in waiting.
-                        * In this case the minimum restriction applies: we don't currently allow
-                        * concurrent catalog updates.
-                        */
+                               /*
+                                * These subcommands affect general strategies for performance
+                                * and maintenance, though don't change the semantic results
+                                * from normal data reads and writes. Delaying an ALTER TABLE
+                                * behind currently active writes only delays the point where
+                                * the new strategy begins to take effect, so there is no
+                                * benefit in waiting. In this case the minimum restriction
+                                * applies: we don't currently allow concurrent catalog
+                                * updates.
+                                */
                        case AT_SetStatistics:
                        case AT_ClusterOn:
                        case AT_DropCluster:
@@ -2698,7 +2706,7 @@ AlterTableGetLockLevel(List *cmds)
                                cmd_lockmode = ShareUpdateExclusiveLock;
                                break;
 
-                       default:                                /* oops */
+                       default:                        /* oops */
                                elog(ERROR, "unrecognized alter table type: %d",
                                         (int) cmd->subtype);
                                break;
@@ -2773,7 +2781,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
        {
                case AT_AddColumn:              /* ADD COLUMN */
                        ATSimplePermissions(rel,
-                               ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE);
+                                                ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE);
                        ATPrepAddColumn(wqueue, rel, recurse, recursing, cmd, lockmode);
                        /* Recursion occurs during execution phase */
                        pass = AT_PASS_ADD_COL;
@@ -2793,19 +2801,19 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
                         * substitutes default values into INSERTs before it expands
                         * rules.
                         */
-                       ATSimplePermissions(rel, ATT_TABLE|ATT_VIEW);
+                       ATSimplePermissions(rel, ATT_TABLE | ATT_VIEW);
                        ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode);
                        /* No command-specific prep needed */
                        pass = cmd->def ? AT_PASS_ADD_CONSTR : AT_PASS_DROP;
                        break;
                case AT_DropNotNull:    /* ALTER COLUMN DROP NOT NULL */
-                       ATSimplePermissions(rel, ATT_TABLE|ATT_FOREIGN_TABLE);
+                       ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE);
                        ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode);
                        /* No command-specific prep needed */
                        pass = AT_PASS_DROP;
                        break;
                case AT_SetNotNull:             /* ALTER COLUMN SET NOT NULL */
-                       ATSimplePermissions(rel, ATT_TABLE|ATT_FOREIGN_TABLE);
+                       ATSimplePermissions(rel, ATT_TABLE | ATT_FOREIGN_TABLE);
                        ATSimpleRecursion(wqueue, rel, cmd, recurse, lockmode);
                        /* No command-specific prep needed */
                        pass = AT_PASS_ADD_CONSTR;
@@ -2818,7 +2826,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
                        break;
                case AT_SetOptions:             /* ALTER COLUMN SET ( options ) */
                case AT_ResetOptions:   /* ALTER COLUMN RESET ( options ) */
-                       ATSimplePermissions(rel, ATT_TABLE|ATT_INDEX);
+                       ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX);
                        /* This command never recurses */
                        pass = AT_PASS_MISC;
                        break;
@@ -2830,7 +2838,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
                        break;
                case AT_DropColumn:             /* DROP COLUMN */
                        ATSimplePermissions(rel,
-                               ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE);
+                                                ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE);
                        ATPrepDropColumn(wqueue, rel, recurse, recursing, cmd, lockmode);
                        /* Recursion occurs during execution phase */
                        pass = AT_PASS_DROP;
@@ -2849,7 +2857,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
                                cmd->subtype = AT_AddConstraintRecurse;
                        pass = AT_PASS_ADD_CONSTR;
                        break;
-               case AT_AddIndexConstraint:     /* ADD CONSTRAINT USING INDEX */
+               case AT_AddIndexConstraint:             /* ADD CONSTRAINT USING INDEX */
                        ATSimplePermissions(rel, ATT_TABLE);
                        /* This command never recurses */
                        /* No command-specific prep needed */
@@ -2865,7 +2873,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
                        break;
                case AT_AlterColumnType:                /* ALTER COLUMN TYPE */
                        ATSimplePermissions(rel,
-                               ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE);
+                                                ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE);
                        /* Performs own recursion */
                        ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd, lockmode);
                        pass = AT_PASS_ALTER_TYPE;
@@ -2904,14 +2912,14 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
                        pass = AT_PASS_DROP;
                        break;
                case AT_SetTableSpace:  /* SET TABLESPACE */
-                       ATSimplePermissions(rel, ATT_TABLE|ATT_INDEX);
+                       ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX);
                        /* This command never recurses */
                        ATPrepSetTableSpace(tab, rel, cmd->name, lockmode);
                        pass = AT_PASS_MISC;    /* doesn't actually matter */
                        break;
                case AT_SetRelOptions:  /* SET (...) */
                case AT_ResetRelOptions:                /* RESET (...) */
-                       ATSimplePermissions(rel, ATT_TABLE|ATT_INDEX);
+                       ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX);
                        /* This command never recurses */
                        /* No command-specific prep needed */
                        pass = AT_PASS_MISC;
@@ -3072,11 +3080,11 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
                        break;
                case AT_DropColumn:             /* DROP COLUMN */
                        ATExecDropColumn(wqueue, rel, cmd->name,
-                                                        cmd->behavior, false, false, cmd->missing_ok, lockmode);
+                                        cmd->behavior, false, false, cmd->missing_ok, lockmode);
                        break;
                case AT_DropColumnRecurse:              /* DROP COLUMN with recursion */
                        ATExecDropColumn(wqueue, rel, cmd->name,
-                                                        cmd->behavior, true, false, cmd->missing_ok, lockmode);
+                                         cmd->behavior, true, false, cmd->missing_ok, lockmode);
                        break;
                case AT_AddIndex:               /* ADD INDEX */
                        ATExecAddIndex(tab, rel, (IndexStmt *) cmd->def, false, lockmode);
@@ -3092,7 +3100,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
                        ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def,
                                                                true, lockmode);
                        break;
-               case AT_AddIndexConstraint:     /* ADD CONSTRAINT USING INDEX */
+               case AT_AddIndexConstraint:             /* ADD CONSTRAINT USING INDEX */
                        ATExecAddIndexConstraint(tab, rel, (IndexStmt *) cmd->def, lockmode);
                        break;
                case AT_ValidateConstraint:
@@ -3156,7 +3164,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
 
                case AT_EnableTrig:             /* ENABLE TRIGGER name */
                        ATExecEnableDisableTrigger(rel, cmd->name,
-                                                                          TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
+                                                                  TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
                        break;
                case AT_EnableAlwaysTrig:               /* ENABLE ALWAYS TRIGGER name */
                        ATExecEnableDisableTrigger(rel, cmd->name,
@@ -3164,7 +3172,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
                        break;
                case AT_EnableReplicaTrig:              /* ENABLE REPLICA TRIGGER name */
                        ATExecEnableDisableTrigger(rel, cmd->name,
-                                                                          TRIGGER_FIRES_ON_REPLICA, false, lockmode);
+                                                                 TRIGGER_FIRES_ON_REPLICA, false, lockmode);
                        break;
                case AT_DisableTrig:    /* DISABLE TRIGGER name */
                        ATExecEnableDisableTrigger(rel, cmd->name,
@@ -3172,7 +3180,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
                        break;
                case AT_EnableTrigAll:  /* ENABLE TRIGGER ALL */
                        ATExecEnableDisableTrigger(rel, NULL,
-                                                                          TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
+                                                                  TRIGGER_FIRES_ON_ORIGIN, false, lockmode);
                        break;
                case AT_DisableTrigAll: /* DISABLE TRIGGER ALL */
                        ATExecEnableDisableTrigger(rel, NULL,
@@ -3180,7 +3188,7 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel,
                        break;
                case AT_EnableTrigUser: /* ENABLE TRIGGER USER */
                        ATExecEnableDisableTrigger(rel, NULL,
-                                                                          TRIGGER_FIRES_ON_ORIGIN, true, lockmode);
+                                                                       TRIGGER_FIRES_ON_ORIGIN, true, lockmode);
                        break;
                case AT_DisableTrigUser:                /* DISABLE TRIGGER USER */
                        ATExecEnableDisableTrigger(rel, NULL,
@@ -3254,8 +3262,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode)
                 * (Eventually we'll probably need to check for composite type
                 * dependencies even when we're just scanning the table without a
                 * rewrite, but at the moment a composite type does not enforce any
-                * constraints, so it's not necessary/appropriate to enforce them
-                * just during ALTER.)
+                * constraints, so it's not necessary/appropriate to enforce them just
+                * during ALTER.)
                 */
                if (tab->newvals != NIL || tab->rewrite)
                {
@@ -3386,8 +3394,8 @@ ATRewriteTables(List **wqueue, LOCKMODE lockmode)
                                                                                         con->conid);
 
                                /*
-                                * No need to mark the constraint row as validated,
-                                * we did that when we inserted the row earlier.
+                                * No need to mark the constraint row as validated, we did
+                                * that when we inserted the row earlier.
                                 */
 
                                heap_close(refrel, NoLock);
@@ -3723,7 +3731,7 @@ ATGetQueueEntry(List **wqueue, Relation rel)
 static void
 ATSimplePermissions(Relation rel, int allowed_targets)
 {
-       int             actual_target;
+       int                     actual_target;
 
        switch (rel->rd_rel->relkind)
        {
@@ -3779,16 +3787,16 @@ ATWrongRelkindError(Relation rel, int allowed_targets)
                case ATT_TABLE:
                        msg = _("\"%s\" is not a table");
                        break;
-               case ATT_TABLE|ATT_INDEX:
+               case ATT_TABLE | ATT_INDEX:
                        msg = _("\"%s\" is not a table or index");
                        break;
-               case ATT_TABLE|ATT_VIEW:
+               case ATT_TABLE | ATT_VIEW:
                        msg = _("\"%s\" is not a table or view");
                        break;
-               case ATT_TABLE|ATT_FOREIGN_TABLE:
+               case ATT_TABLE | ATT_FOREIGN_TABLE:
                        msg = _("\"%s\" is not a table or foreign table");
                        break;
-               case ATT_TABLE|ATT_COMPOSITE_TYPE|ATT_FOREIGN_TABLE:
+               case ATT_TABLE | ATT_COMPOSITE_TYPE | ATT_FOREIGN_TABLE:
                        msg = _("\"%s\" is not a table, composite type, or foreign table");
                        break;
                case ATT_VIEW:
@@ -4032,7 +4040,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be
                                        (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
                                         errmsg("cannot alter type \"%s\" because it is the type of a typed table",
                                                        typeName),
-                                        errhint("Use ALTER ... CASCADE to alter the typed tables too.")));
+                       errhint("Use ALTER ... CASCADE to alter the typed tables too.")));
                else
                        result = lappend_oid(result, HeapTupleGetOid(tuple));
        }
@@ -4103,9 +4111,9 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
 
        /*
         * Are we adding the column to a recursion child?  If so, check whether to
-        * merge with an existing definition for the column.  If we do merge,
-        * we must not recurse.  Children will already have the column, and
-     * recursing into them would mess up attinhcount.
+        * merge with an existing definition for the column.  If we do merge, we
+        * must not recurse.  Children will already have the column, and recursing
+        * into them would mess up attinhcount.
         */
        if (colDef->inhcount > 0)
        {
@@ -4133,10 +4141,10 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
                                ereport(ERROR,
                                                (errcode(ERRCODE_COLLATION_MISMATCH),
                                                 errmsg("child table \"%s\" has different collation for column \"%s\"",
-                                                               RelationGetRelationName(rel), colDef->colname),
+                                                         RelationGetRelationName(rel), colDef->colname),
                                                 errdetail("\"%s\" versus \"%s\"",
                                                                   get_collation_name(ccollid),
-                                                                  get_collation_name(childatt->attcollation))));
+                                                          get_collation_name(childatt->attcollation))));
 
                        /* If it's OID, child column must actually be OID */
                        if (isOid && childatt->attnum != ObjectIdAttributeNumber)
@@ -4265,7 +4273,7 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
                if (relkind == RELKIND_FOREIGN_TABLE)
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                        errmsg("default values on foreign tables are not supported")));
+                         errmsg("default values on foreign tables are not supported")));
 
                rawEnt = (RawColumnDefault *) palloc(sizeof(RawColumnDefault));
                rawEnt->attnum = attribute.attnum;
@@ -5170,10 +5178,11 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
                elog(ERROR, "index \"%s\" is not unique", indexName);
 
        /*
-        * Determine name to assign to constraint.  We require a constraint to
+        * Determine name to assign to constraint.      We require a constraint to
         * have the same name as the underlying index; therefore, use the index's
-        * existing name as the default constraint name, and if the user explicitly
-        * gives some other name for the constraint, rename the index to match.
+        * existing name as the default constraint name, and if the user
+        * explicitly gives some other name for the constraint, rename the index
+        * to match.
         */
        constraintName = stmt->idxname;
        if (constraintName == NULL)
@@ -5216,7 +5225,7 @@ ATExecAddIndexConstraint(AlteredTableInfo *tab, Relation rel,
  */
 static void
 ATExecAddConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
-                                       Constraint *newConstraint, bool recurse, LOCKMODE lockmode)
+                                 Constraint *newConstraint, bool recurse, LOCKMODE lockmode)
 {
        Assert(IsA(newConstraint, Constraint));
 
@@ -5337,9 +5346,9 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
 
        /*
         * If the constraint got merged with an existing constraint, we're done.
-        * We mustn't recurse to child tables in this case, because they've already
-        * got the constraint, and visiting them again would lead to an incorrect
-        * value for coninhcount.
+        * We mustn't recurse to child tables in this case, because they've
+        * already got the constraint, and visiting them again would lead to an
+        * incorrect value for coninhcount.
         */
        if (newcons == NIL)
                return;
@@ -5655,8 +5664,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
 
        /*
         * Tell Phase 3 to check that the constraint is satisfied by existing rows
-        * We can skip this during table creation or if requested explicitly
-        * by specifying NOT VALID on an alter table statement.
+        * We can skip this during table creation or if requested explicitly by
+        * specifying NOT VALID on an alter table statement.
         */
        if (!fkconstraint->skip_validation)
        {
@@ -5718,8 +5727,8 @@ ATExecValidateConstraint(Relation rel, const char *constrName)
        if (!found)
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_OBJECT),
-                       errmsg("foreign key constraint \"%s\" of relation \"%s\" does not exist",
-                                  constrName, RelationGetRelationName(rel))));
+                                errmsg("foreign key constraint \"%s\" of relation \"%s\" does not exist",
+                                               constrName, RelationGetRelationName(rel))));
 
        if (!con->convalidated)
        {
@@ -5729,17 +5738,16 @@ ATExecValidateConstraint(Relation rel, const char *constrName)
                Relation        refrel;
 
                /*
-                * Triggers are already in place on both tables, so a
-                * concurrent write that alters the result here is not
-                * possible. Normally we can run a query here to do the
-                * validation, which would only require AccessShareLock.
-                * In some cases, it is possible that we might need to
-                * fire triggers to perform the check, so we take a lock
-                * at RowShareLock level just in case.
+                * Triggers are already in place on both tables, so a concurrent write
+                * that alters the result here is not possible. Normally we can run a
+                * query here to do the validation, which would only require
+                * AccessShareLock. In some cases, it is possible that we might need
+                * to fire triggers to perform the check, so we take a lock at
+                * RowShareLock level just in case.
                 */
                refrel = heap_open(con->confrelid, RowShareLock);
 
-               validateForeignKeyConstraint((char *)constrName, rel, refrel,
+               validateForeignKeyConstraint((char *) constrName, rel, refrel,
                                                                         con->conindid,
                                                                         conid);
 
@@ -6571,12 +6579,12 @@ ATPrepAlterColumnType(List **wqueue,
        if (tab->relkind == RELKIND_RELATION)
        {
                /*
-                * Set up an expression to transform the old data value to the new type.
-                * If a USING option was given, transform and use that expression, else
-                * just take the old value and try to coerce it.  We do this first so that
-                * type incompatibility can be detected before we waste effort, and
-                * because we need the expression to be parsed against the original table
-                * rowtype.
+                * Set up an expression to transform the old data value to the new
+                * type. If a USING option was given, transform and use that
+                * expression, else just take the old value and try to coerce it.  We
+                * do this first so that type incompatibility can be detected before
+                * we waste effort, and because we need the expression to be parsed
+                * against the original table rowtype.
                 */
                if (transform)
                {
@@ -6596,13 +6604,13 @@ ATPrepAlterColumnType(List **wqueue,
                        if (expression_returns_set(transform))
                                ereport(ERROR,
                                                (errcode(ERRCODE_DATATYPE_MISMATCH),
-                                                errmsg("transform expression must not return a set")));
+                                         errmsg("transform expression must not return a set")));
 
                        /* No subplans or aggregates, either... */
                        if (pstate->p_hasSubLinks)
                                ereport(ERROR,
                                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                errmsg("cannot use subquery in transform expression")));
+                                        errmsg("cannot use subquery in transform expression")));
                        if (pstate->p_hasAggs)
                                ereport(ERROR,
                                                (errcode(ERRCODE_GROUPING_ERROR),
@@ -6615,7 +6623,7 @@ ATPrepAlterColumnType(List **wqueue,
                else
                {
                        transform = (Node *) makeVar(1, attnum,
-                                                                                attTup->atttypid, attTup->atttypmod, attTup->attcollation,
+                                  attTup->atttypid, attTup->atttypmod, attTup->attcollation,
                                                                                 0);
                }
 
@@ -6649,14 +6657,14 @@ ATPrepAlterColumnType(List **wqueue,
        else if (transform)
                ereport(ERROR,
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                errmsg("ALTER TYPE USING is only supported on plain tables")));
+                         errmsg("ALTER TYPE USING is only supported on plain tables")));
 
        if (tab->relkind == RELKIND_COMPOSITE_TYPE ||
                tab->relkind == RELKIND_FOREIGN_TABLE)
        {
                /*
-                * For composite types, do this check now.  Tables will check
-                * it later when the table is being rewritten.
+                * For composite types, do this check now.      Tables will check it later
+                * when the table is being rewritten.
                 */
                find_composite_type_dependencies(rel->rd_rel->reltype, rel, NULL);
        }
@@ -6699,7 +6707,7 @@ ATColumnChangeRequiresRewrite(Node *expr, AttrNumber varattno)
        for (;;)
        {
                /* only one varno, so no need to check that */
-               if (IsA(expr, Var) && ((Var *) expr)->varattno == varattno)
+               if (IsA(expr, Var) &&((Var *) expr)->varattno == varattno)
                        return false;
                else if (IsA(expr, RelabelType))
                        expr = (Node *) ((RelabelType *) expr)->arg;
@@ -6924,13 +6932,14 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
                                break;
 
                        case OCLASS_TRIGGER:
+
                                /*
                                 * A trigger can depend on a column because the column is
                                 * specified as an update target, or because the column is
                                 * used in the trigger's WHEN condition.  The first case would
                                 * not require any extra work, but the second case would
                                 * require updating the WHEN expression, which will take a
-                                * significant amount of new code.  Since we can't easily tell
+                                * significant amount of new code.      Since we can't easily tell
                                 * which case applies, we punt for both.  FIXME someday.
                                 */
                                ereport(ERROR,
@@ -7940,7 +7949,7 @@ copy_relation_data(SMgrRelation src, SMgrRelation dst,
  */
 static void
 ATExecEnableDisableTrigger(Relation rel, char *trigname,
-                                                  char fires_when, bool skip_system, LOCKMODE lockmode)
+                                               char fires_when, bool skip_system, LOCKMODE lockmode)
 {
        EnableDisableTrigger(rel, trigname, fires_when, skip_system);
 }
@@ -8558,18 +8567,18 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
 static void
 ATExecGenericOptions(Relation rel, List *options)
 {
-       Relation                ftrel;
-       ForeignServer  *server;
+       Relation        ftrel;
+       ForeignServer *server;
        ForeignDataWrapper *fdw;
-       HeapTuple               tuple;
-       bool                    isnull;
-       Datum                   repl_val[Natts_pg_foreign_table];
-       bool                    repl_null[Natts_pg_foreign_table];
-       bool                    repl_repl[Natts_pg_foreign_table];
-       Datum                   datum;
-       Form_pg_foreign_table   tableform;
-
-       if (options == NIL) 
+       HeapTuple       tuple;
+       bool            isnull;
+       Datum           repl_val[Natts_pg_foreign_table];
+       bool            repl_null[Natts_pg_foreign_table];
+       bool            repl_repl[Natts_pg_foreign_table];
+       Datum           datum;
+       Form_pg_foreign_table tableform;
+
+       if (options == NIL)
                return;
 
        ftrel = heap_open(ForeignTableRelationId, RowExclusiveLock);
@@ -8579,7 +8588,7 @@ ATExecGenericOptions(Relation rel, List *options)
                ereport(ERROR,
                                (errcode(ERRCODE_UNDEFINED_OBJECT),
                                 errmsg("foreign table \"%s\" does not exist",
-                                                                               RelationGetRelationName(rel))));
+                                               RelationGetRelationName(rel))));
        tableform = (Form_pg_foreign_table) GETSTRUCT(tuple);
        server = GetForeignServer(tableform->ftserver);
        fdw = GetForeignDataWrapper(server->fdwid);
@@ -8718,8 +8727,8 @@ AlterTableNamespace(RangeVar *relation, const char *newschema,
                default:
                        ereport(ERROR,
                                        (errcode(ERRCODE_WRONG_OBJECT_TYPE),
-                                        errmsg("\"%s\" is not a table, view, sequence, or foreign table",
-                                                       RelationGetRelationName(rel))));
+                       errmsg("\"%s\" is not a table, view, sequence, or foreign table",
+                                  RelationGetRelationName(rel))));
        }
 
        /* get schema OID and check its permissions */
@@ -8836,7 +8845,7 @@ AlterIndexNamespaces(Relation classRel, Relation rel,
  */
 static void
 AlterSeqNamespaces(Relation classRel, Relation rel,
-                                  Oid oldNspOid, Oid newNspOid, const char *newNspName, LOCKMODE lockmode)
+        Oid oldNspOid, Oid newNspOid, const char *newNspName, LOCKMODE lockmode)
 {
        Relation        depRel;
        SysScanDesc scan;
index 42a704beb164f39316862f4bdcdcc6c6b6edd1ba..3024dc4b6468ebf0c4b3eac1152f10550ab5bda7 100644
@@ -559,7 +559,7 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
                                        (errcode(ERRCODE_UNDEFINED_FILE),
                                         errmsg("directory \"%s\" does not exist", location),
                                         InRecovery ? errhint("Create this directory for the tablespace before "
-                                                                  "restarting the server."): 0));
+                                                                                 "restarting the server.") : 0));
                else
                        ereport(ERROR,
                                        (errcode_for_file_access(),
@@ -573,8 +573,8 @@ create_tablespace_directories(const char *location, const Oid tablespaceoid)
 
                /*
                 * Our theory for replaying a CREATE is to forcibly drop the target
-                * subdirectory if present, and then recreate it. This may be
-                * more work than needed, but it is simple to implement.
+                * subdirectory if present, and then recreate it. This may be more
+                * work than needed, but it is simple to implement.
                 */
                if (stat(location_with_version_dir, &st) == 0 && S_ISDIR(st.st_mode))
                {
@@ -1353,10 +1353,10 @@ get_tablespace_oid(const char *tablespacename, bool missing_ok)
        heap_close(rel, AccessShareLock);
 
        if (!OidIsValid(result) && !missing_ok)
-        ereport(ERROR,
-                (errcode(ERRCODE_UNDEFINED_OBJECT),
-                 errmsg("tablespace \"%s\" does not exist",
-                        tablespacename)));
+               ereport(ERROR,
+                               (errcode(ERRCODE_UNDEFINED_OBJECT),
+                                errmsg("tablespace \"%s\" does not exist",
+                                               tablespacename)));
 
        return result;
 }
index 329d4d95f151e292e509cd0cd5e152ca041097d5..6b1ade899032f001f02de50194b74e49e8e3b3ef 100644
@@ -144,11 +144,11 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                                referenced;
 
        /*
-        * ShareRowExclusiveLock is sufficient to prevent concurrent write activity
-        * to the relation, and thus to lock out any operations that might want to
-        * fire triggers on the relation.  If we had ON SELECT triggers we would
-        * need to take an AccessExclusiveLock to add one of those, just as we do
-        * with ON SELECT rules.
+        * ShareRowExclusiveLock is sufficient to prevent concurrent write
+        * activity to the relation, and thus to lock out any operations that
+        * might want to fire triggers on the relation.  If we had ON SELECT
+        * triggers we would need to take an AccessExclusiveLock to add one of
+        * those, just as we do with ON SELECT rules.
         */
        rel = heap_openrv(stmt->relation, ShareRowExclusiveLock);
 
@@ -244,7 +244,7 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
                if (stmt->whenClause)
                        ereport(ERROR,
                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                        errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
+                                errmsg("INSTEAD OF triggers cannot have WHEN conditions")));
                if (stmt->columns != NIL)
                        ereport(ERROR,
                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -480,8 +480,8 @@ CreateTrigger(CreateTrigStmt *stmt, const char *queryString,
         * can skip this for internally generated triggers, since the name
         * modification above should be sufficient.
         *
-        * NOTE that this is cool only because we have ShareRowExclusiveLock on the
-        * relation, so the trigger set won't be changing underneath us.
+        * NOTE that this is cool only because we have ShareRowExclusiveLock on
+        * the relation, so the trigger set won't be changing underneath us.
         */
        if (!isInternal)
        {
@@ -1036,8 +1036,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior,
        if (!OidIsValid(object.objectId))
        {
                ereport(NOTICE,
-                               (errmsg("trigger \"%s\" for table \"%s\" does not exist, skipping",
-                                               trigname, get_rel_name(relid))));
+                 (errmsg("trigger \"%s\" for table \"%s\" does not exist, skipping",
+                                 trigname, get_rel_name(relid))));
                return;
        }
 
@@ -1083,9 +1083,9 @@ RemoveTriggerById(Oid trigOid)
 
        /*
         * Open and lock the relation the trigger belongs to.  As in
-        * CreateTrigger, this is sufficient to lock out all operations that
-        * could fire or add triggers; but it would need to be revisited if
-        * we had ON SELECT triggers.
+        * CreateTrigger, this is sufficient to lock out all operations that could
+        * fire or add triggers; but it would need to be revisited if we had ON
+        * SELECT triggers.
         */
        relid = ((Form_pg_trigger) GETSTRUCT(tup))->tgrelid;
 
@@ -1960,7 +1960,7 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
        if (newtuple != slottuple)
        {
                /*
-                * Return the modified tuple using the es_trig_tuple_slot.  We assume
+                * Return the modified tuple using the es_trig_tuple_slot.      We assume
                 * the tuple was allocated in per-tuple memory context, and therefore
                 * will go away by itself. The tuple table slot should not try to
                 * clear it.
@@ -2035,7 +2035,7 @@ ExecIRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
        if (newtuple != slottuple)
        {
                /*
-                * Return the modified tuple using the es_trig_tuple_slot.  We assume
+                * Return the modified tuple using the es_trig_tuple_slot.      We assume
                 * the tuple was allocated in per-tuple memory context, and therefore
                 * will go away by itself. The tuple table slot should not try to
                 * clear it.
@@ -2378,7 +2378,7 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
        if (newtuple != slottuple)
        {
                /*
-                * Return the modified tuple using the es_trig_tuple_slot.  We assume
+                * Return the modified tuple using the es_trig_tuple_slot.      We assume
                 * the tuple was allocated in per-tuple memory context, and therefore
                 * will go away by itself. The tuple table slot should not try to
                 * clear it.
@@ -2461,7 +2461,7 @@ ExecIRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
        if (newtuple != slottuple)
        {
                /*
-                * Return the modified tuple using the es_trig_tuple_slot.  We assume
+                * Return the modified tuple using the es_trig_tuple_slot.      We assume
                 * the tuple was allocated in per-tuple memory context, and therefore
                 * will go away by itself. The tuple table slot should not try to
                 * clear it.
@@ -2891,7 +2891,7 @@ typedef struct AfterTriggerEventDataOneCtid
 {
        TriggerFlags ate_flags;         /* status bits and offset to shared data */
        ItemPointerData ate_ctid1;      /* inserted, deleted, or old updated tuple */
-} AfterTriggerEventDataOneCtid;
+}      AfterTriggerEventDataOneCtid;
 
 #define SizeofTriggerEvent(evt) \
        (((evt)->ate_flags & AFTER_TRIGGER_2CTIDS) ? \
index 81f129dff6bc4f11a290657d6179e13f11fafffc..80a30e180dc7f62220d204c2189958108211c5a9 100644
@@ -407,7 +407,8 @@ RenameTSParser(List *oldname, const char *newname)
 void
 AlterTSParserNamespace(List *name, const char *newschema)
 {
-       Oid                     prsId, nspOid;
+       Oid                     prsId,
+                               nspOid;
        Relation        rel;
 
        rel = heap_open(TSParserRelationId, RowExclusiveLock);
@@ -429,7 +430,7 @@ AlterTSParserNamespace(List *name, const char *newschema)
 Oid
 AlterTSParserNamespace_oid(Oid prsId, Oid newNspOid)
 {
-       Oid         oldNspOid;
+       Oid                     oldNspOid;
        Relation        rel;
 
        rel = heap_open(TSParserRelationId, RowExclusiveLock);
@@ -685,7 +686,8 @@ RenameTSDictionary(List *oldname, const char *newname)
 void
 AlterTSDictionaryNamespace(List *name, const char *newschema)
 {
-       Oid                     dictId, nspOid;
+       Oid                     dictId,
+                               nspOid;
        Relation        rel;
 
        rel = heap_open(TSDictionaryRelationId, RowExclusiveLock);
@@ -708,7 +710,7 @@ AlterTSDictionaryNamespace(List *name, const char *newschema)
 Oid
 AlterTSDictionaryNamespace_oid(Oid dictId, Oid newNspOid)
 {
-       Oid         oldNspOid;
+       Oid                     oldNspOid;
        Relation        rel;
 
        rel = heap_open(TSDictionaryRelationId, RowExclusiveLock);
@@ -1218,7 +1220,8 @@ RenameTSTemplate(List *oldname, const char *newname)
 void
 AlterTSTemplateNamespace(List *name, const char *newschema)
 {
-       Oid                     tmplId, nspOid;
+       Oid                     tmplId,
+                               nspOid;
        Relation        rel;
 
        rel = heap_open(TSTemplateRelationId, RowExclusiveLock);
@@ -1240,7 +1243,7 @@ AlterTSTemplateNamespace(List *name, const char *newschema)
 Oid
 AlterTSTemplateNamespace_oid(Oid tmplId, Oid newNspOid)
 {
-       Oid         oldNspOid;
+       Oid                     oldNspOid;
        Relation        rel;
 
        rel = heap_open(TSTemplateRelationId, RowExclusiveLock);
@@ -1668,7 +1671,8 @@ RenameTSConfiguration(List *oldname, const char *newname)
 void
 AlterTSConfigurationNamespace(List *name, const char *newschema)
 {
-       Oid                     cfgId, nspOid;
+       Oid                     cfgId,
+                               nspOid;
        Relation        rel;
 
        rel = heap_open(TSConfigRelationId, RowExclusiveLock);
@@ -1691,7 +1695,7 @@ AlterTSConfigurationNamespace(List *name, const char *newschema)
 Oid
 AlterTSConfigurationNamespace_oid(Oid cfgId, Oid newNspOid)
 {
-       Oid         oldNspOid;
+       Oid                     oldNspOid;
        Relation        rel;
 
        rel = heap_open(TSConfigRelationId, RowExclusiveLock);
index 4c06d898a88cce473d24bd5d1db92b07d8ee6d89..1a20b0d91be78ccfa3dc535eb817a8073a837666 100644
@@ -138,7 +138,7 @@ DefineType(List *names, List *parameters)
        DefElem    *byValueEl = NULL;
        DefElem    *alignmentEl = NULL;
        DefElem    *storageEl = NULL;
-       DefElem    *collatableEl = NULL;
+       DefElem    *collatableEl = NULL;
        Oid                     inputOid;
        Oid                     outputOid;
        Oid                     receiveOid = InvalidOid;
@@ -537,7 +537,7 @@ DefineType(List *names, List *parameters)
         * now have TypeCreate do all the real work.
         *
         * Note: the pg_type.oid is stored in user tables as array elements (base
-        * types) in ArrayType and in composite types in DatumTupleFields.  This
+        * types) in ArrayType and in composite types in DatumTupleFields.      This
         * oid must be preserved by binary upgrades.
         */
        typoid =
@@ -1179,7 +1179,7 @@ DefineEnum(CreateEnumStmt *stmt)
                                   -1,                  /* typMod (Domains only) */
                                   0,                   /* Array dimensions of typbasetype */
                                   false,               /* Type NOT NULL */
-                                  InvalidOid); /* typcollation */
+                                  InvalidOid); /* typcollation */
 
        /* Enter the enum's values into pg_enum */
        EnumValuesCreate(enumTypeOid, stmt->vals);
@@ -2416,7 +2416,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
                                                  CONSTRAINT_CHECK,             /* Constraint Type */
                                                  false,        /* Is Deferrable */
                                                  false,        /* Is Deferred */
-                                                 true,         /* Is Validated */
+                                                 true, /* Is Validated */
                                                  InvalidOid,   /* not a relation constraint */
                                                  NULL,
                                                  0,
index f13eb2891e2b5921f4b0dc35cef5efd60451eedb..9c9164d3bc722d71484e2d012a7bf65e95fed77b 100644
@@ -84,7 +84,7 @@ CreateRole(CreateRoleStmt *stmt)
        bool            createrole = false;             /* Can this user create roles? */
        bool            createdb = false;               /* Can the user create databases? */
        bool            canlogin = false;               /* Can this user login? */
-       bool            isreplication = false; /* Is this a replication role? */
+       bool            isreplication = false;  /* Is this a replication role? */
        int                     connlimit = -1; /* maximum connections allowed */
        List       *addroleto = NIL;    /* roles to make this a member of */
        List       *rolemembers = NIL;          /* roles to be members of this role */
@@ -98,7 +98,7 @@ CreateRole(CreateRoleStmt *stmt)
        DefElem    *dcreaterole = NULL;
        DefElem    *dcreatedb = NULL;
        DefElem    *dcanlogin = NULL;
-       DefElem    *disreplication = NULL;
+       DefElem    *disreplication = NULL;
        DefElem    *dconnlimit = NULL;
        DefElem    *daddroleto = NULL;
        DefElem    *drolemembers = NULL;
@@ -240,9 +240,10 @@ CreateRole(CreateRoleStmt *stmt)
        if (dissuper)
        {
                issuper = intVal(dissuper->arg) != 0;
+
                /*
-                * Superusers get replication by default, but only if
-                * NOREPLICATION wasn't explicitly mentioned
+                * Superusers get replication by default, but only if NOREPLICATION
+                * wasn't explicitly mentioned
                 */
                if (!(disreplication && intVal(disreplication->arg) == 0))
                        isreplication = 1;
@@ -287,7 +288,7 @@ CreateRole(CreateRoleStmt *stmt)
                if (!superuser())
                        ereport(ERROR,
                                        (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
-                                        errmsg("must be superuser to create replication users")));
+                                  errmsg("must be superuser to create replication users")));
        }
        else
        {
@@ -384,8 +385,8 @@ CreateRole(CreateRoleStmt *stmt)
        tuple = heap_form_tuple(pg_authid_dsc, new_record, new_record_nulls);
 
        /*
-        * pg_largeobject_metadata contains pg_authid.oid's, so we
-        * use the binary-upgrade override, if specified.
+        * pg_largeobject_metadata contains pg_authid.oid's, so we use the
+        * binary-upgrade override, if specified.
         */
        if (OidIsValid(binary_upgrade_next_pg_authid_oid))
        {
@@ -467,7 +468,7 @@ AlterRole(AlterRoleStmt *stmt)
        int                     createrole = -1;        /* Can this user create roles? */
        int                     createdb = -1;  /* Can the user create databases? */
        int                     canlogin = -1;  /* Can this user login? */
-       int                     isreplication = -1; /* Is this a replication role? */
+       int                     isreplication = -1;             /* Is this a replication role? */
        int                     connlimit = -1; /* maximum connections allowed */
        List       *rolemembers = NIL;          /* roles to be added/removed */
        char       *validUntil = NULL;          /* time the login is valid until */
@@ -479,7 +480,7 @@ AlterRole(AlterRoleStmt *stmt)
        DefElem    *dcreaterole = NULL;
        DefElem    *dcreatedb = NULL;
        DefElem    *dcanlogin = NULL;
-       DefElem    *disreplication = NULL;
+       DefElem    *disreplication = NULL;
        DefElem    *dconnlimit = NULL;
        DefElem    *drolemembers = NULL;
        DefElem    *dvalidUntil = NULL;
index 1651aa94dc22f4e2ff9991381f64da871252fa9d..90c413a9880f671981c8e45196029189a826f408 100644
@@ -527,7 +527,7 @@ vac_update_relstats(Relation relation,
 
        /*
         * If we have discovered that there are no indexes, then there's no
-        * primary key either.  This could be done more thoroughly...
+        * primary key either.  This could be done more thoroughly...
         */
        if (pgcform->relhaspkey && !hasindex)
        {
@@ -839,8 +839,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound,
         * There's a race condition here: the rel may have gone away since the
         * last time we saw it.  If so, we don't need to vacuum it.
         *
-        * If we've been asked not to wait for the relation lock, acquire it
-        * first in non-blocking mode, before calling try_relation_open().
+        * If we've been asked not to wait for the relation lock, acquire it first
+        * in non-blocking mode, before calling try_relation_open().
         */
        if (!(vacstmt->options & VACOPT_NOWAIT))
                onerel = try_relation_open(relid, lmode);
@@ -852,8 +852,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, bool do_toast, bool for_wraparound,
                if (IsAutoVacuumWorkerProcess() && Log_autovacuum_min_duration >= 0)
                        ereport(LOG,
                                        (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
-                                        errmsg("skipping vacuum of \"%s\" --- lock not available",
-                                               vacstmt->relation->relname)));
+                                  errmsg("skipping vacuum of \"%s\" --- lock not available",
+                                                 vacstmt->relation->relname)));
        }
 
        if (!onerel)
index a5c024cc19b2ac1f8e964269b9325d58e18660db..9393fa0727aaad7508e1163623322b4066412257 100644
@@ -705,15 +705,16 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                        PageSetAllVisible(page);
                        SetBufferCommitInfoNeedsSave(buf);
                }
+
                /*
                 * It's possible for the value returned by GetOldestXmin() to move
                 * backwards, so it's not wrong for us to see tuples that appear to
                 * not be visible to everyone yet, while PD_ALL_VISIBLE is already
                 * set. The real safe xmin value never moves backwards, but
                 * GetOldestXmin() is conservative and sometimes returns a value
-                * that's unnecessarily small, so if we see that contradiction it
-                * just means that the tuples that we think are not visible to
-                * everyone yet actually are, and the PD_ALL_VISIBLE flag is correct.
+                * that's unnecessarily small, so if we see that contradiction it just
+                * means that the tuples that we think are not visible to everyone yet
+                * actually are, and the PD_ALL_VISIBLE flag is correct.
                 *
                 * There should never be dead tuples on a page with PD_ALL_VISIBLE
                 * set, however.
index 2cec713089668825d49e6cf9e95c2dbc5ee6ea42..5d0fbdfb40fa79a01304ef72a632c2c47f0bab7d 100644
@@ -132,8 +132,8 @@ check_datestyle(char **newval, void **extra, GucSource source)
                         * We can't simply "return check_datestyle(...)" because we need
                         * to handle constructs like "DEFAULT, ISO".
                         */
-                       char   *subval;
-                       void   *subextra = NULL;
+                       char       *subval;
+                       void       *subextra = NULL;
 
                        subval = strdup(GetConfigOptionResetString("datestyle"));
                        if (!subval)
@@ -262,9 +262,9 @@ check_timezone(char **newval, void **extra, GucSource source)
        {
                /*
                 * The boot_val given for TimeZone in guc.c is NULL.  When we see this
-                * we just do nothing.  If this isn't overridden from the config file
+                * we just do nothing.  If this isn't overridden from the config file
                 * then pg_timezone_initialize() will eventually select a default
-                * value from the environment.  This hack has two purposes: to avoid
+                * value from the environment.  This hack has two purposes: to avoid
                 * wasting cycles loading values that might soon be overridden from
                 * the config file, and to avoid trying to read the timezone files
                 * during InitializeGUCOptions().  The latter doesn't work in an
@@ -289,7 +289,7 @@ check_timezone(char **newval, void **extra, GucSource source)
        if (pg_strncasecmp(*newval, "interval", 8) == 0)
        {
                /*
-                * Support INTERVAL 'foo'.  This is for SQL spec compliance, not
+                * Support INTERVAL 'foo'.      This is for SQL spec compliance, not
                 * because it has any actual real-world usefulness.
                 */
                const char *valueptr = *newval;
@@ -391,13 +391,13 @@ check_timezone(char **newval, void **extra, GucSource source)
         *
         * Note: the result string should be something that we'd accept as input.
         * We use the numeric format for interval cases, because it's simpler to
-        * reload.  In the named-timezone case, *newval is already OK and need not
+        * reload.      In the named-timezone case, *newval is already OK and need not
         * be changed; it might not have the canonical casing, but that's taken
         * care of by show_timezone.
         */
        if (myextra.HasCTZSet)
        {
-               char    *result = (char *) malloc(64);
+               char       *result = (char *) malloc(64);
 
                if (!result)
                        return false;
@@ -567,7 +567,7 @@ show_log_timezone(void)
  * We allow idempotent changes (r/w -> r/w and r/o -> r/o) at any time, and
  * we also always allow changes from read-write to read-only.  However,
  * read-only may be changed to read-write only when in a top-level transaction
- * that has not yet taken an initial snapshot.  Can't do it in a hot standby
+ * that has not yet taken an initial snapshot. Can't do it in a hot standby
  * slave, either.
  */
 bool
@@ -719,7 +719,7 @@ check_transaction_deferrable(bool *newval, void **extra, GucSource source)
  *
  * We can't roll back the random sequence on error, and we don't want
  * config file reloads to affect it, so we only want interactive SET SEED
- * commands to set it.  We use the "extra" storage to ensure that rollbacks
+ * commands to set it. We use the "extra" storage to ensure that rollbacks
  * don't try to do the operation again.
  */
 
@@ -851,8 +851,8 @@ check_session_authorization(char **newval, void **extra, GucSource source)
        {
                /*
                 * Can't do catalog lookups, so fail.  The result of this is that
-                * session_authorization cannot be set in postgresql.conf, which
-                * seems like a good thing anyway, so we don't work hard to avoid it.
+                * session_authorization cannot be set in postgresql.conf, which seems
+                * like a good thing anyway, so we don't work hard to avoid it.
                 */
                return false;
        }
@@ -977,7 +977,7 @@ const char *
 show_role(void)
 {
        /*
-        * Check whether SET ROLE is active; if not return "none".  This is a
+        * Check whether SET ROLE is active; if not return "none".      This is a
         * kluge to deal with the fact that SET SESSION AUTHORIZATION logically
         * resets SET ROLE to NONE, but we cannot set the GUC role variable from
         * assign_session_authorization (because we haven't got enough info to
index 508fb23c9ac14a40daa58d39cabc9eeef10a858c..be681e3fd4f9fa24c3b8fe4fc22dd13549e02f03 100644
@@ -120,7 +120,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
 
                        def->colname = pstrdup(tle->resname);
                        def->typeName = makeTypeNameFromOid(exprType((Node *) tle->expr),
-                                                                                               exprTypmod((Node *) tle->expr));
+                                                                                        exprTypmod((Node *) tle->expr));
                        def->inhcount = 0;
                        def->is_local = true;
                        def->is_not_null = false;
@@ -130,6 +130,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
                        def->cooked_default = NULL;
                        def->collClause = NULL;
                        def->collOid = exprCollation((Node *) tle->expr);
+
                        /*
                         * It's possible that the column is of a collatable type but the
                         * collation could not be resolved, so double-check.
@@ -240,7 +241,7 @@ DefineVirtualRelation(const RangeVar *relation, List *tlist, bool replace)
        }
        else
        {
-               Oid             relid;
+               Oid                     relid;
 
                /*
                 * now set the parameters for keys/inheritance etc. All of these are
@@ -437,8 +438,8 @@ DefineView(ViewStmt *stmt, const char *queryString)
 
        /*
         * Check for unsupported cases.  These tests are redundant with ones in
-        * DefineQueryRewrite(), but that function will complain about a bogus
-        * ON SELECT rule, and we'd rather the message complain about a view.
+        * DefineQueryRewrite(), but that function will complain about a bogus ON
+        * SELECT rule, and we'd rather the message complain about a view.
         */
        if (viewParse->intoClause != NULL)
                ereport(ERROR,
@@ -447,7 +448,7 @@ DefineView(ViewStmt *stmt, const char *queryString)
        if (viewParse->hasModifyingCTE)
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("views must not contain data-modifying statements in WITH")));
+               errmsg("views must not contain data-modifying statements in WITH")));
 
        /*
         * If a list of column names was given, run through and insert these into
@@ -500,7 +501,7 @@ DefineView(ViewStmt *stmt, const char *queryString)
        if (view->relpersistence == RELPERSISTENCE_UNLOGGED)
                ereport(ERROR,
                                (errcode(ERRCODE_SYNTAX_ERROR),
-                                errmsg("views cannot be unlogged because they do not have storage")));
+               errmsg("views cannot be unlogged because they do not have storage")));
 
        /*
         * Create the view relation
index caa9faea87f4f9bca5c71d8588ae54548300491a..86ec9870198af543e8d45b90e92357c908d2f22c 100644
@@ -19,7 +19,7 @@
  *     ExecutorRun accepts direction and count arguments that specify whether
  *     the plan is to be executed forwards, backwards, and for how many tuples.
  *     In some cases ExecutorRun may be called multiple times to process all
- *     the tuples for a plan.  It is also acceptable to stop short of executing
+ *     the tuples for a plan.  It is also acceptable to stop short of executing
  *     the whole plan (but only if it is a SELECT).
  *
  *     ExecutorFinish must be called after the final ExecutorRun call and
@@ -168,6 +168,7 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
        switch (queryDesc->operation)
        {
                case CMD_SELECT:
+
                        /*
                         * SELECT INTO, SELECT FOR UPDATE/SHARE and modifying CTEs need to
                         * mark tuples
@@ -332,12 +333,12 @@ standard_ExecutorRun(QueryDesc *queryDesc,
  *             ExecutorFinish
  *
  *             This routine must be called after the last ExecutorRun call.
- *             It performs cleanup such as firing AFTER triggers.  It is
+ *             It performs cleanup such as firing AFTER triggers.      It is
  *             separate from ExecutorEnd because EXPLAIN ANALYZE needs to
  *             include these actions in the total runtime.
  *
  *             We provide a function hook variable that lets loadable plugins
- *             get control when ExecutorFinish is called.  Such a plugin would
+ *             get control when ExecutorFinish is called.      Such a plugin would
  *             normally call standard_ExecutorFinish().
  *
  * ----------------------------------------------------------------
@@ -425,9 +426,9 @@ standard_ExecutorEnd(QueryDesc *queryDesc)
        Assert(estate != NULL);
 
        /*
-        * Check that ExecutorFinish was called, unless in EXPLAIN-only mode.
-        * This Assert is needed because ExecutorFinish is new as of 9.1, and
-        * callers might forget to call it.
+        * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
+        * Assert is needed because ExecutorFinish is new as of 9.1, and callers
+        * might forget to call it.
         */
        Assert(estate->es_finished ||
                   (estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
@@ -519,7 +520,7 @@ ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
 
        foreach(l, rangeTable)
        {
-               RangeTblEntry  *rte = (RangeTblEntry *) lfirst(l);
+               RangeTblEntry *rte = (RangeTblEntry *) lfirst(l);
 
                result = ExecCheckRTEPerms(rte);
                if (!result)
@@ -533,8 +534,8 @@ ExecCheckRTPerms(List *rangeTable, bool ereport_on_violation)
        }
 
        if (ExecutorCheckPerms_hook)
-               result = (*ExecutorCheckPerms_hook)(rangeTable,
-                                                                                       ereport_on_violation);
+               result = (*ExecutorCheckPerms_hook) (rangeTable,
+                                                                                        ereport_on_violation);
        return result;
 }
 
@@ -980,7 +981,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
 void
 CheckValidResultRel(Relation resultRel, CmdType operation)
 {
-       TriggerDesc     *trigDesc = resultRel->trigdesc;
+       TriggerDesc *trigDesc = resultRel->trigdesc;
 
        switch (resultRel->rd_rel->relkind)
        {
@@ -1005,26 +1006,26 @@ CheckValidResultRel(Relation resultRel, CmdType operation)
                                case CMD_INSERT:
                                        if (!trigDesc || !trigDesc->trig_insert_instead_row)
                                                ereport(ERROR,
-                                                               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                                                errmsg("cannot insert into view \"%s\"",
-                                                                               RelationGetRelationName(resultRel)),
-                                                                errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger.")));
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot insert into view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("You need an unconditional ON INSERT DO INSTEAD rule or an INSTEAD OF INSERT trigger.")));
                                        break;
                                case CMD_UPDATE:
                                        if (!trigDesc || !trigDesc->trig_update_instead_row)
                                                ereport(ERROR,
-                                                               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                                                errmsg("cannot update view \"%s\"",
-                                                                               RelationGetRelationName(resultRel)),
-                                                                errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger.")));
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot update view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("You need an unconditional ON UPDATE DO INSTEAD rule or an INSTEAD OF UPDATE trigger.")));
                                        break;
                                case CMD_DELETE:
                                        if (!trigDesc || !trigDesc->trig_delete_instead_row)
                                                ereport(ERROR,
-                                                               (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                                                errmsg("cannot delete from view \"%s\"",
-                                                                               RelationGetRelationName(resultRel)),
-                                                                errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger.")));
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("cannot delete from view \"%s\"",
+                                                                 RelationGetRelationName(resultRel)),
+                                                  errhint("You need an unconditional ON DELETE DO INSTEAD rule or an INSTEAD OF DELETE trigger.")));
                                        break;
                                default:
                                        elog(ERROR, "unrecognized CmdType: %d", (int) operation);
@@ -1137,8 +1138,8 @@ ExecGetTriggerResultRel(EState *estate, Oid relid)
        /*
         * Open the target relation's relcache entry.  We assume that an
         * appropriate lock is still held by the backend from whenever the trigger
-        * event got queued, so we need take no new lock here.  Also, we need
-        * not recheck the relkind, so no need for CheckValidResultRel.
+        * event got queued, so we need take no new lock here.  Also, we need not
+        * recheck the relkind, so no need for CheckValidResultRel.
         */
        rel = heap_open(relid, NoLock);
 
@@ -1238,12 +1239,12 @@ ExecPostprocessPlan(EState *estate)
 
        /*
         * Run any secondary ModifyTable nodes to completion, in case the main
-        * query did not fetch all rows from them.  (We do this to ensure that
+        * query did not fetch all rows from them.      (We do this to ensure that
         * such nodes have predictable results.)
         */
        foreach(lc, estate->es_auxmodifytables)
        {
-               PlanState *ps = (PlanState *) lfirst(lc);
+               PlanState  *ps = (PlanState *) lfirst(lc);
 
                for (;;)
                {
@@ -2220,9 +2221,9 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
         * ExecInitSubPlan expects to be able to find these entries. Some of the
         * SubPlans might not be used in the part of the plan tree we intend to
         * run, but since it's not easy to tell which, we just initialize them
-        * all.  (However, if the subplan is headed by a ModifyTable node, then
-        * it must be a data-modifying CTE, which we will certainly not need to
-        * re-run, so we can skip initializing it.  This is just an efficiency
+        * all.  (However, if the subplan is headed by a ModifyTable node, then it
+        * must be a data-modifying CTE, which we will certainly not need to
+        * re-run, so we can skip initializing it.      This is just an efficiency
         * hack; it won't skip data-modifying CTEs for which the ModifyTable node
         * is not at the top.)
         */
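
The ExecCheckRTPerms hunk above only re-spaces the call through ExecutorCheckPerms_hook, but it is a convenient place to illustrate the hook-variable pattern that call uses. The sketch below is self-contained and uses invented names (perms_check_hook, my_extra_check), not the executor's real types; `(*hook) (args)` is the spacing this pgindent run settles on for calls through a function pointer. A real loadable module would also save and chain to any previously installed hook, which is omitted here.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the executor's hook pattern (names are illustrative only). */
typedef bool (*perms_check_hook_type) (int relcount, bool error_on_violation);

/* A loadable module would set this from its init function; NULL means "no hook". */
static perms_check_hook_type perms_check_hook = NULL;

static bool
my_extra_check(int relcount, bool error_on_violation)
{
	/* pretend any range table with more than 3 entries is forbidden */
	return relcount <= 3;
}

static bool
check_perms(int relcount, bool error_on_violation)
{
	bool		result = true;	/* built-in checks assumed to pass */

	/* Give a plugin the chance to veto, like ExecutorCheckPerms_hook above. */
	if (perms_check_hook)
		result = (*perms_check_hook) (relcount, error_on_violation);
	return result;
}

int
main(void)
{
	perms_check_hook = my_extra_check;
	printf("allowed: %d\n", check_perms(5, false));	/* prints 0: the hook vetoes */
	return 0;
}
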
index c153ca00dbf520a61220015b34bc64f3909c8e26..5f0b58f43b79ae138f49c6198fcb6c1d333c523e 100644 (file)
@@ -79,9 +79,9 @@ static Datum ExecEvalWholeRowSlow(ExprState *exprstate, ExprContext *econtext,
 static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext,
                          bool *isNull, ExprDoneCond *isDone);
 static Datum ExecEvalParamExec(ExprState *exprstate, ExprContext *econtext,
-                         bool *isNull, ExprDoneCond *isDone);
+                                 bool *isNull, ExprDoneCond *isDone);
 static Datum ExecEvalParamExtern(ExprState *exprstate, ExprContext *econtext,
-                         bool *isNull, ExprDoneCond *isDone);
+                                       bool *isNull, ExprDoneCond *isDone);
 static void init_fcache(Oid foid, Oid input_collation, FuncExprState *fcache,
                        MemoryContext fcacheCxt, bool needDescForSets);
 static void ShutdownFuncExpr(Datum arg);
@@ -1043,7 +1043,7 @@ ExecEvalParamExtern(ExprState *exprstate, ExprContext *econtext,
        ereport(ERROR,
                        (errcode(ERRCODE_UNDEFINED_OBJECT),
                         errmsg("no value found for parameter %d", thisParamId)));
-       return (Datum) 0;               /* keep compiler quiet */
+       return (Datum) 0;                       /* keep compiler quiet */
 }
 
 
index 7e84ccdd9cd65033e6c8689357570e497207d7ef..0cbbe04d3bda1bd5511c16e36c96bd42cf63a7a9 100644 (file)
@@ -1319,9 +1319,9 @@ retry:
        /*
         * Ordinarily, at this point the search should have found the originally
         * inserted tuple, unless we exited the loop early because of conflict.
-        * However, it is possible to define exclusion constraints for which
-        * that wouldn't be true --- for instance, if the operator is <>.
-        * So we no longer complain if found_self is still false.
+        * However, it is possible to define exclusion constraints for which that
+        * wouldn't be true --- for instance, if the operator is <>. So we no
+        * longer complain if found_self is still false.
         */
 
        econtext->ecxt_scantuple = save_scantuple;
index 70d126c521340418cc5714d9c3d30d83f13d6c84..9c867bbae200b7354d0d15d980a9b9c102ec3bfd 100644 (file)
@@ -81,7 +81,7 @@ typedef struct
        char       *fname;                      /* function name (for error msgs) */
        char       *src;                        /* function body text (for error msgs) */
 
-       SQLFunctionParseInfoPtr pinfo;  /* data for parser callback hooks */
+       SQLFunctionParseInfoPtr pinfo;          /* data for parser callback hooks */
 
        Oid                     rettype;                /* actual return type */
        int16           typlen;                 /* length of the return type */
@@ -119,7 +119,7 @@ typedef struct SQLFunctionParseInfo
        Oid                *argtypes;           /* resolved types of input arguments */
        int                     nargs;                  /* number of input arguments */
        Oid                     collation;              /* function's input collation, if known */
-} SQLFunctionParseInfo;
+}      SQLFunctionParseInfo;
 
 
 /* non-export function prototypes */
@@ -255,7 +255,7 @@ sql_fn_param_ref(ParseState *pstate, ParamRef *pref)
  * Set up the per-query execution_state records for a SQL function.
  *
  * The input is a List of Lists of parsed and rewritten, but not planned,
- * querytrees.  The sublist structure denotes the original query boundaries.
+ * querytrees. The sublist structure denotes the original query boundaries.
  */
 static List *
 init_execution_state(List *queryTree_list,
@@ -299,8 +299,8 @@ init_execution_state(List *queryTree_list,
                                ereport(ERROR,
                                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                /* translator: %s is a SQL statement name */
-                                                errmsg("%s is not allowed in a non-volatile function",
-                                                               CreateCommandTag(stmt))));
+                                          errmsg("%s is not allowed in a non-volatile function",
+                                                         CreateCommandTag(stmt))));
 
                        /* OK, build the execution_state for this query */
                        newes = (execution_state *) palloc(sizeof(execution_state));
@@ -311,8 +311,8 @@ init_execution_state(List *queryTree_list,
 
                        newes->next = NULL;
                        newes->status = F_EXEC_START;
-                       newes->setsResult = false;              /* might change below */
-                       newes->lazyEval = false;                /* might change below */
+                       newes->setsResult = false;      /* might change below */
+                       newes->lazyEval = false;        /* might change below */
                        newes->stmt = stmt;
                        newes->qd = NULL;
 
@@ -442,7 +442,7 @@ init_sql_fcache(FmgrInfo *finfo, bool lazyEvalOK)
        fcache->src = TextDatumGetCString(tmp);
 
        /*
-        * Parse and rewrite the queries in the function text.  Use sublists to
+        * Parse and rewrite the queries in the function text.  Use sublists to
         * keep track of the original query boundaries.  But we also build a
         * "flat" list of the rewritten queries to pass to check_sql_fn_retval.
         * This is because the last canSetTag query determines the result type
@@ -462,7 +462,7 @@ init_sql_fcache(FmgrInfo *finfo, bool lazyEvalOK)
 
                queryTree_sublist = pg_analyze_and_rewrite_params(parsetree,
                                                                                                                  fcache->src,
-                                                                                                                 (ParserSetupHook) sql_fn_parser_setup,
+                                                                          (ParserSetupHook) sql_fn_parser_setup,
                                                                                                                  fcache->pinfo);
                queryTree_list = lappend(queryTree_list, queryTree_sublist);
                flat_query_list = list_concat(flat_query_list,
@@ -657,7 +657,7 @@ postquel_sub_params(SQLFunctionCachePtr fcache,
                {
                        /* sizeof(ParamListInfoData) includes the first array element */
                        paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
-                                                                          (nargs - 1) *sizeof(ParamExternData));
+                                                                         (nargs - 1) * sizeof(ParamExternData));
                        /* we have static list of params, so no hooks needed */
                        paramLI->paramFetch = NULL;
                        paramLI->paramFetchArg = NULL;
@@ -748,8 +748,8 @@ fmgr_sql(PG_FUNCTION_ARGS)
        execution_state *es;
        TupleTableSlot *slot;
        Datum           result;
-       List            *eslist;
-       ListCell    *eslc;
+       List       *eslist;
+       ListCell   *eslc;
 
        /*
         * Switch to context in which the fcache lives.  This ensures that
@@ -847,10 +847,10 @@ fmgr_sql(PG_FUNCTION_ARGS)
         *
         * In a non-read-only function, we rely on the fact that we'll never
         * suspend execution between queries of the function: the only reason to
-        * suspend execution before completion is if we are returning a row from
-        * lazily-evaluated SELECT.  So, when first entering this loop, we'll
+        * suspend execution before completion is if we are returning a row from a
+        * lazily-evaluated SELECT.  So, when first entering this loop, we'll
         * either start a new query (and push a fresh snapshot) or re-establish
-        * the active snapshot from the existing query descriptor.  If we need to
+        * the active snapshot from the existing query descriptor.      If we need to
         * start a new query in a subsequent execution of the loop, either we need
         * a fresh snapshot (and pushed_snapshot is false) or the existing
         * snapshot is on the active stack and we can just bump its command ID.
@@ -927,10 +927,10 @@ fmgr_sql(PG_FUNCTION_ARGS)
                        es = (execution_state *) lfirst(eslc);
 
                        /*
-                        * Flush the current snapshot so that we will take a new one
-                        * for the new query list.  This ensures that new snaps are
-                        * taken at original-query boundaries, matching the behavior
-                        * of interactive execution.
+                        * Flush the current snapshot so that we will take a new one for
+                        * the new query list.  This ensures that new snaps are taken at
+                        * original-query boundaries, matching the behavior of interactive
+                        * execution.
                         */
                        if (pushed_snapshot)
                        {
@@ -1183,7 +1183,7 @@ ShutdownSQLFunction(Datum arg)
 {
        SQLFunctionCachePtr fcache = (SQLFunctionCachePtr) DatumGetPointer(arg);
        execution_state *es;
-       ListCell                *lc;
+       ListCell   *lc;
 
        foreach(lc, fcache->func_state)
        {
@@ -1415,7 +1415,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
                 * the function that's calling it.
                 *
                 * XXX Note that if rettype is RECORD, the IsBinaryCoercible check
-                * will succeed for any composite restype.  For the moment we rely on
+                * will succeed for any composite restype.      For the moment we rely on
                 * runtime type checking to catch any discrepancy, but it'd be nice to
                 * do better at parse time.
                 */
@@ -1432,7 +1432,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
                                        tle->expr = (Expr *) makeRelabelType(tle->expr,
                                                                                                                 rettype,
                                                                                                                 -1,
-                                                                                                                get_typcollation(rettype),
+                                                                                                  get_typcollation(rettype),
                                                                                                                 COERCE_DONTCARE);
                                        /* Relabel is dangerous if sort/group or setop column */
                                        if (tle->ressortgroupref != 0 || parse->setOperations)
@@ -1536,7 +1536,7 @@ check_sql_fn_retval(Oid func_id, Oid rettype, List *queryTreeList,
                                        tle->expr = (Expr *) makeRelabelType(tle->expr,
                                                                                                                 atttype,
                                                                                                                 -1,
-                                                                                                                get_typcollation(atttype),
+                                                                                                  get_typcollation(atttype),
                                                                                                                 COERCE_DONTCARE);
                                        /* Relabel is dangerous if sort/group or setop column */
                                        if (tle->ressortgroupref != 0 || parse->setOperations)
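
A quirk visible in this file (SQLFunctionParseInfo) and in several later hunks (AggStatePerAggData, MergeJoinClauseData, WindowStatePerFuncData): the closing `} Name;` line of a struct typedef picks up odd extra indentation. That appears to be pgindent treating a typedef name it does not recognize, i.e. one missing from its typedefs list, as an ordinary identifier. The construct being formatted is just a plain struct typedef, as in this toy example with invented fields:

#include <stdio.h>

typedef struct ParseInfo
{
	int			nargs;			/* invented fields, for illustration only */
	unsigned	collation;
} ParseInfo;

int
main(void)
{
	ParseInfo	pi = {2, 0};

	printf("nargs = %d\n", pi.nargs);
	return 0;
}
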
index 51b1228c26ff3f6c3310fccddb11013ea266d9c1..47555bab55bdc0d8bb6ce42683fb037a7dd6e5ed 100644 (file)
@@ -199,7 +199,7 @@ typedef struct AggStatePerAggData
         */
 
        Tuplesortstate *sortstate;      /* sort object, if DISTINCT or ORDER BY */
-} AggStatePerAggData;
+}      AggStatePerAggData;
 
 /*
  * AggStatePerGroupData - per-aggregate-per-group working state
@@ -246,7 +246,7 @@ typedef struct AggHashEntryData
        TupleHashEntryData shared;      /* common header for hash table entries */
        /* per-aggregate transition status array - must be last! */
        AggStatePerGroupData pergroup[1];       /* VARIABLE LENGTH ARRAY */
-} AggHashEntryData;                            /* VARIABLE LENGTH STRUCT */
+}      AggHashEntryData;       /* VARIABLE LENGTH STRUCT */
 
 
 static void initialize_aggregates(AggState *aggstate,
@@ -827,7 +827,7 @@ build_hash_table(AggState *aggstate)
        Assert(node->numGroups > 0);
 
        entrysize = sizeof(AggHashEntryData) +
-               (aggstate->numaggs - 1) *sizeof(AggStatePerGroupData);
+               (aggstate->numaggs - 1) * sizeof(AggStatePerGroupData);
 
        aggstate->hashtable = BuildTupleHashTable(node->numCols,
                                                                                          node->grpColIdx,
@@ -899,7 +899,7 @@ hash_agg_entry_size(int numAggs)
 
        /* This must match build_hash_table */
        entrysize = sizeof(AggHashEntryData) +
-               (numAggs - 1) *sizeof(AggStatePerGroupData);
+               (numAggs - 1) * sizeof(AggStatePerGroupData);
        entrysize = MAXALIGN(entrysize);
        /* Account for hashtable overhead (assuming fill factor = 1) */
        entrysize += 3 * sizeof(void *);
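
The `) *sizeof(` to `) * sizeof(` respacing in build_hash_table and hash_agg_entry_size (and earlier in postquel_sub_params) touches the usual over-allocation idiom for a struct whose declared one-element array is really variable length: sizeof(struct) already accounts for element 0, so space for n - 1 more elements is added. A minimal, self-contained sketch of that idiom with toy types (not the real AggHashEntryData):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy "variable length struct": one declared element, more allocated. */
typedef struct VarEntry
{
	int			nitems;
	double		items[1];		/* VARIABLE LENGTH ARRAY - must be last */
} VarEntry;

static VarEntry *
make_entry(int nitems)
{
	/* sizeof(VarEntry) already includes items[0], so add space for n-1 more */
	size_t		entrysize = sizeof(VarEntry) + (nitems - 1) * sizeof(double);
	VarEntry   *e = malloc(entrysize);

	e->nitems = nitems;
	memset(e->items, 0, nitems * sizeof(double));
	return e;
}

int
main(void)
{
	VarEntry   *e = make_entry(8);

	e->items[7] = 3.14;
	printf("%d %.2f\n", e->nitems, e->items[7]);
	free(e);
	return 0;
}
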
index 90ff0403ab0130b83e385689a38b1ab12a75deac..4de54ea55f6061400fb85dcc4cb562ea81b8f982 100644 (file)
@@ -307,8 +307,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate, int eflags)
                                                           indexstate->biss_NumScanKeys);
 
        /*
-        * If no run-time keys to calculate, go ahead and pass the scankeys to
-        * the index AM.
+        * If no run-time keys to calculate, go ahead and pass the scankeys to the
+        * index AM.
         */
        if (indexstate->biss_NumRuntimeKeys == 0 &&
                indexstate->biss_NumArrayKeys == 0)
index c4309a981e271533ca07bfd58d1fd4af1f7e06aa..d50489c7f4c595d8298a67e97f33932891a75cbd 100644 (file)
@@ -40,7 +40,7 @@ static TupleTableSlot *
 ForeignNext(ForeignScanState *node)
 {
        TupleTableSlot *slot;
-       ForeignScan        *plan = (ForeignScan *) node->ss.ps.plan;
+       ForeignScan *plan = (ForeignScan *) node->ss.ps.plan;
        ExprContext *econtext = node->ss.ps.ps_ExprContext;
        MemoryContext oldcontext;
 
index 295563011fe032dc8bf7a8a65fc52e62f6d26929..1af98c81a69f0390710c6f64bcda6cfa409457a2 100644 (file)
@@ -960,13 +960,11 @@ void
 ExecPrepHashTableForUnmatched(HashJoinState *hjstate)
 {
        /*
-        *----------
-        * During this scan we use the HashJoinState fields as follows:
+        * ---------- During this scan we use the HashJoinState fields as follows:
         *
-        * hj_CurBucketNo: next regular bucket to scan
-        * hj_CurSkewBucketNo: next skew bucket (an index into skewBucketNums)
-        * hj_CurTuple: last tuple returned, or NULL to start next bucket
-        *----------
+        * hj_CurBucketNo: next regular bucket to scan hj_CurSkewBucketNo: next
+        * skew bucket (an index into skewBucketNums) hj_CurTuple: last tuple
+        * returned, or NULL to start next bucket ----------
         */
        hjstate->hj_CurBucketNo = 0;
        hjstate->hj_CurSkewBucketNo = 0;
@@ -1003,7 +1001,7 @@ ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
                }
                else if (hjstate->hj_CurSkewBucketNo < hashtable->nSkewBuckets)
                {
-                       int             j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
+                       int                     j = hashtable->skewBucketNums[hjstate->hj_CurSkewBucketNo];
 
                        hashTuple = hashtable->skewBucket[j]->tuples;
                        hjstate->hj_CurSkewBucketNo++;
@@ -1020,7 +1018,7 @@ ExecScanHashTableForUnmatched(HashJoinState *hjstate, ExprContext *econtext)
                                /* insert hashtable's tuple into exec slot */
                                inntuple = ExecStoreMinimalTuple(HJTUPLE_MINTUPLE(hashTuple),
                                                                                                 hjstate->hj_HashTupleSlot,
-                                                                                                false);        /* do not pfree */
+                                                                                                false);                /* do not pfree */
                                econtext->ecxt_innertuple = inntuple;
 
                                /*
@@ -1091,7 +1089,7 @@ ExecHashTableResetMatchFlags(HashJoinTable hashtable)
        /* ... and the same for the skew buckets, if any */
        for (i = 0; i < hashtable->nSkewBuckets; i++)
        {
-               int             j = hashtable->skewBucketNums[i];
+               int                     j = hashtable->skewBucketNums[i];
                HashSkewBucket *skewBucket = hashtable->skewBucket[j];
 
                for (tuple = skewBucket->tuples; tuple != NULL; tuple = tuple->next)
index a6847c956f4c61a6375360e18e206fa5f3c86eff..7c02db94adb6dc068a71e4d0ba9a8f8ee53840a7 100644 (file)
@@ -113,6 +113,7 @@ ExecHashJoin(HashJoinState *node)
                switch (node->hj_JoinState)
                {
                        case HJ_BUILD_HASHTABLE:
+
                                /*
                                 * First time through: build hash table for inner relation.
                                 */
@@ -123,12 +124,12 @@ ExecHashJoin(HashJoinState *node)
                                 * right/full join, we can quit without building the hash
                                 * table.  However, for an inner join it is only a win to
                                 * check this when the outer relation's startup cost is less
-                                * than the projected cost of building the hash
-                                * table.  Otherwise it's best to build the hash table first
-                                * and see if the inner relation is empty.  (When it's a left
-                                * join, we should always make this check, since we aren't
-                                * going to be able to skip the join on the strength of an
-                                * empty inner relation anyway.)
+                                * than the projected cost of building the hash table.
+                                * Otherwise it's best to build the hash table first and see
+                                * if the inner relation is empty.      (When it's a left join, we
+                                * should always make this check, since we aren't going to be
+                                * able to skip the join on the strength of an empty inner
+                                * relation anyway.)
                                 *
                                 * If we are rescanning the join, we make use of information
                                 * gained on the previous scan: don't bother to try the
@@ -185,8 +186,8 @@ ExecHashJoin(HashJoinState *node)
                                        return NULL;
 
                                /*
-                                * need to remember whether nbatch has increased since we began
-                                * scanning the outer relation
+                                * need to remember whether nbatch has increased since we
+                                * began scanning the outer relation
                                 */
                                hashtable->nbatch_outstart = hashtable->nbatch;
 
@@ -202,6 +203,7 @@ ExecHashJoin(HashJoinState *node)
                                /* FALL THRU */
 
                        case HJ_NEED_NEW_OUTER:
+
                                /*
                                 * We don't have an outer tuple, try to get the next one
                                 */
@@ -250,7 +252,7 @@ ExecHashJoin(HashJoinState *node)
                                        Assert(batchno > hashtable->curbatch);
                                        ExecHashJoinSaveTuple(ExecFetchSlotMinimalTuple(outerTupleSlot),
                                                                                  hashvalue,
-                                                                                 &hashtable->outerBatchFile[batchno]);
+                                                                               &hashtable->outerBatchFile[batchno]);
                                        /* Loop around, staying in HJ_NEED_NEW_OUTER state */
                                        continue;
                                }
@@ -261,6 +263,7 @@ ExecHashJoin(HashJoinState *node)
                                /* FALL THRU */
 
                        case HJ_SCAN_BUCKET:
+
                                /*
                                 * Scan the selected hash bucket for matches to current outer
                                 */
@@ -296,8 +299,8 @@ ExecHashJoin(HashJoinState *node)
                                        }
 
                                        /*
-                                        * In a semijoin, we'll consider returning the first match,
-                                        * but after that we're done with this outer tuple.
+                                        * In a semijoin, we'll consider returning the first
+                                        * match, but after that we're done with this outer tuple.
                                         */
                                        if (node->js.jointype == JOIN_SEMI)
                                                node->hj_JoinState = HJ_NEED_NEW_OUTER;
@@ -320,10 +323,11 @@ ExecHashJoin(HashJoinState *node)
                                break;
 
                        case HJ_FILL_OUTER_TUPLE:
+
                                /*
                                 * The current outer tuple has run out of matches, so check
-                                * whether to emit a dummy outer-join tuple.  Whether we
-                                * emit one or not, the next state is NEED_NEW_OUTER.
+                                * whether to emit a dummy outer-join tuple.  Whether we emit
+                                * one or not, the next state is NEED_NEW_OUTER.
                                 */
                                node->hj_JoinState = HJ_NEED_NEW_OUTER;
 
@@ -354,6 +358,7 @@ ExecHashJoin(HashJoinState *node)
                                break;
 
                        case HJ_FILL_INNER_TUPLES:
+
                                /*
                                 * We have finished a batch, but we are doing right/full join,
                                 * so any unmatched inner tuples in the hashtable have to be
@@ -389,11 +394,12 @@ ExecHashJoin(HashJoinState *node)
                                break;
 
                        case HJ_NEED_NEW_BATCH:
+
                                /*
                                 * Try to advance to next batch.  Done if there are no more.
                                 */
                                if (!ExecHashJoinNewBatch(node))
-                                       return NULL;                            /* end of join */
+                                       return NULL;    /* end of join */
                                node->hj_JoinState = HJ_NEED_NEW_OUTER;
                                break;
 
@@ -783,7 +789,7 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
        }
 
        if (curbatch >= nbatch)
-               return false;           /* no more batches */
+               return false;                   /* no more batches */
 
        hashtable->curbatch = curbatch;
 
@@ -829,7 +835,7 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
                if (BufFileSeek(hashtable->outerBatchFile[curbatch], 0, 0L, SEEK_SET))
                        ereport(ERROR,
                                        (errcode_for_file_access(),
-                                        errmsg("could not rewind hash-join temporary file: %m")));
+                                  errmsg("could not rewind hash-join temporary file: %m")));
        }
 
        return true;
@@ -944,14 +950,13 @@ ExecReScanHashJoin(HashJoinState *node)
                                ExecHashTableResetMatchFlags(node->hj_HashTable);
 
                        /*
-                        * Also, we need to reset our state about the emptiness of
-                        * the outer relation, so that the new scan of the outer will
-                        * update it correctly if it turns out to be empty this time.
-                        * (There's no harm in clearing it now because ExecHashJoin won't
-                        * need the info.  In the other cases, where the hash table
-                        * doesn't exist or we are destroying it, we leave this state
-                        * alone because ExecHashJoin will need it the first time
-                        * through.)
+                        * Also, we need to reset our state about the emptiness of the
+                        * outer relation, so that the new scan of the outer will update
+                        * it correctly if it turns out to be empty this time. (There's no
+                        * harm in clearing it now because ExecHashJoin won't need the
+                        * info.  In the other cases, where the hash table doesn't exist
+                        * or we are destroying it, we leave this state alone because
+                        * ExecHashJoin will need it the first time through.)
                         */
                        node->hj_OuterNotEmpty = false;
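
Beyond the added blank line after each `case` label, the ExecHashJoin hunks show the node's overall shape: an endless `for (;;)` loop around a `switch` on hj_JoinState, with deliberate fall-through from one state into the next where that is the natural continuation. A small, self-contained sketch of that control-flow style, with invented states and input (it just sums the numbers in a string):

#include <ctype.h>
#include <stdio.h>

typedef enum
{
	ST_SKIP_SPACE,
	ST_READ_NUMBER,
	ST_DONE
} ScanState;

int
main(void)
{
	const char *s = "  12 7   30";
	ScanState	state = ST_SKIP_SPACE;
	int			total = 0;
	int			cur = 0;

	for (;;)
	{
		switch (state)
		{
			case ST_SKIP_SPACE:
				while (*s == ' ')
					s++;
				if (*s == '\0')
				{
					state = ST_DONE;
					continue;
				}
				cur = 0;
				state = ST_READ_NUMBER;
				/* FALL THRU */

			case ST_READ_NUMBER:
				if (isdigit((unsigned char) *s))
				{
					cur = cur * 10 + (*s - '0');
					s++;
					continue;	/* stay in this state */
				}
				total += cur;
				state = ST_SKIP_SPACE;
				break;

			case ST_DONE:
				printf("total = %d\n", total);	/* prints 49 */
				return 0;
		}
	}
}
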
 
index 3b8741fc21b63cf346e244be2eee0587095597d5..d8e59ca39e5e08a195421171c66d8b24428e14a6 100644 (file)
@@ -212,7 +212,7 @@ ExecIndexEvalRuntimeKeys(ExprContext *econtext,
 
                /*
                 * For each run-time key, extract the run-time expression and evaluate
-                * it with respect to the current context.  We then stick the result
+                * it with respect to the current context.      We then stick the result
                 * into the proper scan key.
                 *
                 * Note: the result of the eval could be a pass-by-ref value that's
@@ -605,16 +605,16 @@ ExecInitIndexScan(IndexScan *node, EState *estate, int eflags)
                                                                                           indexstate->iss_RelationDesc,
                                                                                           estate->es_snapshot,
                                                                                           indexstate->iss_NumScanKeys,
-                                                                                          indexstate->iss_NumOrderByKeys);
+                                                                                        indexstate->iss_NumOrderByKeys);
 
        /*
-        * If no run-time keys to calculate, go ahead and pass the scankeys to
-        * the index AM.
+        * If no run-time keys to calculate, go ahead and pass the scankeys to the
+        * index AM.
         */
        if (indexstate->iss_NumRuntimeKeys == 0)
                index_rescan(indexstate->iss_ScanDesc,
                                         indexstate->iss_ScanKeys, indexstate->iss_NumScanKeys,
-                                        indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys);
+                               indexstate->iss_OrderByKeys, indexstate->iss_NumOrderByKeys);
 
        /*
         * all done.
@@ -703,11 +703,11 @@ ExecIndexBuildScanKeys(PlanState *planstate, Relation index, Index scanrelid,
        scan_keys = (ScanKey) palloc(n_scan_keys * sizeof(ScanKeyData));
 
        /*
-        * runtime_keys array is dynamically resized as needed.  We handle it
-        * this way so that the same runtime keys array can be shared between
-        * indexquals and indexorderbys, which will be processed in separate
-        * calls of this function.  Caller must be sure to pass in NULL/0 for
-        * first call.
+        * runtime_keys array is dynamically resized as needed.  We handle it this
+        * way so that the same runtime keys array can be shared between
+        * indexquals and indexorderbys, which will be processed in separate calls
+        * of this function.  Caller must be sure to pass in NULL/0 for first
+        * call.
         */
        runtime_keys = *runtimeKeys;
        n_runtime_keys = max_runtime_keys = *numRuntimeKeys;
index edbe0558b70d784957de85460273f74320fdfc65..85d1a6e27f110df00e6b2ad405ab1624bd56b16d 100644 (file)
@@ -346,14 +346,14 @@ pass_down_bound(LimitState *node, PlanState *child_node)
        else if (IsA(child_node, ResultState))
        {
                /*
-                * An extra consideration here is that if the Result is projecting
-                * a targetlist that contains any SRFs, we can't assume that every
-                * input tuple generates an output tuple, so a Sort underneath
-                * might need to return more than N tuples to satisfy LIMIT N.
-                * So we cannot use bounded sort.
+                * An extra consideration here is that if the Result is projecting a
+                * targetlist that contains any SRFs, we can't assume that every input
+                * tuple generates an output tuple, so a Sort underneath might need to
+                * return more than N tuples to satisfy LIMIT N. So we cannot use
+                * bounded sort.
                 *
-                * If Result supported qual checking, we'd have to punt on seeing
-                * qual, too.  Note that having a resconstantqual is not a
+                * If Result supported qual checking, we'd have to punt on seeing a
+                * qual, too.  Note that having a resconstantqual is not a
                 * showstopper: if that fails we're not getting any rows at all.
                 */
                if (outerPlanState(child_node) &&
index 2e08008807172e7b32b74c951063aafd8c320baa..d71278ebd729f682f6b1220cd0f43a2334c66635 100644 (file)
@@ -291,7 +291,7 @@ ExecInitLockRows(LockRows *node, EState *estate, int eflags)
 
        /*
         * Locate the ExecRowMark(s) that this node is responsible for, and
-        * construct ExecAuxRowMarks for them.  (InitPlan should already have
+        * construct ExecAuxRowMarks for them.  (InitPlan should already have
         * built the global list of ExecRowMarks.)
         */
        lrstate->lr_arowMarks = NIL;
index 73920f21c8c2a27889c2cbbcf8d3d2190fe94f73..4ebe0cbe033372d6016c58d2aee7f2863ef3ba77 100644 (file)
@@ -48,8 +48,8 @@
  * contains integers which index into the slots array. These typedefs try to
  * clear it up, but they're only documentation.
  */
-typedef int            SlotNumber;
-typedef int            HeapPosition;
+typedef int SlotNumber;
+typedef int HeapPosition;
 
 static void heap_insert_slot(MergeAppendState *node, SlotNumber new_slot);
 static void heap_siftup_slot(MergeAppendState *node);
@@ -128,13 +128,13 @@ ExecInitMergeAppend(MergeAppend *node, EState *estate, int eflags)
         * initialize sort-key information
         */
        mergestate->ms_nkeys = node->numCols;
-       mergestate->ms_scankeys = palloc0(sizeof(ScanKeyData) *  node->numCols);
+       mergestate->ms_scankeys = palloc0(sizeof(ScanKeyData) * node->numCols);
 
        for (i = 0; i < node->numCols; i++)
        {
-               Oid             sortFunction;
-               bool    reverse;
-               int             flags;
+               Oid                     sortFunction;
+               bool            reverse;
+               int                     flags;
 
                if (!get_compare_function_for_ordering_op(node->sortOperators[i],
                                                                                                  &sortFunction, &reverse))
@@ -187,8 +187,8 @@ ExecMergeAppend(MergeAppendState *node)
        if (!node->ms_initialized)
        {
                /*
-                * First time through: pull the first tuple from each subplan,
-                * and set up the heap.
+                * First time through: pull the first tuple from each subplan, and set
+                * up the heap.
                 */
                for (i = 0; i < node->ms_nplans; i++)
                {
@@ -243,7 +243,7 @@ heap_insert_slot(MergeAppendState *node, SlotNumber new_slot)
        j = node->ms_heap_size++;       /* j is where the "hole" is */
        while (j > 0)
        {
-               int             i = (j-1)/2;
+               int                     i = (j - 1) / 2;
 
                if (heap_compare_slots(node, new_slot, node->ms_heap[i]) >= 0)
                        break;
@@ -269,11 +269,11 @@ heap_siftup_slot(MergeAppendState *node)
        i = 0;                                          /* i is where the "hole" is */
        for (;;)
        {
-               int             j = 2 * i + 1;
+               int                     j = 2 * i + 1;
 
                if (j >= n)
                        break;
-               if (j+1 < n && heap_compare_slots(node, heap[j], heap[j+1]) > 0)
+               if (j + 1 < n && heap_compare_slots(node, heap[j], heap[j + 1]) > 0)
                        j++;
                if (heap_compare_slots(node, heap[n], heap[j]) <= 0)
                        break;
@@ -298,13 +298,13 @@ heap_compare_slots(MergeAppendState *node, SlotNumber slot1, SlotNumber slot2)
 
        for (nkey = 0; nkey < node->ms_nkeys; nkey++)
        {
-               ScanKey scankey = node->ms_scankeys + nkey;
-               AttrNumber attno = scankey->sk_attno;
-               Datum   datum1,
-                               datum2;
-               bool    isNull1,
-                               isNull2;
-               int32   compare;
+               ScanKey         scankey = node->ms_scankeys + nkey;
+               AttrNumber      attno = scankey->sk_attno;
+               Datum           datum1,
+                                       datum2;
+               bool            isNull1,
+                                       isNull2;
+               int32           compare;
 
                datum1 = slot_getattr(s1, attno, &isNull1);
                datum2 = slot_getattr(s2, attno, &isNull2);
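
heap_insert_slot and heap_siftup_slot are only re-spaced here ((j - 1) / 2 and 2 * i + 1), but that arithmetic is the standard 0-based binary-heap parent/child mapping MergeAppend relies on. A toy, self-contained min-heap insert using the same "hole" technique (fixed-size array and values invented for illustration):

#include <stdio.h>

static int	heap[16];
static int	heap_size = 0;

static void
heap_insert(int value)
{
	int			j = heap_size++;	/* j is where the "hole" is */

	while (j > 0)
	{
		int			i = (j - 1) / 2;	/* parent of j */

		if (heap[i] <= value)
			break;
		heap[j] = heap[i];		/* move parent down into the hole */
		j = i;
	}
	heap[j] = value;
}

int
main(void)
{
	int			vals[] = {5, 1, 9, 3, 7};
	int			i;

	for (i = 0; i < 5; i++)
		heap_insert(vals[i]);
	printf("min = %d\n", heap[0]);	/* prints 1 */
	return 0;
}
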
index 75c3a645359131160f1b0b60e6d755ad3330f0db..ce5462e961ea8c7c6e0cd0936e1a7d9c931132e9 100644 (file)
@@ -143,7 +143,7 @@ typedef struct MergeJoinClauseData
        bool            reverse;                /* if true, negate the cmpfn's output */
        bool            nulls_first;    /* if true, nulls sort low */
        FmgrInfo        cmpfinfo;
-} MergeJoinClauseData;
+}      MergeJoinClauseData;
 
 /* Result type for MJEvalOuterValues and MJEvalInnerValues */
 typedef enum
index f10f70a17d3d773b6f3911686cf581611fba6915..c0eab4bf0db894dce689bfcabbde4d7e281b7714 100644 (file)
@@ -544,7 +544,7 @@ ExecUpdate(ItemPointer tupleid,
                 *
                 * If we generate a new candidate tuple after EvalPlanQual testing, we
                 * must loop back here and recheck constraints.  (We don't need to
-                * redo triggers, however.  If there are any BEFORE triggers then
+                * redo triggers, however.      If there are any BEFORE triggers then
                 * trigger.c will have done heap_lock_tuple to lock the correct tuple,
                 * so there's no need to do them again.)
                 */
@@ -608,11 +608,10 @@ lreplace:;
 
                /*
                 * Note: instead of having to update the old index tuples associated
-                * with the heap tuple, all we do is form and insert new index
-                * tuples. This is because UPDATEs are actually DELETEs and INSERTs,
-                * and index tuple deletion is done later by VACUUM (see notes in
-                * ExecDelete). All we do here is insert new index tuples.  -cim
-                * 9/27/89
+                * with the heap tuple, all we do is form and insert new index tuples.
+                * This is because UPDATEs are actually DELETEs and INSERTs, and index
+                * tuple deletion is done later by VACUUM (see notes in ExecDelete).
+                * All we do here is insert new index tuples.  -cim 9/27/89
                 */
 
                /*
@@ -713,7 +712,7 @@ ExecModifyTable(ModifyTableState *node)
        TupleTableSlot *planSlot;
        ItemPointer tupleid = NULL;
        ItemPointerData tuple_ctid;
-       HeapTupleHeader oldtuple = NULL;
+       HeapTupleHeader oldtuple = NULL;
 
        /*
         * If we've already completed processing, don't try to do more.  We need
@@ -740,7 +739,7 @@ ExecModifyTable(ModifyTableState *node)
 
        /*
         * es_result_relation_info must point to the currently active result
-        * relation while we are within this ModifyTable node.  Even though
+        * relation while we are within this ModifyTable node.  Even though
         * ModifyTable nodes can't be nested statically, they can be nested
         * dynamically (since our subplan could include a reference to a modifying
         * CTE).  So we have to save and restore the caller's value.
@@ -756,7 +755,7 @@ ExecModifyTable(ModifyTableState *node)
        for (;;)
        {
                /*
-                * Reset the per-output-tuple exprcontext.  This is needed because
+                * Reset the per-output-tuple exprcontext.      This is needed because
                 * triggers expect to use that context as workspace.  It's a bit ugly
                 * to do this below the top level of the plan, however.  We might need
                 * to rethink this later.
@@ -806,7 +805,8 @@ ExecModifyTable(ModifyTableState *node)
                                                elog(ERROR, "ctid is NULL");
 
                                        tupleid = (ItemPointer) DatumGetPointer(datum);
-                                       tuple_ctid = *tupleid;  /* be sure we don't free ctid!! */
+                                       tuple_ctid = *tupleid;          /* be sure we don't free
+                                                                                                * ctid!! */
                                        tupleid = &tuple_ctid;
                                }
                                else
@@ -836,11 +836,11 @@ ExecModifyTable(ModifyTableState *node)
                                break;
                        case CMD_UPDATE:
                                slot = ExecUpdate(tupleid, oldtuple, slot, planSlot,
-                                                                 &node->mt_epqstate, estate, node->canSetTag);
+                                                               &node->mt_epqstate, estate, node->canSetTag);
                                break;
                        case CMD_DELETE:
                                slot = ExecDelete(tupleid, oldtuple, planSlot,
-                                                                 &node->mt_epqstate, estate, node->canSetTag);
+                                                               &node->mt_epqstate, estate, node->canSetTag);
                                break;
                        default:
                                elog(ERROR, "unknown operation");
@@ -922,9 +922,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
 
        /*
         * call ExecInitNode on each of the plans to be executed and save the
-        * results into the array "mt_plans".  This is also a convenient place
-        * to verify that the proposed target relations are valid and open their
-        * indexes for insertion of new index entries.  Note we *must* set
+        * results into the array "mt_plans".  This is also a convenient place to
+        * verify that the proposed target relations are valid and open their
+        * indexes for insertion of new index entries.  Note we *must* set
         * estate->es_result_relation_info correctly while we initialize each
         * sub-plan; ExecContextForcesOids depends on that!
         */
@@ -944,7 +944,7 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
                /*
                 * If there are indices on the result relation, open them and save
                 * descriptors in the result relation info, so that we can add new
-                * index entries for the tuples we add/update.  We need not do this
+                * index entries for the tuples we add/update.  We need not do this
                 * for a DELETE, however, since deletion doesn't affect indexes.
                 */
                if (resultRelInfo->ri_RelationDesc->rd_rel->relhasindex &&
@@ -1147,10 +1147,10 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
         * Lastly, if this is not the primary (canSetTag) ModifyTable node, add it
         * to estate->es_auxmodifytables so that it will be run to completion by
         * ExecPostprocessPlan.  (It'd actually work fine to add the primary
-        * ModifyTable node too, but there's no need.)  Note the use of lcons
-        * not lappend: we need later-initialized ModifyTable nodes to be shut
-        * down before earlier ones.  This ensures that we don't throw away
-        * RETURNING rows that need to be seen by a later CTE subplan.
+        * ModifyTable node too, but there's no need.)  Note the use of lcons not
+        * lappend: we need later-initialized ModifyTable nodes to be shut down
+        * before earlier ones.  This ensures that we don't throw away RETURNING
+        * rows that need to be seen by a later CTE subplan.
         */
        if (!mtstate->canSetTag)
                estate->es_auxmodifytables = lcons(mtstate,
index 4893a6ea6d6180e6a1d79486b1f3edfeabfc5ccb..e98bc0f5a308bd27bd624c6342e4a22fffe530a7 100644 (file)
@@ -137,9 +137,8 @@ ExecNestLoop(NestLoopState *node)
                        node->nl_MatchedOuter = false;
 
                        /*
-                        * fetch the values of any outer Vars that must be passed to
-                        * the inner scan, and store them in the appropriate PARAM_EXEC
-                        * slots.
+                        * fetch the values of any outer Vars that must be passed to the
+                        * inner scan, and store them in the appropriate PARAM_EXEC slots.
                         */
                        foreach(lc, nl->nestParams)
                        {
@@ -330,9 +329,9 @@ ExecInitNestLoop(NestLoop *node, EState *estate, int eflags)
         *
         * If we have no parameters to pass into the inner rel from the outer,
         * tell the inner child that cheap rescans would be good.  If we do have
-        * such parameters, then there is no point in REWIND support at all in
-        * the inner child, because it will always be rescanned with fresh
-        * parameter values.
+        * such parameters, then there is no point in REWIND support at all in the
+        * inner child, because it will always be rescanned with fresh parameter
+        * values.
         */
        outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags);
        if (node->nestParams == NIL)
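
The ExecNestLoop and ExecInitNestLoop hunks re-wrap comments about passing the current outer tuple's values down to the inner scan (through PARAM_EXEC slots) and rescanning the inner side once per outer tuple. A deliberately tiny, self-contained sketch of that join shape, with the "parameter" reduced to a plain function argument and invented data:

#include <stdio.h>

/* Toy nested-loop join: every outer row triggers a fresh scan of the inner
 * side, with the outer value handed down as a parameter (the executor uses
 * PARAM_EXEC slots for this; here it is just an argument). */
static void
scan_inner(int outer_key, const int *inner, int ninner)
{
	int			i;

	for (i = 0; i < ninner; i++)
		if (inner[i] == outer_key)
			printf("match: %d\n", outer_key);
}

int
main(void)
{
	int			outer[] = {1, 2, 3, 4};
	int			inner[] = {2, 4, 6};
	int			i;

	for (i = 0; i < 4; i++)
		scan_inner(outer[i], inner, 3);	/* one inner rescan per outer tuple */
	return 0;
}
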
index 84c051854bbbf585a87b1d0f5161e510d7f46099..12e1b9a58577276cc03fffb99e38adc5ca374fa2 100644 (file)
@@ -29,7 +29,7 @@ typedef struct RUHashEntryData *RUHashEntry;
 typedef struct RUHashEntryData
 {
        TupleHashEntryData shared;      /* common header for hash table entries */
-} RUHashEntryData;
+}      RUHashEntryData;
 
 
 /*
index aa352d7822e88e8540fdc21ec50d8ab1596e9fb8..9106f14873024c3d8c039ab8383207111936e6e6 100644 (file)
@@ -76,7 +76,7 @@ typedef struct SetOpHashEntryData
 {
        TupleHashEntryData shared;      /* common header for hash table entries */
        SetOpStatePerGroupData pergroup;
-} SetOpHashEntryData;
+}      SetOpHashEntryData;
 
 
 static TupleTableSlot *setop_retrieve_direct(SetOpState *setopstate);
index 5680efeb69ebe19e18882ecfc9e31c7bbb3366a1..25d9298cefc2460e706309d1a1972c124c555a28 100644 (file)
@@ -92,7 +92,7 @@ typedef struct WindowStatePerFuncData
        int                     aggno;                  /* if so, index of its PerAggData */
 
        WindowObject winobj;            /* object used in window function API */
-} WindowStatePerFuncData;
+}      WindowStatePerFuncData;
 
 /*
  * For plain aggregate window functions, we also have one of these.
index a717a0deeade014f4853cf9bc7a8adda79d59a12..6e723ca092b323bfca4d8415bc24acc4b5a16b7d 100644 (file)
@@ -1787,8 +1787,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
         * snapshot != InvalidSnapshot, read_only = true: use exactly the given
         * snapshot.
         *
-        * snapshot != InvalidSnapshot, read_only = false: use the given
-        * snapshot, modified by advancing its command ID before each querytree.
+        * snapshot != InvalidSnapshot, read_only = false: use the given snapshot,
+        * modified by advancing its command ID before each querytree.
         *
         * snapshot == InvalidSnapshot, read_only = true: use the entry-time
         * ActiveSnapshot, if any (if there isn't one, we run with no snapshot).
@@ -1797,8 +1797,8 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
         * snapshot for each user command, and advance its command ID before each
         * querytree within the command.
         *
-        * In the first two cases, we can just push the snap onto the stack
-        * once for the whole plan list.
+        * In the first two cases, we can just push the snap onto the stack once
+        * for the whole plan list.
         */
        if (snapshot != InvalidSnapshot)
        {
@@ -2028,7 +2028,7 @@ _SPI_convert_params(int nargs, Oid *argtypes,
 
                /* sizeof(ParamListInfoData) includes the first array element */
                paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
-                                                                          (nargs - 1) *sizeof(ParamExternData));
+                                                                         (nargs - 1) * sizeof(ParamExternData));
                /* we have static list of params, so no hooks needed */
                paramLI->paramFetch = NULL;
                paramLI->paramFetchArg = NULL;
index 151ec5613b4a1fd0481c9d5e51f4c2611eec98fd..d003b1206a14d4cec684c3ad01073f4d004fd1f1 100644 (file)
@@ -61,6 +61,7 @@ static int    recv_and_check_password_packet(Port *port);
 #define IDENT_PORT 113
 
 static int     ident_inet(hbaPort *port);
+
 #ifdef HAVE_UNIX_SOCKETS
 static int     auth_peer(hbaPort *port);
 #endif
@@ -182,7 +183,7 @@ static int  pg_GSS_recvauth(Port *port);
  *----------------------------------------------------------------
  */
 #ifdef ENABLE_SSPI
-typedef                SECURITY_STATUS
+typedef SECURITY_STATUS
                        (WINAPI * QUERY_SECURITY_CONTEXT_TOKEN_FN) (
                                                                                                           PCtxtHandle, void **);
 static int     pg_SSPI_recvauth(Port *port);
@@ -543,7 +544,7 @@ ClientAuthentication(Port *port)
                        }
 #endif
                        status = auth_peer(port);
-#else /* HAVE_UNIX_SOCKETS */
+#else                                                  /* HAVE_UNIX_SOCKETS */
                        Assert(false);
 #endif
                        break;
@@ -598,7 +599,7 @@ ClientAuthentication(Port *port)
        }
 
        if (ClientAuthentication_hook)
-               (*ClientAuthentication_hook)(port, status);
+               (*ClientAuthentication_hook) (port, status);
 
        if (status == STATUS_OK)
                sendAuthRequest(port, AUTH_REQ_OK);
@@ -844,7 +845,7 @@ pg_krb5_recvauth(Port *port)
                return ret;
 
        retval = krb5_recvauth(pg_krb5_context, &auth_context,
-                                                  (krb5_pointer) & port->sock, pg_krb_srvnam,
+                                                  (krb5_pointer) &port->sock, pg_krb_srvnam,
                                                   pg_krb5_server, 0, pg_krb5_keytab, &ticket);
        if (retval)
        {
@@ -1814,7 +1815,6 @@ auth_peer(hbaPort *port)
        }
 
        strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
-
 #elif defined(SO_PEERCRED)
        /* Linux style: use getsockopt(SO_PEERCRED) */
        struct ucred peercred;
@@ -1843,7 +1843,6 @@ auth_peer(hbaPort *port)
        }
 
        strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
-
 #elif defined(HAVE_GETPEERUCRED)
        /* Solaris > 10 */
        uid_t           uid;
@@ -1879,7 +1878,6 @@ auth_peer(hbaPort *port)
        }
 
        strlcpy(ident_user, pass->pw_name, IDENT_USERNAME_MAX + 1);
-
 #elif defined(HAVE_STRUCT_CMSGCRED) || defined(HAVE_STRUCT_FCRED) || (defined(HAVE_STRUCT_SOCKCRED) && defined(LOCAL_CREDS))
        struct msghdr msg;
 
@@ -1947,7 +1945,6 @@ auth_peer(hbaPort *port)
        }
 
        strlcpy(ident_user, pw->pw_name, IDENT_USERNAME_MAX + 1);
-
 #else
        ereport(LOG,
                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -2768,10 +2765,10 @@ CheckRADIUSAuth(Port *port)
        pg_freeaddrinfo_all(hint.ai_family, serveraddrs);
 
        /*
-        * Figure out at what time we should time out. We can't just use
-        * a single call to select() with a timeout, since somebody can
-        * be sending invalid packets to our port thus causing us to
-        * retry in a loop and never time out.
+        * Figure out at what time we should time out. We can't just use a single
+        * call to select() with a timeout, since somebody can be sending invalid
+        * packets to our port thus causing us to retry in a loop and never time
+        * out.
         */
        gettimeofday(&endtime, NULL);
        endtime.tv_sec += RADIUS_TIMEOUT;
@@ -2780,7 +2777,7 @@ CheckRADIUSAuth(Port *port)
        {
                struct timeval timeout;
                struct timeval now;
-               int64 timeoutval;
+               int64           timeoutval;
 
                gettimeofday(&now, NULL);
                timeoutval = (endtime.tv_sec * 1000000 + endtime.tv_usec) - (now.tv_sec * 1000000 + now.tv_usec);
@@ -2820,12 +2817,12 @@ CheckRADIUSAuth(Port *port)
                /*
                 * Attempt to read the response packet, and verify the contents.
                 *
-                * Any packet that's not actually a RADIUS packet, or otherwise
-                * does not validate as an explicit reject, is just ignored and
-                * we retry for another packet (until we reach the timeout). This
-                * is to avoid the possibility to denial-of-service the login by
-                * flooding the server with invalid packets on the port that
-                * we're expecting the RADIUS response on.
+                * Any packet that's not actually a RADIUS packet, or otherwise does
+                * not validate as an explicit reject, is just ignored and we retry
+                * for another packet (until we reach the timeout). This is to avoid
+                * the possibility to denial-of-service the login by flooding the
+                * server with invalid packets on the port that we're expecting the
+                * RADIUS response on.
                 */
 
                addrsize = sizeof(remoteaddr);
@@ -2846,12 +2843,12 @@ CheckRADIUSAuth(Port *port)
                {
 #ifdef HAVE_IPV6
                        ereport(LOG,
-                                       (errmsg("RADIUS response was sent from incorrect port: %i",
-                                                       ntohs(remoteaddr.sin6_port))));
+                                 (errmsg("RADIUS response was sent from incorrect port: %i",
+                                                 ntohs(remoteaddr.sin6_port))));
 #else
                        ereport(LOG,
-                                       (errmsg("RADIUS response was sent from incorrect port: %i",
-                                                       ntohs(remoteaddr.sin_port))));
+                                 (errmsg("RADIUS response was sent from incorrect port: %i",
+                                                 ntohs(remoteaddr.sin_port))));
 #endif
                        continue;
                }
@@ -2885,12 +2882,12 @@ CheckRADIUSAuth(Port *port)
                 */
                cryptvector = palloc(packetlength + strlen(port->hba->radiussecret));
 
-               memcpy(cryptvector, receivepacket, 4);          /* code+id+length */
-               memcpy(cryptvector + 4, packet->vector, RADIUS_VECTOR_LENGTH);          /* request
-                                                                                                                                                        * authenticator, from
-                                                                                                                                                        * original packet */
-               if (packetlength > RADIUS_HEADER_LENGTH)        /* there may be no attributes
-                                                                                                        * at all */
+               memcpy(cryptvector, receivepacket, 4);  /* code+id+length */
+               memcpy(cryptvector + 4, packet->vector, RADIUS_VECTOR_LENGTH);  /* request
+                                                                                                                                                * authenticator, from
+                                                                                                                                                * original packet */
+               if (packetlength > RADIUS_HEADER_LENGTH)                /* there may be no
+                                                                                                                * attributes at all */
                        memcpy(cryptvector + RADIUS_HEADER_LENGTH, receive_buffer + RADIUS_HEADER_LENGTH, packetlength - RADIUS_HEADER_LENGTH);
                memcpy(cryptvector + packetlength, port->hba->radiussecret, strlen(port->hba->radiussecret));
 
@@ -2899,7 +2896,7 @@ CheckRADIUSAuth(Port *port)
                                                   encryptedpassword))
                {
                        ereport(LOG,
-                                       (errmsg("could not perform MD5 encryption of received packet")));
+                       (errmsg("could not perform MD5 encryption of received packet")));
                        pfree(cryptvector);
                        continue;
                }
@@ -2925,9 +2922,9 @@ CheckRADIUSAuth(Port *port)
                else
                {
                        ereport(LOG,
-                                       (errmsg("RADIUS response has invalid code (%i) for user \"%s\"",
-                                                       receivepacket->code, port->user_name)));
+                        (errmsg("RADIUS response has invalid code (%i) for user \"%s\"",
+                                        receivepacket->code, port->user_name)));
                        continue;
                }
-       } /* while (true) */
+       }                                                       /* while (true) */
 }
index 2def6cea894852b01e786b308d5db343552fab6b..fdc29aaa72d3f081904153443c372edf0d5bd2b5 100644
@@ -543,7 +543,7 @@ check_db(const char *dbname, const char *role, Oid roleid, char *param_str)
 }
 
 static bool
-ipv4eq(struct sockaddr_in *a, struct sockaddr_in *b)
+ipv4eq(struct sockaddr_in * a, struct sockaddr_in * b)
 {
        return (a->sin_addr.s_addr == b->sin_addr.s_addr);
 }
@@ -551,9 +551,9 @@ ipv4eq(struct sockaddr_in *a, struct sockaddr_in *b)
 #ifdef HAVE_IPV6
 
 static bool
-ipv6eq(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
+ipv6eq(struct sockaddr_in6 * a, struct sockaddr_in6 * b)
 {
-       int i;
+       int                     i;
 
        for (i = 0; i < 16; i++)
                if (a->sin6_addr.s6_addr[i] != b->sin6_addr.s6_addr[i])
@@ -561,8 +561,7 @@ ipv6eq(struct sockaddr_in6 *a, struct sockaddr_in6 *b)
 
        return true;
 }
-
-#endif /* HAVE_IPV6 */
+#endif   /* HAVE_IPV6 */
 
 /*
  * Check whether host name matches pattern.
@@ -572,8 +571,8 @@ hostname_match(const char *pattern, const char *actual_hostname)
 {
        if (pattern[0] == '.')          /* suffix match */
        {
-               size_t plen = strlen(pattern);
-               size_t hlen = strlen(actual_hostname);
+               size_t          plen = strlen(pattern);
+               size_t          hlen = strlen(actual_hostname);
 
                if (hlen < plen)
                        return false;
@@ -590,7 +589,8 @@ hostname_match(const char *pattern, const char *actual_hostname)
 static bool
 check_hostname(hbaPort *port, const char *hostname)
 {
-       struct addrinfo *gai_result, *gai;
+       struct addrinfo *gai_result,
+                          *gai;
        int                     ret;
        bool            found;
 
@@ -632,7 +632,7 @@ check_hostname(hbaPort *port, const char *hostname)
                        if (gai->ai_addr->sa_family == AF_INET)
                        {
                                if (ipv4eq((struct sockaddr_in *) gai->ai_addr,
-                                                  (struct sockaddr_in *) &port->raddr.addr))
+                                                  (struct sockaddr_in *) & port->raddr.addr))
                                {
                                        found = true;
                                        break;
@@ -642,7 +642,7 @@ check_hostname(hbaPort *port, const char *hostname)
                        else if (gai->ai_addr->sa_family == AF_INET6)
                        {
                                if (ipv6eq((struct sockaddr_in6 *) gai->ai_addr,
-                                                  (struct sockaddr_in6 *) &port->raddr.addr))
+                                                  (struct sockaddr_in6 *) & port->raddr.addr))
                                {
                                        found = true;
                                        break;
@@ -974,8 +974,8 @@ parse_hba_line(List *line, int line_num, HbaLine *parsedline)
                                                        (errcode(ERRCODE_CONFIG_FILE_ERROR),
                                                         errmsg("specifying both host name and CIDR mask is invalid: \"%s\"",
                                                                        token),
-                                                        errcontext("line %d of configuration file \"%s\"",
-                                                                               line_num, HbaFileName)));
+                                                  errcontext("line %d of configuration file \"%s\"",
+                                                                         line_num, HbaFileName)));
                                        pfree(token);
                                        return false;
                                }
index 3232e64d4ad8fc61c79b9d53ec5f90a2150b001d..b83a2efb6989383552166bd6ddfa95ae046f809d 100644
@@ -85,7 +85,7 @@
 #ifdef HAVE_UTIME_H
 #include <utime.h>
 #endif
-#ifdef WIN32_ONLY_COMPILER /* mstcpip.h is missing on mingw */
+#ifdef WIN32_ONLY_COMPILER             /* mstcpip.h is missing on mingw */
 #include <mstcpip.h>
 #endif
 
@@ -745,7 +745,7 @@ TouchSocketFile(void)
  */
 
 /* --------------------------------
- *            pq_set_nonblocking - set socket blocking/non-blocking
+ *                       pq_set_nonblocking - set socket blocking/non-blocking
  *
  * Sets the socket non-blocking if nonblocking is TRUE, or sets it
  * blocking otherwise.
@@ -760,16 +760,17 @@ pq_set_nonblocking(bool nonblocking)
 #ifdef WIN32
        pgwin32_noblock = nonblocking ? 1 : 0;
 #else
+
        /*
-        * Use COMMERROR on failure, because ERROR would try to send the error
-        * to the client, which might require changing the mode again, leading
-        * to infinite recursion.
+        * Use COMMERROR on failure, because ERROR would try to send the error to
+        * the client, which might require changing the mode again, leading to
+        * infinite recursion.
         */
        if (nonblocking)
        {
                if (!pg_set_noblock(MyProcPort->sock))
                        ereport(COMMERROR,
-                                       (errmsg("could not set socket to non-blocking mode: %m")));
+                                 (errmsg("could not set socket to non-blocking mode: %m")));
        }
        else
        {
@@ -903,18 +904,17 @@ pq_getbyte_if_available(unsigned char *c)
        {
                /*
                 * Ok if no data available without blocking or interrupted (though
-                * EINTR really shouldn't happen with a non-blocking socket).
-                * Report other errors.
+                * EINTR really shouldn't happen with a non-blocking socket). Report
+                * other errors.
                 */
                if (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
                        r = 0;
                else
                {
                        /*
-                        * Careful: an ereport() that tries to write to the client
-                        * would cause recursion to here, leading to stack overflow
-                        * and core dump!  This message must go *only* to the
-                        * postmaster log.
+                        * Careful: an ereport() that tries to write to the client would
+                        * cause recursion to here, leading to stack overflow and core
+                        * dump!  This message must go *only* to the postmaster log.
                         */
                        ereport(COMMERROR,
                                        (errcode_for_socket_access(),
@@ -1219,8 +1219,8 @@ internal_flush(void)
                                continue;               /* Ok if we were interrupted */
 
                        /*
-                        * Ok if no data writable without blocking, and the socket
-                        * is in non-blocking mode.
+                        * Ok if no data writable without blocking, and the socket is in
+                        * non-blocking mode.
                         */
                        if (errno == EAGAIN ||
                                errno == EWOULDBLOCK)
@@ -1369,8 +1369,8 @@ fail:
 void
 pq_putmessage_noblock(char msgtype, const char *s, size_t len)
 {
-       int res;
-       int required;
+       int                     res;
+       int                     required;
 
        /*
         * Ensure we have enough space in the output buffer for the message header
@@ -1383,7 +1383,8 @@ pq_putmessage_noblock(char msgtype, const char *s, size_t len)
                PqSendBufferSize = required;
        }
        res = pq_putmessage(msgtype, s, len);
-       Assert(res == 0);       /* should not fail when the message fits in buffer */
+       Assert(res == 0);                       /* should not fail when the message fits in
+                                                                * buffer */
 }
 
 
@@ -1434,13 +1435,13 @@ pq_endcopyout(bool errorAbort)
 static int
 pq_setkeepaliveswin32(Port *port, int idle, int interval)
 {
-       struct tcp_keepalive    ka;
-       DWORD                                   retsize;
+       struct tcp_keepalive ka;
+       DWORD           retsize;
 
        if (idle <= 0)
-               idle = 2 * 60 * 60; /* default = 2 hours */
+               idle = 2 * 60 * 60;             /* default = 2 hours */
        if (interval <= 0)
-               interval = 1;       /* default = 1 second */
+               interval = 1;                   /* default = 1 second */
 
        ka.onoff = 1;
        ka.keepalivetime = idle * 1000;
@@ -1500,11 +1501,11 @@ pq_getkeepalivesidle(Port *port)
                        elog(LOG, "getsockopt(TCP_KEEPALIVE) failed: %m");
                        port->default_keepalives_idle = -1; /* don't know */
                }
-#endif /* TCP_KEEPIDLE */
-#else /* WIN32 */
+#endif   /* TCP_KEEPIDLE */
+#else                                                  /* WIN32 */
                /* We can't get the defaults on Windows, so return "don't know" */
                port->default_keepalives_idle = -1;
-#endif /* WIN32 */
+#endif   /* WIN32 */
        }
 
        return port->default_keepalives_idle;
@@ -1555,10 +1556,10 @@ pq_setkeepalivesidle(int idle, Port *port)
 #endif
 
        port->keepalives_idle = idle;
-#else /* WIN32 */
+#else                                                  /* WIN32 */
        return pq_setkeepaliveswin32(port, idle, port->keepalives_interval);
 #endif
-#else /* TCP_KEEPIDLE || SIO_KEEPALIVE_VALS */
+#else                                                  /* TCP_KEEPIDLE || SIO_KEEPALIVE_VALS */
        if (idle != 0)
        {
                elog(LOG, "setting the keepalive idle time is not supported");
@@ -1593,7 +1594,7 @@ pq_getkeepalivesinterval(Port *port)
 #else
                /* We can't get the defaults on Windows, so return "don't know" */
                port->default_keepalives_interval = -1;
-#endif /* WIN32 */
+#endif   /* WIN32 */
        }
 
        return port->default_keepalives_interval;
@@ -1635,7 +1636,7 @@ pq_setkeepalivesinterval(int interval, Port *port)
        }
 
        port->keepalives_interval = interval;
-#else /* WIN32 */
+#else                                                  /* WIN32 */
        return pq_setkeepaliveswin32(port, port->keepalives_idle, interval);
 #endif
 #else
index 43d182b4dba751f5229049bbb7900afa1b9620a3..c4ef56dc6c6584709c53c8a63127ff10315c7223 100644
@@ -204,7 +204,7 @@ main(int argc, char *argv[])
 /*
  * Place platform-specific startup hacks here. This is the right
  * place to put code that must be executed early in the launch of any new
- * server process.  Note that this code will NOT be executed when a backend
+ * server process.     Note that this code will NOT be executed when a backend
  * or sub-bootstrap process is forked, unless we are in a fork/exec
  * environment (ie EXEC_BACKEND is defined).
  *
@@ -218,8 +218,8 @@ startup_hacks(const char *progname)
        /*
         * On some platforms, unaligned memory accesses result in a kernel trap;
         * the default kernel behavior is to emulate the memory access, but this
-        * results in a significant performance penalty.  We want PG never to
-        * make such unaligned memory accesses, so this code disables the kernel
+        * results in a significant performance penalty.  We want PG never to make
+        * such unaligned memory accesses, so this code disables the kernel
         * emulation: unaligned accesses will result in SIGBUS instead.
         */
 #ifdef NOFIXADE
@@ -230,7 +230,7 @@ startup_hacks(const char *progname)
 
 #if defined(__alpha)                   /* no __alpha__ ? */
        {
-               int             buffer[] = {SSIN_UACPROC, UAC_SIGBUS | UAC_NOPRINT};
+               int                     buffer[] = {SSIN_UACPROC, UAC_SIGBUS | UAC_NOPRINT};
 
                if (setsysinfo(SSI_NVPAIRS, buffer, 1, (caddr_t) NULL,
                                           (unsigned long) NULL) < 0)
@@ -238,7 +238,6 @@ startup_hacks(const char *progname)
                                                 progname, strerror(errno));
        }
 #endif   /* __alpha */
-
 #endif   /* NOFIXADE */
 
        /*
index 0eac9826a4e5f2e7184bf73ee7944b6ebd37c714..c0d2294317e50d829c7666a4cf259d5289fd15cb 100644
@@ -581,7 +581,7 @@ _copyForeignScan(ForeignScan *from)
 static FdwPlan *
 _copyFdwPlan(FdwPlan *from)
 {
-       FdwPlan *newnode = makeNode(FdwPlan);
+       FdwPlan    *newnode = makeNode(FdwPlan);
 
        COPY_SCALAR_FIELD(startup_cost);
        COPY_SCALAR_FIELD(total_cost);
@@ -1468,7 +1468,7 @@ _copyConvertRowtypeExpr(ConvertRowtypeExpr *from)
 static CollateExpr *
 _copyCollateExpr(CollateExpr *from)
 {
-       CollateExpr   *newnode = makeNode(CollateExpr);
+       CollateExpr *newnode = makeNode(CollateExpr);
 
        COPY_NODE_FIELD(arg);
        COPY_SCALAR_FIELD(collOid);
@@ -2269,7 +2269,7 @@ _copyTypeCast(TypeCast *from)
 static CollateClause *
 _copyCollateClause(CollateClause *from)
 {
-       CollateClause   *newnode = makeNode(CollateClause);
+       CollateClause *newnode = makeNode(CollateClause);
 
        COPY_NODE_FIELD(arg);
        COPY_NODE_FIELD(collname);
index af1ccb7efec64b5207c37238ce07937d32ae4f77..0e57f6c6d7d68eb8667b81cb860028aa69c391b1 100644
@@ -675,10 +675,10 @@ exprCollation(Node *expr)
                        coll = ((NullIfExpr *) expr)->opcollid;
                        break;
                case T_ScalarArrayOpExpr:
-                       coll = InvalidOid;                              /* result is always boolean */
+                       coll = InvalidOid;      /* result is always boolean */
                        break;
                case T_BoolExpr:
-                       coll = InvalidOid;                              /* result is always boolean */
+                       coll = InvalidOid;      /* result is always boolean */
                        break;
                case T_SubLink:
                        {
@@ -736,7 +736,7 @@ exprCollation(Node *expr)
                        coll = ((FieldSelect *) expr)->resultcollid;
                        break;
                case T_FieldStore:
-                       coll = InvalidOid;                              /* result is always composite */
+                       coll = InvalidOid;      /* result is always composite */
                        break;
                case T_RelabelType:
                        coll = ((RelabelType *) expr)->resultcollid;
@@ -748,7 +748,7 @@ exprCollation(Node *expr)
                        coll = ((ArrayCoerceExpr *) expr)->resultcollid;
                        break;
                case T_ConvertRowtypeExpr:
-                       coll = InvalidOid;                              /* result is always composite */
+                       coll = InvalidOid;      /* result is always composite */
                        break;
                case T_CollateExpr:
                        coll = ((CollateExpr *) expr)->collOid;
@@ -763,10 +763,10 @@ exprCollation(Node *expr)
                        coll = ((ArrayExpr *) expr)->array_collid;
                        break;
                case T_RowExpr:
-                       coll = InvalidOid;                              /* result is always composite */
+                       coll = InvalidOid;      /* result is always composite */
                        break;
                case T_RowCompareExpr:
-                       coll = InvalidOid;                              /* result is always boolean */
+                       coll = InvalidOid;      /* result is always boolean */
                        break;
                case T_CoalesceExpr:
                        coll = ((CoalesceExpr *) expr)->coalescecollid;
@@ -775,10 +775,11 @@ exprCollation(Node *expr)
                        coll = ((MinMaxExpr *) expr)->minmaxcollid;
                        break;
                case T_XmlExpr:
+
                        /*
                         * XMLSERIALIZE returns text from non-collatable inputs, so its
-                        * collation is always default.  The other cases return boolean
-                        * or XML, which are non-collatable.
+                        * collation is always default.  The other cases return boolean or
+                        * XML, which are non-collatable.
                         */
                        if (((XmlExpr *) expr)->op == IS_XMLSERIALIZE)
                                coll = DEFAULT_COLLATION_OID;
@@ -786,10 +787,10 @@ exprCollation(Node *expr)
                                coll = InvalidOid;
                        break;
                case T_NullTest:
-                       coll = InvalidOid;                              /* result is always boolean */
+                       coll = InvalidOid;      /* result is always boolean */
                        break;
                case T_BooleanTest:
-                       coll = InvalidOid;                              /* result is always boolean */
+                       coll = InvalidOid;      /* result is always boolean */
                        break;
                case T_CoerceToDomain:
                        coll = ((CoerceToDomain *) expr)->resultcollid;
@@ -801,7 +802,7 @@ exprCollation(Node *expr)
                        coll = ((SetToDefault *) expr)->collation;
                        break;
                case T_CurrentOfExpr:
-                       coll = InvalidOid;                              /* result is always boolean */
+                       coll = InvalidOid;      /* result is always boolean */
                        break;
                case T_PlaceHolderVar:
                        coll = exprCollation((Node *) ((PlaceHolderVar *) expr)->phexpr);
@@ -907,10 +908,10 @@ exprSetCollation(Node *expr, Oid collation)
                        ((NullIfExpr *) expr)->opcollid = collation;
                        break;
                case T_ScalarArrayOpExpr:
-                       Assert(!OidIsValid(collation)); /* result is always boolean */
+                       Assert(!OidIsValid(collation));         /* result is always boolean */
                        break;
                case T_BoolExpr:
-                       Assert(!OidIsValid(collation)); /* result is always boolean */
+                       Assert(!OidIsValid(collation));         /* result is always boolean */
                        break;
                case T_SubLink:
 #ifdef USE_ASSERT_CHECKING
@@ -937,13 +938,13 @@ exprSetCollation(Node *expr, Oid collation)
                                        Assert(!OidIsValid(collation));
                                }
                        }
-#endif /* USE_ASSERT_CHECKING */
+#endif   /* USE_ASSERT_CHECKING */
                        break;
                case T_FieldSelect:
                        ((FieldSelect *) expr)->resultcollid = collation;
                        break;
                case T_FieldStore:
-                       Assert(!OidIsValid(collation)); /* result is always composite */
+                       Assert(!OidIsValid(collation));         /* result is always composite */
                        break;
                case T_RelabelType:
                        ((RelabelType *) expr)->resultcollid = collation;
@@ -955,7 +956,7 @@ exprSetCollation(Node *expr, Oid collation)
                        ((ArrayCoerceExpr *) expr)->resultcollid = collation;
                        break;
                case T_ConvertRowtypeExpr:
-                       Assert(!OidIsValid(collation)); /* result is always composite */
+                       Assert(!OidIsValid(collation));         /* result is always composite */
                        break;
                case T_CaseExpr:
                        ((CaseExpr *) expr)->casecollid = collation;
@@ -964,10 +965,10 @@ exprSetCollation(Node *expr, Oid collation)
                        ((ArrayExpr *) expr)->array_collid = collation;
                        break;
                case T_RowExpr:
-                       Assert(!OidIsValid(collation)); /* result is always composite */
+                       Assert(!OidIsValid(collation));         /* result is always composite */
                        break;
                case T_RowCompareExpr:
-                       Assert(!OidIsValid(collation)); /* result is always boolean */
+                       Assert(!OidIsValid(collation));         /* result is always boolean */
                        break;
                case T_CoalesceExpr:
                        ((CoalesceExpr *) expr)->coalescecollid = collation;
@@ -981,10 +982,10 @@ exprSetCollation(Node *expr, Oid collation)
                                   (collation == InvalidOid));
                        break;
                case T_NullTest:
-                       Assert(!OidIsValid(collation)); /* result is always boolean */
+                       Assert(!OidIsValid(collation));         /* result is always boolean */
                        break;
                case T_BooleanTest:
-                       Assert(!OidIsValid(collation)); /* result is always boolean */
+                       Assert(!OidIsValid(collation));         /* result is always boolean */
                        break;
                case T_CoerceToDomain:
                        ((CoerceToDomain *) expr)->resultcollid = collation;
@@ -996,7 +997,7 @@ exprSetCollation(Node *expr, Oid collation)
                        ((SetToDefault *) expr)->collation = collation;
                        break;
                case T_CurrentOfExpr:
-                       Assert(!OidIsValid(collation)); /* result is always boolean */
+                       Assert(!OidIsValid(collation));         /* result is always boolean */
                        break;
                default:
                        elog(ERROR, "unrecognized node type: %d", (int) nodeTag(expr));
index d6e6e6a2bda4152c75f947d8916398de2219e6be..62d766a2827c486d4ddda960f6ca285c986495a7 100644
@@ -43,7 +43,7 @@ copyParamList(ParamListInfo from)
 
        /* sizeof(ParamListInfoData) includes the first array element */
        size = sizeof(ParamListInfoData) +
-               (from->numParams - 1) *sizeof(ParamExternData);
+               (from->numParams - 1) * sizeof(ParamExternData);
 
        retval = (ParamListInfo) palloc(size);
        retval->paramFetch = NULL;
index dc2a23bb2737313b43e1e6d7b272119863693cbc..47ab08e502e63971cbcca7a44b780ec37784d2a7 100644
@@ -66,7 +66,7 @@ static void set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel,
 static void set_worktable_pathlist(PlannerInfo *root, RelOptInfo *rel,
                                           RangeTblEntry *rte);
 static void set_foreign_pathlist(PlannerInfo *root, RelOptInfo *rel,
-                                          RangeTblEntry *rte);
+                                        RangeTblEntry *rte);
 static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
 static bool subquery_is_pushdown_safe(Query *subquery, Query *topquery,
                                                  bool *differentTypes);
@@ -413,11 +413,11 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 
                /*
                 * We have to make child entries in the EquivalenceClass data
-                * structures as well.  This is needed either if the parent
-                * participates in some eclass joins (because we will want to
-                * consider inner-indexscan joins on the individual children)
-                * or if the parent has useful pathkeys (because we should try
-                * to build MergeAppend paths that produce those sort orderings).
+                * structures as well.  This is needed either if the parent
+                * participates in some eclass joins (because we will want to consider
+                * inner-indexscan joins on the individual children) or if the parent
+                * has useful pathkeys (because we should try to build MergeAppend
+                * paths that produce those sort orderings).
                 */
                if (rel->has_eclass_joins || has_useful_pathkeys(root, rel))
                        add_child_rel_equivalences(root, appinfo, rel, childrel);
@@ -462,7 +462,7 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
                        /* Have we already seen this ordering? */
                        foreach(lpk, all_child_pathkeys)
                        {
-                               List   *existing_pathkeys = (List *) lfirst(lpk);
+                               List       *existing_pathkeys = (List *) lfirst(lpk);
 
                                if (compare_pathkeys(existing_pathkeys,
                                                                         childkeys) == PATHKEYS_EQUAL)
@@ -540,18 +540,18 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 
        /*
         * Next, build MergeAppend paths based on the collected list of child
-        * pathkeys.  We consider both cheapest-startup and cheapest-total
-        * cases, ie, for each interesting ordering, collect all the cheapest
-        * startup subpaths and all the cheapest total paths, and build a
-        * MergeAppend path for each list.
+        * pathkeys.  We consider both cheapest-startup and cheapest-total cases,
+        * ie, for each interesting ordering, collect all the cheapest startup
+        * subpaths and all the cheapest total paths, and build a MergeAppend path
+        * for each list.
         */
        foreach(l, all_child_pathkeys)
        {
-               List   *pathkeys = (List *) lfirst(l);
-               List   *startup_subpaths = NIL;
-               List   *total_subpaths = NIL;
-               bool    startup_neq_total = false;
-               ListCell *lcr;
+               List       *pathkeys = (List *) lfirst(l);
+               List       *startup_subpaths = NIL;
+               List       *total_subpaths = NIL;
+               bool            startup_neq_total = false;
+               ListCell   *lcr;
 
                /* Select the child paths for this ordering... */
                foreach(lcr, live_childrels)
@@ -581,8 +581,8 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
 
                        /*
                         * Notice whether we actually have different paths for the
-                        * "cheapest" and "total" cases; frequently there will be no
-                        * point in two create_merge_append_path() calls.
+                        * "cheapest" and "total" cases; frequently there will be no point
+                        * in two create_merge_append_path() calls.
                         */
                        if (cheapest_startup != cheapest_total)
                                startup_neq_total = true;
@@ -623,7 +623,7 @@ accumulate_append_subpath(List *subpaths, Path *path)
 {
        if (IsA(path, AppendPath))
        {
-               AppendPath      *apath = (AppendPath *) path;
+               AppendPath *apath = (AppendPath *) path;
 
                /* list_copy is important here to avoid sharing list substructure */
                return list_concat(subpaths, list_copy(apath->subpaths));
index 8f763b436956fa2641aa04ee62b924f9bac706a7..e200dcf4728c3d9a9b57c53689abd1e5c9a6bba4 100644
@@ -1096,7 +1096,7 @@ cost_recursive_union(Plan *runion, Plan *nrterm, Plan *rterm)
  * accesses (XXX can't we refine that guess?)
  *
  * By default, we charge two operator evals per tuple comparison, which should
- * be in the right ballpark in most cases.  The caller can tweak this by
+ * be in the right ballpark in most cases.     The caller can tweak this by
  * specifying nonzero comparison_cost; typically that's used for any extra
  * work that has to be done to prepare the inputs to the comparison operators.
  *
@@ -1218,7 +1218,7 @@ cost_sort(Path *path, PlannerInfo *root,
  *       Determines and returns the cost of a MergeAppend node.
  *
  * MergeAppend merges several pre-sorted input streams, using a heap that
- * at any given instant holds the next tuple from each stream.  If there
+ * at any given instant holds the next tuple from each stream. If there
  * are N streams, we need about N*log2(N) tuple comparisons to construct
  * the heap at startup, and then for each output tuple, about log2(N)
  * comparisons to delete the top heap entry and another log2(N) comparisons
@@ -2909,7 +2909,7 @@ adjust_semi_join(PlannerInfo *root, JoinPath *path, SpecialJoinInfo *sjinfo,
                        List       *nrclauses;
 
                        nrclauses = select_nonredundant_join_clauses(root,
-                                                                                                                path->joinrestrictinfo,
+                                                                                                         path->joinrestrictinfo,
                                                                                                                 path->innerjoinpath);
                        *indexed_join_quals = (nrclauses == NIL);
                }
@@ -3185,7 +3185,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel,
 
        /*
         * Compute per-output-column width estimates by examining the subquery's
-        * targetlist.  For any output that is a plain Var, get the width estimate
+        * targetlist.  For any output that is a plain Var, get the width estimate
         * that was made while planning the subquery.  Otherwise, fall back on a
         * datatype-based estimate.
         */
@@ -3210,7 +3210,7 @@ set_subquery_size_estimates(PlannerInfo *root, RelOptInfo *rel,
                if (IsA(texpr, Var) &&
                        subroot->parse->setOperations == NULL)
                {
-                       Var        *var = (Var *) texpr;
+                       Var                *var = (Var *) texpr;
                        RelOptInfo *subrel = find_base_rel(subroot, var->varno);
 
                        item_width = subrel->attr_widths[var->varattno - subrel->min_attr];
@@ -3332,7 +3332,7 @@ set_cte_size_estimates(PlannerInfo *root, RelOptInfo *rel, Plan *cteplan)
  * of estimating baserestrictcost, so we set that, and we also set up width
  * using what will be purely datatype-driven estimates from the targetlist.
  * There is no way to do anything sane with the rows value, so we just put
- * a default estimate and hope that the wrapper can improve on it.  The
+ * a default estimate and hope that the wrapper can improve on it.     The
  * wrapper's PlanForeignScan function will be called momentarily.
  *
  * The rel's targetlist and restrictinfo list must have been constructed
@@ -3396,8 +3396,8 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
                        ndx = var->varattno - rel->min_attr;
 
                        /*
-                        * If it's a whole-row Var, we'll deal with it below after we
-                        * have already cached as many attr widths as possible.
+                        * If it's a whole-row Var, we'll deal with it below after we have
+                        * already cached as many attr widths as possible.
                         */
                        if (var->varattno == 0)
                        {
@@ -3406,8 +3406,8 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
                        }
 
                        /*
-                        * The width may have been cached already (especially if it's
-                        * subquery), so don't duplicate effort.
+                        * The width may have been cached already (especially if it's a
+                        * subquery), so don't duplicate effort.
                         */
                        if (rel->attr_widths[ndx] > 0)
                        {
@@ -3464,13 +3464,13 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
         */
        if (have_wholerow_var)
        {
-               int32   wholerow_width = sizeof(HeapTupleHeaderData);
+               int32           wholerow_width = sizeof(HeapTupleHeaderData);
 
                if (reloid != InvalidOid)
                {
                        /* Real relation, so estimate true tuple width */
                        wholerow_width += get_relation_data_width(reloid,
-                                                                                                         rel->attr_widths - rel->min_attr);
+                                                                                  rel->attr_widths - rel->min_attr);
                }
                else
                {
@@ -3484,8 +3484,8 @@ set_rel_width(PlannerInfo *root, RelOptInfo *rel)
                rel->attr_widths[0 - rel->min_attr] = wholerow_width;
 
                /*
-                * Include the whole-row Var as part of the output tuple.  Yes,
-                * that really is what happens at runtime.
+                * Include the whole-row Var as part of the output tuple.  Yes, that
+                * really is what happens at runtime.
                 */
                tuple_width += wholerow_width;
        }
index 9a32e16940b19bd38de19e9c1ab0f690b4a683c5..a365beecd8a348623968b2ad080954699af2c8a3 100644
@@ -385,7 +385,7 @@ process_equivalence(PlannerInfo *root, RestrictInfo *restrictinfo,
  * Also, the expression's exposed collation must match the EC's collation.
  * This is important because in comparisons like "foo < bar COLLATE baz",
  * only one of the expressions has the correct exposed collation as we receive
- * it from the parser.  Forcing both of them to have it ensures that all
+ * it from the parser. Forcing both of them to have it ensures that all
  * variant spellings of such a construct behave the same.  Again, we can
  * stick on a RelabelType to force the right exposed collation.  (It might
  * work to not label the collation at all in EC members, but this is risky
@@ -414,13 +414,13 @@ canonicalize_ec_expression(Expr *expr, Oid req_type, Oid req_collation)
                exprCollation((Node *) expr) != req_collation)
        {
                /*
-                * Strip any existing RelabelType, then add a new one if needed.
-                * This is to preserve the invariant of no redundant RelabelTypes.
+                * Strip any existing RelabelType, then add a new one if needed. This
+                * is to preserve the invariant of no redundant RelabelTypes.
                 *
                 * If we have to change the exposed type of the stripped expression,
                 * set typmod to -1 (since the new type may not have the same typmod
-                * interpretation).  If we only have to change collation, preserve
-                * the exposed typmod.
+                * interpretation).  If we only have to change collation, preserve the
+                * exposed typmod.
                 */
                while (expr && IsA(expr, RelabelType))
                        expr = (Expr *) ((RelabelType *) expr)->arg;
@@ -1784,8 +1784,8 @@ add_child_rel_equivalences(PlannerInfo *root,
                ListCell   *lc2;
 
                /*
-                * If this EC contains a constant, then it's not useful for sorting
-                * or driving an inner index-scan, so we skip generating child EMs.
+                * If this EC contains a constant, then it's not useful for sorting or
+                * driving an inner index-scan, so we skip generating child EMs.
                 *
                 * If this EC contains a volatile expression, then generating child
                 * EMs would be downright dangerous.  We rely on a volatile EC having
index 76f842631fa510dbc2c28d805c7974e6a8ffc3e5..ef65cf222442e4a920f1947f54aaff4838062548 100644
@@ -119,7 +119,7 @@ static bool match_special_index_operator(Expr *clause,
 static Expr *expand_boolean_index_clause(Node *clause, int indexcol,
                                                        IndexOptInfo *index);
 static List *expand_indexqual_opclause(RestrictInfo *rinfo,
-                                                                          Oid opfamily, Oid idxcollation);
+                                                 Oid opfamily, Oid idxcollation);
 static RestrictInfo *expand_indexqual_rowcompare(RestrictInfo *rinfo,
                                                        IndexOptInfo *index,
                                                        int indexcol);
@@ -1159,8 +1159,8 @@ group_clauses_by_indexkey(IndexOptInfo *index,
  *       (2)  must contain an operator which is in the same family as the index
  *                operator for this column, or is a "special" operator as recognized
  *                by match_special_index_operator();
- *         and
- *    (3)  must match the collation of the index, if collation is relevant.
+ *                and
+ *       (3)  must match the collation of the index, if collation is relevant.
  *
  *       Our definition of "const" is pretty liberal: we allow Vars belonging
  *       to the caller-specified outer_relids relations (which had better not
@@ -1312,7 +1312,7 @@ match_clause_to_indexcol(IndexOptInfo *index,
                 * is a "special" indexable operator.
                 */
                if (plain_op &&
-                       match_special_index_operator(clause, opfamily, idxcollation, true))
+                 match_special_index_operator(clause, opfamily, idxcollation, true))
                        return true;
                return false;
        }
@@ -1438,7 +1438,7 @@ match_rowcompare_to_indexcol(IndexOptInfo *index,
 
 
 /****************************************************************************
- *                             ----  ROUTINES TO CHECK ORDERING OPERATORS  ----
+ *                             ----  ROUTINES TO CHECK ORDERING OPERATORS      ----
  ****************************************************************************/
 
 /*
@@ -1461,7 +1461,7 @@ match_index_to_pathkeys(IndexOptInfo *index, List *pathkeys)
 
        foreach(lc1, pathkeys)
        {
-               PathKey    *pathkey = (PathKey *) lfirst(lc1);
+               PathKey    *pathkey = (PathKey *) lfirst(lc1);
                bool            found = false;
                ListCell   *lc2;
 
@@ -1483,7 +1483,7 @@ match_index_to_pathkeys(IndexOptInfo *index, List *pathkeys)
                foreach(lc2, pathkey->pk_eclass->ec_members)
                {
                        EquivalenceMember *member = (EquivalenceMember *) lfirst(lc2);
-                       int             indexcol;
+                       int                     indexcol;
 
                        /* No possibility of match if it references other relations */
                        if (!bms_equal(member->em_relids, index->rel->relids))
@@ -1491,7 +1491,7 @@ match_index_to_pathkeys(IndexOptInfo *index, List *pathkeys)
 
                        for (indexcol = 0; indexcol < index->ncolumns; indexcol++)
                        {
-                               Expr   *expr;
+                               Expr       *expr;
 
                                expr = match_clause_to_ordering_op(index,
                                                                                                   indexcol,
@@ -1535,7 +1535,7 @@ match_index_to_pathkeys(IndexOptInfo *index, List *pathkeys)
  * Note that we currently do not consider the collation of the ordering
  * operator's result.  In practical cases the result type will be numeric
  * and thus have no collation, and it's not very clear what to match to
- * if it did have a collation.  The index's collation should match the
+ * if it did have a collation. The index's collation should match the
  * ordering operator's input collation, not its result.
  *
  * If successful, return 'clause' as-is if the indexkey is on the left,
@@ -1598,8 +1598,8 @@ match_clause_to_ordering_op(IndexOptInfo *index,
                return NULL;
 
        /*
-        * Is the (commuted) operator an ordering operator for the opfamily?
-        * And if so, does it yield the right sorting semantics?
+        * Is the (commuted) operator an ordering operator for the opfamily? And
+        * if so, does it yield the right sorting semantics?
         */
        sortfamily = get_op_opfamily_sortfamily(expr_op, opfamily);
        if (sortfamily != pk_opfamily)
@@ -2198,9 +2198,9 @@ relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel,
                                        continue;
 
                                /*
-                                * XXX at some point we may need to check collations here
-                                * too.  For the moment we assume all collations reduce to
-                                * the same notion of equality.
+                                * XXX at some point we may need to check collations here too.
+                                * For the moment we assume all collations reduce to the same
+                                * notion of equality.
                                 */
 
                                /* OK, see if the condition operand matches the index key */
@@ -2544,10 +2544,10 @@ match_special_index_operator(Expr *clause, Oid opfamily, Oid idxcollation,
         *
         * The non-pattern opclasses will not sort the way we need in most non-C
         * locales.  We can use such an index anyway for an exact match (simple
-        * equality), but not for prefix-match cases.  Note that we are looking
-        * at the index's collation, not the expression's collation -- this test
-        * is not dependent on the LIKE/regex operator's collation (which would
-        * only affect case folding behavior of ILIKE, anyway).
+        * equality), but not for prefix-match cases.  Note that we are looking at
+        * the index's collation, not the expression's collation -- this test is
+        * not dependent on the LIKE/regex operator's collation (which would only
+        * affect case folding behavior of ILIKE, anyway).
         */
        switch (expr_op)
        {
@@ -2657,7 +2657,7 @@ expand_indexqual_conditions(IndexOptInfo *index, List *clausegroups)
                                resultquals = list_concat(resultquals,
                                                                                  expand_indexqual_opclause(rinfo,
                                                                                                                                        curFamily,
-                                                                                                                                       curCollation));
+                                                                                                                         curCollation));
                        }
                        else if (IsA(clause, ScalarArrayOpExpr))
                        {
@@ -3254,7 +3254,7 @@ network_prefix_quals(Node *leftop, Oid expr_op, Oid opfamily, Datum rightop)
        expr = make_opclause(opr1oid, BOOLOID, false,
                                                 (Expr *) leftop,
                                                 (Expr *) makeConst(datatype, -1,
-                                                                                       InvalidOid,     /* not collatable */
+                                                                                       InvalidOid, /* not collatable */
                                                                                        -1, opr1right,
                                                                                        false, false),
                                                 InvalidOid, InvalidOid);
@@ -3272,7 +3272,7 @@ network_prefix_quals(Node *leftop, Oid expr_op, Oid opfamily, Datum rightop)
        expr = make_opclause(opr2oid, BOOLOID, false,
                                                 (Expr *) leftop,
                                                 (Expr *) makeConst(datatype, -1,
-                                                                                       InvalidOid,     /* not collatable */
+                                                                                       InvalidOid, /* not collatable */
                                                                                        -1, opr2right,
                                                                                        false, false),
                                                 InvalidOid, InvalidOid);
index 740dc32dc78d8b4b233d5004c60fb4d4a269763a..7d3cf425da51bcbd45ec37cfe1eff7e00a6e4c4c 100644
@@ -97,11 +97,11 @@ add_paths_to_joinrel(PlannerInfo *root,
 
        /*
         * 1. Consider mergejoin paths where both relations must be explicitly
-        * sorted.  Skip this if we can't mergejoin.
+        * sorted.      Skip this if we can't mergejoin.
         */
        if (mergejoin_allowed)
                sort_inner_and_outer(root, joinrel, outerrel, innerrel,
-                                                        restrictlist, mergeclause_list, jointype, sjinfo);
+                                                  restrictlist, mergeclause_list, jointype, sjinfo);
 
        /*
         * 2. Consider paths where the outer relation need not be explicitly
@@ -112,7 +112,7 @@ add_paths_to_joinrel(PlannerInfo *root,
         */
        if (mergejoin_allowed)
                match_unsorted_outer(root, joinrel, outerrel, innerrel,
-                                                        restrictlist, mergeclause_list, jointype, sjinfo);
+                                                  restrictlist, mergeclause_list, jointype, sjinfo);
 
 #ifdef NOT_USED
 
@@ -129,7 +129,7 @@ add_paths_to_joinrel(PlannerInfo *root,
         */
        if (mergejoin_allowed)
                match_unsorted_inner(root, joinrel, outerrel, innerrel,
-                                                        restrictlist, mergeclause_list, jointype, sjinfo);
+                                                  restrictlist, mergeclause_list, jointype, sjinfo);
 #endif
 
        /*
index 42618649fb9614515a73b35766a6b568b26289ff..bbb79c582defe95c28091d2d70a1386d05e0a22a 100644
@@ -30,7 +30,7 @@ static bool has_legal_joinclause(PlannerInfo *root, RelOptInfo *rel);
 static bool is_dummy_rel(RelOptInfo *rel);
 static void mark_dummy_rel(RelOptInfo *rel);
 static bool restriction_is_constant_false(List *restrictlist,
-                                                                                 bool only_pushed_down);
+                                                         bool only_pushed_down);
 
 
 /*
@@ -604,10 +604,10 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
         *
         * Also, a provably constant-false join restriction typically means that
         * we can skip evaluating one or both sides of the join.  We do this by
-        * marking the appropriate rel as dummy.  For outer joins, a constant-false
-        * restriction that is pushed down still means the whole join is dummy,
-        * while a non-pushed-down one means that no inner rows will join so we
-        * can treat the inner rel as dummy.
+        * marking the appropriate rel as dummy.  For outer joins, a
+        * constant-false restriction that is pushed down still means the whole
+        * join is dummy, while a non-pushed-down one means that no inner rows
+        * will join so we can treat the inner rel as dummy.
         *
         * We need only consider the jointypes that appear in join_info_list, plus
         * JOIN_INNER.
index 47597a5d35b949ca4c0b3eb582d4c0aac2da6db8..e5228a81c63ac8885cbd3c2a5a609de33e73cc09 100644
@@ -253,7 +253,7 @@ make_pathkey_from_sortinfo(PlannerInfo *root,
        /*
         * EquivalenceClasses need to contain opfamily lists based on the family
         * membership of mergejoinable equality operators, which could belong to
-        * more than one opfamily.  So we have to look up the opfamily's equality
+        * more than one opfamily.      So we have to look up the opfamily's equality
         * operator and get its membership.
         */
        equality_op = get_opfamily_member(opfamily,
@@ -558,9 +558,9 @@ build_index_pathkeys(PlannerInfo *root,
                                                                                          true);
 
                /*
-                * If the sort key isn't already present in any EquivalenceClass,
-                * then it's not an interesting sort order for this query.  So
-                * we can stop now --- lower-order sort keys aren't useful either.
+                * If the sort key isn't already present in any EquivalenceClass, then
+                * it's not an interesting sort order for this query.  So we can stop
+                * now --- lower-order sort keys aren't useful either.
                 */
                if (!cpathkey)
                        break;
@@ -747,8 +747,8 @@ convert_subquery_pathkeys(PlannerInfo *root, RelOptInfo *rel,
                                                continue;
 
                                        /*
-                                        * Build a representation of this targetlist entry as
-                                        * an outer Var.
+                                        * Build a representation of this targetlist entry as an
+                                        * outer Var.
                                         */
                                        outer_expr = (Expr *) makeVarFromTargetEntry(rel->relid,
                                                                                                                                 tle);
@@ -923,7 +923,7 @@ make_pathkeys_for_sortclauses(PlannerInfo *root,
  * right sides.
  *
  * Note this is called before EC merging is complete, so the links won't
- * necessarily point to canonical ECs.  Before they are actually used for
+ * necessarily point to canonical ECs. Before they are actually used for
  * anything, update_mergeclause_eclasses must be called to ensure that
  * they've been updated to point to canonical ECs.
  */
index 80d42f3be609b4338f9c08ee7b80f45b6054ff30..1784ac2fc5bd77f4ddafe4a17625ef8128a3280a 100644 (file)
@@ -31,7 +31,7 @@
 /* local functions */
 static bool join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo);
 static void remove_rel_from_query(PlannerInfo *root, int relid,
-                                                                 Relids joinrelids);
+                                         Relids joinrelids);
 static List *remove_rel_from_joinlist(List *joinlist, int relid, int *nremoved);
 
 
@@ -238,10 +238,10 @@ join_is_removable(PlannerInfo *root, SpecialJoinInfo *sjinfo)
                        !bms_equal(restrictinfo->required_relids, joinrelids))
                {
                        /*
-                        * If such a clause actually references the inner rel then
-                        * join removal has to be disallowed.  We have to check this
-                        * despite the previous attr_needed checks because of the
-                        * possibility of pushed-down clauses referencing the rel.
+                        * If such a clause actually references the inner rel then join
+                        * removal has to be disallowed.  We have to check this despite
+                        * the previous attr_needed checks because of the possibility of
+                        * pushed-down clauses referencing the rel.
                         */
                        if (bms_is_member(innerrelid, restrictinfo->clause_relids))
                                return false;
@@ -365,8 +365,8 @@ remove_rel_from_query(PlannerInfo *root, int relid, Relids joinrelids)
         * Likewise remove references from SpecialJoinInfo data structures.
         *
         * This is relevant in case the outer join we're deleting is nested inside
-        * other outer joins: the upper joins' relid sets have to be adjusted.
-        * The RHS of the target outer join will be made empty here, but that's OK
+        * other outer joins: the upper joins' relid sets have to be adjusted. The
+        * RHS of the target outer join will be made empty here, but that's OK
         * since caller will delete that SpecialJoinInfo entirely.
         */
        foreach(l, root->join_info_list)
@@ -426,6 +426,7 @@ remove_rel_from_query(PlannerInfo *root, int relid, Relids joinrelids)
                {
                        /* Recheck that qual doesn't actually reference the target rel */
                        Assert(!bms_is_member(relid, rinfo->clause_relids));
+
                        /*
                         * The required_relids probably aren't shared with anything else,
                         * but let's copy them just to be sure.
index f1308812515a12416e6fc5b11abbe92e80d3e2e1..1a9540ce06802f913810ce0d81a1d5ffaaed9d72 100644 (file)
@@ -108,7 +108,7 @@ static TidScan *make_tidscan(List *qptlist, List *qpqual, Index scanrelid,
                         List *tidquals);
 static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
                                  Index scanrelid, Node *funcexpr, List *funccolnames,
-                                 List *funccoltypes, List *funccoltypmods, List *funccolcollations);
+                 List *funccoltypes, List *funccoltypmods, List *funccolcollations);
 static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
                                Index scanrelid, List *values_lists);
 static CteScan *make_ctescan(List *qptlist, List *qpqual,
@@ -143,24 +143,25 @@ static MergeJoin *make_mergejoin(List *tlist,
                           bool *mergenullsfirst,
                           Plan *lefttree, Plan *righttree,
                           JoinType jointype);
-static Sort *make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
-                 AttrNumber *sortColIdx, Oid *sortOperators, Oid *collations, bool *nullsFirst,
+static Sort *
+make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
+AttrNumber *sortColIdx, Oid *sortOperators, Oid *collations, bool *nullsFirst,
                  double limit_tuples);
 static Plan *prepare_sort_from_pathkeys(PlannerInfo *root,
-                                                                               Plan *lefttree, List *pathkeys,
-                                                                               bool adjust_tlist_in_place,
-                                                                               int *p_numsortkeys,
-                                                                               AttrNumber **p_sortColIdx,
-                                                                               Oid **p_sortOperators,
-                                                                               Oid **p_collations,
-                                                                               bool **p_nullsFirst);
+                                                  Plan *lefttree, List *pathkeys,
+                                                  bool adjust_tlist_in_place,
+                                                  int *p_numsortkeys,
+                                                  AttrNumber **p_sortColIdx,
+                                                  Oid **p_sortOperators,
+                                                  Oid **p_collations,
+                                                  bool **p_nullsFirst);
 static Material *make_material(Plan *lefttree);
 
 
 /*
  * create_plan
  *       Creates the access plan for a query by recursively processing the
- *       desired tree of pathnodes, starting at the node 'best_path'.  For
+ *       desired tree of pathnodes, starting at the node 'best_path'.  For
  *       every pathnode found, we create a corresponding plan node containing
  *       appropriate id, target list, and qualification information.
  *
@@ -737,7 +738,7 @@ create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path)
                /* Now, insert a Sort node if subplan isn't sufficiently ordered */
                if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
                        subplan = (Plan *) make_sort(root, subplan, numsortkeys,
-                                                                                sortColIdx, sortOperators, collations, nullsFirst,
+                                                  sortColIdx, sortOperators, collations, nullsFirst,
                                                                                 best_path->limit_tuples);
 
                subplans = lappend(subplans, subplan);
@@ -983,7 +984,7 @@ create_unique_plan(PlannerInfo *root, UniquePath *best_path)
                        sortcl->eqop = eqop;
                        sortcl->sortop = sortop;
                        sortcl->nulls_first = false;
-                       sortcl->hashable = false;               /* no need to make this accurate */
+                       sortcl->hashable = false;       /* no need to make this accurate */
                        sortList = lappend(sortList, sortcl);
                        groupColPos++;
                }
@@ -1153,8 +1154,8 @@ create_indexscan_plan(PlannerInfo *root,
        qpqual = extract_actual_clauses(qpqual, false);
 
        /*
-        * We have to replace any outer-relation variables with nestloop params
-        * in the indexqualorig, qpqual, and indexorderbyorig expressions.  A bit
+        * We have to replace any outer-relation variables with nestloop params in
+        * the indexqualorig, qpqual, and indexorderbyorig expressions.  A bit
         * annoying to have to do this separately from the processing in
         * fix_indexqual_references --- rethink this when generalizing the inner
         * indexscan support.  But note we can't really do this earlier because
@@ -1465,6 +1466,7 @@ create_bitmap_subplan(PlannerInfo *root, Path *bitmapqual,
                                *indexqual = lappend(*indexqual, pred);
                        }
                }
+
                /*
                 * Replace outer-relation variables with nestloop params, but only
                 * after doing the above comparisons to index predicates.
@@ -2330,10 +2332,10 @@ replace_nestloop_params_mutator(Node *node, PlannerInfo *root)
                return NULL;
        if (IsA(node, Var))
        {
-               Var        *var = (Var *) node;
-               Param  *param;
+               Var                *var = (Var *) node;
+               Param      *param;
                NestLoopParam *nlp;
-               ListCell *lc;
+               ListCell   *lc;
 
                /* Upper-level Vars should be long gone at this point */
                Assert(var->varlevelsup == 0);
@@ -2493,7 +2495,7 @@ fix_indexqual_references(PlannerInfo *root, IndexPath *index_path,
  *
  * This is a simplified version of fix_indexqual_references.  The input does
  * not have RestrictInfo nodes, and we assume that indxqual.c already
- * commuted the clauses to put the index keys on the left.  Also, we don't
+ * commuted the clauses to put the index keys on the left.     Also, we don't
  * bother to support any cases except simple OpExprs, since nothing else
  * is allowed for ordering operators.
  */
@@ -3082,8 +3084,8 @@ make_append(List *appendplans, List *tlist)
         * If you change this, see also create_append_path().  Also, the size
         * calculations should match set_append_rel_pathlist().  It'd be better
         * not to duplicate all this logic, but some callers of this function
-        * aren't working from an appendrel or AppendPath, so there's noplace
-        * to copy the data from.
+        * aren't working from an appendrel or AppendPath, so there's noplace to
+        * copy the data from.
         */
        plan->startup_cost = 0;
        plan->total_cost = 0;
@@ -3320,7 +3322,7 @@ make_mergejoin(List *tlist,
  */
 static Sort *
 make_sort(PlannerInfo *root, Plan *lefttree, int numCols,
-                 AttrNumber *sortColIdx, Oid *sortOperators, Oid *collations, bool *nullsFirst,
+AttrNumber *sortColIdx, Oid *sortOperators, Oid *collations, bool *nullsFirst,
                  double limit_tuples)
 {
        Sort       *node = makeNode(Sort);
@@ -3398,7 +3400,7 @@ add_sort_column(AttrNumber colIdx, Oid sortOp, Oid coll, bool nulls_first,
  * prepare_sort_from_pathkeys
  *       Prepare to sort according to given pathkeys
  *
- * This is used to set up for both Sort and MergeAppend nodes.  It calculates
+ * This is used to set up for both Sort and MergeAppend nodes. It calculates
  * the executor's representation of the sort key information, and adjusts the
  * plan targetlist if needed to add resjunk sort columns.
  *
@@ -3608,7 +3610,7 @@ prepare_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
                                                                          pathkey->pk_eclass->ec_collation,
                                                                          pathkey->pk_nulls_first,
                                                                          numsortkeys,
-                                                                         sortColIdx, sortOperators, collations, nullsFirst);
+                                                 sortColIdx, sortOperators, collations, nullsFirst);
        }
 
        Assert(numsortkeys > 0);
@@ -3653,7 +3655,7 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys,
 
        /* Now build the Sort node */
        return make_sort(root, lefttree, numsortkeys,
-                                        sortColIdx, sortOperators, collations, nullsFirst, limit_tuples);
+                       sortColIdx, sortOperators, collations, nullsFirst, limit_tuples);
 }
 
 /*
@@ -3699,7 +3701,7 @@ make_sort_from_sortclauses(PlannerInfo *root, List *sortcls, Plan *lefttree)
                                                                          exprCollation((Node *) tle->expr),
                                                                          sortcl->nulls_first,
                                                                          numsortkeys,
-                                                                         sortColIdx, sortOperators, collations, nullsFirst);
+                                                 sortColIdx, sortOperators, collations, nullsFirst);
        }
 
        Assert(numsortkeys > 0);
@@ -3761,7 +3763,7 @@ make_sort_from_groupcols(PlannerInfo *root,
                                                                          exprCollation((Node *) tle->expr),
                                                                          grpcl->nulls_first,
                                                                          numsortkeys,
-                                                                         sortColIdx, sortOperators, collations, nullsFirst);
+                                                 sortColIdx, sortOperators, collations, nullsFirst);
                grpno++;
        }
 
index 0e00df64335d0d25d0752aca840d686d9f3fc1d4..333ede218ea40749e27cacb62e3290bb9b6fb2ed 100644 (file)
@@ -188,10 +188,11 @@ add_vars_to_targetlist(PlannerInfo *root, List *vars, Relids where_needed)
 
                        phinfo->ph_needed = bms_add_members(phinfo->ph_needed,
                                                                                                where_needed);
+
                        /*
-                        * Update ph_may_need too.  This is currently only necessary
-                        * when being called from build_base_rel_tlists, but we may as
-                        * well do it always.
+                        * Update ph_may_need too.      This is currently only necessary when
+                        * being called from build_base_rel_tlists, but we may as well do
+                        * it always.
                         */
                        phinfo->ph_may_need = bms_add_members(phinfo->ph_may_need,
                                                                                                  where_needed);
@@ -704,8 +705,8 @@ make_outerjoininfo(PlannerInfo *root,
         * this join's nullable side, and it may get used above this join, then
         * ensure that min_righthand contains the full eval_at set of the PHV.
         * This ensures that the PHV actually can be evaluated within the RHS.
-        * Note that this works only because we should already have determined
-        * the final eval_at level for any PHV syntactically within this join.
+        * Note that this works only because we should already have determined the
+        * final eval_at level for any PHV syntactically within this join.
         */
        foreach(l, root->placeholder_list)
        {
@@ -1070,7 +1071,7 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
         *
         * In all cases, it's important to initialize the left_ec and right_ec
         * fields of a mergejoinable clause, so that all possibly mergejoinable
-        * expressions have representations in EquivalenceClasses.  If
+        * expressions have representations in EquivalenceClasses.      If
         * process_equivalence is successful, it will take care of that;
         * otherwise, we have to call initialize_mergeclause_eclasses to do it.
         */
index f2ddf2a8442215a6dc8b5b0bee133604ddaa1aad..7fce92c2f1561b762e77511dd83f8550032aca21 100644 (file)
@@ -10,9 +10,9 @@
  *              ORDER BY col ASC/DESC
  *              LIMIT 1)
  * Given a suitable index on tab.col, this can be much faster than the
- * generic scan-all-the-rows aggregation plan.  We can handle multiple
+ * generic scan-all-the-rows aggregation plan. We can handle multiple
  * MIN/MAX aggregates by generating multiple subqueries, and their
- * orderings can be different.  However, if the query contains any
+ * orderings can be different. However, if the query contains any
  * non-optimizable aggregates, there's no point since we'll have to
  * scan all the rows anyway.
  *
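
As a concrete (hypothetical) example of the rewrite this header comment describes, a MIN aggregate over a suitably indexed column is planned roughly as the equivalent index-friendly subquery:

    CREATE TABLE tab (col int);
    CREATE INDEX tab_col_idx ON tab (col);

    -- SELECT MIN(col) FROM tab;  is planned roughly as:
    SELECT col FROM tab
     WHERE col IS NOT NULL
     ORDER BY col ASC
     LIMIT 1;
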
@@ -87,10 +87,10 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
         *
         * We don't handle GROUP BY or windowing, because our current
         * implementations of grouping require looking at all the rows anyway, and
-        * so there's not much point in optimizing MIN/MAX.  (Note: relaxing
-        * this would likely require some restructuring in grouping_planner(),
-        * since it performs assorted processing related to these features between
-        * calling preprocess_minmax_aggregates and optimize_minmax_aggregates.)
+        * so there's not much point in optimizing MIN/MAX.  (Note: relaxing this
+        * would likely require some restructuring in grouping_planner(), since it
+        * performs assorted processing related to these features between calling
+        * preprocess_minmax_aggregates and optimize_minmax_aggregates.)
         */
        if (parse->groupClause || parse->hasWindowFuncs)
                return;
@@ -119,7 +119,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
 
        /*
         * Scan the tlist and HAVING qual to find all the aggregates and verify
-        * all are MIN/MAX aggregates.  Stop as soon as we find one that isn't.
+        * all are MIN/MAX aggregates.  Stop as soon as we find one that isn't.
         */
        aggs_list = NIL;
        if (find_minmax_aggs_walker((Node *) tlist, &aggs_list))
@@ -146,7 +146,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
                 * ordering operator.
                 */
                eqop = get_equality_op_for_ordering_op(mminfo->aggsortop, &reverse);
-               if (!OidIsValid(eqop))          /* shouldn't happen */
+               if (!OidIsValid(eqop))  /* shouldn't happen */
                        elog(ERROR, "could not find equality operator for ordering operator %u",
                                 mminfo->aggsortop);
 
@@ -154,7 +154,7 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
                 * We can use either an ordering that gives NULLS FIRST or one that
                 * gives NULLS LAST; furthermore there's unlikely to be much
                 * performance difference between them, so it doesn't seem worth
-                * costing out both ways if we get a hit on the first one.  NULLS
+                * costing out both ways if we get a hit on the first one.      NULLS
                 * FIRST is more likely to be available if the operator is a
                 * reverse-sort operator, so try that first if reverse.
                 */
@@ -169,8 +169,8 @@ preprocess_minmax_aggregates(PlannerInfo *root, List *tlist)
 
        /*
         * We're done until path generation is complete.  Save info for later.
-        * (Setting root->minmax_aggs non-NIL signals we succeeded in making
-        * index access paths for all the aggregates.)
+        * (Setting root->minmax_aggs non-NIL signals we succeeded in making index
+        * access paths for all the aggregates.)
         */
        root->minmax_aggs = aggs_list;
 }
@@ -333,7 +333,7 @@ find_minmax_aggs_walker(Node *node, List **context)
                mminfo->aggfnoid = aggref->aggfnoid;
                mminfo->aggsortop = aggsortop;
                mminfo->target = curTarget->expr;
-               mminfo->subroot = NULL;                         /* don't compute path yet */
+               mminfo->subroot = NULL; /* don't compute path yet */
                mminfo->path = NULL;
                mminfo->pathcost = 0;
                mminfo->param = NULL;
@@ -424,7 +424,7 @@ build_minmax_path(PlannerInfo *root, MinMaxAggInfo *mminfo,
        sortcl->eqop = eqop;
        sortcl->sortop = sortop;
        sortcl->nulls_first = nulls_first;
-       sortcl->hashable = false;               /* no need to make this accurate */
+       sortcl->hashable = false;       /* no need to make this accurate */
        parse->sortClause = list_make1(sortcl);
 
        /* set up expressions for LIMIT 1 */
@@ -450,8 +450,8 @@ build_minmax_path(PlannerInfo *root, MinMaxAggInfo *mminfo,
        subroot->query_pathkeys = subroot->sort_pathkeys;
 
        /*
-        * Generate the best paths for this query, telling query_planner that
-        * we have LIMIT 1.
+        * Generate the best paths for this query, telling query_planner that we
+        * have LIMIT 1.
         */
        query_planner(subroot, parse->targetList, 1.0, 1.0,
                                  &cheapest_path, &sorted_path, &dNumGroups);
@@ -527,11 +527,11 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *mminfo)
                                                                   exprCollation((Node *) mminfo->target));
 
        /*
-        * Make sure the initplan gets into the outer PlannerInfo, along with
-        * any other initplans generated by the sub-planning run.  We had to
-        * include the outer PlannerInfo's pre-existing initplans into the
-        * inner one's init_plans list earlier, so make sure we don't put back
-        * any duplicate entries.
+        * Make sure the initplan gets into the outer PlannerInfo, along with any
+        * other initplans generated by the sub-planning run.  We had to include
+        * the outer PlannerInfo's pre-existing initplans into the inner one's
+        * init_plans list earlier, so make sure we don't put back any duplicate
+        * entries.
         */
        root->init_plans = list_concat_unique_ptr(root->init_plans,
                                                                                          subroot->init_plans);
index 3dc23662e7152fb12b1098980b8f432c4efdff8e..ff39d5754dc989a087102523ca77adca79a89e17 100644 (file)
@@ -179,12 +179,12 @@ query_planner(PlannerInfo *root, List *tlist,
        /*
         * Examine the targetlist and join tree, adding entries to baserel
         * targetlists for all referenced Vars, and generating PlaceHolderInfo
-        * entries for all referenced PlaceHolderVars.  Restrict and join clauses
-        * are added to appropriate lists belonging to the mentioned relations.
-        * We also build EquivalenceClasses for provably equivalent expressions.
-        * The SpecialJoinInfo list is also built to hold information about join
-        * order restrictions.  Finally, we form a target joinlist for
-        * make_one_rel() to work from.
+        * entries for all referenced PlaceHolderVars.  Restrict and join clauses
+        * are added to appropriate lists belonging to the mentioned relations. We
+        * also build EquivalenceClasses for provably equivalent expressions. The
+        * SpecialJoinInfo list is also built to hold information about join order
+        * restrictions.  Finally, we form a target joinlist for make_one_rel() to
+        * work from.
         */
        build_base_rel_tlists(root, tlist);
 
@@ -216,7 +216,7 @@ query_planner(PlannerInfo *root, List *tlist,
        /*
         * Examine any "placeholder" expressions generated during subquery pullup.
         * Make sure that the Vars they need are marked as needed at the relevant
-        * join level.  This must be done before join removal because it might
+        * join level.  This must be done before join removal because it might
         * cause Vars or placeholders to be needed above a join when they weren't
         * so marked before.
         */
index 56d25abc6d747465be54140852bcc6b0746fd04e..58a5bf8eceb464e01fcca3a2711e2d12f5528132 100644 (file)
@@ -345,16 +345,16 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
        inline_set_returning_functions(root);
 
        /*
-        * Check to see if any subqueries in the jointree can be merged into
-        * this query.
+        * Check to see if any subqueries in the jointree can be merged into this
+        * query.
         */
        parse->jointree = (FromExpr *)
                pull_up_subqueries(root, (Node *) parse->jointree, NULL, NULL);
 
        /*
-        * If this is a simple UNION ALL query, flatten it into an appendrel.
-        * We do this now because it requires applying pull_up_subqueries to the
-        * leaf queries of the UNION ALL, which weren't touched above because they
+        * If this is a simple UNION ALL query, flatten it into an appendrel. We
+        * do this now because it requires applying pull_up_subqueries to the leaf
+        * queries of the UNION ALL, which weren't touched above because they
         * weren't referenced by the jointree (they will be after we do this).
         */
        if (parse->setOperations)
@@ -575,7 +575,7 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
 
                        plan = (Plan *) make_modifytable(parse->commandType,
                                                                                         parse->canSetTag,
-                                                                                        list_make1_int(parse->resultRelation),
+                                                                          list_make1_int(parse->resultRelation),
                                                                                         list_make1(plan),
                                                                                         returningLists,
                                                                                         rowMarks,
@@ -3116,9 +3116,9 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
 
        /*
         * Determine eval cost of the index expressions, if any.  We need to
-        * charge twice that amount for each tuple comparison that happens
-        * during the sort, since tuplesort.c will have to re-evaluate the
-        * index expressions each time.  (XXX that's pretty inefficient...)
+        * charge twice that amount for each tuple comparison that happens during
+        * the sort, since tuplesort.c will have to re-evaluate the index
+        * expressions each time.  (XXX that's pretty inefficient...)
         */
        cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
        comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
index bd678ac7edea153d191ca546c478384149c0c530..a40f116bf9a48d709e8868218eddc722c25b2d24 100644 (file)
@@ -1429,7 +1429,7 @@ pullup_replace_vars_callback(Var *var,
  *
  * If a query's setOperations tree consists entirely of simple UNION ALL
  * operations, flatten it into an append relation, which we can process more
- * intelligently than the general setops case.  Otherwise, do nothing.
+ * intelligently than the general setops case. Otherwise, do nothing.
  *
  * In most cases, this can succeed only for a top-level query, because for a
  * subquery in FROM, the parent query's invocation of pull_up_subqueries would
@@ -1478,10 +1478,10 @@ flatten_simple_union_all(PlannerInfo *root)
 
        /*
         * Make a copy of the leftmost RTE and add it to the rtable.  This copy
-        * will represent the leftmost leaf query in its capacity as a member
-        * of the appendrel.  The original will represent the appendrel as a
-        * whole.  (We must do things this way because the upper query's Vars
-        * have to be seen as referring to the whole appendrel.)
+        * will represent the leftmost leaf query in its capacity as a member of
+        * the appendrel.  The original will represent the appendrel as a whole.
+        * (We must do things this way because the upper query's Vars have to be
+        * seen as referring to the whole appendrel.)
         */
        childRTE = copyObject(leftmostRTE);
        parse->rtable = lappend(parse->rtable, childRTE);
@@ -1503,8 +1503,8 @@ flatten_simple_union_all(PlannerInfo *root)
        parse->jointree->fromlist = list_make1(rtr);
 
        /*
-        * Now pretend the query has no setops.  We must do this before trying
-        * to do subquery pullup, because of Assert in pull_up_simple_subquery.
+        * Now pretend the query has no setops.  We must do this before trying to
+        * do subquery pullup, because of Assert in pull_up_simple_subquery.
         */
        parse->setOperations = NULL;
 
@@ -1842,9 +1842,9 @@ reduce_outer_joins_pass2(Node *jtnode,
                         * never both, to the children of an outer join.
                         *
                         * Note that a SEMI join works like an inner join here: it's okay
-                        * to pass down both local and upper constraints.  (There can't
-                        * be any upper constraints affecting its inner side, but it's
-                        * not worth having a separate code path to avoid passing them.)
+                        * to pass down both local and upper constraints.  (There can't be
+                        * any upper constraints affecting its inner side, but it's not
+                        * worth having a separate code path to avoid passing them.)
                         *
                         * At a FULL join we just punt and pass nothing down --- is it
                         * possible to be smarter?
@@ -1882,7 +1882,7 @@ reduce_outer_joins_pass2(Node *jtnode,
                                        pass_nonnullable_vars = local_nonnullable_vars;
                                        pass_forced_null_vars = local_forced_null_vars;
                                }
-                               else if (jointype != JOIN_FULL)         /* ie, LEFT or ANTI */
+                               else if (jointype != JOIN_FULL) /* ie, LEFT or ANTI */
                                {
                                        /* can't pass local constraints to non-nullable side */
                                        pass_nonnullable_rels = nonnullable_rels;
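
The flatten_simple_union_all() transformation touched above is easiest to see from a query; a hypothetical sketch (exact EXPLAIN output varies with version and data):

    CREATE TABLE t1 (x int);
    CREATE TABLE t2 (x int);

    -- A query tree that consists of nothing but simple UNION ALL arms is
    -- flattened into an append relation, so it is planned as an Append over
    -- the member scans rather than as a general set-operation plan.
    EXPLAIN SELECT x FROM t1 UNION ALL SELECT x FROM t2;
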
index 10e00d90ddac55f063718afd9d1bebf45f4cbe5e..f6f00c4ee91bb4d725b2925956b7205c075fc514 100644 (file)
@@ -54,12 +54,12 @@ static Expr *process_duplicate_ors(List *orlist);
  * Although this can be invoked on its own, it's mainly intended as a helper
  * for eval_const_expressions(), and that context drives several design
  * decisions.  In particular, if the input is already AND/OR flat, we must
- * preserve that property.  We also don't bother to recurse in situations
+ * preserve that property.     We also don't bother to recurse in situations
  * where we can assume that lower-level executions of eval_const_expressions
  * would already have simplified sub-clauses of the input.
  *
  * The difference between this and a simple make_notclause() is that this
- * tries to get rid of the NOT node by logical simplification.  It's clearly
+ * tries to get rid of the NOT node by logical simplification. It's clearly
  * always a win if the NOT node can be eliminated altogether.  However, our
  * use of DeMorgan's laws could result in having more NOT nodes rather than
  * fewer.  We do that unconditionally anyway, because in WHERE clauses it's
@@ -141,21 +141,21 @@ negate_clause(Node *node)
 
                                switch (expr->boolop)
                                {
-                                       /*--------------------
-                                        * Apply DeMorgan's Laws:
-                                        *              (NOT (AND A B)) => (OR (NOT A) (NOT B))
-                                        *              (NOT (OR A B))  => (AND (NOT A) (NOT B))
-                                        * i.e., swap AND for OR and negate each subclause.
-                                        *
-                                        * If the input is already AND/OR flat and has no NOT
-                                        * directly above AND or OR, this transformation preserves
-                                        * those properties.  For example, if no direct child of
-                                        * the given AND clause is an AND or a NOT-above-OR, then
-                                        * the recursive calls of negate_clause() can't return any
-                                        * OR clauses.  So we needn't call pull_ors() before
-                                        * building a new OR clause.  Similarly for the OR case.
-                                        *--------------------
-                                        */
+                                               /*--------------------
+                                                * Apply DeMorgan's Laws:
+                                                *              (NOT (AND A B)) => (OR (NOT A) (NOT B))
+                                                *              (NOT (OR A B))  => (AND (NOT A) (NOT B))
+                                                * i.e., swap AND for OR and negate each subclause.
+                                                *
+                                                * If the input is already AND/OR flat and has no NOT
+                                                * directly above AND or OR, this transformation preserves
+                                                * those properties.  For example, if no direct child of
+                                                * the given AND clause is an AND or a NOT-above-OR, then
+                                                * the recursive calls of negate_clause() can't return any
+                                                * OR clauses.  So we needn't call pull_ors() before
+                                                * building a new OR clause.  Similarly for the OR case.
+                                                *--------------------
+                                                */
                                        case AND_EXPR:
                                                {
                                                        List       *nargs = NIL;
@@ -183,6 +183,7 @@ negate_clause(Node *node)
                                                }
                                                break;
                                        case NOT_EXPR:
+
                                                /*
                                                 * NOT underneath NOT: they cancel.  We assume the
                                                 * input is already simplified, so no need to recurse.
@@ -218,8 +219,8 @@ negate_clause(Node *node)
                        break;
                case T_BooleanTest:
                        {
-                               BooleanTest   *expr = (BooleanTest *) node;
-                               BooleanTest   *newexpr = makeNode(BooleanTest);
+                               BooleanTest *expr = (BooleanTest *) node;
+                               BooleanTest *newexpr = makeNode(BooleanTest);
 
                                newexpr->arg = expr->arg;
                                switch (expr->booltesttype)
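
negate_clause()'s use of DeMorgan's laws, spelled out informally on a hypothetical table (each negated comparison is replaced by its negator operator):

    CREATE TABLE t (a int, b int);

    -- NOT over AND becomes OR over the negated arms, and vice versa:
    SELECT * FROM t WHERE NOT (a > 0 AND b > 0);
    -- is simplified to the equivalent
    SELECT * FROM t WHERE a <= 0 OR b <= 0;
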
index 4ba8921528f405771ee544da1abceeb2fe236806..c97150c6f74bf75be52075299f777a50ae08ffef 100644 (file)
@@ -4,7 +4,7 @@
  *       Routines to preprocess the parse tree target list
  *
  * For INSERT and UPDATE queries, the targetlist must contain an entry for
- * each attribute of the target relation in the correct order.  For all query
+ * each attribute of the target relation in the correct order. For all query
  * types, we may need to add junk tlist entries for Vars used in the RETURNING
  * list and row ID information needed for EvalPlanQual checking.
  *
@@ -80,7 +80,7 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
        /*
         * Add necessary junk columns for rowmarked rels.  These values are needed
         * for locking of rels selected FOR UPDATE/SHARE, and to do EvalPlanQual
-        * rechecking.  See comments for PlanRowMark in plannodes.h.
+        * rechecking.  See comments for PlanRowMark in plannodes.h.
         */
        foreach(lc, root->rowMarks)
        {
index e15a8620426319f3a6a640add1c668f85c480055..0ed9535d94d05b3cfbd998e183e4d1bbee130d8b 100644 (file)
@@ -938,7 +938,7 @@ generate_setop_tlist(List *colTypes, int flag,
  * The Vars are always generated with varno 0.
  */
 static List *
-generate_append_tlist(List *colTypes, List*colCollations, bool flag,
+generate_append_tlist(List *colTypes, List *colCollations, bool flag,
                                          List *input_plans,
                                          List *refnames_tlist)
 {
index b1069259f9b9e3e57d8e6f288f4c409f467ccbbc..b3c2aec97b00f64e607536b94363f778a055bd23 100644 (file)
@@ -2042,7 +2042,7 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum,
  *
  * Whenever a function is eliminated from the expression by means of
  * constant-expression evaluation or inlining, we add the function to
- * root->glob->invalItems.  This ensures the plan is known to depend on
+ * root->glob->invalItems.     This ensures the plan is known to depend on
  * such functions, even though they aren't referenced anymore.
  *
  * We assume that the tree has already been type-checked and contains
@@ -2437,8 +2437,8 @@ eval_const_expressions_mutator(Node *node,
                                                                                                                 context);
 
                                        /*
-                                        * Use negate_clause() to see if we can simplify away
-                                        * the NOT.
+                                        * Use negate_clause() to see if we can simplify away the
+                                        * NOT.
                                         */
                                        return negate_clause(arg);
                                }
@@ -2548,9 +2548,9 @@ eval_const_expressions_mutator(Node *node,
                                                          makeConst(OIDOID, -1, InvalidOid, sizeof(Oid),
                                                                                ObjectIdGetDatum(intypioparam),
                                                                                false, true),
-                                                         makeConst(INT4OID, -1, InvalidOid, sizeof(int32),
-                                                                               Int32GetDatum(-1),
-                                                                               false, true));
+                                                       makeConst(INT4OID, -1, InvalidOid, sizeof(int32),
+                                                                         Int32GetDatum(-1),
+                                                                         false, true));
 
                        simple = simplify_function(infunc,
                                                                           expr->resulttype, -1,
@@ -2618,9 +2618,9 @@ eval_const_expressions_mutator(Node *node,
                /*
                 * If we can simplify the input to a constant, then we don't need the
                 * CollateExpr node at all: just change the constcollid field of the
-                * Const node.  Otherwise, replace the CollateExpr with a RelabelType.
-                * (We do that so as to improve uniformity of expression representation
-                * and thus simplify comparison of expressions.)
+                * Const node.  Otherwise, replace the CollateExpr with a RelabelType.
+                * (We do that so as to improve uniformity of expression
+                * representation and thus simplify comparison of expressions.)
                 */
                CollateExpr *collate = (CollateExpr *) node;
                Node       *arg;
@@ -2675,7 +2675,7 @@ eval_const_expressions_mutator(Node *node,
                 * placeholder nodes, so that we have the opportunity to reduce
                 * constant test conditions.  For example this allows
                 *              CASE 0 WHEN 0 THEN 1 ELSE 1/0 END
-                * to reduce to 1 rather than drawing a divide-by-0 error.  Note
+                * to reduce to 1 rather than drawing a divide-by-0 error.      Note
                 * that when the test expression is constant, we don't have to
                 * include it in the resulting CASE; for example
                 *              CASE 0 WHEN x THEN y ELSE z END
@@ -2855,9 +2855,9 @@ eval_const_expressions_mutator(Node *node,
                        /*
                         * We can remove null constants from the list. For a non-null
                         * constant, if it has not been preceded by any other
-                        * non-null-constant expressions then it is the result.  Otherwise,
-                        * it's the next argument, but we can drop following arguments
-                        * since they will never be reached.
+                        * non-null-constant expressions then it is the result.
+                        * Otherwise, it's the next argument, but we can drop following
+                        * arguments since they will never be reached.
                         */
                        if (IsA(e, Const))
                        {
@@ -3353,12 +3353,12 @@ simplify_boolean_equality(Oid opno, List *args)
                        if (DatumGetBool(((Const *) leftop)->constvalue))
                                return rightop; /* true = foo */
                        else
-                               return negate_clause(rightop); /* false = foo */
+                               return negate_clause(rightop);  /* false = foo */
                }
                else
                {
                        if (DatumGetBool(((Const *) leftop)->constvalue))
-                               return negate_clause(rightop); /* true <> foo */
+                               return negate_clause(rightop);  /* true <> foo */
                        else
                                return rightop; /* false <> foo */
                }
@@ -3902,7 +3902,7 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
        fexpr->funcresulttype = result_type;
        fexpr->funcretset = false;
        fexpr->funcformat = COERCE_DONTCARE;            /* doesn't matter */
-       fexpr->funccollid = result_collid;                      /* doesn't matter */
+       fexpr->funccollid = result_collid;      /* doesn't matter */
        fexpr->inputcollid = input_collid;
        fexpr->args = args;
        fexpr->location = -1;
@@ -4060,18 +4060,18 @@ inline_function(Oid funcid, Oid result_type, Oid result_collid,
        MemoryContextDelete(mycxt);
 
        /*
-        * If the result is of a collatable type, force the result to expose
-        * the correct collation.  In most cases this does not matter, but
-        * it's possible that the function result is used directly as a sort key
-        * or in other places where we expect exprCollation() to tell the truth.
+        * If the result is of a collatable type, force the result to expose the
+        * correct collation.  In most cases this does not matter, but it's
+        * possible that the function result is used directly as a sort key or in
+        * other places where we expect exprCollation() to tell the truth.
         */
        if (OidIsValid(result_collid))
        {
-               Oid             exprcoll = exprCollation(newexpr);
+               Oid                     exprcoll = exprCollation(newexpr);
 
                if (OidIsValid(exprcoll) && exprcoll != result_collid)
                {
-                       CollateExpr   *newnode = makeNode(CollateExpr);
+                       CollateExpr *newnode = makeNode(CollateExpr);
 
                        newnode->arg = (Expr *) newexpr;
                        newnode->collOid = result_collid;
@@ -4370,11 +4370,11 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
        oldcxt = MemoryContextSwitchTo(mycxt);
 
        /*
-        * When we call eval_const_expressions below, it might try to add items
-        * to root->glob->invalItems.  Since it is running in the temp context,
-        * those items will be in that context, and will need to be copied out
-        * if we're successful.  Temporarily reset the list so that we can keep
-        * those items separate from the pre-existing list contents.
+        * When we call eval_const_expressions below, it might try to add items to
+        * root->glob->invalItems.      Since it is running in the temp context, those
+        * items will be in that context, and will need to be copied out if we're
+        * successful.  Temporarily reset the list so that we can keep those items
+        * separate from the pre-existing list contents.
         */
        saveInvalItems = root->glob->invalItems;
        root->glob->invalItems = NIL;
@@ -4419,8 +4419,8 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
                goto fail;
 
        /*
-        * Set up to handle parameters while parsing the function body.  We
-        * can use the FuncExpr just created as the input for
+        * Set up to handle parameters while parsing the function body.  We can
+        * use the FuncExpr just created as the input for
         * prepare_sql_fn_parse_info.
         */
        pinfo = prepare_sql_fn_parse_info(func_tuple,
@@ -4438,7 +4438,7 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
 
        querytree_list = pg_analyze_and_rewrite_params(linitial(raw_parsetree_list),
                                                                                                   src,
-                                                                                                  (ParserSetupHook) sql_fn_parser_setup,
+                                                                          (ParserSetupHook) sql_fn_parser_setup,
                                                                                                   pinfo);
        if (list_length(querytree_list) != 1)
                goto fail;
@@ -4513,8 +4513,8 @@ inline_set_returning_function(PlannerInfo *root, RangeTblEntry *rte)
        ReleaseSysCache(func_tuple);
 
        /*
-        * We don't have to fix collations here because the upper query is
-        * already parsed, ie, the collations in the RTE are what count.
+        * We don't have to fix collations here because the upper query is already
+        * parsed, ie, the collations in the RTE are what count.
         */
 
        /*
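
Two of the simplifications mentioned in the comments above, shown as self-contained queries (the second uses generate_series() only to avoid needing a table):

    -- Constant-folding of CASE: the expression collapses to 1 at plan time,
    -- so the ELSE arm's division by zero is never evaluated.
    SELECT CASE 0 WHEN 0 THEN 1 ELSE 1/0 END;

    -- simplify_boolean_equality(): "expr = true" is reduced to just "expr",
    -- and "expr = false" to "NOT expr".
    SELECT * FROM generate_series(1, 3) AS g(a) WHERE (a > 1) = true;
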
index b60c88d9251222a9e0a0944dd3ac23f6b3f7c061..55218b58694bb43e7af654016ac53ac9d58d0a93 100644 (file)
@@ -745,7 +745,7 @@ create_merge_append_path(PlannerInfo *root,
                else
                {
                        /* We'll need to insert a Sort node, so include cost for that */
-                       Path    sort_path;              /* dummy for result of cost_sort */
+                       Path            sort_path;              /* dummy for result of cost_sort */
 
                        cost_sort(&sort_path,
                                          root,
@@ -1432,11 +1432,11 @@ create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel)
        ForeignPath *pathnode = makeNode(ForeignPath);
        RangeTblEntry *rte;
        FdwRoutine *fdwroutine;
-       FdwPlan    *fdwplan;
+       FdwPlan    *fdwplan;
 
        pathnode->path.pathtype = T_ForeignScan;
        pathnode->path.parent = rel;
-       pathnode->path.pathkeys = NIL;  /* result is always unordered */
+       pathnode->path.pathkeys = NIL;          /* result is always unordered */
 
        /* Get FDW's callback info */
        rte = planner_rt_fetch(rel->relid, root);
index 61edd4991c6b751cf2cfd52e8b7a54d2a5db2f91..9796fbf9b603f93fb462c6cf0ee208af8d04ee96 100644 (file)
@@ -25,7 +25,7 @@
 /* Local functions */
 static Relids find_placeholders_recurse(PlannerInfo *root, Node *jtnode);
 static void find_placeholders_in_qual(PlannerInfo *root, Node *qual,
-                                                                         Relids relids);
+                                                 Relids relids);
 
 
 /*
@@ -179,7 +179,7 @@ find_placeholders_recurse(PlannerInfo *root, Node *jtnode)
        {
                elog(ERROR, "unrecognized node type: %d",
                         (int) nodeTag(jtnode));
-               jtrelids = NULL;                        /* keep compiler quiet */
+               jtrelids = NULL;                /* keep compiler quiet */
        }
        return jtrelids;
 }
index 72fd3e4ca712c8e7c2ca0422f844263a5993bbca..a7e83729b1b1bd161fab50b00847cd883ca33676 100644 (file)
@@ -1696,7 +1696,7 @@ get_btree_test_op(Oid pred_op, Oid clause_op, bool refute_it)
                else if (OidIsValid(clause_op_negator))
                {
                        clause_tuple = SearchSysCache3(AMOPOPID,
-                                                                                  ObjectIdGetDatum(clause_op_negator),
+                                                                                ObjectIdGetDatum(clause_op_negator),
                                                                                   CharGetDatum(AMOP_SEARCH),
                                                                                   ObjectIdGetDatum(opfamily_id));
                        if (HeapTupleIsValid(clause_tuple))
index 944db238003e13d5732ca5e1f0f5d59ac0a0ff14..edf507c40565050b64fef6f166556efd01326d48 100644 (file)
@@ -694,7 +694,7 @@ pull_var_clause_walker(Node *node, pull_var_clause_context *context)
  * entries might now be arbitrary expressions, not just Vars.  This affects
  * this function in one important way: we might find ourselves inserting
  * SubLink expressions into subqueries, and we must make sure that their
- * Query.hasSubLinks fields get set to TRUE if so.  If there are any
+ * Query.hasSubLinks fields get set to TRUE if so.     If there are any
  * SubLinks in the join alias lists, the outer Query should already have
  * hasSubLinks = TRUE, so this is only relevant to un-flattened subqueries.
  *
index 315f067b17a5cb2e56960425a7fed757eaf9559b..e4e83a67165733e95863f4fa02ecf4702de50c52 100644 (file)
@@ -759,8 +759,8 @@ transformInsertRow(ParseState *pstate, List *exprlist,
                 * columns.  Add a suitable hint if that seems to be the problem,
                 * because the main error message is quite misleading for this case.
                 * (If there's no stmtcols, you'll get something about data type
-                * mismatch, which is less misleading so we don't worry about giving
-                * hint in that case.)
+                * mismatch, which is less misleading so we don't worry about giving a
+                * hint in that case.)
                 */
                ereport(ERROR,
                                (errcode(ERRCODE_SYNTAX_ERROR),
@@ -809,7 +809,7 @@ transformInsertRow(ParseState *pstate, List *exprlist,
  *       return -1 if expression isn't a RowExpr or a Var referencing one.
  *
  * This is currently used only for hint purposes, so we aren't terribly
- * tense about recognizing all possible cases.  The Var case is interesting
+ * tense about recognizing all possible cases. The Var case is interesting
  * because that's what we'll get in the INSERT ... SELECT (...) case.
  */
 static int
@@ -1100,8 +1100,8 @@ transformValuesClause(ParseState *pstate, SelectStmt *stmt)
                /*
                 * We must assign collations now because assign_query_collations
                 * doesn't process rangetable entries.  We just assign all the
-                * collations independently in each row, and don't worry about
-                * whether they are consistent vertically either.
+                * collations independently in each row, and don't worry about whether
+                * they are consistent vertically either.
                 */
                assign_list_collations(pstate, newsublist);
 
@@ -1452,7 +1452,7 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
  *             Recursively transform leaves and internal nodes of a set-op tree
  *
  * In addition to returning the transformed node, if targetlist isn't NULL
- * then we return a list of its non-resjunk TargetEntry nodes.  For a leaf
+ * then we return a list of its non-resjunk TargetEntry nodes. For a leaf
  * set-op node these are the actual targetlist entries; otherwise they are
  * dummy entries created to carry the type, typmod, collation, and location
  * (for error messages) of each output column of the set-op node.  This info
@@ -1672,7 +1672,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
                         * child query's semantics.
                         *
                         * If a child expression is an UNKNOWN-type Const or Param, we
-                        * want to replace it with the coerced expression.  This can only
+                        * want to replace it with the coerced expression.      This can only
                         * happen when the child is a leaf set-op node.  It's safe to
                         * replace the expression because if the child query's semantics
                         * depended on the type of this output column, it'd have already
@@ -1721,8 +1721,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
                         * collation.)
                         */
                        rescolcoll = select_common_collation(pstate,
-                                                                                                list_make2(lcolnode, rcolnode),
-                                                                                                (op->op == SETOP_UNION && op->all));
+                                                                                         list_make2(lcolnode, rcolnode),
+                                                                                (op->op == SETOP_UNION && op->all));
 
                        /* emit results */
                        op->colTypes = lappend_oid(op->colTypes, rescoltype);
@@ -1778,7 +1778,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
                                rescolnode->collation = rescolcoll;
                                rescolnode->location = bestlocation;
                                restle = makeTargetEntry((Expr *) rescolnode,
-                                                                                0,                     /* no need to set resno */
+                                                                                0,             /* no need to set resno */
                                                                                 NULL,
                                                                                 false);
                                *targetlist = lappend(*targetlist, restle);
@@ -2331,10 +2331,10 @@ transformLockingClause(ParseState *pstate, Query *qry, LockingClause *lc,
                                                case RTE_RELATION:
                                                        if (rte->relkind == RELKIND_FOREIGN_TABLE)
                                                                ereport(ERROR,
-                                                                               (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                                                                errmsg("SELECT FOR UPDATE/SHARE cannot be used with foreign table \"%s\"",
-                                                                                               rte->eref->aliasname),
-                                                                                parser_errposition(pstate, thisrel->location)));
+                                                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+                                                                         errmsg("SELECT FOR UPDATE/SHARE cannot be used with foreign table \"%s\"",
+                                                                                        rte->eref->aliasname),
+                                                                         parser_errposition(pstate, thisrel->location)));
                                                        applyLockingClause(qry, i,
                                                                                           lc->forUpdate, lc->noWait,
                                                                                           pushedDown);
index 523d6e6989a2cea32ebc993665e692baf44d387a..8356133796624810f7040f94d3ab16322a336cb4 100644 (file)
@@ -631,7 +631,7 @@ check_ungrouped_columns_walker(Node *node,
 
                /*
                 * Check whether the Var is known functionally dependent on the GROUP
-                * BY columns.  If so, we can allow the Var to be used, because the
+                * BY columns.  If so, we can allow the Var to be used, because the
                 * grouping is really a no-op for this table.  However, this deduction
                 * depends on one or more constraints of the table, so we have to add
                 * those constraints to the query's constraintDeps list, because it's
@@ -642,11 +642,11 @@ check_ungrouped_columns_walker(Node *node,
                 * Because this is a pretty expensive check, and will have the same
                 * outcome for all columns of a table, we remember which RTEs we've
                 * already proven functional dependency for in the func_grouped_rels
-                * list.  This test also prevents us from adding duplicate entries
-                * to the constraintDeps list.
+                * list.  This test also prevents us from adding duplicate entries to
+                * the constraintDeps list.
                 */
                if (list_member_int(*context->func_grouped_rels, var->varno))
-                       return false;                           /* previously proven acceptable */
+                       return false;           /* previously proven acceptable */
 
                Assert(var->varno > 0 &&
                           (int) var->varno <= list_length(context->pstate->p_rtable));
@@ -661,7 +661,7 @@ check_ungrouped_columns_walker(Node *node,
                        {
                                *context->func_grouped_rels =
                                        lappend_int(*context->func_grouped_rels, var->varno);
-                               return false;                   /* acceptable */
+                               return false;   /* acceptable */
                        }
                }
 
index 6c0a78474cd1c89d2914b135eadf2b5a5b1cbc21..c5fe6b6a3fdb68f54aee8f2ea6f0eefe346cd33c 100644 (file)
@@ -1078,7 +1078,7 @@ buildMergedJoinVar(ParseState *pstate, JoinType jointype,
        else if (l_colvar->vartypmod != outcoltypmod)
                l_node = (Node *) makeRelabelType((Expr *) l_colvar,
                                                                                  outcoltype, outcoltypmod,
-                                                                                 InvalidOid,           /* fixed below */
+                                                                                 InvalidOid,   /* fixed below */
                                                                                  COERCE_IMPLICIT_CAST);
        else
                l_node = (Node *) l_colvar;
@@ -1090,7 +1090,7 @@ buildMergedJoinVar(ParseState *pstate, JoinType jointype,
        else if (r_colvar->vartypmod != outcoltypmod)
                r_node = (Node *) makeRelabelType((Expr *) r_colvar,
                                                                                  outcoltype, outcoltypmod,
-                                                                                 InvalidOid,           /* fixed below */
+                                                                                 InvalidOid,   /* fixed below */
                                                                                  COERCE_IMPLICIT_CAST);
        else
                r_node = (Node *) r_colvar;
@@ -1143,8 +1143,8 @@ buildMergedJoinVar(ParseState *pstate, JoinType jointype,
 
        /*
         * Apply assign_expr_collations to fix up the collation info in the
-        * coercion and CoalesceExpr nodes, if we made any.  This must be done
-        * now so that the join node's alias vars show correct collation info.
+        * coercion and CoalesceExpr nodes, if we made any.  This must be done now
+        * so that the join node's alias vars show correct collation info.
         */
        assign_expr_collations(pstate, res_node);
 
index 895c3ad98565d0c9e160b2e74863e4fa1d26a3e3..0418972517eee4df52dbdc8f7807aa8fa528a674 100644 (file)
@@ -285,7 +285,7 @@ coerce_type(ParseState *pstate, Node *node,
        {
                /*
                 * If we have a COLLATE clause, we have to push the coercion
-                * underneath the COLLATE.  This is really ugly, but there is little
+                * underneath the COLLATE.      This is really ugly, but there is little
                 * choice because the above hacks on Consts and Params wouldn't happen
                 * otherwise.
                 */
@@ -1959,7 +1959,7 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
                        Oid                     sourceElem;
 
                        if ((targetElem = get_element_type(targetTypeId)) != InvalidOid &&
-                               (sourceElem = get_base_element_type(sourceTypeId)) != InvalidOid)
+                       (sourceElem = get_base_element_type(sourceTypeId)) != InvalidOid)
                        {
                                CoercionPathType elempathtype;
                                Oid                     elemfuncid;
@@ -2091,8 +2091,8 @@ is_complex_array(Oid typid)
 static bool
 typeIsOfTypedTable(Oid reltypeId, Oid reloftypeId)
 {
-       Oid relid = typeidTypeRelid(reltypeId);
-       bool result = false;
+       Oid                     relid = typeidTypeRelid(reltypeId);
+       bool            result = false;
 
        if (relid)
        {
index 3e557db266806dce6e76d6189ecc1ef4a3dda852..f0cd3f88d232dc0d67fe6b82cabbe26ad72ea50b 100644 (file)
  * 1. The output collation of each expression node, or InvalidOid if it
  * returns a noncollatable data type.  This can also be InvalidOid if the
  * result type is collatable but the collation is indeterminate.
- * 2. The collation to be used in executing each function.  InvalidOid means
+ * 2. The collation to be used in executing each function.     InvalidOid means
  * that there are no collatable inputs or their collation is indeterminate.
  * This value is only stored in node types that might call collation-using
  * functions.
  *
  * You might think we could get away with storing only one collation per
- * node, but the two concepts really need to be kept distinct.  Otherwise
+ * node, but the two concepts really need to be kept distinct. Otherwise
  * it's too confusing when a function produces a collatable output type but
  * has no collatable inputs or produces noncollatable output from collatable
  * inputs.
  *
  * Cases with indeterminate collation might result in an error being thrown
- * at runtime.  If we knew exactly which functions require collation
+ * at runtime. If we knew exactly which functions require collation
  * information, we could throw those errors at parse time instead.
  *
  * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
@@ -72,7 +72,7 @@ typedef struct
 
 static bool assign_query_collations_walker(Node *node, ParseState *pstate);
 static bool assign_collations_walker(Node *node,
-                                                                        assign_collations_context *context);
+                                                assign_collations_context *context);
 
 
 /*
@@ -116,8 +116,8 @@ assign_query_collations_walker(Node *node, ParseState *pstate)
                return false;
 
        /*
-        * We don't want to recurse into a set-operations tree; it's already
-        * been fully processed in transformSetOperationStmt.
+        * We don't want to recurse into a set-operations tree; it's already been
+        * fully processed in transformSetOperationStmt.
         */
        if (IsA(node, SetOperationStmt))
                return false;
@@ -144,7 +144,7 @@ assign_list_collations(ParseState *pstate, List *exprs)
 
        foreach(lc, exprs)
        {
-               Node   *node = (Node *) lfirst(lc);
+               Node       *node = (Node *) lfirst(lc);
 
                assign_expr_collations(pstate, node);
        }
@@ -231,7 +231,7 @@ select_common_collation(ParseState *pstate, List *exprs, bool none_ok)
  *             Recursive guts of collation processing.
  *
  * Nodes with no children (eg, Vars, Consts, Params) must have been marked
- * when built.  All upper-level nodes are marked here.
+ * when built. All upper-level nodes are marked here.
  *
  * Note: if this is invoked directly on a List, it will attempt to infer a
  * common collation for all the list members.  In particular, it will throw
@@ -250,9 +250,9 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                return false;
 
        /*
-        * Prepare for recursion.  For most node types, though not all, the
-        * first thing we do is recurse to process all nodes below this one.
-        * Each level of the tree has its own local context.
+        * Prepare for recursion.  For most node types, though not all, the first
+        * thing we do is recurse to process all nodes below this one. Each level
+        * of the tree has its own local context.
         */
        loccontext.pstate = context->pstate;
        loccontext.collation = InvalidOid;
@@ -323,11 +323,11 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                        {
                                /*
                                 * CaseExpr is a special case because we do not want to
-                                * recurse into the test expression (if any).  It was
-                                * already marked with collations during transformCaseExpr,
-                                * and furthermore its collation is not relevant to the
-                                * result of the CASE --- only the output expressions are.
-                                * So we can't use expression_tree_walker here.
+                                * recurse into the test expression (if any).  It was already
+                                * marked with collations during transformCaseExpr, and
+                                * furthermore its collation is not relevant to the result of
+                                * the CASE --- only the output expressions are. So we can't
+                                * use expression_tree_walker here.
                                 */
                                CaseExpr   *expr = (CaseExpr *) node;
                                Oid                     typcollation;
@@ -338,6 +338,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                                        CaseWhen   *when = (CaseWhen *) lfirst(lc);
 
                                        Assert(IsA(when, CaseWhen));
+
                                        /*
                                         * The condition expressions mustn't affect the CASE's
                                         * result collation either; but since they are known to
@@ -401,11 +402,11 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                case T_RowExpr:
                        {
                                /*
-                                * RowExpr is a special case because the subexpressions
-                                * are independent: we don't want to complain if some of
-                                * them have incompatible explicit collations.
+                                * RowExpr is a special case because the subexpressions are
+                                * independent: we don't want to complain if some of them have
+                                * incompatible explicit collations.
                                 */
-                               RowExpr *expr = (RowExpr *) node;
+                               RowExpr    *expr = (RowExpr *) node;
 
                                assign_list_collations(context->pstate, expr->args);
 
@@ -414,7 +415,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                                 * has a collation, we can just stop here: this node has no
                                 * impact on the collation of its parent.
                                 */
-                               return false;                   /* done */
+                               return false;   /* done */
                        }
                case T_RowCompareExpr:
                        {
@@ -431,9 +432,9 @@ assign_collations_walker(Node *node, assign_collations_context *context)
 
                                forboth(l, expr->largs, r, expr->rargs)
                                {
-                                       Node  *le = (Node *) lfirst(l);
-                                       Node  *re = (Node *) lfirst(r);
-                                       Oid             coll;
+                                       Node       *le = (Node *) lfirst(l);
+                                       Node       *re = (Node *) lfirst(r);
+                                       Oid                     coll;
 
                                        coll = select_common_collation(context->pstate,
                                                                                                   list_make2(le, re),
@@ -443,11 +444,11 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                                expr->inputcollids = colls;
 
                                /*
-                                * Since the result is always boolean and therefore never
-                                * has a collation, we can just stop here: this node has no
-                                * impact on the collation of its parent.
+                                * Since the result is always boolean and therefore never has
+                                * a collation, we can just stop here: this node has no impact
+                                * on the collation of its parent.
                                 */
-                               return false;                   /* done */
+                               return false;   /* done */
                        }
                case T_CoerceToDomain:
                        {
@@ -455,12 +456,12 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                                 * If the domain declaration included a non-default COLLATE
                                 * spec, then use that collation as the output collation of
                                 * the coercion.  Otherwise allow the input collation to
-                                * bubble up.  (The input should be of the domain's base
-                                * type, therefore we don't need to worry about it not being
+                                * bubble up.  (The input should be of the domain's base type,
+                                * therefore we don't need to worry about it not being
                                 * collatable when the domain is.)
                                 */
                                CoerceToDomain *expr = (CoerceToDomain *) node;
-                               Oid             typcollation = get_typcollation(expr->resulttype);
+                               Oid                     typcollation = get_typcollation(expr->resulttype);
 
                                /* ... but first, recurse */
                                (void) expression_tree_walker(node,
@@ -510,7 +511,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
 
                        /*
                         * TargetEntry can have only one child, and should bubble that
-                        * state up to its parent.  We can't use the general-case code
+                        * state up to its parent.      We can't use the general-case code
                         * below because exprType and friends don't work on TargetEntry.
                         */
                        collation = loccontext.collation;
@@ -525,9 +526,9 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                         * There are some cases where there might not be a failure, for
                         * example if the planner chooses to use hash aggregation instead
                         * of sorting for grouping; but it seems better to predictably
-                        * throw an error.  (Compare transformSetOperationTree, which will
-                        * throw error for indeterminate collation of set-op columns,
-                        * even though the planner might be able to implement the set-op
+                        * throw an error.      (Compare transformSetOperationTree, which will
+                        * throw error for indeterminate collation of set-op columns, even
+                        * though the planner might be able to implement the set-op
                         * without sorting.)
                         */
                        if (strength == COLLATE_CONFLICT &&
@@ -548,6 +549,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                        (void) expression_tree_walker(node,
                                                                                  assign_collations_walker,
                                                                                  (void *) &loccontext);
+
                        /*
                         * When we're invoked on a query's jointree, we don't need to do
                         * anything with join nodes except recurse through them to process
@@ -599,6 +601,7 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                case T_CaseTestExpr:
                case T_SetToDefault:
                case T_CurrentOfExpr:
+
                        /*
                         * General case for childless expression nodes.  These should
                         * already have a collation assigned; it is not this function's
@@ -610,10 +613,10 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                        /*
                         * Note: in most cases, there will be an assigned collation
                         * whenever type_is_collatable(exprType(node)); but an exception
-                        * occurs for a Var referencing a subquery output column for
-                        * which a unique collation was not determinable.  That may lead
-                        * to a runtime failure if a collation-sensitive function is
-                        * applied to the Var.
+                        * occurs for a Var referencing a subquery output column for which
+                        * a unique collation was not determinable.  That may lead to a
+                        * runtime failure if a collation-sensitive function is applied to
+                        * the Var.
                         */
 
                        if (OidIsValid(collation))
@@ -626,10 +629,10 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                default:
                        {
                                /*
-                                * General case for most expression nodes with children.
-                                * First recurse, then figure out what to assign here.
+                                * General case for most expression nodes with children. First
+                                * recurse, then figure out what to assign here.
                                 */
-                               Oid             typcollation;
+                               Oid                     typcollation;
 
                                (void) expression_tree_walker(node,
                                                                                          assign_collations_walker,
@@ -668,9 +671,9 @@ assign_collations_walker(Node *node, assign_collations_context *context)
                                }
 
                                /*
-                                * Save the result collation into the expression node.
-                                * If the state is COLLATE_CONFLICT, we'll set the collation
-                                * to InvalidOid, which might result in an error at runtime.
+                                * Save the result collation into the expression node. If the
+                                * state is COLLATE_CONFLICT, we'll set the collation to
+                                * InvalidOid, which might result in an error at runtime.
                                 */
                                if (strength == COLLATE_CONFLICT)
                                        exprSetCollation(node, InvalidOid);
index c527f7589e20646d02f9fdaf97eada30d744a048..41097263b413f79b1fa2ea2fcd448e88edd3b4ef 100644 (file)
@@ -245,7 +245,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte)
        cte->ctequery = (Node *) query;
 
        /*
-        * Check that we got something reasonable.  These first two cases should
+        * Check that we got something reasonable.      These first two cases should
         * be prevented by the grammar.
         */
        if (!IsA(query, Query))
@@ -310,7 +310,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte)
                                continue;
                        varattno++;
                        Assert(varattno == te->resno);
-                       if (lctyp == NULL || lctypmod == NULL || lccoll == NULL)                /* shouldn't happen */
+                       if (lctyp == NULL || lctypmod == NULL || lccoll == NULL)        /* shouldn't happen */
                                elog(ERROR, "wrong number of output columns in WITH");
                        texpr = (Node *) te->expr;
                        if (exprType(texpr) != lfirst_oid(lctyp) ||
@@ -338,7 +338,7 @@ analyzeCTE(ParseState *pstate, CommonTableExpr *cte)
                        lctypmod = lnext(lctypmod);
                        lccoll = lnext(lccoll);
                }
-               if (lctyp != NULL || lctypmod != NULL || lccoll != NULL)        /* shouldn't happen */
+               if (lctyp != NULL || lctypmod != NULL || lccoll != NULL)                /* shouldn't happen */
                        elog(ERROR, "wrong number of output columns in WITH");
        }
 }
@@ -645,7 +645,7 @@ checkWellFormedRecursion(CteState *cstate)
                CommonTableExpr *cte = cstate->items[i].cte;
                SelectStmt *stmt = (SelectStmt *) cte->ctequery;
 
-               Assert(!IsA(stmt, Query));      /* not analyzed yet */
+               Assert(!IsA(stmt, Query));              /* not analyzed yet */
 
                /* Ignore items that weren't found to be recursive */
                if (!cte->cterecursive)
index 4986e0e5fab551b76d85b4a77b9aaf67578eefce..08f0439e7edb270176e4b2c60cd92997ab745d69 100644 (file)
@@ -163,6 +163,7 @@ transformExpr(ParseState *pstate, Node *expr)
 
                                        typenameTypeIdAndMod(pstate, tc->typeName,
                                                                                 &targetType, &targetTypmod);
+
                                        /*
                                         * If target is a domain over array, work with the base
                                         * array type here.  transformTypeCast below will cast the
@@ -1283,9 +1284,9 @@ transformCaseExpr(ParseState *pstate, CaseExpr *c)
 
                /*
                 * Run collation assignment on the test expression so that we know
-                * what collation to mark the placeholder with.  In principle we
-                * could leave it to parse_collate.c to do that later, but propagating
-                * the result to the CaseTestExpr would be unnecessarily complicated.
+                * what collation to mark the placeholder with.  In principle we could
+                * leave it to parse_collate.c to do that later, but propagating the
+                * result to the CaseTestExpr would be unnecessarily complicated.
                 */
                assign_expr_collations(pstate, arg);
 
@@ -2122,15 +2123,16 @@ static Node *
 transformCollateClause(ParseState *pstate, CollateClause *c)
 {
        CollateExpr *newc;
-       Oid             argtype;
+       Oid                     argtype;
 
        newc = makeNode(CollateExpr);
        newc->arg = (Expr *) transformExpr(pstate, c->arg);
 
        argtype = exprType((Node *) newc->arg);
+
        /*
-        * The unknown type is not collatable, but coerce_type() takes
-        * care of it separately, so we'll let it go here.
+        * The unknown type is not collatable, but coerce_type() takes care of it
+        * separately, so we'll let it go here.
         */
        if (!type_is_collatable(argtype) && argtype != UNKNOWNOID)
                ereport(ERROR,
@@ -2351,7 +2353,7 @@ make_row_comparison_op(ParseState *pstate, List *opname,
        rcexpr->rctype = rctype;
        rcexpr->opnos = opnos;
        rcexpr->opfamilies = opfamilies;
-       rcexpr->inputcollids = NIL;     /* assign_expr_collations will fix this */
+       rcexpr->inputcollids = NIL; /* assign_expr_collations will fix this */
        rcexpr->largs = largs;
        rcexpr->rargs = rargs;
 
index ba699e9a1ed929dd97a79d42ce69a4dca0003d78..75f1e20475d1c2df628f0a866fc081c601340e98 100644 (file)
@@ -288,9 +288,9 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
                                         errmsg("function %s does not exist",
                                                        func_signature_string(funcname, nargs, argnames,
                                                                                                  actual_arg_types)),
-                       errhint("No aggregate function matches the given name and argument types. "
-                                       "Perhaps you misplaced ORDER BY; ORDER BY must appear "
-                                       "after all regular arguments of the aggregate."),
+                                        errhint("No aggregate function matches the given name and argument types. "
+                                         "Perhaps you misplaced ORDER BY; ORDER BY must appear "
+                                                        "after all regular arguments of the aggregate."),
                                         parser_errposition(pstate, location)));
                }
                else
@@ -1034,7 +1034,7 @@ func_get_detail(List *funcname,
                                                case COERCION_PATH_COERCEVIAIO:
                                                        if ((sourceType == RECORDOID ||
                                                                 ISCOMPLEX(sourceType)) &&
-                                                               TypeCategory(targetType) == TYPCATEGORY_STRING)
+                                                         TypeCategory(targetType) == TYPCATEGORY_STRING)
                                                                iscoercion = false;
                                                        else
                                                                iscoercion = true;
index 07257accc828fe76dec18cfa39ea438a45f2ce64..7b5c040cb4010ce3d25f0e51936ed2f112629902 100644 (file)
@@ -220,7 +220,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
         * If the input is a domain, smash to base type, and extract the actual
         * typmod to be applied to the base type.  Subscripting a domain is an
         * operation that necessarily works on the base array type, not the domain
-        * itself.  (Note that we provide no method whereby the creator of a
+        * itself.      (Note that we provide no method whereby the creator of a
         * domain over an array type could hide its ability to be subscripted.)
         */
        *arrayType = getBaseTypeAndTypmod(*arrayType, arrayTypmod);
@@ -290,8 +290,8 @@ transformArraySubscripts(ParseState *pstate,
 
        /*
         * Caller may or may not have bothered to determine elementType.  Note
-        * that if the caller did do so, arrayType/arrayTypMod must be as
-        * modified by transformArrayType, ie, smash domain to base type.
+        * that if the caller did do so, arrayType/arrayTypMod must be as modified
+        * by transformArrayType, ie, smash domain to base type.
         */
        if (!OidIsValid(elementType))
                elementType = transformArrayType(&arrayType, &arrayTypMod);
@@ -542,7 +542,7 @@ make_const(ParseState *pstate, Value *value, int location)
 
        con = makeConst(typeid,
                                        -1,                     /* typmod -1 is OK for all cases */
-                                       InvalidOid,     /* all cases are uncollatable types */
+                                       InvalidOid, /* all cases are uncollatable types */
                                        typelen,
                                        val,
                                        false,
index 822e0a0a628308c7bcec57a3574ac71b7bb2260f..15a3bb3a01360942e6cea0085233e4f2eabda6a3 100644 (file)
@@ -214,9 +214,9 @@ get_sort_group_operators(Oid argtype,
        /*
         * If the datatype is an array, then we can use array_lt and friends ...
         * but only if there are suitable operators for the element type.
-        * Likewise, array types are only hashable if the element type is.
-        * Testing all three operator IDs here should be redundant, but let's do
-        * it anyway.
+        * Likewise, array types are only hashable if the element type is. Testing
+        * all three operator IDs here should be redundant, but let's do it
+        * anyway.
         */
        if (lt_opr == ARRAY_LT_OP ||
                eq_opr == ARRAY_EQ_OP ||
index 1895f92d7c4187fd7d154ee54f5c608db687e7c6..3b8a67619e8ff40e8f63549d76075d9a93973341 100644 (file)
@@ -233,8 +233,8 @@ variable_coerce_param_hook(ParseState *pstate, Param *param,
 
                /*
                 * This module always sets a Param's collation to be the default for
-                * its datatype.  If that's not what you want, you should be using
-                * the more general parser substitution hooks.
+                * its datatype.  If that's not what you want, you should be using the
+                * more general parser substitution hooks.
                 */
                param->paramcollid = get_typcollation(param->paramtype);
 
index 1af3f2f3b53448afcc68875ead09c5472e3bf4e3..d603ce2f423e52b100b440a577d99aeb1dd3ab8a 100644 (file)
@@ -1393,14 +1393,14 @@ addRangeTableEntryForCTE(ParseState *pstate,
         */
        if (IsA(cte->ctequery, Query))
        {
-               Query  *ctequery = (Query *) cte->ctequery;
+               Query      *ctequery = (Query *) cte->ctequery;
 
                if (ctequery->commandType != CMD_SELECT &&
                        ctequery->returningList == NIL)
                        ereport(ERROR,
                                        (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                        errmsg("WITH query \"%s\" does not have a RETURNING clause",
-                                                       cte->ctename),
+                                errmsg("WITH query \"%s\" does not have a RETURNING clause",
+                                               cte->ctename),
                                         parser_errposition(pstate, rv->location)));
        }
 
@@ -1871,7 +1871,7 @@ expandTupleDesc(TupleDesc tupdesc, Alias *eref,
                                         * what type the Const claims to be.
                                         */
                                        *colvars = lappend(*colvars,
-                                                                          makeNullConst(INT4OID, -1, InvalidOid));
+                                                                        makeNullConst(INT4OID, -1, InvalidOid));
                                }
                        }
                        continue;
@@ -1893,7 +1893,7 @@ expandTupleDesc(TupleDesc tupdesc, Alias *eref,
                        Var                *varnode;
 
                        varnode = makeVar(rtindex, attr->attnum,
-                                                         attr->atttypid, attr->atttypmod, attr->attcollation,
+                                                attr->atttypid, attr->atttypmod, attr->attcollation,
                                                          sublevels_up);
                        varnode->location = location;
 
index 52c6db2eb5cae4780abca4bb34f1f6d063af6ecb..3f630147b0fa5687c7df6ea63f747cc7a16ee3e5 100644 (file)
@@ -671,7 +671,7 @@ transformAssignmentIndirection(ParseState *pstate,
                                                 parser_errposition(pstate, location)));
 
                        get_atttypetypmodcoll(typrelid, attnum,
-                                                                 &fieldTypeId, &fieldTypMod, &fieldCollation);
+                                                               &fieldTypeId, &fieldTypMod, &fieldCollation);
 
                        /* recurse to create appropriate RHS for field assign */
                        rhs = transformAssignmentIndirection(pstate,
@@ -783,8 +783,8 @@ transformAssignmentSubscripts(ParseState *pstate,
 
        /*
         * Array normally has same collation as elements, but there's an
-        * exception: we might be subscripting a domain over an array type.
-        * In that case use collation of the base type.
+        * exception: we might be subscripting a domain over an array type. In
+        * that case use collation of the base type.
         */
        if (arrayType == targetTypeId)
                collationNeeded = targetCollation;
index b65f5f991cb5502152985ce41a79d1375334220b..22411f1608bf328a2f801bc9cffa8dc45361eaa3 100644 (file)
@@ -121,7 +121,7 @@ static void transformFKConstraints(CreateStmtContext *cxt,
                                           bool skipValidation,
                                           bool isAddConstraint);
 static void transformConstraintAttrs(CreateStmtContext *cxt,
-                                                                        List *constraintList);
+                                                List *constraintList);
 static void transformColumnType(CreateStmtContext *cxt, ColumnDef *column);
 static void setSchemaName(char *context_schema, char **stmt_schema_name);
 
@@ -368,8 +368,8 @@ transformColumnDefinition(CreateStmtContext *cxt, ColumnDef *column)
                 * If this is ALTER ADD COLUMN, make sure the sequence will be owned
                 * by the table's owner.  The current user might be someone else
                 * (perhaps a superuser, or someone who's only a member of the owning
-                * role), but the SEQUENCE OWNED BY mechanisms will bleat unless
-                * table and sequence have exactly the same owning role.
+                * role), but the SEQUENCE OWNED BY mechanisms will bleat unless table
+                * and sequence have exactly the same owning role.
                 */
                if (cxt->rel)
                        seqstmt->ownerId = cxt->rel->rd_rel->relowner;
@@ -732,7 +732,7 @@ transformInhRelation(CreateStmtContext *cxt, InhRelation *inhRelation)
                        /* Copy comment on constraint */
                        if ((inhRelation->options & CREATE_TABLE_LIKE_COMMENTS) &&
                                (comment = GetComment(get_constraint_oid(RelationGetRelid(relation),
-                                                                                                                 n->conname, false),
+                                                                                                                n->conname, false),
                                                                          ConstraintRelationId,
                                                                          0)) != NULL)
                        {
@@ -1390,8 +1390,8 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
        /*
         * If it's ALTER TABLE ADD CONSTRAINT USING INDEX, look up the index and
         * verify it's usable, then extract the implied column name list.  (We
-        * will not actually need the column name list at runtime, but we need
-        * it now to check for duplicate column entries below.)
+        * will not actually need the column name list at runtime, but we need it
+        * now to check for duplicate column entries below.)
         */
        if (constraint->indexname != NULL)
        {
@@ -1436,8 +1436,8 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
                if (OidIsValid(get_index_constraint(index_oid)))
                        ereport(ERROR,
                                        (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                        errmsg("index \"%s\" is already associated with a constraint",
-                                                       index_name),
+                          errmsg("index \"%s\" is already associated with a constraint",
+                                         index_name),
                                         parser_errposition(cxt->pstate, constraint->location)));
 
                /* Perform validity checks on the index */
@@ -1482,8 +1482,8 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
                                         parser_errposition(cxt->pstate, constraint->location)));
 
                /*
-                * It's probably unsafe to change a deferred index to non-deferred.
-                * (A non-constraint index couldn't be deferred anyway, so this case
+                * It's probably unsafe to change a deferred index to non-deferred. (A
+                * non-constraint index couldn't be deferred anyway, so this case
                 * should never occur; no need to sweat, but let's check it.)
                 */
                if (!index_form->indimmediate && !constraint->deferrable)
@@ -1494,7 +1494,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
                                         parser_errposition(cxt->pstate, constraint->location)));
 
                /*
-                * Insist on it being a btree.  That's the only kind that supports
+                * Insist on it being a btree.  That's the only kind that supports
                 * uniqueness at the moment anyway; but we must have an index that
                 * exactly matches what you'd get from plain ADD CONSTRAINT syntax,
                 * else dump and reload will produce a different index (breaking
@@ -1514,15 +1514,15 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
 
                for (i = 0; i < index_form->indnatts; i++)
                {
-                       int2    attnum = index_form->indkey.values[i];
+                       int2            attnum = index_form->indkey.values[i];
                        Form_pg_attribute attform;
-                       char   *attname;
-                       Oid             defopclass;
+                       char       *attname;
+                       Oid                     defopclass;
 
                        /*
                         * We shouldn't see attnum == 0 here, since we already rejected
-                        * expression indexes.  If we do, SystemAttributeDefinition
-                        * will throw an error.
+                        * expression indexes.  If we do, SystemAttributeDefinition will
+                        * throw an error.
                         */
                        if (attnum > 0)
                        {
@@ -1531,11 +1531,11 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
                        }
                        else
                                attform = SystemAttributeDefinition(attnum,
-                                                                                                       heap_rel->rd_rel->relhasoids);
+                                                                                          heap_rel->rd_rel->relhasoids);
                        attname = pstrdup(NameStr(attform->attname));
 
                        /*
-                        * Insist on default opclass and sort options.  While the index
+                        * Insist on default opclass and sort options.  While the index
                         * would still work as a constraint with non-default settings, it
                         * might not provide exactly the same uniqueness semantics as
                         * you'd get from a normally-created constraint; and there's also
@@ -1549,7 +1549,7 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
                                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                                 errmsg("index \"%s\" does not have default sorting behavior", index_name),
                                                 errdetail("Cannot create a PRIMARY KEY or UNIQUE constraint using such an index."),
-                                                parser_errposition(cxt->pstate, constraint->location)));
+                                        parser_errposition(cxt->pstate, constraint->location)));
 
                        constraint->keys = lappend(constraint->keys, makeString(attname));
                }
@@ -1694,13 +1694,13 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
                                                        (errcode(ERRCODE_DUPLICATE_COLUMN),
                                                         errmsg("column \"%s\" appears twice in primary key constraint",
                                                                        key),
-                                                        parser_errposition(cxt->pstate, constraint->location)));
+                                        parser_errposition(cxt->pstate, constraint->location)));
                                else
                                        ereport(ERROR,
                                                        (errcode(ERRCODE_DUPLICATE_COLUMN),
                                        errmsg("column \"%s\" appears twice in unique constraint",
                                                   key),
-                                                        parser_errposition(cxt->pstate, constraint->location)));
+                                        parser_errposition(cxt->pstate, constraint->location)));
                        }
                }
 
@@ -1743,7 +1743,7 @@ transformFKConstraints(CreateStmtContext *cxt,
                        Constraint *constraint = (Constraint *) lfirst(fkclist);
 
                        constraint->skip_validation = true;
-                       constraint->initially_valid  = true;
+                       constraint->initially_valid = true;
                }
        }
 
@@ -2120,18 +2120,18 @@ transformRuleStmt(RuleStmt *stmt, const char *queryString,
                         * However, they were already in the outer rangetable when we
                         * analyzed the query, so we have to check.
                         *
-                        * Note that in the INSERT...SELECT case, we need to examine
-                        * the CTE lists of both top_subqry and sub_qry.
+                        * Note that in the INSERT...SELECT case, we need to examine the
+                        * CTE lists of both top_subqry and sub_qry.
                         *
-                        * Note that we aren't digging into the body of the query
-                        * looking for WITHs in nested sub-SELECTs.  A WITH down there
-                        * can legitimately refer to OLD/NEW, because it'd be an
+                        * Note that we aren't digging into the body of the query looking
+                        * for WITHs in nested sub-SELECTs.  A WITH down there can
+                        * legitimately refer to OLD/NEW, because it'd be an
                         * indirect-correlated outer reference.
                         */
                        if (rangeTableEntry_used((Node *) top_subqry->cteList,
                                                                         PRS2_OLD_VARNO, 0) ||
                                rangeTableEntry_used((Node *) sub_qry->cteList,
-                                                                         PRS2_OLD_VARNO, 0))
+                                                                        PRS2_OLD_VARNO, 0))
                                ereport(ERROR,
                                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                                                 errmsg("cannot refer to OLD within WITH query")));
@@ -2226,12 +2226,13 @@ transformAlterTableStmt(AlterTableStmt *stmt, const char *queryString)
        lockmode = AlterTableGetLockLevel(stmt->cmds);
 
        /*
-        * Acquire appropriate lock on the target relation, which will be held until
-        * end of transaction.  This ensures any decisions we make here based on
-        * the state of the relation will still be good at execution. We must get
-        * lock now because execution will later require it; taking a lower grade lock
-        * now and trying to upgrade later risks deadlock.  Any new commands we add
-        * after this must not upgrade the lock level requested here.
+        * Acquire appropriate lock on the target relation, which will be held
+        * until end of transaction.  This ensures any decisions we make here
+        * based on the state of the relation will still be good at execution. We
+        * must get lock now because execution will later require it; taking a
+        * lower grade lock now and trying to upgrade later risks deadlock.  Any
+        * new commands we add after this must not upgrade the lock level
+        * requested here.
         */
        rel = relation_openrv(stmt->relation, lockmode);
 
@@ -2522,7 +2523,7 @@ transformColumnType(CreateStmtContext *cxt, ColumnDef *column)
        if (column->collClause)
        {
                Form_pg_type typtup = (Form_pg_type) GETSTRUCT(ctype);
-               Oid             collOid;
+               Oid                     collOid;
 
                collOid = LookupCollation(cxt->pstate,
                                                                  column->collClause->collname,
index 373d6aaec84338b7242100acfb476975225a1a6e..bc400791788d843370c7a122c94129ea5d2f9082 100644 (file)
@@ -89,7 +89,7 @@ BSD44_derived_dlsym(void *handle, const char *name)
                snprintf(buf, sizeof(buf), "_%s", name);
                name = buf;
        }
-#endif /* !__ELF__ */
+#endif   /* !__ELF__ */
        if ((vp = dlsym(handle, (char *) name)) == NULL)
                snprintf(error_message, sizeof(error_message),
                                 "dlsym (%s) failed", name);
index d12065614140fd335d82cb873f88e6e08d7c5cae..1bac83784af9d766afe9a023ffaf0fdae3fdaba4 100644 (file)
@@ -89,7 +89,7 @@ BSD44_derived_dlsym(void *handle, const char *name)
                snprintf(buf, sizeof(buf), "_%s", name);
                name = buf;
        }
-#endif /* !__ELF__ */
+#endif   /* !__ELF__ */
        if ((vp = dlsym(handle, (char *) name)) == NULL)
                snprintf(error_message, sizeof(error_message),
                                 "dlsym (%s) failed", name);
index 2d061efb1e8ecb4c8e217e6e173221cbc10d3fba..4556a2dfebb580ed33fa6b37daae955966d09db9 100644 (file)
@@ -89,7 +89,7 @@ BSD44_derived_dlsym(void *handle, const char *name)
                snprintf(buf, sizeof(buf), "_%s", name);
                name = buf;
        }
-#endif /* !__ELF__ */
+#endif   /* !__ELF__ */
        if ((vp = dlsym(handle, (char *) name)) == NULL)
                snprintf(error_message, sizeof(error_message),
                                 "dlsym (%s) failed", name);
index eeed3fc2e1132f8968870124b2be4304b55e5fc4..b86a53ad34aa74d66fdc9921b8613eeb606835b7 100644 (file)
@@ -37,7 +37,7 @@ pgpipe(int handles[2])
        serv_addr.sin_family = AF_INET;
        serv_addr.sin_port = htons(0);
        serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
-       if (bind(s, (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
+       if (bind(s, (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
        {
                ereport(LOG, (errmsg_internal("pgpipe failed to bind: %ui", WSAGetLastError())));
                closesocket(s);
@@ -49,7 +49,7 @@ pgpipe(int handles[2])
                closesocket(s);
                return -1;
        }
-       if (getsockname(s, (SOCKADDR *) & serv_addr, &len) == SOCKET_ERROR)
+       if (getsockname(s, (SOCKADDR *) &serv_addr, &len) == SOCKET_ERROR)
        {
                ereport(LOG, (errmsg_internal("pgpipe failed to getsockname: %ui", WSAGetLastError())));
                closesocket(s);
@@ -62,13 +62,13 @@ pgpipe(int handles[2])
                return -1;
        }
 
-       if (connect(handles[1], (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
+       if (connect(handles[1], (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
        {
                ereport(LOG, (errmsg_internal("pgpipe failed to connect socket: %ui", WSAGetLastError())));
                closesocket(s);
                return -1;
        }
-       if ((handles[0] = accept(s, (SOCKADDR *) & serv_addr, &len)) == INVALID_SOCKET)
+       if ((handles[0] = accept(s, (SOCKADDR *) &serv_addr, &len)) == INVALID_SOCKET)
        {
                ereport(LOG, (errmsg_internal("pgpipe failed to accept socket: %ui", WSAGetLastError())));
                closesocket(handles[1]);
index 102a28ed04e1eaaf7181f0e568c6034eca5aa539..754d2ac7e04166fba75a5e35bac549e46bfa673f 100644 (file)