summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/backend/access/common/heaptuple.c4
-rw-r--r--src/backend/access/common/indextuple.c8
-rw-r--r--src/backend/access/common/printtup.c58
-rw-r--r--src/backend/access/gist/gistscan.c9
-rw-r--r--src/backend/access/hash/hashfunc.c20
-rw-r--r--src/backend/access/hash/hashovfl.c6
-rw-r--r--src/backend/access/heap/heapam.c16
-rw-r--r--src/backend/access/index/indexam.c51
-rw-r--r--src/backend/access/nbtree/nbtinsert.c55
-rw-r--r--src/backend/access/nbtree/nbtpage.c149
-rw-r--r--src/backend/access/nbtree/nbtree.c90
-rw-r--r--src/backend/access/nbtree/nbtsearch.c91
-rw-r--r--src/backend/access/nbtree/nbtsort.c6
-rw-r--r--src/backend/access/nbtree/nbtxlog.c224
-rw-r--r--src/backend/access/rtree/rtscan.c10
-rw-r--r--src/backend/access/transam/clog.c4
-rw-r--r--src/backend/access/transam/rmgr.c6
-rw-r--r--src/backend/access/transam/slru.c94
-rw-r--r--src/backend/access/transam/xact.c58
-rw-r--r--src/backend/access/transam/xlog.c251
-rw-r--r--src/backend/bootstrap/bootstrap.c44
-rw-r--r--src/backend/catalog/aclchk.c61
-rw-r--r--src/backend/catalog/dependency.c86
-rw-r--r--src/backend/catalog/heap.c41
-rw-r--r--src/backend/catalog/index.c65
-rw-r--r--src/backend/catalog/namespace.c32
-rw-r--r--src/backend/catalog/pg_aggregate.c42
-rw-r--r--src/backend/catalog/pg_constraint.c19
-rw-r--r--src/backend/catalog/pg_conversion.c10
-rw-r--r--src/backend/catalog/pg_operator.c8
-rw-r--r--src/backend/catalog/pg_proc.c44
-rw-r--r--src/backend/catalog/pg_type.c27
-rw-r--r--src/backend/commands/aggregatecmds.c10
-rw-r--r--src/backend/commands/alter.c82
-rw-r--r--src/backend/commands/analyze.c8
-rw-r--r--src/backend/commands/async.c13
-rw-r--r--src/backend/commands/cluster.c133
-rw-r--r--src/backend/commands/comment.c35
-rw-r--r--src/backend/commands/conversioncmds.c8
-rw-r--r--src/backend/commands/copy.c209
-rw-r--r--src/backend/commands/dbcommands.c60
-rw-r--r--src/backend/commands/define.c5
-rw-r--r--src/backend/commands/explain.c40
-rw-r--r--src/backend/commands/functioncmds.c51
-rw-r--r--src/backend/commands/indexcmds.c85
-rw-r--r--src/backend/commands/opclasscmds.c18
-rw-r--r--src/backend/commands/operatorcmds.c6
-rw-r--r--src/backend/commands/portalcmds.c64
-rw-r--r--src/backend/commands/prepare.c58
-rw-r--r--src/backend/commands/proclang.c12
-rw-r--r--src/backend/commands/schemacmds.c14
-rw-r--r--src/backend/commands/sequence.c54
-rw-r--r--src/backend/commands/tablecmds.c285
-rw-r--r--src/backend/commands/trigger.c228
-rw-r--r--src/backend/commands/typecmds.c326
-rw-r--r--src/backend/commands/user.c56
-rw-r--r--src/backend/commands/vacuum.c61
-rw-r--r--src/backend/commands/vacuumlazy.c30
-rw-r--r--src/backend/commands/variable.c85
-rw-r--r--src/backend/commands/view.c6
-rw-r--r--src/backend/executor/execAmi.c28
-rw-r--r--src/backend/executor/execGrouping.c12
-rw-r--r--src/backend/executor/execMain.c123
-rw-r--r--src/backend/executor/execProcnode.c18
-rw-r--r--src/backend/executor/execQual.c363
-rw-r--r--src/backend/executor/execScan.c23
-rw-r--r--src/backend/executor/execTuples.c8
-rw-r--r--src/backend/executor/execUtils.c62
-rw-r--r--src/backend/executor/functions.c28
-rw-r--r--src/backend/executor/nodeAgg.c168
-rw-r--r--src/backend/executor/nodeAppend.c4
-rw-r--r--src/backend/executor/nodeHash.c4
-rw-r--r--src/backend/executor/nodeHashjoin.c44
-rw-r--r--src/backend/executor/nodeIndexscan.c7
-rw-r--r--src/backend/executor/nodeLimit.c41
-rw-r--r--src/backend/executor/nodeMaterial.c21
-rw-r--r--src/backend/executor/nodeMergejoin.c28
-rw-r--r--src/backend/executor/nodeNestloop.c10
-rw-r--r--src/backend/executor/nodeResult.c4
-rw-r--r--src/backend/executor/nodeSeqscan.c20
-rw-r--r--src/backend/executor/nodeSubplan.c275
-rw-r--r--src/backend/executor/nodeSubqueryscan.c17
-rw-r--r--src/backend/executor/nodeUnique.c8
-rw-r--r--src/backend/executor/spi.c29
-rw-r--r--src/backend/executor/tstoreReceiver.c10
-rw-r--r--src/backend/lib/stringinfo.c12
-rw-r--r--src/backend/libpq/auth.c42
-rw-r--r--src/backend/libpq/be-fsstubs.c6
-rw-r--r--src/backend/libpq/be-secure.c23
-rw-r--r--src/backend/libpq/crypt.c7
-rw-r--r--src/backend/libpq/hba.c73
-rw-r--r--src/backend/libpq/ip.c153
-rw-r--r--src/backend/libpq/md5.c6
-rw-r--r--src/backend/libpq/pqcomm.c88
-rw-r--r--src/backend/libpq/pqformat.c81
-rw-r--r--src/backend/main/main.c21
-rw-r--r--src/backend/nodes/bitmapset.c174
-rw-r--r--src/backend/nodes/copyfuncs.c64
-rw-r--r--src/backend/nodes/equalfuncs.c109
-rw-r--r--src/backend/nodes/list.c19
-rw-r--r--src/backend/nodes/nodes.c4
-rw-r--r--src/backend/nodes/outfuncs.c48
-rw-r--r--src/backend/nodes/readfuncs.c59
-rw-r--r--src/backend/optimizer/geqo/geqo_eval.c25
-rw-r--r--src/backend/optimizer/geqo/geqo_main.c6
-rw-r--r--src/backend/optimizer/geqo/geqo_misc.c4
-rw-r--r--src/backend/optimizer/path/allpaths.c31
-rw-r--r--src/backend/optimizer/path/costsize.c286
-rw-r--r--src/backend/optimizer/path/indxpath.c191
-rw-r--r--src/backend/optimizer/path/joinpath.c41
-rw-r--r--src/backend/optimizer/path/joinrels.c42
-rw-r--r--src/backend/optimizer/path/orindxpath.c4
-rw-r--r--src/backend/optimizer/path/pathkeys.c57
-rw-r--r--src/backend/optimizer/path/tidpath.c6
-rw-r--r--src/backend/optimizer/plan/createplan.c183
-rw-r--r--src/backend/optimizer/plan/initsplan.c165
-rw-r--r--src/backend/optimizer/plan/planmain.c15
-rw-r--r--src/backend/optimizer/plan/planner.c222
-rw-r--r--src/backend/optimizer/plan/setrefs.c74
-rw-r--r--src/backend/optimizer/plan/subselect.c173
-rw-r--r--src/backend/optimizer/prep/prepjointree.c107
-rw-r--r--src/backend/optimizer/prep/prepqual.c18
-rw-r--r--src/backend/optimizer/prep/preptlist.c24
-rw-r--r--src/backend/optimizer/prep/prepunion.c20
-rw-r--r--src/backend/optimizer/util/clauses.c236
-rw-r--r--src/backend/optimizer/util/joininfo.c14
-rw-r--r--src/backend/optimizer/util/pathnode.c29
-rw-r--r--src/backend/optimizer/util/plancat.c28
-rw-r--r--src/backend/optimizer/util/relnode.c9
-rw-r--r--src/backend/optimizer/util/restrictinfo.c17
-rw-r--r--src/backend/optimizer/util/tlist.c6
-rw-r--r--src/backend/optimizer/util/var.c43
-rw-r--r--src/backend/parser/analyze.c249
-rw-r--r--src/backend/parser/parse_agg.c99
-rw-r--r--src/backend/parser/parse_clause.c54
-rw-r--r--src/backend/parser/parse_coerce.c212
-rw-r--r--src/backend/parser/parse_expr.c164
-rw-r--r--src/backend/parser/parse_func.c82
-rw-r--r--src/backend/parser/parse_node.c10
-rw-r--r--src/backend/parser/parse_oper.c65
-rw-r--r--src/backend/parser/parse_relation.c43
-rw-r--r--src/backend/parser/parse_target.c22
-rw-r--r--src/backend/parser/parse_type.c8
-rw-r--r--src/backend/port/beos/sem.c4
-rw-r--r--src/backend/port/dynloader/darwin.c4
-rw-r--r--src/backend/port/dynloader/linux.c4
-rw-r--r--src/backend/port/dynloader/linux.h3
-rw-r--r--src/backend/port/dynloader/win32.c8
-rw-r--r--src/backend/port/ipc_test.c14
-rw-r--r--src/backend/port/posix_sema.c3
-rw-r--r--src/backend/port/sysv_sema.c10
-rw-r--r--src/backend/port/sysv_shmem.c34
-rw-r--r--src/backend/port/win32/sema.c13
-rw-r--r--src/backend/postmaster/pgstat.c67
-rw-r--r--src/backend/postmaster/postmaster.c231
-rw-r--r--src/backend/regex/regc_color.c412
-rw-r--r--src/backend/regex/regc_cvec.c197
-rw-r--r--src/backend/regex/regc_lex.c1492
-rw-r--r--src/backend/regex/regc_locale.c982
-rw-r--r--src/backend/regex/regc_nfa.c858
-rw-r--r--src/backend/regex/regcomp.c1836
-rw-r--r--src/backend/regex/rege_dfa.c416
-rw-r--r--src/backend/regex/regerror.c118
-rw-r--r--src/backend/regex/regexec.c776
-rw-r--r--src/backend/regex/regfree.c16
-rw-r--r--src/backend/rewrite/rewriteDefine.c26
-rw-r--r--src/backend/rewrite/rewriteHandler.c113
-rw-r--r--src/backend/rewrite/rewriteManip.c17
-rw-r--r--src/backend/storage/buffer/bufmgr.c22
-rw-r--r--src/backend/storage/file/fd.c14
-rw-r--r--src/backend/storage/freespace/freespace.c215
-rw-r--r--src/backend/storage/ipc/ipc.c6
-rw-r--r--src/backend/storage/ipc/ipci.c3
-rw-r--r--src/backend/storage/ipc/sinval.c9
-rw-r--r--src/backend/storage/lmgr/deadlock.c35
-rw-r--r--src/backend/storage/lmgr/lock.c65
-rw-r--r--src/backend/storage/lmgr/proc.c25
-rw-r--r--src/backend/storage/page/bufpage.c15
-rw-r--r--src/backend/storage/smgr/md.c15
-rw-r--r--src/backend/tcop/dest.c9
-rw-r--r--src/backend/tcop/fastpath.c56
-rw-r--r--src/backend/tcop/postgres.c406
-rw-r--r--src/backend/tcop/pquery.c229
-rw-r--r--src/backend/tcop/utility.c145
-rw-r--r--src/backend/utils/adt/acl.c89
-rw-r--r--src/backend/utils/adt/array_userfuncs.c91
-rw-r--r--src/backend/utils/adt/arrayfuncs.c230
-rw-r--r--src/backend/utils/adt/ascii.c22
-rw-r--r--src/backend/utils/adt/char.c4
-rw-r--r--src/backend/utils/adt/date.c70
-rw-r--r--src/backend/utils/adt/datetime.c86
-rw-r--r--src/backend/utils/adt/float.c10
-rw-r--r--src/backend/utils/adt/formatting.c81
-rw-r--r--src/backend/utils/adt/geo_ops.c16
-rw-r--r--src/backend/utils/adt/inet_net_ntop.c180
-rw-r--r--src/backend/utils/adt/inet_net_pton.c116
-rw-r--r--src/backend/utils/adt/int.c6
-rw-r--r--src/backend/utils/adt/int8.c6
-rw-r--r--src/backend/utils/adt/like.c4
-rw-r--r--src/backend/utils/adt/like_match.c4
-rw-r--r--src/backend/utils/adt/mac.c8
-rw-r--r--src/backend/utils/adt/nabstime.c22
-rw-r--r--src/backend/utils/adt/name.c4
-rw-r--r--src/backend/utils/adt/network.c258
-rw-r--r--src/backend/utils/adt/not_in.c4
-rw-r--r--src/backend/utils/adt/numeric.c285
-rw-r--r--src/backend/utils/adt/numutils.c4
-rw-r--r--src/backend/utils/adt/oid.c6
-rw-r--r--src/backend/utils/adt/oracle_compat.c17
-rw-r--r--src/backend/utils/adt/pg_locale.c8
-rw-r--r--src/backend/utils/adt/pgstatfuncs.c14
-rw-r--r--src/backend/utils/adt/pseudotypes.c4
-rw-r--r--src/backend/utils/adt/regexp.c32
-rw-r--r--src/backend/utils/adt/regproc.c14
-rw-r--r--src/backend/utils/adt/ri_triggers.c193
-rw-r--r--src/backend/utils/adt/ruleutils.c582
-rw-r--r--src/backend/utils/adt/selfuncs.c160
-rw-r--r--src/backend/utils/adt/sets.c7
-rw-r--r--src/backend/utils/adt/timestamp.c170
-rw-r--r--src/backend/utils/adt/varbit.c14
-rw-r--r--src/backend/utils/adt/varchar.c20
-rw-r--r--src/backend/utils/adt/varlena.c69
-rw-r--r--src/backend/utils/adt/xid.c4
-rw-r--r--src/backend/utils/cache/catcache.c6
-rw-r--r--src/backend/utils/cache/inval.c4
-rw-r--r--src/backend/utils/cache/lsyscache.c10
-rw-r--r--src/backend/utils/cache/relcache.c62
-rw-r--r--src/backend/utils/error/elog.c173
-rw-r--r--src/backend/utils/fmgr/dfmgr.c8
-rw-r--r--src/backend/utils/fmgr/fmgr.c32
-rw-r--r--src/backend/utils/init/findbe.c9
-rw-r--r--src/backend/utils/init/miscinit.c94
-rw-r--r--src/backend/utils/init/postinit.c28
-rw-r--r--src/backend/utils/mb/conv.c10
-rw-r--r--src/backend/utils/mb/mbutils.c26
-rw-r--r--src/backend/utils/mb/wchar.c10
-rw-r--r--src/backend/utils/misc/guc.c135
-rw-r--r--src/backend/utils/misc/help_config.c24
-rw-r--r--src/backend/utils/mmgr/aset.c22
-rw-r--r--src/backend/utils/mmgr/mcxt.c7
-rw-r--r--src/backend/utils/mmgr/portalmem.c53
-rw-r--r--src/backend/utils/sort/logtape.c6
-rw-r--r--src/backend/utils/sort/tuplesort.c14
-rw-r--r--src/backend/utils/sort/tuplestore.c37
-rw-r--r--src/backend/utils/time/tqual.c34
-rw-r--r--src/bin/pg_controldata/pg_controldata.c24
-rw-r--r--src/bin/pg_dump/common.c8
-rw-r--r--src/bin/pg_dump/dumputils.c75
-rw-r--r--src/bin/pg_dump/dumputils.h12
-rw-r--r--src/bin/pg_dump/pg_backup_archiver.c54
-rw-r--r--src/bin/pg_dump/pg_backup_archiver.h12
-rw-r--r--src/bin/pg_dump/pg_backup_custom.c20
-rw-r--r--src/bin/pg_dump/pg_backup_tar.c23
-rw-r--r--src/bin/pg_dump/pg_dump.c161
-rw-r--r--src/bin/pg_dump/pg_dump.h6
-rw-r--r--src/bin/pg_dump/pg_dumpall.c14
-rw-r--r--src/bin/pg_dump/pg_restore.c6
-rw-r--r--src/bin/pg_encoding/pg_encoding.c10
-rw-r--r--src/bin/pg_resetxlog/pg_resetxlog.c6
-rw-r--r--src/bin/psql/command.c32
-rw-r--r--src/bin/psql/common.c224
-rw-r--r--src/bin/psql/copy.c17
-rw-r--r--src/bin/psql/describe.c61
-rw-r--r--src/bin/psql/help.c14
-rw-r--r--src/bin/psql/input.c56
-rw-r--r--src/bin/psql/large_obj.c6
-rw-r--r--src/bin/psql/mainloop.c67
-rw-r--r--src/bin/psql/mbprint.c8
-rw-r--r--src/bin/psql/print.c20
-rw-r--r--src/bin/psql/print.h5
-rw-r--r--src/bin/psql/prompt.c60
-rw-r--r--src/bin/psql/sprompt.c13
-rw-r--r--src/bin/psql/startup.c8
-rw-r--r--src/bin/psql/stringutils.c31
-rw-r--r--src/bin/psql/tab-complete.c163
-rw-r--r--src/bin/psql/variables.c56
-rw-r--r--src/bin/psql/variables.h37
-rw-r--r--src/bin/scripts/clusterdb.c30
-rw-r--r--src/bin/scripts/common.c6
-rw-r--r--src/bin/scripts/common.h19
-rw-r--r--src/bin/scripts/createdb.c4
-rw-r--r--src/bin/scripts/createlang.c20
-rw-r--r--src/bin/scripts/createuser.c11
-rw-r--r--src/bin/scripts/dropdb.c4
-rw-r--r--src/bin/scripts/droplang.c15
-rw-r--r--src/bin/scripts/vacuumdb.c34
-rw-r--r--src/include/access/genam.h12
-rw-r--r--src/include/access/heapam.h4
-rw-r--r--src/include/access/nbtree.h33
-rw-r--r--src/include/access/printtup.h8
-rw-r--r--src/include/access/relscan.h6
-rw-r--r--src/include/access/slru.h22
-rw-r--r--src/include/access/xact.h12
-rw-r--r--src/include/access/xlog.h4
-rw-r--r--src/include/c.h25
-rw-r--r--src/include/catalog/catversion.h4
-rw-r--r--src/include/catalog/dependency.h10
-rw-r--r--src/include/catalog/pg_am.h4
-rw-r--r--src/include/catalog/pg_amproc.h28
-rw-r--r--src/include/catalog/pg_attribute.h8
-rw-r--r--src/include/catalog/pg_cast.h18
-rw-r--r--src/include/catalog/pg_constraint.h13
-rw-r--r--src/include/catalog/pg_opclass.h6
-rw-r--r--src/include/catalog/pg_operator.h32
-rw-r--r--src/include/catalog/pg_proc.h114
-rw-r--r--src/include/catalog/pg_statistic.h8
-rw-r--r--src/include/catalog/pg_type.h32
-rw-r--r--src/include/commands/alter.h4
-rw-r--r--src/include/commands/dbcommands.h6
-rw-r--r--src/include/commands/explain.h4
-rw-r--r--src/include/commands/portalcmds.h6
-rw-r--r--src/include/commands/prepare.h18
-rw-r--r--src/include/commands/sequence.h4
-rw-r--r--src/include/commands/trigger.h28
-rw-r--r--src/include/commands/typecmds.h4
-rw-r--r--src/include/executor/execdesc.h8
-rw-r--r--src/include/executor/executor.h104
-rw-r--r--src/include/executor/hashjoin.h7
-rw-r--r--src/include/executor/nodeHash.h4
-rw-r--r--src/include/executor/nodeSeqscan.h12
-rw-r--r--src/include/executor/nodeSubplan.h16
-rw-r--r--src/include/executor/spi.h6
-rw-r--r--src/include/executor/spi_priv.h7
-rw-r--r--src/include/executor/tstoreReceiver.h6
-rw-r--r--src/include/getaddrinfo.h35
-rw-r--r--src/include/getopt_long.h24
-rw-r--r--src/include/lib/stringinfo.h7
-rw-r--r--src/include/libpq/crypt.h4
-rw-r--r--src/include/libpq/ip.h34
-rw-r--r--src/include/libpq/libpq-be.h13
-rw-r--r--src/include/libpq/libpq.h18
-rw-r--r--src/include/libpq/pqcomm.h38
-rw-r--r--src/include/libpq/pqformat.h4
-rw-r--r--src/include/mb/pg_wchar.h4
-rw-r--r--src/include/miscadmin.h12
-rw-r--r--src/include/nodes/bitmapset.h58
-rw-r--r--src/include/nodes/execnodes.h151
-rw-r--r--src/include/nodes/makefuncs.h10
-rw-r--r--src/include/nodes/nodes.h6
-rw-r--r--src/include/nodes/params.h8
-rw-r--r--src/include/nodes/parsenodes.h45
-rw-r--r--src/include/nodes/pg_list.h14
-rw-r--r--src/include/nodes/plannodes.h10
-rw-r--r--src/include/nodes/primnodes.h79
-rw-r--r--src/include/nodes/relation.h37
-rw-r--r--src/include/optimizer/clauses.h20
-rw-r--r--src/include/optimizer/cost.h38
-rw-r--r--src/include/optimizer/geqo_misc.h3
-rw-r--r--src/include/optimizer/joininfo.h10
-rw-r--r--src/include/optimizer/pathnode.h16
-rw-r--r--src/include/optimizer/paths.h12
-rw-r--r--src/include/optimizer/plancat.h8
-rw-r--r--src/include/optimizer/planmain.h32
-rw-r--r--src/include/optimizer/prep.h4
-rw-r--r--src/include/optimizer/restrictinfo.h12
-rw-r--r--src/include/optimizer/tlist.h4
-rw-r--r--src/include/parser/analyze.h4
-rw-r--r--src/include/parser/parse_agg.h14
-rw-r--r--src/include/parser/parse_clause.h10
-rw-r--r--src/include/parser/parse_coerce.h44
-rw-r--r--src/include/parser/parse_func.h32
-rw-r--r--src/include/parser/parse_node.h5
-rw-r--r--src/include/parser/parse_oper.h18
-rw-r--r--src/include/pg_config_manual.h38
-rw-r--r--src/include/pgstat.h4
-rw-r--r--src/include/port.h54
-rw-r--r--src/include/port/bsdi.h1
-rw-r--r--src/include/port/cygwin.h3
-rw-r--r--src/include/port/freebsd.h1
-rw-r--r--src/include/port/hpux.h1
-rw-r--r--src/include/port/netbsd.h1
-rw-r--r--src/include/port/openbsd.h1
-rw-r--r--src/include/port/win32.h63
-rw-r--r--src/include/port/win32/dlfcn.h1
-rw-r--r--src/include/port/win32/grp.h1
-rw-r--r--src/include/port/win32/netdb.h1
-rw-r--r--src/include/port/win32/netinet/in.h1
-rw-r--r--src/include/port/win32/pwd.h1
-rw-r--r--src/include/port/win32/sys/socket.h1
-rw-r--r--src/include/port/win32/sys/wait.h1
-rw-r--r--src/include/postgres.h3
-rw-r--r--src/include/regex/regcustom.h52
-rw-r--r--src/include/regex/regerrs.h91
-rw-r--r--src/include/regex/regex.h169
-rw-r--r--src/include/regex/regguts.h390
-rw-r--r--src/include/storage/bufmgr.h6
-rw-r--r--src/include/storage/freespace.h16
-rw-r--r--src/include/storage/lock.h13
-rw-r--r--src/include/tcop/dest.h22
-rw-r--r--src/include/tcop/pquery.h22
-rw-r--r--src/include/tcop/tcopprot.h5
-rw-r--r--src/include/utils/acl.h10
-rw-r--r--src/include/utils/array.h53
-rw-r--r--src/include/utils/builtins.h4
-rw-r--r--src/include/utils/datetime.h6
-rw-r--r--src/include/utils/elog.h50
-rw-r--r--src/include/utils/errcodes.h24
-rw-r--r--src/include/utils/guc.h8
-rw-r--r--src/include/utils/guc_tables.h24
-rw-r--r--src/include/utils/help_config.h6
-rw-r--r--src/include/utils/inet.h4
-rw-r--r--src/include/utils/lsyscache.h20
-rw-r--r--src/include/utils/memutils.h7
-rw-r--r--src/include/utils/palloc.h6
-rw-r--r--src/include/utils/portal.h63
-rw-r--r--src/include/utils/rel.h6
-rw-r--r--src/include/utils/selfuncs.h8
-rw-r--r--src/include/utils/tuplestore.h8
-rw-r--r--src/interfaces/ecpg/compatlib/informix.c616
-rw-r--r--src/interfaces/ecpg/ecpglib/connect.c106
-rw-r--r--src/interfaces/ecpg/ecpglib/data.c47
-rw-r--r--src/interfaces/ecpg/ecpglib/descriptor.c4
-rw-r--r--src/interfaces/ecpg/ecpglib/error.c13
-rw-r--r--src/interfaces/ecpg/ecpglib/execute.c161
-rw-r--r--src/interfaces/ecpg/ecpglib/extern.h12
-rw-r--r--src/interfaces/ecpg/ecpglib/misc.c103
-rw-r--r--src/interfaces/ecpg/ecpglib/prepare.c20
-rw-r--r--src/interfaces/ecpg/include/datetime.h18
-rw-r--r--src/interfaces/ecpg/include/decimal.h31
-rw-r--r--src/interfaces/ecpg/include/ecpg_informix.h36
-rw-r--r--src/interfaces/ecpg/include/ecpgerrno.h4
-rw-r--r--src/interfaces/ecpg/include/ecpglib.h9
-rw-r--r--src/interfaces/ecpg/include/ecpgtype.h18
-rw-r--r--src/interfaces/ecpg/include/pgtypes_date.h15
-rw-r--r--src/interfaces/ecpg/include/pgtypes_error.h5
-rw-r--r--src/interfaces/ecpg/include/pgtypes_interval.h15
-rw-r--r--src/interfaces/ecpg/include/pgtypes_numeric.h88
-rw-r--r--src/interfaces/ecpg/include/pgtypes_timestamp.h10
-rw-r--r--src/interfaces/ecpg/include/sqlca.h2
-rw-r--r--src/interfaces/ecpg/include/sqlda.h1
-rw-r--r--src/interfaces/ecpg/include/sqltypes.h22
-rw-r--r--src/interfaces/ecpg/pgtypeslib/common.c90
-rw-r--r--src/interfaces/ecpg/pgtypeslib/datetime.c526
-rw-r--r--src/interfaces/ecpg/pgtypeslib/dt.h58
-rw-r--r--src/interfaces/ecpg/pgtypeslib/dt_common.c744
-rw-r--r--src/interfaces/ecpg/pgtypeslib/extern.h38
-rw-r--r--src/interfaces/ecpg/pgtypeslib/interval.c17
-rw-r--r--src/interfaces/ecpg/pgtypeslib/numeric.c245
-rw-r--r--src/interfaces/ecpg/pgtypeslib/timestamp.c319
-rw-r--r--src/interfaces/ecpg/preproc/c_keywords.c10
-rw-r--r--src/interfaces/ecpg/preproc/ecpg.c40
-rw-r--r--src/interfaces/ecpg/preproc/extern.h8
-rw-r--r--src/interfaces/ecpg/preproc/type.c81
-rw-r--r--src/interfaces/ecpg/preproc/type.h41
-rw-r--r--src/interfaces/ecpg/preproc/variable.c158
-rw-r--r--src/interfaces/libpgtcl/pgtclCmds.c12
-rw-r--r--src/interfaces/libpgtcl/pgtclCmds.h40
-rw-r--r--src/interfaces/libpgtcl/pgtclId.c18
-rw-r--r--src/interfaces/libpgtcl/pgtclId.h6
-rw-r--r--src/interfaces/libpq/fe-auth.c28
-rw-r--r--src/interfaces/libpq/fe-connect.c240
-rw-r--r--src/interfaces/libpq/fe-exec.c108
-rw-r--r--src/interfaces/libpq/fe-lobj.c12
-rw-r--r--src/interfaces/libpq/fe-misc.c101
-rw-r--r--src/interfaces/libpq/fe-protocol2.c103
-rw-r--r--src/interfaces/libpq/fe-protocol3.c174
-rw-r--r--src/interfaces/libpq/fe-secure.c139
-rw-r--r--src/interfaces/libpq/libpq-fe.h64
-rw-r--r--src/interfaces/libpq/libpq-int.h70
-rw-r--r--src/pl/plperl/plperl.c23
-rw-r--r--src/pl/plpgsql/src/pl_comp.c129
-rw-r--r--src/pl/plpgsql/src/pl_exec.c159
-rw-r--r--src/pl/plpgsql/src/pl_funcs.c8
-rw-r--r--src/pl/plpgsql/src/pl_handler.c8
-rw-r--r--src/pl/plpgsql/src/plpgsql.h23
-rw-r--r--src/pl/plpython/plpython.c62
-rw-r--r--src/pl/tcl/pltcl.c16
-rw-r--r--src/port/copydir.c31
-rw-r--r--src/port/crypt.c859
-rw-r--r--src/port/dirmod.c22
-rw-r--r--src/port/fseeko.c9
-rw-r--r--src/port/getaddrinfo.c66
-rw-r--r--src/port/getopt.c2
-rw-r--r--src/port/getopt_long.c14
-rw-r--r--src/port/gettimeofday.c15
-rw-r--r--src/port/path.c24
-rw-r--r--src/port/threads.c4
-rw-r--r--src/test/examples/testlibpq.c8
-rw-r--r--src/test/examples/testlibpq2.c27
-rw-r--r--src/test/examples/testlibpq3.c12
-rw-r--r--src/test/regress/regress.c6
-rw-r--r--src/tutorial/complex.c2
482 files changed, 17222 insertions, 15219 deletions
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index ae1df582b0..4f2fd0efd1 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.84 2003/07/21 20:29:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.85 2003/08/04 00:43:11 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
@@ -617,7 +617,7 @@ heap_formtuple(TupleDesc tupleDescriptor,
td->t_natts = numberOfAttributes;
td->t_hoff = hoff;
- if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
+ if (tupleDescriptor->tdhasoid) /* else leave infomask = 0 */
td->t_infomask = HEAP_HASOID;
DataFill((char *) td + hoff,
diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c
index abf25915ab..d0ee379808 100644
--- a/src/backend/access/common/indextuple.c
+++ b/src/backend/access/common/indextuple.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.65 2003/07/21 20:29:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.66 2003/08/04 00:43:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -162,9 +162,9 @@ index_formtuple(TupleDesc tupleDescriptor,
if ((size & INDEX_SIZE_MASK) != size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index tuple requires %lu bytes, maximum size is %lu",
- (unsigned long) size,
- (unsigned long) INDEX_SIZE_MASK)));
+ errmsg("index tuple requires %lu bytes, maximum size is %lu",
+ (unsigned long) size,
+ (unsigned long) INDEX_SIZE_MASK)));
infomask |= size;
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index 61ecdcd7e5..ecee11718d 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.75 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.76 2003/08/04 00:43:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -24,13 +24,13 @@
static void printtup_startup(DestReceiver *self, int operation,
- TupleDesc typeinfo);
+ TupleDesc typeinfo);
static void printtup(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
static void printtup_20(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
static void printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo,
- DestReceiver *self);
+ DestReceiver *self);
static void printtup_shutdown(DestReceiver *self);
static void printtup_destroy(DestReceiver *self);
@@ -81,8 +81,8 @@ printtup_create_DR(CommandDest dest, Portal portal)
else
{
/*
- * In protocol 2.0 the Bind message does not exist, so there is
- * no way for the columns to have different print formats; it's
+ * In protocol 2.0 the Bind message does not exist, so there is no
+ * way for the columns to have different print formats; it's
* sufficient to look at the first one.
*/
if (portal->formats && portal->formats[0] != 0)
@@ -111,12 +111,13 @@ static void
printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
{
DR_printtup *myState = (DR_printtup *) self;
- Portal portal = myState->portal;
+ Portal portal = myState->portal;
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
{
/*
- * Send portal name to frontend (obsolete cruft, gone in proto 3.0)
+ * Send portal name to frontend (obsolete cruft, gone in proto
+ * 3.0)
*
* If portal name not specified, use "blank" portal.
*/
@@ -129,8 +130,8 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
}
/*
- * If this is a retrieve, and we are supposed to emit row descriptions,
- * then we send back the tuple descriptor of the tuples.
+ * If this is a retrieve, and we are supposed to emit row
+ * descriptions, then we send back the tuple descriptor of the tuples.
*/
if (operation == CMD_SELECT && myState->sendDescrip)
{
@@ -163,7 +164,7 @@ printtup_startup(DestReceiver *self, int operation, TupleDesc typeinfo)
* or some similar function; it does not contain a full set of fields.
* The targetlist will be NIL when executing a utility function that does
* not have a plan. If the targetlist isn't NIL then it is a Query node's
- * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
+ * targetlist; it is up to us to ignore resjunk columns in it. The formats[]
* array pointer might be NULL (if we are doing Describe on a prepared stmt);
* send zeroes for the format codes in that case.
*/
@@ -176,14 +177,14 @@ SendRowDescriptionMessage(TupleDesc typeinfo, List *targetlist, int16 *formats)
int i;
StringInfoData buf;
- pq_beginmessage(&buf, 'T'); /* tuple descriptor message type */
- pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
+ pq_beginmessage(&buf, 'T'); /* tuple descriptor message type */
+ pq_sendint(&buf, natts, 2); /* # of attrs in tuples */
for (i = 0; i < natts; ++i)
{
- Oid atttypid = attrs[i]->atttypid;
- int32 atttypmod = attrs[i]->atttypmod;
- Oid basetype;
+ Oid atttypid = attrs[i]->atttypid;
+ int32 atttypmod = attrs[i]->atttypmod;
+ Oid basetype;
pq_sendstring(&buf, NameStr(attrs[i]->attname));
/* column ID info appears in protocol 3.0 and up */
@@ -320,8 +321,8 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
}
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -347,7 +348,7 @@ printtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typelem)));
+ ObjectIdGetDatum(thisState->typelem)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
@@ -424,8 +425,8 @@ printtup_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
Assert(thisState->format == 0);
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -536,9 +537,10 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
continue;
getTypeOutputInfo(typeinfo->attrs[i]->atttypid,
&typoutput, &typelem, &typisvarlena);
+
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -547,7 +549,7 @@ debugtup(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
value = DatumGetCString(OidFunctionCall3(typoutput,
attr,
- ObjectIdGetDatum(typelem),
+ ObjectIdGetDatum(typelem),
Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
printatt((unsigned) i + 1, typeinfo->attrs[i], value);
@@ -627,8 +629,8 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
Assert(thisState->format == 1);
/*
- * If we have a toasted datum, forcibly detoast it here to
- * avoid memory leakage inside the type's output routine.
+ * If we have a toasted datum, forcibly detoast it here to avoid
+ * memory leakage inside the type's output routine.
*/
if (thisState->typisvarlena)
attr = PointerGetDatum(PG_DETOAST_DATUM(origattr));
@@ -637,7 +639,7 @@ printtup_internal_20(HeapTuple tuple, TupleDesc typeinfo, DestReceiver *self)
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typelem)));
+ ObjectIdGetDatum(thisState->typelem)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
diff --git a/src/backend/access/gist/gistscan.c b/src/backend/access/gist/gistscan.c
index d3208e248e..505fd76256 100644
--- a/src/backend/access/gist/gistscan.c
+++ b/src/backend/access/gist/gistscan.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.45 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gistscan.c,v 1.46 2003/08/04 00:43:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,11 +104,12 @@ gistrescan(PG_FUNCTION_ARGS)
memmove(s->keyData,
key,
s->numberOfKeys * sizeof(ScanKeyData));
+
/*
* Play games here with the scan key to use the Consistent
- * function for all comparisons: 1) the sk_procedure field
- * will now be used to hold the strategy number 2) the
- * sk_func field will point to the Consistent function
+ * function for all comparisons: 1) the sk_procedure field will
+ * now be used to hold the strategy number 2) the sk_func field
+ * will point to the Consistent function
*/
for (i = 0; i < s->numberOfKeys; i++)
{
diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c
index a82b8b32d5..4dd9d9df3e 100644
--- a/src/backend/access/hash/hashfunc.c
+++ b/src/backend/access/hash/hashfunc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.36 2003/06/22 22:04:54 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.37 2003/08/04 00:43:12 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
@@ -60,9 +60,9 @@ hashfloat4(PG_FUNCTION_ARGS)
float4 key = PG_GETARG_FLOAT4(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit patterns
- * but should compare as equal. We must ensure that they have the same
- * hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit
+ * patterns but should compare as equal. We must ensure that they
+ * have the same hash value, which is most easily done this way:
*/
if (key == (float4) 0)
PG_RETURN_UINT32(0);
@@ -76,9 +76,9 @@ hashfloat8(PG_FUNCTION_ARGS)
float8 key = PG_GETARG_FLOAT8(0);
/*
- * On IEEE-float machines, minus zero and zero have different bit patterns
- * but should compare as equal. We must ensure that they have the same
- * hash value, which is most easily done this way:
+ * On IEEE-float machines, minus zero and zero have different bit
+ * patterns but should compare as equal. We must ensure that they
+ * have the same hash value, which is most easily done this way:
*/
if (key == (float8) 0)
PG_RETURN_UINT32(0);
@@ -121,9 +121,9 @@ hashtext(PG_FUNCTION_ARGS)
Datum result;
/*
- * Note: this is currently identical in behavior to hashvarlena,
- * but it seems likely that we may need to do something different
- * in non-C locales. (See also hashbpchar, if so.)
+ * Note: this is currently identical in behavior to hashvarlena, but
+ * it seems likely that we may need to do something different in non-C
+ * locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA(key),
VARSIZE(key) - VARHDRSZ);
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index ed9459feb9..fd7fc15822 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.35 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.36 2003/08/04 00:43:12 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -205,8 +205,8 @@ _hash_getovfladdr(Relation rel, Buffer *metabufp)
if (++splitnum >= NCACHED)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("out of overflow pages in hash index \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("out of overflow pages in hash index \"%s\"",
+ RelationGetRelationName(rel))));
metap->hashm_ovflpoint = splitnum;
metap->hashm_spares[splitnum] = metap->hashm_spares[splitnum - 1];
metap->hashm_spares[splitnum - 1]--;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index a0d191f8a9..8b4b5590ca 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.152 2003/07/21 20:29:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.153 2003/08/04 00:43:14 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1132,6 +1132,7 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
xlhdr.t_natts = tup->t_data->t_natts;
xlhdr.t_infomask = tup->t_data->t_infomask;
xlhdr.t_hoff = tup->t_data->t_hoff;
+
/*
* note we mark rdata[1] as belonging to buffer; if XLogInsert
* decides to write the whole page to the xlog, we don't need to
@@ -1149,9 +1150,9 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid)
rdata[2].next = NULL;
/*
- * If this is the single and first tuple on page, we can reinit the
- * page instead of restoring the whole thing. Set flag, and hide
- * buffer references from XLogInsert.
+ * If this is the single and first tuple on page, we can reinit
+ * the page instead of restoring the whole thing. Set flag, and
+ * hide buffer references from XLogInsert.
*/
if (ItemPointerGetOffsetNumber(&(tup->t_self)) == FirstOffsetNumber &&
PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
@@ -1912,7 +1913,7 @@ log_heap_clean(Relation reln, Buffer buffer, OffsetNumber *unused, int uncnt)
/*
* The unused-offsets array is not actually in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets
+ * that it is. When XLogInsert stores the whole buffer, the offsets
* array need not be stored too.
*/
rdata[1].buffer = buffer;
@@ -1991,9 +1992,10 @@ log_heap_update(Relation reln, Buffer oldbuf, ItemPointerData from,
2 * sizeof(TransactionId));
hsize += 2 * sizeof(TransactionId);
}
+
/*
- * As with insert records, we need not store the rdata[2] segment
- * if we decide to store the whole buffer instead.
+ * As with insert records, we need not store the rdata[2] segment if
+ * we decide to store the whole buffer instead.
*/
rdata[2].buffer = newbuf;
rdata[2].data = (char *) &xlhdr;
diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c
index 731c34b3ab..ee93e8a722 100644
--- a/src/backend/access/index/indexam.c
+++ b/src/backend/access/index/indexam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.67 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.68 2003/08/04 00:43:15 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
@@ -300,7 +300,7 @@ index_beginscan(Relation heapRelation,
* index_rescan - (re)start a scan of an index
*
* The caller may specify a new set of scankeys (but the number of keys
- * cannot change). To restart the scan without changing keys, pass NULL
+ * cannot change). To restart the scan without changing keys, pass NULL
* for the key array.
*
* Note that this is also called when first starting an indexscan;
@@ -394,8 +394,8 @@ index_restrpos(IndexScanDesc scan)
/*
* We do not reset got_tuple; so if the scan is actually being
- * short-circuited by index_getnext, the effective position restoration
- * is done by restoring unique_tuple_pos.
+ * short-circuited by index_getnext, the effective position
+ * restoration is done by restoring unique_tuple_pos.
*/
scan->unique_tuple_pos = scan->unique_tuple_mark;
@@ -427,24 +427,24 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
}
/*
- * If we already got a tuple and it must be unique, there's no need
- * to make the index AM look through any additional tuples. (This can
+ * If we already got a tuple and it must be unique, there's no need to
+ * make the index AM look through any additional tuples. (This can
* save a useful amount of work in scenarios where there are many dead
* tuples due to heavy update activity.)
*
* To do this we must keep track of the logical scan position
* (before/on/after tuple). Also, we have to be sure to release scan
- * resources before returning NULL; if we fail to do so then a multi-index
- * scan can easily run the system out of free buffers. We can release
- * index-level resources fairly cheaply by calling index_rescan. This
- * means there are two persistent states as far as the index AM is
- * concerned: on-tuple and rescanned. If we are actually asked to
- * re-fetch the single tuple, we have to go through a fresh indexscan
- * startup, which penalizes that (infrequent) case.
+ * resources before returning NULL; if we fail to do so then a
+ * multi-index scan can easily run the system out of free buffers. We
+ * can release index-level resources fairly cheaply by calling
+ * index_rescan. This means there are two persistent states as far as
+ * the index AM is concerned: on-tuple and rescanned. If we are
+ * actually asked to re-fetch the single tuple, we have to go through
+ * a fresh indexscan startup, which penalizes that (infrequent) case.
*/
if (scan->keys_are_unique && scan->got_tuple)
{
- int new_tuple_pos = scan->unique_tuple_pos;
+ int new_tuple_pos = scan->unique_tuple_pos;
if (ScanDirectionIsForward(direction))
{
@@ -459,22 +459,23 @@ index_getnext(IndexScanDesc scan, ScanDirection direction)
if (new_tuple_pos == 0)
{
/*
- * We are moving onto the unique tuple from having been off it.
- * We just fall through and let the index AM do the work. Note
- * we should get the right answer regardless of scan direction.
+ * We are moving onto the unique tuple from having been off
+ * it. We just fall through and let the index AM do the work.
+ * Note we should get the right answer regardless of scan
+ * direction.
*/
- scan->unique_tuple_pos = 0; /* need to update position */
+ scan->unique_tuple_pos = 0; /* need to update position */
}
else
{
/*
- * Moving off the tuple; must do amrescan to release index-level
- * pins before we return NULL. Since index_rescan will reset
- * my state, must save and restore...
+ * Moving off the tuple; must do amrescan to release
+ * index-level pins before we return NULL. Since index_rescan
+ * will reset my state, must save and restore...
*/
- int unique_tuple_mark = scan->unique_tuple_mark;
+ int unique_tuple_mark = scan->unique_tuple_mark;
- index_rescan(scan, NULL /* no change to key */);
+ index_rescan(scan, NULL /* no change to key */ );
scan->keys_are_unique = true;
scan->got_tuple = true;
@@ -631,7 +632,7 @@ index_bulk_delete(Relation indexRelation,
*/
IndexBulkDeleteResult *
index_vacuum_cleanup(Relation indexRelation,
- IndexVacuumCleanupInfo *info,
+ IndexVacuumCleanupInfo * info,
IndexBulkDeleteResult *stats)
{
RegProcedure procedure;
@@ -649,7 +650,7 @@ index_vacuum_cleanup(Relation indexRelation,
DatumGetPointer(OidFunctionCall3(procedure,
PointerGetDatum(indexRelation),
PointerGetDatum((Pointer) info),
- PointerGetDatum((Pointer) stats)));
+ PointerGetDatum((Pointer) stats)));
return result;
}
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index dd8eda99b9..962d7a1822 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.102 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.103 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -432,9 +432,9 @@ _bt_insertonpg(Relation rel,
*
* must write-lock that page before releasing write lock on
* current page; else someone else's _bt_check_unique scan
- * could fail to see our insertion. write locks on intermediate
- * dead pages won't do because we don't know when they will get
- * de-linked from the tree.
+ * could fail to see our insertion. write locks on
+ * intermediate dead pages won't do because we don't know when
+ * they will get de-linked from the tree.
*/
Buffer rbuf = InvalidBuffer;
@@ -523,9 +523,10 @@ _bt_insertonpg(Relation rel,
/*
* If we are doing this insert because we split a page that was
* the only one on its tree level, but was not the root, it may
- * have been the "fast root". We need to ensure that the fast root
- * link points at or above the current page. We can safely acquire
- * a lock on the metapage here --- see comments for _bt_newroot().
+ * have been the "fast root". We need to ensure that the fast
+ * root link points at or above the current page. We can safely
+ * acquire a lock on the metapage here --- see comments for
+ * _bt_newroot().
*/
if (split_only_page)
{
@@ -1135,7 +1136,7 @@ _bt_checksplitloc(FindSplitData *state, OffsetNumber firstright,
*
* On entry, buf and rbuf are the left and right split pages, which we
* still hold write locks on per the L&Y algorithm. We release the
- * write locks once we have write lock on the parent page. (Any sooner,
+ * write locks once we have write lock on the parent page. (Any sooner,
* and it'd be possible for some other process to try to split or delete
* one of these pages, and get confused because it cannot find the downlink.)
*
@@ -1155,19 +1156,19 @@ _bt_insert_parent(Relation rel,
bool is_only)
{
/*
- * Here we have to do something Lehman and Yao don't talk about:
- * deal with a root split and construction of a new root. If our
- * stack is empty then we have just split a node on what had been
- * the root level when we descended the tree. If it was still the
- * root then we perform a new-root construction. If it *wasn't*
- * the root anymore, search to find the next higher level that
- * someone constructed meanwhile, and find the right place to insert
- * as for the normal case.
+ * Here we have to do something Lehman and Yao don't talk about: deal
+ * with a root split and construction of a new root. If our stack is
+ * empty then we have just split a node on what had been the root
+ * level when we descended the tree. If it was still the root then we
+ * perform a new-root construction. If it *wasn't* the root anymore,
+ * search to find the next higher level that someone constructed
+ * meanwhile, and find the right place to insert as for the normal
+ * case.
*
- * If we have to search for the parent level, we do so by
- * re-descending from the root. This is not super-efficient,
- * but it's rare enough not to matter. (This path is also taken
- * when called from WAL recovery --- we have no stack in that case.)
+ * If we have to search for the parent level, we do so by re-descending
+ * from the root. This is not super-efficient, but it's rare enough
+ * not to matter. (This path is also taken when called from WAL
+ * recovery --- we have no stack in that case.)
*/
if (is_root)
{
@@ -1222,9 +1223,9 @@ _bt_insert_parent(Relation rel,
/*
* Find the parent buffer and get the parent page.
*
- * Oops - if we were moved right then we need to change stack
- * item! We want to find parent pointing to where we are,
- * right ? - vadim 05/27/97
+ * Oops - if we were moved right then we need to change stack item!
+ * We want to find parent pointing to where we are, right ? -
+ * vadim 05/27/97
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
bknum, P_HIKEY);
@@ -1296,16 +1297,16 @@ _bt_getstackbuf(Relation rel, BTStack stack, int access)
/*
* start = InvalidOffsetNumber means "search the whole page".
- * We need this test anyway due to possibility that
- * page has a high key now when it didn't before.
+ * We need this test anyway due to possibility that page has a
+ * high key now when it didn't before.
*/
if (start < minoff)
start = minoff;
/*
* These loops will check every item on the page --- but in an
- * order that's attuned to the probability of where it actually
- * is. Scan to the right first, then to the left.
+ * order that's attuned to the probability of where it
+ * actually is. Scan to the right first, then to the left.
*/
for (offnum = start;
offnum <= maxoff;
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 33f85cd59a..ace06f0a25 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.66 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.67 2003/08/04 00:43:15 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -181,8 +181,8 @@ _bt_getroot(Relation rel, int access)
/*
* Metadata initialized by someone else. In order to
* guarantee no deadlocks, we have to release the metadata
- * page and start all over again. (Is that really true?
- * But it's hardly worth trying to optimize this case.)
+ * page and start all over again. (Is that really true? But
+ * it's hardly worth trying to optimize this case.)
*/
_bt_relbuf(rel, metabuf);
return _bt_getroot(rel, access);
@@ -190,8 +190,8 @@ _bt_getroot(Relation rel, int access)
/*
* Get, initialize, write, and leave a lock of the appropriate
- * type on the new root page. Since this is the first page in
- * the tree, it's a leaf as well as the root.
+ * type on the new root page. Since this is the first page in the
+ * tree, it's a leaf as well as the root.
*/
rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
rootblkno = BufferGetBlockNumber(rootbuf);
@@ -240,7 +240,7 @@ _bt_getroot(Relation rel, int access)
_bt_wrtnorelbuf(rel, rootbuf);
/*
- * swap root write lock for read lock. There is no danger of
+ * swap root write lock for read lock. There is no danger of
* anyone else accessing the new root page while it's unlocked,
* since no one else knows where it is yet.
*/
@@ -284,8 +284,8 @@ _bt_getroot(Relation rel, int access)
}
/*
- * By here, we have a pin and read lock on the root page, and no
- * lock set on the metadata page. Return the root page's buffer.
+ * By here, we have a pin and read lock on the root page, and no lock
+ * set on the metadata page. Return the root page's buffer.
*/
return rootbuf;
}
@@ -299,7 +299,7 @@ _bt_getroot(Relation rel, int access)
* By the time we acquire lock on the root page, it might have been split and
* not be the true root anymore. This is okay for the present uses of this
* routine; we only really need to be able to move up at least one tree level
- * from whatever non-root page we were at. If we ever do need to lock the
+ * from whatever non-root page we were at. If we ever do need to lock the
* one true root page, we could loop here, re-reading the metapage on each
* failure. (Note that it wouldn't do to hold the lock on the metapage while
* moving to the root --- that'd deadlock against any concurrent root split.)
@@ -406,9 +406,9 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* First see if the FSM knows of any free pages.
*
* We can't trust the FSM's report unreservedly; we have to check
- * that the page is still free. (For example, an already-free page
- * could have been re-used between the time the last VACUUM scanned
- * it and the time the VACUUM made its FSM updates.)
+ * that the page is still free. (For example, an already-free
+ * page could have been re-used between the time the last VACUUM
+ * scanned it and the time the VACUUM made its FSM updates.)
*/
for (;;)
{
@@ -431,10 +431,10 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
/*
* Extend the relation by one page.
*
- * We have to use a lock to ensure no one else is extending the rel at
- * the same time, else we will both try to initialize the same new
- * page. We can skip locking for new or temp relations, however,
- * since no one else could be accessing them.
+ * We have to use a lock to ensure no one else is extending the rel
+ * at the same time, else we will both try to initialize the same
+ * new page. We can skip locking for new or temp relations,
+ * however, since no one else could be accessing them.
*/
needLock = !(rel->rd_isnew || rel->rd_istemp);
@@ -444,8 +444,8 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
buf = ReadBuffer(rel, P_NEW);
/*
- * Release the file-extension lock; it's now OK for someone else to
- * extend the relation some more.
+ * Release the file-extension lock; it's now OK for someone else
+ * to extend the relation some more.
*/
if (needLock)
UnlockPage(rel, 0, ExclusiveLock);
@@ -484,7 +484,7 @@ _bt_relbuf(Relation rel, Buffer buf)
* and a pin on the buffer.
*
* NOTE: actually, the buffer manager just marks the shared buffer page
- * dirty here; the real I/O happens later. This is okay since we are not
+ * dirty here; the real I/O happens later. This is okay since we are not
* relying on write ordering anyway. The WAL mechanism is responsible for
* guaranteeing correctness after a crash.
*/
@@ -534,13 +534,14 @@ _bt_page_recyclable(Page page)
BTPageOpaque opaque;
/*
- * It's possible to find an all-zeroes page in an index --- for example,
- * a backend might successfully extend the relation one page and then
- * crash before it is able to make a WAL entry for adding the page.
- * If we find a zeroed page then reclaim it.
+ * It's possible to find an all-zeroes page in an index --- for
+ * example, a backend might successfully extend the relation one page
+ * and then crash before it is able to make a WAL entry for adding the
+ * page. If we find a zeroed page then reclaim it.
*/
if (PageIsNew(page))
return true;
+
/*
* Otherwise, recycle if deleted and too old to have any processes
* interested in it.
@@ -565,7 +566,7 @@ _bt_page_recyclable(Page page)
* mistake. On exit, metapage data is correct and we no longer have
* a pin or lock on the metapage.
*
- * Actually this is not used for splitting on-the-fly anymore. It's only used
+ * Actually this is not used for splitting on-the-fly anymore. It's only used
* in nbtsort.c at the completion of btree building, where we know we have
* sole access to the index anyway.
*/
@@ -623,7 +624,7 @@ _bt_metaproot(Relation rel, BlockNumber rootbknum, uint32 level)
/*
* Delete item(s) from a btree page.
*
- * This must only be used for deleting leaf items. Deleting an item on a
+ * This must only be used for deleting leaf items. Deleting an item on a
* non-leaf page has to be done as part of an atomic action that includes
* deleting the page it points to.
*
@@ -646,9 +647,7 @@ _bt_delitems(Relation rel, Buffer buf,
* adjusting item numbers for previous deletions.
*/
for (i = nitems - 1; i >= 0; i--)
- {
PageIndexTupleDelete(page, itemnos[i]);
- }
/* XLOG stuff */
if (!rel->rd_istemp)
@@ -666,8 +665,8 @@ _bt_delitems(Relation rel, Buffer buf,
rdata[0].next = &(rdata[1]);
/*
- * The target-offsets array is not in the buffer, but pretend
- * that it is. When XLogInsert stores the whole buffer, the offsets
+ * The target-offsets array is not in the buffer, but pretend that
+ * it is. When XLogInsert stores the whole buffer, the offsets
* array need not be stored too.
*/
rdata[1].buffer = buf;
@@ -701,7 +700,7 @@ _bt_delitems(Relation rel, Buffer buf,
* may currently be trying to follow links leading to the page; they have to
* be allowed to use its right-link to recover. See nbtree/README.
*
- * On entry, the target buffer must be pinned and read-locked. This lock and
+ * On entry, the target buffer must be pinned and read-locked. This lock and
* pin will be dropped before exiting.
*
* Returns the number of pages successfully deleted (zero on failure; could
@@ -714,7 +713,7 @@ _bt_delitems(Relation rel, Buffer buf,
int
_bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
{
- BlockNumber target,
+ BlockNumber target,
leftsib,
rightsib,
parent;
@@ -740,17 +739,18 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
BTPageOpaque opaque;
/*
- * We can never delete rightmost pages nor root pages. While at it,
+ * We can never delete rightmost pages nor root pages. While at it,
* check that page is not already deleted and is empty.
*/
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
- P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
+ P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
{
_bt_relbuf(rel, buf);
return 0;
}
+
/*
* Save info about page, including a copy of its high key (it must
* have one, being non-rightmost).
@@ -760,12 +760,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
leftsib = opaque->btpo_prev;
itemid = PageGetItemId(page, P_HIKEY);
targetkey = CopyBTItem((BTItem) PageGetItem(page, itemid));
+
/*
* We need to get an approximate pointer to the page's parent page.
- * Use the standard search mechanism to search for the page's high key;
- * this will give us a link to either the current parent or someplace
- * to its left (if there are multiple equal high keys). To avoid
- * deadlocks, we'd better drop the target page lock first.
+ * Use the standard search mechanism to search for the page's high
+ * key; this will give us a link to either the current parent or
+ * someplace to its left (if there are multiple equal high keys). To
+ * avoid deadlocks, we'd better drop the target page lock first.
*/
_bt_relbuf(rel, buf);
/* we need a scan key to do our search, so build one */
@@ -775,9 +776,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
&lbuf, BT_READ);
/* don't need a pin on that either */
_bt_relbuf(rel, lbuf);
+
/*
* If we are trying to delete an interior page, _bt_search did more
- * than we needed. Locate the stack item pointing to our parent level.
+ * than we needed. Locate the stack item pointing to our parent
+ * level.
*/
ilevel = 0;
for (;;)
@@ -789,10 +792,12 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
stack = stack->bts_parent;
ilevel++;
}
+
/*
* We have to lock the pages we need to modify in the standard order:
- * moving right, then up. Else we will deadlock against other writers.
- *
+ * moving right, then up. Else we will deadlock against other
+ * writers.
+ *
* So, we need to find and write-lock the current left sibling of the
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if
@@ -823,21 +828,24 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
else
lbuf = InvalidBuffer;
+
/*
- * Next write-lock the target page itself. It should be okay to take just
- * a write lock not a superexclusive lock, since no scans would stop on an
- * empty page.
+ * Next write-lock the target page itself. It should be okay to take
+ * just a write lock not a superexclusive lock, since no scans would
+ * stop on an empty page.
*/
buf = _bt_getbuf(rel, target, BT_WRITE);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
- * Check page is still empty etc, else abandon deletion. The empty check
- * is necessary since someone else might have inserted into it while
- * we didn't have it locked; the others are just for paranoia's sake.
+ * Check page is still empty etc, else abandon deletion. The empty
+ * check is necessary since someone else might have inserted into it
+ * while we didn't have it locked; the others are just for paranoia's
+ * sake.
*/
if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
- P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
+ P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page))
{
_bt_relbuf(rel, buf);
if (BufferIsValid(lbuf))
@@ -846,14 +854,17 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
if (opaque->btpo_prev != leftsib)
elog(ERROR, "left link changed unexpectedly");
+
/*
* And next write-lock the (current) right sibling.
*/
rightsib = opaque->btpo_next;
rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
+
/*
* Next find and write-lock the current parent of the target page.
- * This is essentially the same as the corresponding step of splitting.
+ * This is essentially the same as the corresponding step of
+ * splitting.
*/
ItemPointerSet(&(stack->bts_btitem.bti_itup.t_tid),
target, P_HIKEY);
@@ -863,10 +874,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
RelationGetRelationName(rel));
parent = stack->bts_blkno;
poffset = stack->bts_offset;
+
/*
* If the target is the rightmost child of its parent, then we can't
- * delete, unless it's also the only child --- in which case the parent
- * changes to half-dead status.
+ * delete, unless it's also the only child --- in which case the
+ * parent changes to half-dead status.
*/
page = BufferGetPage(pbuf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -893,12 +905,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
if (OffsetNumberNext(P_FIRSTDATAKEY(opaque)) == maxoff)
parent_one_child = true;
}
+
/*
* If we are deleting the next-to-last page on the target's level,
- * then the rightsib is a candidate to become the new fast root.
- * (In theory, it might be possible to push the fast root even further
- * down, but the odds of doing so are slim, and the locking considerations
- * daunting.)
+ * then the rightsib is a candidate to become the new fast root. (In
+ * theory, it might be possible to push the fast root even further
+ * down, but the odds of doing so are slim, and the locking
+ * considerations daunting.)
*
* We can safely acquire a lock on the metapage here --- see comments for
* _bt_newroot().
@@ -914,12 +927,13 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
metapg = BufferGetPage(metabuf);
metad = BTPageGetMeta(metapg);
+
/*
* The expected case here is btm_fastlevel == targetlevel+1;
- * if the fastlevel is <= targetlevel, something is wrong, and we
- * choose to overwrite it to fix it.
+ * if the fastlevel is <= targetlevel, something is wrong, and
+ * we choose to overwrite it to fix it.
*/
- if (metad->btm_fastlevel > targetlevel+1)
+ if (metad->btm_fastlevel > targetlevel + 1)
{
/* no update wanted */
_bt_relbuf(rel, metabuf);
@@ -937,9 +951,9 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
/*
* Update parent. The normal case is a tad tricky because we want to
- * delete the target's downlink and the *following* key. Easiest way is
- * to copy the right sibling's downlink over the target downlink, and then
- * delete the following item.
+ * delete the target's downlink and the *following* key. Easiest way
+ * is to copy the right sibling's downlink over the target downlink,
+ * and then delete the following item.
*/
page = BufferGetPage(pbuf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -950,7 +964,7 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
else
{
- OffsetNumber nextoffset;
+ OffsetNumber nextoffset;
itemid = PageGetItemId(page, poffset);
btitem = (BTItem) PageGetItem(page, itemid);
@@ -968,8 +982,8 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
}
/*
- * Update siblings' side-links. Note the target page's side-links will
- * continue to point to the siblings.
+ * Update siblings' side-links. Note the target page's side-links
+ * will continue to point to the siblings.
*/
if (BufferIsValid(lbuf))
{
@@ -1096,10 +1110,11 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
_bt_wrtbuf(rel, lbuf);
/*
- * If parent became half dead, recurse to try to delete it. Otherwise,
- * if right sibling is empty and is now the last child of the parent,
- * recurse to try to delete it. (These cases cannot apply at the same
- * time, though the second case might itself recurse to the first.)
+ * If parent became half dead, recurse to try to delete it.
+ * Otherwise, if right sibling is empty and is now the last child of
+ * the parent, recurse to try to delete it. (These cases cannot apply
+ * at the same time, though the second case might itself recurse to
+ * the first.)
*/
if (parent_half_dead)
{
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 3c814725fe..7d0dea4e78 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.103 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.104 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -580,19 +580,20 @@ btbulkdelete(PG_FUNCTION_ARGS)
/*
* The outer loop iterates over index leaf pages, the inner over items
- * on a leaf page. We issue just one _bt_delitems() call per page,
- * so as to minimize WAL traffic.
+ * on a leaf page. We issue just one _bt_delitems() call per page, so
+ * as to minimize WAL traffic.
*
- * Note that we exclusive-lock every leaf page containing data items,
- * in sequence left to right. It sounds attractive to only exclusive-lock
- * those containing items we need to delete, but unfortunately that
- * is not safe: we could then pass a stopped indexscan, which could
- * in rare cases lead to deleting the item it needs to find when it
- * resumes. (See _bt_restscan --- this could only happen if an indexscan
- * stops on a deletable item and then a page split moves that item
- * into a page further to its right, which the indexscan will have no
- * pin on.) We can skip obtaining exclusive lock on empty pages
- * though, since no indexscan could be stopped on those.
+ * Note that we exclusive-lock every leaf page containing data items, in
+ * sequence left to right. It sounds attractive to only
+ * exclusive-lock those containing items we need to delete, but
+ * unfortunately that is not safe: we could then pass a stopped
+ * indexscan, which could in rare cases lead to deleting the item it
+ * needs to find when it resumes. (See _bt_restscan --- this could
+ * only happen if an indexscan stops on a deletable item and then a
+ * page split moves that item into a page further to its right, which
+ * the indexscan will have no pin on.) We can skip obtaining
+ * exclusive lock on empty pages though, since no indexscan could be
+ * stopped on those.
*/
buf = _bt_get_endpoint(rel, 0, false);
if (BufferIsValid(buf)) /* check for empty index */
@@ -604,7 +605,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
OffsetNumber offnum,
minoff,
maxoff;
- BlockNumber nextpage;
+ BlockNumber nextpage;
CHECK_FOR_INTERRUPTS();
@@ -622,12 +623,14 @@ btbulkdelete(PG_FUNCTION_ARGS)
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBufferForCleanup(buf);
+
/*
- * Recompute minoff/maxoff, both of which could have changed
- * while we weren't holding the lock.
+ * Recompute minoff/maxoff, both of which could have
+ * changed while we weren't holding the lock.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
+
/*
* Scan over all items to see which ones need deleted
* according to the callback function.
@@ -640,7 +643,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
ItemPointer htup;
btitem = (BTItem) PageGetItem(page,
- PageGetItemId(page, offnum));
+ PageGetItemId(page, offnum));
htup = &(btitem->bti_itup.t_tid);
if (callback(htup, callback_state))
{
@@ -651,6 +654,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
num_index_tuples += 1;
}
}
+
/*
* If we need to delete anything, do it and write the buffer;
* else just release the buffer.
@@ -662,9 +666,7 @@ btbulkdelete(PG_FUNCTION_ARGS)
_bt_wrtbuf(rel, buf);
}
else
- {
_bt_relbuf(rel, buf);
- }
/* And advance to next page, if any */
if (nextpage == P_NONE)
break;
@@ -712,7 +714,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/* No point in remembering more than MaxFSMPages pages */
maxFreePages = MaxFSMPages;
if ((BlockNumber) maxFreePages > num_pages)
- maxFreePages = (int) num_pages + 1; /* +1 to avoid palloc(0) */
+ maxFreePages = (int) num_pages + 1; /* +1 to avoid palloc(0) */
freePages = (BlockNumber *) palloc(maxFreePages * sizeof(BlockNumber));
nFreePages = 0;
@@ -728,10 +730,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* after we start the scan will not be examined; this should be fine,
* since they can't possibly be empty.)
*/
- for (blkno = BTREE_METAPAGE+1; blkno < num_pages; blkno++)
+ for (blkno = BTREE_METAPAGE + 1; blkno < num_pages; blkno++)
{
- Buffer buf;
- Page page;
+ Buffer buf;
+ Page page;
BTPageOpaque opaque;
buf = _bt_getbuf(rel, blkno, BT_READ);
@@ -753,7 +755,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page))
{
/* Empty, try to delete */
- int ndel;
+ int ndel;
/* Run pagedel in a temp context to avoid memory leakage */
MemoryContextReset(mycontext);
@@ -768,7 +770,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* During VACUUM FULL it's okay to recycle deleted pages
* immediately, since there can be no other transactions
- * scanning the index. Note that we will only recycle the
+ * scanning the index. Note that we will only recycle the
* current page and not any parent pages that _bt_pagedel
* might have recursed to; this seems reasonable in the name
* of simplicity. (Trying to do otherwise would mean we'd
@@ -787,16 +789,16 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
}
/*
- * During VACUUM FULL, we truncate off any recyclable pages at the
- * end of the index. In a normal vacuum it'd be unsafe to do this
- * except by acquiring exclusive lock on the index and then rechecking
- * all the pages; doesn't seem worth it.
+ * During VACUUM FULL, we truncate off any recyclable pages at the end
+ * of the index. In a normal vacuum it'd be unsafe to do this except
+ * by acquiring exclusive lock on the index and then rechecking all
+ * the pages; doesn't seem worth it.
*/
if (info->vacuum_full && nFreePages > 0)
{
- BlockNumber new_pages = num_pages;
+ BlockNumber new_pages = num_pages;
- while (nFreePages > 0 && freePages[nFreePages-1] == new_pages-1)
+ while (nFreePages > 0 && freePages[nFreePages - 1] == new_pages - 1)
{
new_pages--;
pages_deleted--;
@@ -810,9 +812,10 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* Okay to truncate.
*
* First, flush any shared buffers for the blocks we intend to
- * delete. FlushRelationBuffers is a bit more than we need for
- * this, since it will also write out dirty buffers for blocks we
- * aren't deleting, but it's the closest thing in bufmgr's API.
+ * delete. FlushRelationBuffers is a bit more than we need
+ * for this, since it will also write out dirty buffers for
+ * blocks we aren't deleting, but it's the closest thing in
+ * bufmgr's API.
*/
i = FlushRelationBuffers(rel, new_pages);
if (i < 0)
@@ -822,7 +825,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* Do the physical truncation.
*/
new_pages = smgrtruncate(DEFAULT_SMGR, rel, new_pages);
- rel->rd_nblocks = new_pages; /* update relcache immediately */
+ rel->rd_nblocks = new_pages; /* update relcache
+ * immediately */
rel->rd_targblock = InvalidBlockNumber;
num_pages = new_pages;
}
@@ -856,7 +860,7 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* and so no deletion can have occurred on that page.
*
* On entry, we have a pin but no read lock on the buffer that contained
- * the index tuple we stopped the scan on. On exit, we have pin and read
+ * the index tuple we stopped the scan on. On exit, we have pin and read
* lock on the buffer that now contains that index tuple, and the scandesc's
* current position is updated to point at it.
*/
@@ -877,8 +881,8 @@ _bt_restscan(IndexScanDesc scan)
BlockNumber blkno;
/*
- * Reacquire read lock on the buffer. (We should still have
- * a reference-count pin on it, so need not get that.)
+ * Reacquire read lock on the buffer. (We should still have a
+ * reference-count pin on it, so need not get that.)
*/
LockBuffer(buf, BT_READ);
@@ -921,11 +925,11 @@ _bt_restscan(IndexScanDesc scan)
/*
* The item we're looking for moved right at least one page, so
- * move right. We are careful here to pin and read-lock the next
- * non-dead page before releasing the current one. This ensures that
- * a concurrent btbulkdelete scan cannot pass our position --- if it
- * did, it might be able to reach and delete our target item before
- * we can find it again.
+ * move right. We are careful here to pin and read-lock the next
+ * non-dead page before releasing the current one. This ensures
+ * that a concurrent btbulkdelete scan cannot pass our position
+ * --- if it did, it might be able to reach and delete our target
+ * item before we can find it again.
*/
if (P_RIGHTMOST(opaque))
elog(ERROR, "failed to re-find previous key in \"%s\"",
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 457914adf7..80abe195ce 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.77 2003/07/29 22:18:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.78 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,8 +64,8 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
/*
* Race -- the page we just grabbed may have split since we read
- * its pointer in the parent (or metapage). If it has, we may need
- * to move right to its new sibling. Do that.
+ * its pointer in the parent (or metapage). If it has, we may
+ * need to move right to its new sibling. Do that.
*/
*bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ);
@@ -87,14 +87,14 @@ _bt_search(Relation rel, int keysz, ScanKey scankey,
par_blkno = BufferGetBlockNumber(*bufP);
/*
- * We need to save the location of the index entry we chose in
- * the parent page on a stack. In case we split the tree, we'll
- * use the stack to work back up to the parent page. We also save
- * the actual downlink (TID) to uniquely identify the index entry,
- * in case it moves right while we're working lower in the
- * tree. See the paper by Lehman and Yao for how this is detected
- * and handled. (We use the child link to disambiguate duplicate
- * keys in the index -- Lehman and Yao disallow duplicate keys.)
+ * We need to save the location of the index entry we chose in the
+ * parent page on a stack. In case we split the tree, we'll use
+ * the stack to work back up to the parent page. We also save the
+ * actual downlink (TID) to uniquely identify the index entry, in
+ * case it moves right while we're working lower in the tree. See
+ * the paper by Lehman and Yao for how this is detected and
+ * handled. (We use the child link to disambiguate duplicate keys
+ * in the index -- Lehman and Yao disallow duplicate keys.)
*/
new_stack = (BTStack) palloc(sizeof(BTStackData));
new_stack->bts_blkno = par_blkno;
@@ -151,8 +151,8 @@ _bt_moveright(Relation rel,
* might not need to move right; have to scan the page first anyway.)
* It could even have split more than once, so scan as far as needed.
*
- * We also have to move right if we followed a link that brought us to
- * a dead page.
+ * We also have to move right if we followed a link that brought us to a
+ * dead page.
*/
while (!P_RIGHTMOST(opaque) &&
(P_IGNORE(opaque) ||
@@ -599,8 +599,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* At this point we are positioned at the first item >= scan key, or
* possibly at the end of a page on which all the existing items are
- * less than the scan key and we know that everything on later
- * pages is greater than or equal to scan key.
+ * less than the scan key and we know that everything on later pages
+ * is greater than or equal to scan key.
*
* We could step forward in the latter case, but that'd be a waste of
* time if we want to scan backwards. So, it's now time to examine
@@ -851,7 +851,8 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
}
}
- else /* backwards scan */
+ else
+/* backwards scan */
{
if (offnum > P_FIRSTDATAKEY(opaque))
offnum = OffsetNumberPrev(offnum);
@@ -860,9 +861,9 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
/*
* Walk left to the next page with data. This is much more
* complex than the walk-right case because of the possibility
- * that the page to our left splits while we are in flight to it,
- * plus the possibility that the page we were on gets deleted
- * after we leave it. See nbtree/README for details.
+ * that the page to our left splits while we are in flight to
+ * it, plus the possibility that the page we were on gets
+ * deleted after we leave it. See nbtree/README for details.
*/
for (;;)
{
@@ -877,10 +878,11 @@ _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
}
page = BufferGetPage(*bufP);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
* Okay, we managed to move left to a non-deleted page.
- * Done if it's not half-dead and not empty. Else loop back
- * and do it all again.
+ * Done if it's not half-dead and not empty. Else loop
+ * back and do it all again.
*/
if (!P_IGNORE(opaque))
{
@@ -946,17 +948,18 @@ _bt_walk_left(Relation rel, Buffer buf)
buf = _bt_getbuf(rel, blkno, BT_READ);
page = BufferGetPage(buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+
/*
- * If this isn't the page we want, walk right till we find
- * what we want --- but go no more than four hops (an
- * arbitrary limit). If we don't find the correct page by then,
- * the most likely bet is that the original page got deleted
- * and isn't in the sibling chain at all anymore, not that its
- * left sibling got split more than four times.
+ * If this isn't the page we want, walk right till we find what we
+ * want --- but go no more than four hops (an arbitrary limit).
+ * If we don't find the correct page by then, the most likely bet
+ * is that the original page got deleted and isn't in the sibling
+ * chain at all anymore, not that its left sibling got split more
+ * than four times.
*
- * Note that it is correct to test P_ISDELETED not P_IGNORE
- * here, because half-dead pages are still in the sibling
- * chain. Caller must reject half-dead pages if wanted.
+ * Note that it is correct to test P_ISDELETED not P_IGNORE here,
+ * because half-dead pages are still in the sibling chain. Caller
+ * must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)
@@ -983,8 +986,8 @@ _bt_walk_left(Relation rel, Buffer buf)
if (P_ISDELETED(opaque))
{
/*
- * It was deleted. Move right to first nondeleted page
- * (there must be one); that is the page that has acquired the
+ * It was deleted. Move right to first nondeleted page (there
+ * must be one); that is the page that has acquired the
* deleted one's keyspace, so stepping left from it will take
* us where we want to be.
*/
@@ -1001,18 +1004,18 @@ _bt_walk_left(Relation rel, Buffer buf)
if (!P_ISDELETED(opaque))
break;
}
+
/*
- * Now return to top of loop, resetting obknum to
- * point to this nondeleted page, and try again.
+ * Now return to top of loop, resetting obknum to point to
+ * this nondeleted page, and try again.
*/
}
else
{
/*
- * It wasn't deleted; the explanation had better be
- * that the page to the left got split or deleted.
- * Without this check, we'd go into an infinite loop
- * if there's anything wrong.
+ * It wasn't deleted; the explanation had better be that the
+ * page to the left got split or deleted. Without this check,
+ * we'd go into an infinite loop if there's anything wrong.
*/
if (opaque->btpo_prev == lblkno)
elog(ERROR, "could not find left sibling in \"%s\"",
@@ -1028,7 +1031,7 @@ _bt_walk_left(Relation rel, Buffer buf)
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes ereport(). We will not return a dead page.
+ * condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
@@ -1045,8 +1048,8 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
/*
* If we are looking for a leaf page, okay to descend from fast root;
- * otherwise better descend from true root. (There is no point in being
- * smarter about intermediate levels.)
+ * otherwise better descend from true root. (There is no point in
+ * being smarter about intermediate levels.)
*/
if (level == 0)
buf = _bt_getroot(rel, BT_READ);
@@ -1066,9 +1069,9 @@ _bt_get_endpoint(Relation rel, uint32 level, bool rightmost)
{
/*
* If we landed on a deleted page, step right to find a live page
- * (there must be one). Also, if we want the rightmost page,
- * step right if needed to get to it (this could happen if the
- * page split since we obtained a pointer to it).
+ * (there must be one). Also, if we want the rightmost page, step
+ * right if needed to get to it (this could happen if the page
+ * split since we obtained a pointer to it).
*/
while (P_IGNORE(opaque) ||
(rightmost && !P_RIGHTMOST(opaque)))
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 92a73021f6..f8eb671df7 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.73 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.74 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ typedef struct BTPageState
static void _bt_blnewpage(Relation index, Buffer *buf, Page *page,
- uint32 level);
+ uint32 level);
static BTPageState *_bt_pagestate(Relation index, uint32 level);
static void _bt_slideleft(Relation index, Buffer buf, Page page);
static void _bt_sortaddtup(Page page, Size itemsize,
@@ -469,7 +469,7 @@ _bt_buildadd(Relation index, BTPageState *state, BTItem bti)
oopaque->btpo_next = BufferGetBlockNumber(nbuf);
nopaque->btpo_prev = BufferGetBlockNumber(obuf);
- nopaque->btpo_next = P_NONE; /* redundant */
+ nopaque->btpo_next = P_NONE; /* redundant */
}
/*
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index a1a52571fe..35e5ae6ccb 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.3 2003/02/23 22:43:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.4 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,10 +29,10 @@
typedef struct bt_incomplete_split
{
RelFileNode node; /* the index */
- BlockNumber leftblk; /* left half of split */
- BlockNumber rightblk; /* right half of split */
+ BlockNumber leftblk; /* left half of split */
+ BlockNumber rightblk; /* right half of split */
bool is_root; /* we split the root */
-} bt_incomplete_split;
+} bt_incomplete_split;
static List *incomplete_splits;
@@ -107,7 +107,7 @@ _bt_restore_page(Page page, char *from, int len)
}
static void
-_bt_restore_meta(Relation reln, XLogRecPtr lsn,
+_bt_restore_meta(Relation reln, XLogRecPtr lsn,
BlockNumber root, uint32 level,
BlockNumber fastroot, uint32 fastlevel)
{
@@ -172,7 +172,7 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (!redo || !(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(false, reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)));
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
elog(PANIC, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
@@ -183,13 +183,11 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (redo)
{
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
if (PageAddItem(page, (Item) datapos, datalen,
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber)
elog(PANIC, "btree_insert_redo: failed to add item");
@@ -204,13 +202,9 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
elog(PANIC, "btree_insert_undo: bad page LSN");
if (!P_ISLEAF(pageop))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
- {
elog(PANIC, "btree_insert_undo: unimplemented");
- }
}
}
@@ -226,8 +220,8 @@ btree_xlog_insert(bool redo, bool isleaf, bool ismeta,
if (redo && !isleaf && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}
}
@@ -238,9 +232,9 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
{
xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
Relation reln;
- BlockNumber targetblk;
- BlockNumber leftsib;
- BlockNumber rightsib;
+ BlockNumber targetblk;
+ BlockNumber leftsib;
+ BlockNumber rightsib;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@@ -338,9 +332,7 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
elog(PANIC, "btree_split_redo: uninitialized next right page");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -357,8 +349,8 @@ btree_xlog_split(bool redo, bool onleft, bool isroot,
if (redo && xlrec->level > 0 && incomplete_splits != NIL)
{
forget_matching_split(reln, xlrec->target.node,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
false);
}
@@ -422,10 +414,10 @@ btree_xlog_delete_page(bool redo, bool ismeta,
{
xl_btree_delete_page *xlrec = (xl_btree_delete_page *) XLogRecGetData(record);
Relation reln;
- BlockNumber parent;
- BlockNumber target;
- BlockNumber leftsib;
- BlockNumber rightsib;
+ BlockNumber parent;
+ BlockNumber target;
+ BlockNumber leftsib;
+ BlockNumber rightsib;
Buffer buffer;
Page page;
BTPageOpaque pageop;
@@ -451,9 +443,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized parent page");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
OffsetNumber poffset;
@@ -469,7 +459,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
{
ItemId itemid;
BTItem btitem;
- OffsetNumber nextoffset;
+ OffsetNumber nextoffset;
itemid = PageGetItemId(page, poffset);
btitem = (BTItem) PageGetItem(page, itemid);
@@ -494,9 +484,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized right sibling");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -520,9 +508,7 @@ btree_xlog_delete_page(bool redo, bool ismeta,
if (PageIsNew((PageHeader) page))
elog(PANIC, "btree_delete_page_redo: uninitialized left sibling");
if (XLByteLE(lsn, PageGetLSN(page)))
- {
UnlockAndReleaseBuffer(buffer);
- }
else
{
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
@@ -799,116 +785,116 @@ btree_desc(char *buf, uint8 xl_info, char *rec)
switch (info)
{
case XLOG_BTREE_INSERT_LEAF:
- {
- xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+ {
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
- strcat(buf, "insert: ");
- out_target(buf, &(xlrec->target));
- break;
- }
+ strcat(buf, "insert: ");
+ out_target(buf, &(xlrec->target));
+ break;
+ }
case XLOG_BTREE_INSERT_UPPER:
- {
- xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+ {
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
- strcat(buf, "insert_upper: ");
- out_target(buf, &(xlrec->target));
- break;
- }
+ strcat(buf, "insert_upper: ");
+ out_target(buf, &(xlrec->target));
+ break;
+ }
case XLOG_BTREE_INSERT_META:
- {
- xl_btree_insert *xlrec = (xl_btree_insert *) rec;
+ {
+ xl_btree_insert *xlrec = (xl_btree_insert *) rec;
- strcat(buf, "insert_meta: ");
- out_target(buf, &(xlrec->target));
- break;
- }
+ strcat(buf, "insert_meta: ");
+ out_target(buf, &(xlrec->target));
+ break;
+ }
case XLOG_BTREE_SPLIT_L:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_l: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_l: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_SPLIT_R:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_r: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_r: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_SPLIT_L_ROOT:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_l_root: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_l_root: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_SPLIT_R_ROOT:
- {
- xl_btree_split *xlrec = (xl_btree_split *) rec;
+ {
+ xl_btree_split *xlrec = (xl_btree_split *) rec;
- strcat(buf, "split_r_root: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "split_r_root: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; oth %u; rgh %u",
+ xlrec->otherblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_DELETE:
- {
- xl_btree_delete *xlrec = (xl_btree_delete *) rec;
+ {
+ xl_btree_delete *xlrec = (xl_btree_delete *) rec;
- sprintf(buf + strlen(buf), "delete: node %u/%u; blk %u",
- xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
- break;
- }
+ sprintf(buf + strlen(buf), "delete: node %u/%u; blk %u",
+ xlrec->node.tblNode, xlrec->node.relNode, xlrec->block);
+ break;
+ }
case XLOG_BTREE_DELETE_PAGE:
case XLOG_BTREE_DELETE_PAGE_META:
- {
- xl_btree_delete_page *xlrec = (xl_btree_delete_page *) rec;
+ {
+ xl_btree_delete_page *xlrec = (xl_btree_delete_page *) rec;
- strcat(buf, "delete_page: ");
- out_target(buf, &(xlrec->target));
- sprintf(buf + strlen(buf), "; dead %u; left %u; right %u",
- xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
- break;
- }
+ strcat(buf, "delete_page: ");
+ out_target(buf, &(xlrec->target));
+ sprintf(buf + strlen(buf), "; dead %u; left %u; right %u",
+ xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
+ break;
+ }
case XLOG_BTREE_NEWROOT:
- {
- xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
+ {
+ xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
- sprintf(buf + strlen(buf), "newroot: node %u/%u; root %u lev %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- xlrec->rootblk, xlrec->level);
- break;
- }
+ sprintf(buf + strlen(buf), "newroot: node %u/%u; root %u lev %u",
+ xlrec->node.tblNode, xlrec->node.relNode,
+ xlrec->rootblk, xlrec->level);
+ break;
+ }
case XLOG_BTREE_NEWMETA:
- {
- xl_btree_newmeta *xlrec = (xl_btree_newmeta *) rec;
+ {
+ xl_btree_newmeta *xlrec = (xl_btree_newmeta *) rec;
- sprintf(buf + strlen(buf), "newmeta: node %u/%u; root %u lev %u fast %u lev %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- xlrec->meta.root, xlrec->meta.level,
- xlrec->meta.fastroot, xlrec->meta.fastlevel);
- break;
- }
+ sprintf(buf + strlen(buf), "newmeta: node %u/%u; root %u lev %u fast %u lev %u",
+ xlrec->node.tblNode, xlrec->node.relNode,
+ xlrec->meta.root, xlrec->meta.level,
+ xlrec->meta.fastroot, xlrec->meta.fastlevel);
+ break;
+ }
case XLOG_BTREE_NEWPAGE:
- {
- xl_btree_newpage *xlrec = (xl_btree_newpage *) rec;
+ {
+ xl_btree_newpage *xlrec = (xl_btree_newpage *) rec;
- sprintf(buf + strlen(buf), "newpage: node %u/%u; page %u",
- xlrec->node.tblNode, xlrec->node.relNode,
- xlrec->blkno);
- break;
- }
+ sprintf(buf + strlen(buf), "newpage: node %u/%u; page %u",
+ xlrec->node.tblNode, xlrec->node.relNode,
+ xlrec->blkno);
+ break;
+ }
default:
strcat(buf, "UNKNOWN");
break;
diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c
index 6358d622e1..4362835d70 100644
--- a/src/backend/access/rtree/rtscan.c
+++ b/src/backend/access/rtree/rtscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.45 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.46 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -109,10 +109,10 @@ rtrescan(PG_FUNCTION_ARGS)
s->numberOfKeys * sizeof(ScanKeyData));
/*
- * Scans on internal pages use different operators than they
- * do on leaf pages. For example, if the user wants all boxes
- * that exactly match (x1,y1,x2,y2), then on internal pages we
- * need to find all boxes that contain (x1,y1,x2,y2).
+ * Scans on internal pages use different operators than they do on
+ * leaf pages. For example, if the user wants all boxes that
+ * exactly match (x1,y1,x2,y2), then on internal pages we need to
+ * find all boxes that contain (x1,y1,x2,y2).
*/
for (i = 0; i < s->numberOfKeys; i++)
{
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 3653d05bc1..6741e5436d 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.16 2003/06/11 22:37:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.17 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -73,7 +73,7 @@
static SlruCtlData ClogCtlData;
static SlruCtl ClogCtl = &ClogCtlData;
-
+
static int ZeroCLOGPage(int pageno, bool writeXlog);
static bool CLOGPagePrecedes(int page1, int page2);
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index 59af280802..444d2b97d7 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -3,7 +3,7 @@
*
* Resource managers definition
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.10 2003/02/21 00:06:22 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/rmgr.c,v 1.11 2003/08/04 00:43:15 momjian Exp $
*/
#include "postgres.h"
@@ -19,7 +19,7 @@
#include "commands/sequence.h"
-RmgrData RmgrTable[RM_MAX_ID+1] = {
+RmgrData RmgrTable[RM_MAX_ID + 1] = {
{"XLOG", xlog_redo, xlog_undo, xlog_desc, NULL, NULL},
{"Transaction", xact_redo, xact_undo, xact_desc, NULL, NULL},
{"Storage", smgr_redo, smgr_undo, smgr_desc, NULL, NULL},
@@ -32,7 +32,7 @@ RmgrData RmgrTable[RM_MAX_ID+1] = {
{"Reserved 9", NULL, NULL, NULL, NULL, NULL},
{"Heap", heap_redo, heap_undo, heap_desc, NULL, NULL},
{"Btree", btree_redo, btree_undo, btree_desc,
- btree_xlog_startup, btree_xlog_cleanup},
+ btree_xlog_startup, btree_xlog_cleanup},
{"Hash", hash_redo, hash_undo, hash_desc, NULL, NULL},
{"Rtree", rtree_redo, rtree_undo, rtree_desc, NULL, NULL},
{"Gist", gist_redo, gist_undo, gist_desc, NULL, NULL},
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index 5129dd3c7e..1c290f2cf5 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2003, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.3 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/slru.c,v 1.4 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,7 +93,7 @@ typedef enum
SLRU_PAGE_CLEAN, /* page is valid and not dirty */
SLRU_PAGE_DIRTY, /* page is valid but needs write */
SLRU_PAGE_WRITE_IN_PROGRESS /* page is being written out */
-} SlruPageStatus;
+} SlruPageStatus;
/*
* Shared-memory state
@@ -117,7 +117,7 @@ typedef struct SlruSharedData
* swapping out the latest page.
*/
int latest_page_number;
-} SlruSharedData;
+} SlruSharedData;
typedef SlruSharedData *SlruShared;
@@ -145,7 +145,7 @@ typedef enum
SLRU_SEEK_FAILED,
SLRU_READ_FAILED,
SLRU_WRITE_FAILED
-} SlruErrorCause;
+} SlruErrorCause;
static SlruErrorCause slru_errcause;
static int slru_errno;
@@ -166,9 +166,9 @@ SimpleLruShmemSize(void)
{
return MAXALIGN(sizeof(SlruSharedData)) + BLCKSZ * NUM_CLOG_BUFFERS
#ifdef EXEC_BACKEND
- + MAXALIGN(sizeof(SlruLockData))
+ + MAXALIGN(sizeof(SlruLockData))
#endif
- ;
+ ;
}
void
@@ -183,12 +183,14 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
shared = (SlruShared) ptr;
#ifdef EXEC_BACKEND
+
/*
* Locks are in shared memory
*/
- locks = (SlruLock)(ptr + MAXALIGN(sizeof(SlruSharedData)) +
- BLCKSZ * NUM_CLOG_BUFFERS);
+ locks = (SlruLock) (ptr + MAXALIGN(sizeof(SlruSharedData)) +
+ BLCKSZ * NUM_CLOG_BUFFERS);
#else
+
/*
* Locks are in private memory
*/
@@ -199,7 +201,7 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
if (!IsUnderPostmaster)
- /* Initialize locks and shared memory area */
+ /* Initialize locks and shared memory area */
{
char *bufptr;
int slotno;
@@ -210,8 +212,8 @@ SimpleLruInit(SlruCtl ctl, const char *name, const char *subdir)
memset(shared, 0, sizeof(SlruSharedData));
- bufptr = (char *)shared + MAXALIGN(sizeof(SlruSharedData));
-
+ bufptr = (char *) shared + MAXALIGN(sizeof(SlruSharedData));
+
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
locks->BufferLocks[slotno] = LWLockAssign();
@@ -247,7 +249,7 @@ int
SimpleLruZeroPage(SlruCtl ctl, int pageno)
{
int slotno;
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Find a suitable buffer slot for the page */
slotno = SlruSelectLRUPage(ctl, pageno);
@@ -285,7 +287,7 @@ SimpleLruZeroPage(SlruCtl ctl, int pageno)
char *
SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid, bool forwrite)
{
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Outer loop handles restart if we lose the buffer to someone else */
for (;;)
@@ -383,7 +385,7 @@ SimpleLruWritePage(SlruCtl ctl, int slotno)
{
int pageno;
bool ok;
- SlruShared shared = (SlruShared) ctl->shared;
+ SlruShared shared = (SlruShared) ctl->shared;
/* Do nothing if page does not need writing */
if (shared->page_status[slotno] != SLRU_PAGE_DIRTY &&
@@ -539,13 +541,13 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno)
* possible for this to need to happen when writing a page that's not
* first in its segment; we assume the OS can cope with that. (Note:
* it might seem that it'd be okay to create files only when
- * SimpleLruZeroPage is called for the first page of a segment. However,
- * if after a crash and restart the REDO logic elects to replay the
- * log from a checkpoint before the latest one, then it's possible
- * that we will get commands to set transaction status of transactions
- * that have already been truncated from the commit log. Easiest way
- * to deal with that is to accept references to nonexistent files here
- * and in SlruPhysicalReadPage.)
+ * SimpleLruZeroPage is called for the first page of a segment.
+ * However, if after a crash and restart the REDO logic elects to
+ * replay the log from a checkpoint before the latest one, then it's
+ * possible that we will get commands to set transaction status of
+ * transactions that have already been truncated from the commit log.
+ * Easiest way to deal with that is to accept references to
+ * nonexistent files here and in SlruPhysicalReadPage.)
*/
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
@@ -608,37 +610,37 @@ SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid)
case SLRU_OPEN_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("open of file \"%s\" failed: %m",
path)));
break;
case SLRU_CREATE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
+ errmsg("could not access status of transaction %u", xid),
errdetail("creation of file \"%s\" failed: %m",
path)));
break;
case SLRU_SEEK_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("lseek of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("lseek of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("read of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("read of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not access status of transaction %u", xid),
- errdetail("write of file \"%s\", offset %u failed: %m",
- path, offset)));
+ errmsg("could not access status of transaction %u", xid),
+ errdetail("write of file \"%s\", offset %u failed: %m",
+ path, offset)));
break;
default:
/* can't get here, we trust */
@@ -665,6 +667,7 @@ static int
SlruSelectLRUPage(SlruCtl ctl, int pageno)
{
SlruShared shared = (SlruShared) ctl->shared;
+
/* Outer loop handles restart after I/O */
for (;;)
{
@@ -689,7 +692,7 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
if (shared->page_status[slotno] == SLRU_PAGE_EMPTY)
return slotno;
if (shared->page_lru_count[slotno] > bestcount &&
- shared->page_number[slotno] != shared->latest_page_number)
+ shared->page_number[slotno] != shared->latest_page_number)
{
bestslot = slotno;
bestcount = shared->page_lru_count[slotno];
@@ -705,12 +708,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* We need to do I/O. Normal case is that we have to write it
* out, but it's possible in the worst case to have selected a
- * read-busy page. In that case we use SimpleLruReadPage to wait for
- * the read to complete.
+ * read-busy page. In that case we use SimpleLruReadPage to wait
+ * for the read to complete.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS)
(void) SimpleLruReadPage(ctl, shared->page_number[bestslot],
- InvalidTransactionId, false);
+ InvalidTransactionId, false);
else
SimpleLruWritePage(ctl, bestslot);
@@ -747,10 +750,11 @@ SimpleLruFlush(SlruCtl ctl, bool checkpoint)
for (slotno = 0; slotno < NUM_CLOG_BUFFERS; slotno++)
{
SimpleLruWritePage(ctl, slotno);
+
/*
- * When called during a checkpoint,
- * we cannot assert that the slot is clean now, since another
- * process might have re-dirtied it already. That's okay.
+ * When called during a checkpoint, we cannot assert that the slot
+ * is clean now, since another process might have re-dirtied it
+ * already. That's okay.
*/
Assert(checkpoint ||
shared->page_status[slotno] == SLRU_PAGE_EMPTY ||
@@ -792,10 +796,10 @@ SimpleLruTruncate(SlruCtl ctl, int cutoffPage)
CreateCheckPoint(false, true);
/*
- * Scan shared memory and remove any pages preceding the cutoff
- * page, to ensure we won't rewrite them later. (Any dirty pages
- * should have been flushed already during the checkpoint, we're just
- * being extra careful here.)
+ * Scan shared memory and remove any pages preceding the cutoff page,
+ * to ensure we won't rewrite them later. (Any dirty pages should
+ * have been flushed already during the checkpoint, we're just being
+ * extra careful here.)
*/
LWLockAcquire(ctl->locks->ControlLock, LW_EXCLUSIVE);
@@ -870,7 +874,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (cldir == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open directory \"%s\": %m", ctl->Dir)));
+ errmsg("could not open directory \"%s\": %m", ctl->Dir)));
errno = 0;
while ((clde = readdir(cldir)) != NULL)
@@ -898,7 +902,7 @@ SlruScanDirectory(SlruCtl ctl, int cutoffPage, bool doDeletions)
if (errno)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read directory \"%s\": %m", ctl->Dir)));
+ errmsg("could not read directory \"%s\": %m", ctl->Dir)));
closedir(cldir);
return found;
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 40b41519a9..550f2ae924 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.149 2003/07/21 20:29:39 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.150 2003/08/04 00:43:15 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
@@ -92,7 +92,7 @@
* AbortTransactionBlock
*
* These are invoked only in response to a user "BEGIN WORK", "COMMIT",
- * or "ROLLBACK" command. The tricky part about these functions
+ * or "ROLLBACK" command. The tricky part about these functions
* is that they are called within the postgres main loop, in between
* the StartTransactionCommand() and CommitTransactionCommand().
*
@@ -197,8 +197,8 @@ static TransactionStateData CurrentTransactionStateData = {
0, /* scan command id */
0x0, /* start time */
TRANS_DEFAULT, /* transaction state */
- TBLOCK_DEFAULT /* transaction block state from
- the client perspective */
+ TBLOCK_DEFAULT /* transaction block state from the client
+ * perspective */
};
TransactionState CurrentTransactionState = &CurrentTransactionStateData;
@@ -359,7 +359,7 @@ GetCurrentTransactionStartTimeUsec(int *msec)
* TransactionIdIsCurrentTransactionId
*
* During bootstrap, we cheat and say "it's not my transaction ID" even though
- * it is. Along with transam.c's cheat to say that the bootstrap XID is
+ * it is. Along with transam.c's cheat to say that the bootstrap XID is
* already committed, this causes the tqual.c routines to see previously
* inserted tuples as committed, which is what we need during bootstrap.
*/
@@ -561,13 +561,13 @@ RecordTransactionCommit(void)
/*
* We must mark the transaction committed in clog if its XID
- * appears either in permanent rels or in local temporary rels.
- * We test this by seeing if we made transaction-controlled
- * entries *OR* local-rel tuple updates. Note that if we made
- * only the latter, we have not emitted an XLOG record for our
- * commit, and so in the event of a crash the clog update might be
- * lost. This is okay because no one else will ever care whether
- * we committed.
+ * appears either in permanent rels or in local temporary rels. We
+ * test this by seeing if we made transaction-controlled entries
+ * *OR* local-rel tuple updates. Note that if we made only the
+ * latter, we have not emitted an XLOG record for our commit, and
+ * so in the event of a crash the clog update might be lost. This
+ * is okay because no one else will ever care whether we
+ * committed.
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
TransactionIdCommit(xid);
@@ -755,9 +755,9 @@ AtAbort_Memory(void)
{
/*
* Make sure we are in a valid context (not a child of
- * TopTransactionContext...). Note that it is possible for this
- * code to be called when we aren't in a transaction at all; go
- * directly to TopMemoryContext in that case.
+ * TopTransactionContext...). Note that it is possible for this code
+ * to be called when we aren't in a transaction at all; go directly to
+ * TopMemoryContext in that case.
*/
if (TopTransactionContext != NULL)
{
@@ -891,8 +891,8 @@ CommitTransaction(void)
DeferredTriggerEndXact();
/*
- * Similarly, let ON COMMIT management do its thing before we start
- * to commit.
+ * Similarly, let ON COMMIT management do its thing before we start to
+ * commit.
*/
PreCommit_on_commit_actions();
@@ -953,10 +953,10 @@ CommitTransaction(void)
* noncritical resource releasing.
*
* The ordering of operations is not entirely random. The idea is:
- * release resources visible to other backends (eg, files, buffer pins);
- * then release locks; then release backend-local resources. We want
- * to release locks at the point where any backend waiting for us will
- * see our transaction as being fully cleaned up.
+ * release resources visible to other backends (eg, files, buffer
+ * pins); then release locks; then release backend-local resources.
+ * We want to release locks at the point where any backend waiting for
+ * us will see our transaction as being fully cleaned up.
*/
smgrDoPendingDeletes(true);
@@ -1064,7 +1064,7 @@ AbortTransaction(void)
}
/*
- * Post-abort cleanup. See notes in CommitTransaction() concerning
+ * Post-abort cleanup. See notes in CommitTransaction() concerning
* ordering.
*/
@@ -1194,8 +1194,8 @@ StartTransactionCommand(void)
}
/*
- * We must switch to TopTransactionContext before returning. This
- * is already done if we called StartTransaction, otherwise not.
+ * We must switch to TopTransactionContext before returning. This is
+ * already done if we called StartTransaction, otherwise not.
*/
Assert(TopTransactionContext != NULL);
MemoryContextSwitchTo(TopTransactionContext);
@@ -1370,9 +1370,10 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (IsTransactionBlock())
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s cannot run inside a transaction block",
stmtType)));
+
/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@@ -1381,8 +1382,8 @@ PreventTransactionChain(void *stmtNode, const char *stmtType)
if (!MemoryContextContains(QueryContext, stmtNode))
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
- errmsg("%s cannot be executed from a function", stmtType)));
+ /* translator: %s represents an SQL statement name */
+ errmsg("%s cannot be executed from a function", stmtType)));
/* If we got past IsTransactionBlock test, should be in default state */
if (CurrentTransactionState->blockState != TBLOCK_DEFAULT)
elog(ERROR, "cannot prevent transaction chain");
@@ -1414,6 +1415,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
*/
if (IsTransactionBlock())
return;
+
/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
@@ -1423,7 +1425,7 @@ RequireTransactionChain(void *stmtNode, const char *stmtType)
return;
ereport(ERROR,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
- /* translator: %s represents an SQL statement name */
+ /* translator: %s represents an SQL statement name */
errmsg("%s may only be used in BEGIN/END transaction blocks",
stmtType)));
}
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 0ceb8951cb..45a2743ba9 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.120 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.121 2003/08/04 00:43:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1046,8 +1046,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
XLByteToPrevSeg(LogwrtResult.Write, openLogId, openLogSeg);
@@ -1162,8 +1162,8 @@ XLogWrite(XLogwrtRqst WriteRqst)
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
if (openLogFile < 0)
@@ -1266,7 +1266,7 @@ XLogFlush(XLogRecPtr record)
XLogCtlInsert *Insert = &XLogCtl->Insert;
uint32 freespace = INSERT_FREESPACE(Insert);
- if (freespace < SizeOfXLogRecord) /* buffer is full */
+ if (freespace < SizeOfXLogRecord) /* buffer is full */
WriteRqstPtr = XLogCtl->xlblocks[Insert->curridx];
else
{
@@ -1449,8 +1449,8 @@ XLogFileInit(uint32 log, uint32 seg,
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
@@ -1563,14 +1563,14 @@ XLogFileOpen(uint32 log, uint32 seg, bool econt)
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
- path, log, seg)));
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
}
return (fd);
@@ -1621,8 +1621,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (xldir == NULL)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not open transaction log directory \"%s\": %m",
- XLogDir)));
+ errmsg("could not open transaction log directory \"%s\": %m",
+ XLogDir)));
sprintf(lastoff, "%08X%08X", log, seg);
@@ -1654,15 +1654,15 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
true))
{
ereport(LOG,
- (errmsg("recycled transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("recycled transaction log file \"%s\"",
+ xlde->d_name)));
}
else
{
/* No need for any more future segments... */
ereport(LOG,
- (errmsg("removing transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("removing transaction log file \"%s\"",
+ xlde->d_name)));
unlink(path);
}
}
@@ -1672,8 +1672,8 @@ MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr)
if (errno)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not read transaction log directory \"%s\": %m",
- XLogDir)));
+ errmsg("could not read transaction log directory \"%s\": %m",
+ XLogDir)));
closedir(xldir);
}
@@ -1746,8 +1746,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(record->xl_crc, crc))
{
ereport(emode,
- (errmsg("bad resource manager data checksum in record at %X/%X",
- recptr.xlogid, recptr.xrecoff)));
+ (errmsg("bad resource manager data checksum in record at %X/%X",
+ recptr.xlogid, recptr.xrecoff)));
return (false);
}
@@ -1769,8 +1769,8 @@ RecordIsValid(XLogRecord *record, XLogRecPtr recptr, int emode)
if (!EQ_CRC64(cbuf, crc))
{
ereport(emode,
- (errmsg("bad checksum of backup block %d in record at %X/%X",
- i + 1, recptr.xlogid, recptr.xrecoff)));
+ (errmsg("bad checksum of backup block %d in record at %X/%X",
+ i + 1, recptr.xlogid, recptr.xrecoff)));
return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
@@ -1931,7 +1931,7 @@ got_record:;
{
ereport(emode,
(errmsg("invalid resource manager id %u at %X/%X",
- record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
+ record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
nextRecord = NULL;
@@ -2063,7 +2063,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
{
ereport(emode,
(errmsg("unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
- hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
+ hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
readId, readSeg, readOff)));
return false;
}
@@ -2084,7 +2084,7 @@ ValidXLOGHeader(XLogPageHeader hdr, int emode, bool checkSUI)
hdr->xlp_sui > lastReadSUI + 512)
{
ereport(emode,
- /* translator: SUI = startup id */
+ /* translator: SUI = startup id */
(errmsg("out-of-sequence SUI %u (after %u) in log file %u, segment %u, offset %u",
hdr->xlp_sui, lastReadSUI,
readId, readSeg, readOff)));
@@ -2235,8 +2235,8 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
- " but the server was compiled with PG_CONTROL_VERSION %d.",
- ControlFile->pg_control_version, PG_CONTROL_VERSION),
+ " but the server was compiled with PG_CONTROL_VERSION %d.",
+ ControlFile->pg_control_version, PG_CONTROL_VERSION),
errhint("It looks like you need to initdb.")));
/* Now check the CRC. */
INIT_CRC64(crc);
@@ -2265,75 +2265,75 @@ ReadControlFile(void)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d,"
- " but the server was compiled with CATALOG_VERSION_NO %d.",
- ControlFile->catalog_version_no, CATALOG_VERSION_NO),
+ " but the server was compiled with CATALOG_VERSION_NO %d.",
+ ControlFile->catalog_version_no, CATALOG_VERSION_NO),
errhint("It looks like you need to initdb.")));
if (ControlFile->blcksz != BLCKSZ)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with BLCKSZ %d,"
- " but the server was compiled with BLCKSZ %d.",
- ControlFile->blcksz, BLCKSZ),
- errhint("It looks like you need to recompile or initdb.")));
+ errdetail("The database cluster was initialized with BLCKSZ %d,"
+ " but the server was compiled with BLCKSZ %d.",
+ ControlFile->blcksz, BLCKSZ),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->relseg_size != RELSEG_SIZE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
- " but the server was compiled with RELSEG_SIZE %d.",
+ " but the server was compiled with RELSEG_SIZE %d.",
ControlFile->relseg_size, RELSEG_SIZE),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->nameDataLen != NAMEDATALEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with NAMEDATALEN %d,"
- " but the server was compiled with NAMEDATALEN %d.",
+ " but the server was compiled with NAMEDATALEN %d.",
ControlFile->nameDataLen, NAMEDATALEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->funcMaxArgs != FUNC_MAX_ARGS)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with FUNC_MAX_ARGS %d,"
- " but the server was compiled with FUNC_MAX_ARGS %d.",
+ " but the server was compiled with FUNC_MAX_ARGS %d.",
ControlFile->funcMaxArgs, FUNC_MAX_ARGS),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
#ifdef HAVE_INT64_TIMESTAMP
if (ControlFile->enableIntTimes != TRUE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized without HAVE_INT64_TIMESTAMP"
- " but the server was compiled with HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled with HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#else
if (ControlFile->enableIntTimes != FALSE)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with HAVE_INT64_TIMESTAMP"
- " but the server was compiled without HAVE_INT64_TIMESTAMP."),
- errhint("It looks like you need to recompile or initdb.")));
+ " but the server was compiled without HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#endif
if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with LOCALE_NAME_BUFLEN %d,"
- " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
+ " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
ControlFile->localeBuflen, LOCALE_NAME_BUFLEN),
- errhint("It looks like you need to recompile or initdb.")));
+ errhint("It looks like you need to recompile or initdb.")));
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_collate),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_collate),
+ errhint("It looks like you need to initdb or install locale support.")));
if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
ereport(FATAL,
- (errmsg("database files are incompatible with operating system"),
- errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
- " which is not recognized by setlocale().",
- ControlFile->lc_ctype),
- errhint("It looks like you need to initdb or install locale support.")));
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_ctype),
+ errhint("It looks like you need to initdb or install locale support.")));
/* Make the fixed locale settings visible as GUC variables, too */
SetConfigOption("lc_collate", ControlFile->lc_collate,
@@ -2602,10 +2602,10 @@ StartupXLOG(void)
str_time(ControlFile->time))));
else if (ControlFile->state == DB_IN_RECOVERY)
ereport(LOG,
- (errmsg("database system was interrupted while in recovery at %s",
- str_time(ControlFile->time)),
- errhint("This probably means that some data is corrupted and"
- " you will have to use the last backup for recovery.")));
+ (errmsg("database system was interrupted while in recovery at %s",
+ str_time(ControlFile->time)),
+ errhint("This probably means that some data is corrupted and"
+ " you will have to use the last backup for recovery.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
(errmsg("database system was interrupted at %s",
@@ -2637,12 +2637,12 @@ StartupXLOG(void)
checkPointLoc = ControlFile->prevCheckPoint;
ereport(LOG,
(errmsg("using previous checkpoint record at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
ereport(PANIC,
- (errmsg("could not locate a valid checkpoint record")));
+ (errmsg("could not locate a valid checkpoint record")));
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
@@ -2665,11 +2665,12 @@ StartupXLOG(void)
ShmemVariableCache->oidCount = 0;
/*
- * If it was a shutdown checkpoint, then any following WAL entries were
- * created under the next StartUpID; if it was a regular checkpoint then
- * any following WAL entries were created under the same StartUpID.
- * We must replay WAL entries using the same StartUpID they were created
- * under, so temporarily adopt that SUI (see also xlog_redo()).
+ * If it was a shutdown checkpoint, then any following WAL entries
+ * were created under the next StartUpID; if it was a regular
+ * checkpoint then any following WAL entries were created under the
+ * same StartUpID. We must replay WAL entries using the same StartUpID
+ * they were created under, so temporarily adopt that SUI (see also
+ * xlog_redo()).
*/
if (wasShutdown)
ThisStartUpID = checkPoint.ThisStartUpID + 1;
@@ -2690,7 +2691,7 @@ StartupXLOG(void)
{
if (wasShutdown)
ereport(PANIC,
- (errmsg("invalid redo/undo record in shutdown checkpoint")));
+ (errmsg("invalid redo/undo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
@@ -2699,7 +2700,7 @@ StartupXLOG(void)
/* REDO */
if (InRecovery)
{
- int rmid;
+ int rmid;
ereport(LOG,
(errmsg("database system was not properly shut down; "
@@ -2791,8 +2792,8 @@ StartupXLOG(void)
/*
* Tricky point here: readBuf contains the *last* block that the
- * LastRec record spans, not the one it starts in. The last block
- * is indeed the one we want to use.
+ * LastRec record spans, not the one it starts in. The last block is
+ * indeed the one we want to use.
*/
Assert(readOff == (XLogCtl->xlblocks[0].xrecoff - BLCKSZ) % XLogSegSize);
memcpy((char *) Insert->currpage, readBuf, BLCKSZ);
@@ -2818,11 +2819,12 @@ StartupXLOG(void)
else
{
/*
- * Whenever Write.LogwrtResult points to exactly the end of a page,
- * Write.curridx must point to the *next* page (see XLogWrite()).
+ * Whenever Write.LogwrtResult points to exactly the end of a
+ * page, Write.curridx must point to the *next* page (see
+ * XLogWrite()).
*
- * Note: it might seem we should do AdvanceXLInsertBuffer() here,
- * but we can't since we haven't yet determined the correct StartUpID
+ * Note: it might seem we should do AdvanceXLInsertBuffer() here, but
+ * we can't since we haven't yet determined the correct StartUpID
* to put into the new page's header. The first actual attempt to
* insert a log record will advance the insert state.
*/
@@ -2859,7 +2861,7 @@ StartupXLOG(void)
if (InRecovery)
{
- int rmid;
+ int rmid;
/*
* Allow resource managers to do any required cleanup.
@@ -2885,14 +2887,15 @@ StartupXLOG(void)
ThisStartUpID = ControlFile->checkPointCopy.ThisStartUpID;
/*
- * Perform a new checkpoint to update our recovery activity to disk.
+ * Perform a new checkpoint to update our recovery activity to
+ * disk.
*
* Note that we write a shutdown checkpoint. This is correct since
- * the records following it will use SUI one more than what is shown
- * in the checkpoint's ThisStartUpID.
+ * the records following it will use SUI one more than what is
+ * shown in the checkpoint's ThisStartUpID.
*
- * In case we had to use the secondary checkpoint, make sure that
- * it will still be shown as the secondary checkpoint after this
+ * In case we had to use the secondary checkpoint, make sure that it
+ * will still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@@ -2907,10 +2910,10 @@ StartupXLOG(void)
else
{
/*
- * If we are not doing recovery, then we saw a checkpoint with nothing
- * after it, and we can safely use StartUpID equal to one more than
- * the checkpoint's SUI. But just for paranoia's sake, check against
- * pg_control too.
+ * If we are not doing recovery, then we saw a checkpoint with
+ * nothing after it, and we can safely use StartUpID equal to one
+ * more than the checkpoint's SUI. But just for paranoia's sake,
+ * check against pg_control too.
*/
ThisStartUpID = checkPoint.ThisStartUpID;
if (ThisStartUpID < ControlFile->checkPointCopy.ThisStartUpID)
@@ -2923,7 +2926,8 @@ StartupXLOG(void)
PreallocXlogFiles(EndOfLog);
/*
- * Advance StartUpID to one more than the highest value used previously.
+ * Advance StartUpID to one more than the highest value used
+ * previously.
*/
ThisStartUpID++;
XLogCtl->ThisStartUpID = ThisStartUpID;
@@ -2973,9 +2977,9 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (!XRecOffIsValid(RecPtr.xrecoff))
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint link in control file",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
@@ -2984,34 +2988,34 @@ ReadCheckpointRecord(XLogRecPtr RecPtr,
if (record == NULL)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_rmid != RM_XLOG_ID)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
- (errmsg("invalid resource manager id in %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ /* translator: %s is "primary" or "secondary" */
+ (errmsg("invalid resource manager id in %s checkpoint record",
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_info != XLOG_CHECKPOINT_SHUTDOWN &&
record->xl_info != XLOG_CHECKPOINT_ONLINE)
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid xl_info in %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_len != sizeof(CheckPoint))
{
ereport(LOG,
- /* translator: %s is "primary" or "secondary" */
+ /* translator: %s is "primary" or "secondary" */
(errmsg("invalid length of %s checkpoint record",
- (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
return record;
@@ -3112,10 +3116,11 @@ CreateCheckPoint(bool shutdown, bool force)
if (MyXactMadeXLogEntry)
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("checkpoint cannot be made inside transaction block")));
+ errmsg("checkpoint cannot be made inside transaction block")));
/*
- * Acquire CheckpointLock to ensure only one checkpoint happens at a time.
+ * Acquire CheckpointLock to ensure only one checkpoint happens at a
+ * time.
*
* The CheckpointLock can be held for quite a while, which is not good
* because we won't respond to a cancel/die request while waiting for
@@ -3149,14 +3154,15 @@ CreateCheckPoint(bool shutdown, bool force)
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
/*
- * If this isn't a shutdown or forced checkpoint, and we have not inserted
- * any XLOG records since the start of the last checkpoint, skip the
- * checkpoint. The idea here is to avoid inserting duplicate checkpoints
- * when the system is idle. That wastes log space, and more importantly it
- * exposes us to possible loss of both current and previous checkpoint
- * records if the machine crashes just as we're writing the update.
- * (Perhaps it'd make even more sense to checkpoint only when the previous
- * checkpoint record is in a different xlog page?)
+ * If this isn't a shutdown or forced checkpoint, and we have not
+ * inserted any XLOG records since the start of the last checkpoint,
+ * skip the checkpoint. The idea here is to avoid inserting duplicate
+ * checkpoints when the system is idle. That wastes log space, and
+ * more importantly it exposes us to possible loss of both current and
+ * previous checkpoint records if the machine crashes just as we're
+ * writing the update. (Perhaps it'd make even more sense to
+ * checkpoint only when the previous checkpoint record is in a
+ * different xlog page?)
*
* We have to make two tests to determine that nothing has happened since
* the start of the last checkpoint: current insertion point must
@@ -3204,12 +3210,13 @@ CreateCheckPoint(bool shutdown, bool force)
* Here we update the shared RedoRecPtr for future XLogInsert calls;
* this must be done while holding the insert lock AND the info_lck.
*
- * Note: if we fail to complete the checkpoint, RedoRecPtr will be
- * left pointing past where it really needs to point. This is okay;
- * the only consequence is that XLogInsert might back up whole buffers
- * that it didn't really need to. We can't postpone advancing RedoRecPtr
- * because XLogInserts that happen while we are dumping buffers must
- * assume that their buffer changes are not included in the checkpoint.
+ * Note: if we fail to complete the checkpoint, RedoRecPtr will be left
+ * pointing past where it really needs to point. This is okay; the
+ * only consequence is that XLogInsert might back up whole buffers
+ * that it didn't really need to. We can't postpone advancing
+ * RedoRecPtr because XLogInserts that happen while we are dumping
+ * buffers must assume that their buffer changes are not included in
+ * the checkpoint.
*/
{
/* use volatile pointer to prevent code rearrangement */
@@ -3538,15 +3545,15 @@ assign_xlog_sync_method(const char *method, bool doit, bool interactive)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
if (open_sync_bit != new_sync_bit)
{
if (close(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
}
@@ -3570,16 +3577,16 @@ issue_xlog_fsync(void)
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#ifdef HAVE_FDATASYNC
case SYNC_METHOD_FDATASYNC:
if (pg_fdatasync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("fdatasync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg)));
+ errmsg("fdatasync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#endif
case SYNC_METHOD_OPEN:
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index b02fa775de..328f2ab9b3 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.163 2003/07/27 21:49:53 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.164 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -238,7 +238,7 @@ BootstrapMain(int argc, char *argv[])
*
* If we are running under the postmaster, this is done already.
*/
- if (!IsUnderPostmaster /* when exec || ExecBackend */)
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
MemoryContextInit();
/*
@@ -247,7 +247,7 @@ BootstrapMain(int argc, char *argv[])
/* Set defaults, to be overriden by explicit options below */
dbname = NULL;
- if (!IsUnderPostmaster /* when exec || ExecBackend*/)
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
{
InitializeGUCOptions();
potential_DataDir = getenv("PGDATA"); /* Null if no PGDATA
@@ -285,22 +285,22 @@ BootstrapMain(int argc, char *argv[])
xlogop = atoi(optarg);
break;
case 'p':
- {
- /* indicates fork from postmaster */
+ {
+ /* indicates fork from postmaster */
#ifdef EXEC_BACKEND
- char *p;
-
- sscanf(optarg, "%d,%p,", &UsedShmemSegID, &UsedShmemSegAddr);
- p = strchr(optarg, ',');
- if (p)
- p = strchr(p+1, ',');
- if (p)
- dbname = strdup(p+1);
+ char *p;
+
+ sscanf(optarg, "%d,%p,", &UsedShmemSegID, &UsedShmemSegAddr);
+ p = strchr(optarg, ',');
+ if (p)
+ p = strchr(p + 1, ',');
+ if (p)
+ dbname = strdup(p + 1);
#else
- dbname = strdup(optarg);
+ dbname = strdup(optarg);
#endif
- break;
- }
+ break;
+ }
case 'B':
SetConfigOption("shared_buffers", optarg, PGC_POSTMASTER, PGC_S_ARGV);
break;
@@ -346,12 +346,10 @@ BootstrapMain(int argc, char *argv[])
usage();
- if (IsUnderPostmaster && ExecBackend && MyProc /* ordinary backend */)
- {
+ if (IsUnderPostmaster && ExecBackend && MyProc /* ordinary backend */ )
AttachSharedMemoryAndSemaphores();
- }
-
- if (!IsUnderPostmaster /* when exec || ExecBackend*/)
+
+ if (!IsUnderPostmaster /* when exec || ExecBackend */ )
{
if (!potential_DataDir)
{
@@ -473,8 +471,8 @@ BootstrapMain(int argc, char *argv[])
/*
* In NOP mode, all we really want to do is create shared memory and
- * semaphores (just to prove we can do it with the current GUC settings).
- * So, quit now.
+ * semaphores (just to prove we can do it with the current GUC
+ * settings). So, quit now.
*/
if (xlogop == BS_XLOG_NOP)
proc_exit(0);
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 77b1d3b2d7..7ace67de6b 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.85 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.86 2003/08/04 00:43:16 momjian Exp $
*
* NOTES
* See acl.h.
@@ -97,37 +97,40 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
if (grantee->username)
{
- aclitem.ai_grantee = get_usesysid(grantee->username);
+ aclitem. ai_grantee = get_usesysid(grantee->username);
+
idtype = ACL_IDTYPE_UID;
}
else if (grantee->groupname)
{
- aclitem.ai_grantee = get_grosysid(grantee->groupname);
+ aclitem. ai_grantee = get_grosysid(grantee->groupname);
+
idtype = ACL_IDTYPE_GID;
}
else
{
- aclitem.ai_grantee = ACL_ID_WORLD;
+ aclitem. ai_grantee = ACL_ID_WORLD;
+
idtype = ACL_IDTYPE_WORLD;
}
/*
* Grant options can only be granted to individual users, not
- * groups or public. The reason is that if a user would
- * re-grant a privilege that he held through a group having a
- * grant option, and later the user is removed from the group,
- * the situation is impossible to clean up.
+ * groups or public. The reason is that if a user would re-grant
+ * a privilege that he held through a group having a grant option,
+ * and later the user is removed from the group, the situation is
+ * impossible to clean up.
*/
if (is_grant && idtype != ACL_IDTYPE_UID && grant_option)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to individual users")));
- aclitem.ai_grantor = GetUserId();
+ aclitem. ai_grantor = GetUserId();
ACLITEM_SET_PRIVS_IDTYPE(aclitem,
- (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
- (grant_option || !is_grant) ? privileges : ACL_NO_RIGHTS,
+ (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
+ (grant_option || !is_grant) ? privileges : ACL_NO_RIGHTS,
idtype);
new_acl = aclinsert3(new_acl, &aclitem, modechg, behavior);
@@ -247,7 +250,7 @@ ExecuteGrantStmt_Relation(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -346,7 +349,7 @@ ExecuteGrantStmt_Database(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -443,7 +446,7 @@ ExecuteGrantStmt_Function(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -543,7 +546,7 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -619,7 +622,7 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
pg_namespace_tuple = (Form_pg_namespace) GETSTRUCT(tuple);
if (stmt->is_grant
- && !pg_namespace_ownercheck(HeapTupleGetOid(tuple), GetUserId())
+ && !pg_namespace_ownercheck(HeapTupleGetOid(tuple), GetUserId())
&& pg_namespace_aclcheck(HeapTupleGetOid(tuple), GetUserId(), ACL_GRANT_OPTION_FOR(privileges)) != ACLCHECK_OK)
aclcheck_error(ACLCHECK_NO_PRIV, ACL_KIND_NAMESPACE,
nspname);
@@ -640,7 +643,7 @@ ExecuteGrantStmt_Namespace(GrantStmt *stmt)
new_acl = merge_acl_with_grant(old_acl, stmt->is_grant,
stmt->grantees, privileges,
- stmt->grant_option, stmt->behavior);
+ stmt->grant_option, stmt->behavior);
/* finished building new ACL value, now insert it */
MemSet(values, 0, sizeof(values));
@@ -805,7 +808,7 @@ in_group(AclId uid, AclId gid)
static AclResult
aclcheck(Acl *acl, AclId userid, AclMode mode)
{
- AclItem *aidat;
+ AclItem *aidat;
int i,
num;
@@ -833,10 +836,10 @@ aclcheck(Acl *acl, AclId userid, AclMode mode)
if (aidat[i].ai_privs & mode)
return ACLCHECK_OK;
}
-
+
/*
- * See if he has the permission via any group (do this in a
- * separate pass to avoid expensive(?) lookups in pg_group)
+ * See if he has the permission via any group (do this in a separate
+ * pass to avoid expensive(?) lookups in pg_group)
*/
for (i = 0; i < num; i++)
if (ACLITEM_GET_IDTYPE(aidat[i]) == ACL_IDTYPE_GID
@@ -856,7 +859,7 @@ aclcheck(Acl *acl, AclId userid, AclMode mode)
* supply strings that might be already quoted.
*/
-static const char * const no_priv_msg[MAX_ACL_KIND] =
+static const char *const no_priv_msg[MAX_ACL_KIND] =
{
/* ACL_KIND_CLASS */
gettext_noop("permission denied for relation %s"),
@@ -878,7 +881,7 @@ static const char * const no_priv_msg[MAX_ACL_KIND] =
gettext_noop("permission denied for conversion %s")
};
-static const char * const not_owner_msg[MAX_ACL_KIND] =
+static const char *const not_owner_msg[MAX_ACL_KIND] =
{
/* ACL_KIND_CLASS */
gettext_noop("must be owner of relation %s"),
@@ -972,7 +975,7 @@ pg_class_aclcheck(Oid table_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation with OID %u does not exist", table_oid)));
+ errmsg("relation with OID %u does not exist", table_oid)));
/*
* Deny anyone permission to update a system catalog unless
@@ -1124,7 +1127,7 @@ pg_proc_aclcheck(Oid proc_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function with OID %u does not exist", proc_oid)));
+ errmsg("function with OID %u does not exist", proc_oid)));
aclDatum = SysCacheGetAttr(PROCOID, tuple, Anum_pg_proc_proacl,
&isNull);
@@ -1179,7 +1182,7 @@ pg_language_aclcheck(Oid lang_oid, AclId userid, AclMode mode)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("language with OID %u does not exist", lang_oid)));
+ errmsg("language with OID %u does not exist", lang_oid)));
aclDatum = SysCacheGetAttr(LANGOID, tuple, Anum_pg_language_lanacl,
&isNull);
@@ -1288,7 +1291,7 @@ pg_class_ownercheck(Oid class_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation with OID %u does not exist", class_oid)));
+ errmsg("relation with OID %u does not exist", class_oid)));
owner_id = ((Form_pg_class) GETSTRUCT(tuple))->relowner;
@@ -1344,7 +1347,7 @@ pg_oper_ownercheck(Oid oper_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("operator with OID %u does not exist", oper_oid)));
+ errmsg("operator with OID %u does not exist", oper_oid)));
owner_id = ((Form_pg_operator) GETSTRUCT(tuple))->oprowner;
@@ -1372,7 +1375,7 @@ pg_proc_ownercheck(Oid proc_oid, AclId userid)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function with OID %u does not exist", proc_oid)));
+ errmsg("function with OID %u does not exist", proc_oid)));
owner_id = ((Form_pg_proc) GETSTRUCT(tuple))->proowner;
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 2cdf4bc229..251fb82d81 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.28 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/dependency.c,v 1.29 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,8 +93,8 @@ static Oid object_classes[MAX_OCLASS];
static void findAutoDeletableObjects(const ObjectAddress *object,
- ObjectAddresses *oktodelete,
- Relation depRel);
+ ObjectAddresses *oktodelete,
+ Relation depRel);
static bool recursiveDeletion(const ObjectAddress *object,
DropBehavior behavior,
int msglevel,
@@ -102,11 +102,11 @@ static bool recursiveDeletion(const ObjectAddress *object,
ObjectAddresses *oktodelete,
Relation depRel);
static bool deleteDependentObjects(const ObjectAddress *object,
- const char *objDescription,
- DropBehavior behavior,
- int msglevel,
- ObjectAddresses *oktodelete,
- Relation depRel);
+ const char *objDescription,
+ DropBehavior behavior,
+ int msglevel,
+ ObjectAddresses *oktodelete,
+ Relation depRel);
static void doDeletion(const ObjectAddress *object);
static bool find_expr_references_walker(Node *node,
find_expr_references_context *context);
@@ -118,7 +118,7 @@ static void add_object_address(ObjectClasses oclass, Oid objectId, int32 subId,
static void add_exact_object_address(const ObjectAddress *object,
ObjectAddresses *addrs);
static bool object_address_present(const ObjectAddress *object,
- ObjectAddresses *addrs);
+ ObjectAddresses *addrs);
static void term_object_addresses(ObjectAddresses *addrs);
static void init_object_classes(void);
static ObjectClasses getObjectClass(const ObjectAddress *object);
@@ -158,9 +158,9 @@ performDeletion(const ObjectAddress *object,
/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
- * dependencies from the target object. These should be deleted silently,
- * even if the actual deletion pass first reaches one of them via a
- * non-auto dependency.
+ * dependencies from the target object. These should be deleted
+ * silently, even if the actual deletion pass first reaches one of
+ * them via a non-auto dependency.
*/
init_object_addresses(&oktodelete);
@@ -170,8 +170,8 @@ performDeletion(const ObjectAddress *object,
NULL, &oktodelete, depRel))
ereport(ERROR,
(errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
- errmsg("cannot drop %s because other objects depend on it",
- objDescription),
+ errmsg("cannot drop %s because other objects depend on it",
+ objDescription),
errhint("Use DROP ... CASCADE to drop the dependent objects too.")));
term_object_addresses(&oktodelete);
@@ -184,7 +184,7 @@ performDeletion(const ObjectAddress *object,
/*
* deleteWhatDependsOn: attempt to drop everything that depends on the
- * specified object, though not the object itself. Behavior is always
+ * specified object, though not the object itself. Behavior is always
* CASCADE.
*
* This is currently used only to clean out the contents of a schema
@@ -212,9 +212,9 @@ deleteWhatDependsOn(const ObjectAddress *object,
/*
* Construct a list of objects that are reachable by AUTO or INTERNAL
- * dependencies from the target object. These should be deleted silently,
- * even if the actual deletion pass first reaches one of them via a
- * non-auto dependency.
+ * dependencies from the target object. These should be deleted
+ * silently, even if the actual deletion pass first reaches one of
+ * them via a non-auto dependency.
*/
init_object_addresses(&oktodelete);
@@ -266,9 +266,9 @@ findAutoDeletableObjects(const ObjectAddress *object,
ObjectAddress otherObject;
/*
- * If this object is already in oktodelete, then we already visited it;
- * don't do so again (this prevents infinite recursion if there's a loop
- * in pg_depend). Otherwise, add it.
+ * If this object is already in oktodelete, then we already visited
+ * it; don't do so again (this prevents infinite recursion if there's
+ * a loop in pg_depend). Otherwise, add it.
*/
if (object_address_present(object, oktodelete))
return;
@@ -276,8 +276,8 @@ findAutoDeletableObjects(const ObjectAddress *object,
/*
* Scan pg_depend records that link to this object, showing the things
- * that depend on it. For each one that is AUTO or INTERNAL, visit the
- * referencing object.
+ * that depend on it. For each one that is AUTO or INTERNAL, visit
+ * the referencing object.
*
* When dropping a whole object (subId = 0), find pg_depend records for
* its sub-objects too.
@@ -319,6 +319,7 @@ findAutoDeletableObjects(const ObjectAddress *object,
findAutoDeletableObjects(&otherObject, oktodelete, depRel);
break;
case DEPENDENCY_PIN:
+
/*
* For a PIN dependency we just ereport immediately; there
* won't be any others to examine, and we aren't ever
@@ -461,11 +462,11 @@ recursiveDeletion(const ObjectAddress *object,
char *otherObjDesc = getObjectDescription(&otherObject);
ereport(ERROR,
- (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
- errmsg("cannot drop %s because %s requires it",
- objDescription, otherObjDesc),
- errhint("You may drop %s instead.",
- otherObjDesc)));
+ (errcode(ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST),
+ errmsg("cannot drop %s because %s requires it",
+ objDescription, otherObjDesc),
+ errhint("You may drop %s instead.",
+ otherObjDesc)));
}
/*
@@ -559,10 +560,9 @@ recursiveDeletion(const ObjectAddress *object,
/*
* Step 2: scan pg_depend records that link to this object, showing
* the things that depend on it. Recursively delete those things.
- * Note it's important to delete the dependent objects
- * before the referenced one, since the deletion routines might do
- * things like try to update the pg_class record when deleting a check
- * constraint.
+ * Note it's important to delete the dependent objects before the
+ * referenced one, since the deletion routines might do things like
+ * try to update the pg_class record when deleting a check constraint.
*/
if (!deleteDependentObjects(object, objDescription,
behavior, msglevel,
@@ -674,11 +674,12 @@ deleteDependentObjects(const ObjectAddress *object,
switch (foundDep->deptype)
{
case DEPENDENCY_NORMAL:
+
/*
* Perhaps there was another dependency path that would
- * have allowed silent deletion of the otherObject, had
- * we only taken that path first.
- * In that case, act like this link is AUTO, too.
+ * have allowed silent deletion of the otherObject, had we
+ * only taken that path first. In that case, act like this
+ * link is AUTO, too.
*/
if (object_address_present(&otherObject, oktodelete))
ereport(DEBUG2,
@@ -872,7 +873,7 @@ recordDependencyOnExpr(const ObjectAddress *depender,
* recordDependencyOnSingleRelExpr - find expression dependencies
*
* As above, but only one relation is expected to be referenced (with
- * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
+ * varno = 1 and varlevelsup = 0). Pass the relation OID instead of a
* range table. An additional frammish is that dependencies on that
* relation (or its component columns) will be marked with 'self_behavior',
* whereas 'behavior' is used for everything else.
@@ -1001,7 +1002,7 @@ find_expr_references_walker(Node *node,
else if (rte->rtekind == RTE_JOIN)
{
/* Scan join output column to add references to join inputs */
- List *save_rtables;
+ List *save_rtables;
/* We must make the context appropriate for join's level */
save_rtables = context->rtables;
@@ -1026,7 +1027,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, OpExpr))
{
- OpExpr *opexpr = (OpExpr *) node;
+ OpExpr *opexpr = (OpExpr *) node;
add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
&context->addrs);
@@ -1034,7 +1035,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, DistinctExpr))
{
- DistinctExpr *distinctexpr = (DistinctExpr *) node;
+ DistinctExpr *distinctexpr = (DistinctExpr *) node;
add_object_address(OCLASS_OPERATOR, distinctexpr->opno, 0,
&context->addrs);
@@ -1042,7 +1043,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, ScalarArrayOpExpr))
{
- ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
+ ScalarArrayOpExpr *opexpr = (ScalarArrayOpExpr *) node;
add_object_address(OCLASS_OPERATOR, opexpr->opno, 0,
&context->addrs);
@@ -1066,7 +1067,7 @@ find_expr_references_walker(Node *node,
}
if (IsA(node, SubLink))
{
- SubLink *sublink = (SubLink *) node;
+ SubLink *sublink = (SubLink *) node;
List *opid;
foreach(opid, sublink->operOids)
@@ -1092,7 +1093,8 @@ find_expr_references_walker(Node *node,
* Add whole-relation refs for each plain relation mentioned in
* the subquery's rtable. (Note: query_tree_walker takes care of
* recursing into RTE_FUNCTION and RTE_SUBQUERY RTEs, so no need
- * to do that here. But keep it from looking at join alias lists.)
+ * to do that here. But keep it from looking at join alias
+ * lists.)
*/
foreach(rtable, query->rtable)
{
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 15dbc50a13..c8a411646f 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.249 2003/07/29 17:21:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/heap.c,v 1.250 2003/08/04 00:43:16 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -418,8 +418,8 @@ CheckAttributeType(const char *attname, Oid atttypid)
* Warn user, but don't fail, if column to be created has UNKNOWN type
* (usually as a result of a 'retrieve into' - jolly)
*
- * Refuse any attempt to create a pseudo-type column or one that uses
- * a standalone composite type. (Eventually we should probably refuse
+ * Refuse any attempt to create a pseudo-type column or one that uses a
+ * standalone composite type. (Eventually we should probably refuse
* all references to complex types, but for now there's still some
* Berkeley-derived code that thinks it can do this...)
*/
@@ -439,7 +439,7 @@ CheckAttributeType(const char *attname, Oid atttypid)
}
else if (att_typtype == 'c')
{
- Oid typrelid = get_typ_typrelid(atttypid);
+ Oid typrelid = get_typ_typrelid(atttypid);
if (get_rel_relkind(typrelid) == RELKIND_COMPOSITE_TYPE)
ereport(ERROR,
@@ -975,12 +975,13 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
attStruct->attisdropped = true;
/*
- * Set the type OID to invalid. A dropped attribute's type link cannot
- * be relied on (once the attribute is dropped, the type might be too).
- * Fortunately we do not need the type row --- the only really essential
- * information is the type's typlen and typalign, which are preserved in
- * the attribute's attlen and attalign. We set atttypid to zero here
- * as a means of catching code that incorrectly expects it to be valid.
+ * Set the type OID to invalid. A dropped attribute's type link
+ * cannot be relied on (once the attribute is dropped, the type might
+ * be too). Fortunately we do not need the type row --- the only
+ * really essential information is the type's typlen and typalign,
+ * which are preserved in the attribute's attlen and attalign. We set
+ * atttypid to zero here as a means of catching code that incorrectly
+ * expects it to be valid.
*/
attStruct->atttypid = InvalidOid;
@@ -1401,7 +1402,7 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
' ',
' ',
' ',
- InvalidOid, /* no associated index */
+ InvalidOid, /* no associated index */
expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
ccsrc); /* Source form check constraint */
@@ -1568,8 +1569,8 @@ AddRelationRawConstraints(Relation rel,
if (strcmp(cdef2->name, ccname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("CHECK constraint \"%s\" already exists",
- ccname)));
+ errmsg("CHECK constraint \"%s\" already exists",
+ ccname)));
}
}
else
@@ -1639,7 +1640,7 @@ AddRelationRawConstraints(Relation rel,
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use sub-select in CHECK constraint")));
+ errmsg("cannot use sub-select in CHECK constraint")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
@@ -1750,7 +1751,7 @@ cookDefault(ParseState *pstate,
if (contain_var_clause(expr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("cannot use column references in DEFAULT clause")));
+ errmsg("cannot use column references in DEFAULT clause")));
/*
* It can't return a set either.
@@ -1773,9 +1774,9 @@ cookDefault(ParseState *pstate,
errmsg("cannot use aggregate in DEFAULT clause")));
/*
- * Coerce the expression to the correct type and typmod, if given. This
- * should match the parser's processing of non-defaulted expressions ---
- * see updateTargetListEntry().
+ * Coerce the expression to the correct type and typmod, if given.
+ * This should match the parser's processing of non-defaulted
+ * expressions --- see updateTargetListEntry().
*/
if (OidIsValid(atttypid))
{
@@ -1793,7 +1794,7 @@ cookDefault(ParseState *pstate,
attname,
format_type_be(atttypid),
format_type_be(type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
return expr;
@@ -1952,7 +1953,7 @@ RelationTruncateIndexes(Oid heapId)
/*
* index_build will close both the heap and index relations (but
- * not give up the locks we hold on them). We're done with this
+ * not give up the locks we hold on them). We're done with this
* index, but we must re-open the heap rel.
*/
heapRelation = heap_open(heapId, NoLock);
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index cb5a78c3da..0b03c630b5 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.212 2003/07/21 01:59:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.213 2003/08/04 00:43:16 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -65,8 +65,8 @@
/* non-export function prototypes */
static TupleDesc ConstructTupleDescriptor(Relation heapRelation,
- IndexInfo *indexInfo,
- Oid *classObjectId);
+ IndexInfo *indexInfo,
+ Oid *classObjectId);
static void UpdateRelationRelation(Relation indexRelation);
static void InitializeAttributeOids(Relation indexRelation,
int numatts, Oid indexoid);
@@ -124,7 +124,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/*
* For simple index columns, we copy the pg_attribute row from the
- * parent relation and modify it as necessary. For expressions we
+ * parent relation and modify it as necessary. For expressions we
* have to cons up a pg_attribute row the hard way.
*/
for (i = 0; i < numatts; i++)
@@ -149,7 +149,7 @@ ConstructTupleDescriptor(Relation heapRelation,
* here we are indexing on a system attribute (-1...-n)
*/
from = SystemAttributeDefinition(atnum,
- heapRelation->rd_rel->relhasoids);
+ heapRelation->rd_rel->relhasoids);
}
else
{
@@ -162,8 +162,8 @@ ConstructTupleDescriptor(Relation heapRelation,
}
/*
- * now that we've determined the "from", let's copy the tuple desc
- * data...
+ * now that we've determined the "from", let's copy the tuple
+ * desc data...
*/
memcpy(to, from, ATTRIBUTE_TUPLE_SIZE);
@@ -185,7 +185,7 @@ ConstructTupleDescriptor(Relation heapRelation,
/* Expressional index */
Node *indexkey;
- if (indexprs == NIL) /* shouldn't happen */
+ if (indexprs == NIL) /* shouldn't happen */
elog(ERROR, "too few entries in indexprs list");
indexkey = (Node *) lfirst(indexprs);
indexprs = lnext(indexprs);
@@ -197,7 +197,8 @@ ConstructTupleDescriptor(Relation heapRelation,
sprintf(NameStr(to->attname), "pg_expression_%d", i + 1);
/*
- * Lookup the expression type in pg_type for the type length etc.
+ * Lookup the expression type in pg_type for the type length
+ * etc.
*/
keyType = exprType(indexkey);
tuple = SearchSysCache(TYPEOID,
@@ -534,7 +535,7 @@ index_create(Oid heapRelationId,
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared indexes cannot be created after initdb")));
+ errmsg("shared indexes cannot be created after initdb")));
if (get_relname_relid(indexRelationName, namespaceId))
ereport(ERROR,
@@ -668,7 +669,7 @@ index_create(Oid heapRelationId,
' ',
' ',
' ',
- InvalidOid, /* no associated index */
+ InvalidOid, /* no associated index */
NULL, /* no check constraint */
NULL,
NULL);
@@ -709,7 +710,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Expressions)
{
recordDependencyOnSingleRelExpr(&myself,
- (Node *) indexInfo->ii_Expressions,
+ (Node *) indexInfo->ii_Expressions,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@@ -719,7 +720,7 @@ index_create(Oid heapRelationId,
if (indexInfo->ii_Predicate)
{
recordDependencyOnSingleRelExpr(&myself,
- (Node *) indexInfo->ii_Predicate,
+ (Node *) indexInfo->ii_Predicate,
heapRelationId,
DEPENDENCY_NORMAL,
DEPENDENCY_AUTO);
@@ -831,8 +832,8 @@ index_drop(Oid indexId)
/*
* We are presently too lazy to attempt to compute the new correct
- * value of relhasindex (the next VACUUM will fix it if necessary).
- * So there is no need to update the pg_class tuple for the owning
+ * value of relhasindex (the next VACUUM will fix it if necessary). So
+ * there is no need to update the pg_class tuple for the owning
* relation. But we must send out a shared-cache-inval notice on the
* owning relation to ensure other backends update their relcache
* lists of indexes.
@@ -958,7 +959,7 @@ FormIndexDatum(IndexInfo *indexInfo,
if (indexprs == NIL)
elog(ERROR, "wrong number of index expressions");
iDatum = ExecEvalExprSwitchContext((ExprState *) lfirst(indexprs),
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
indexprs = lnext(indexprs);
@@ -1160,7 +1161,7 @@ setNewRelfilenode(Relation relation)
if (!in_place_upd)
{
tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
}
else
@@ -1170,7 +1171,7 @@ setNewRelfilenode(Relation relation)
ScanKeyEntryInitialize(&key[0], 0,
ObjectIdAttributeNumber,
F_OIDEQ,
- ObjectIdGetDatum(RelationGetRelid(relation)));
+ ObjectIdGetDatum(RelationGetRelid(relation)));
pg_class_scan = heap_beginscan(pg_class, SnapshotNow, 1, key);
tuple = heap_getnext(pg_class_scan, ForwardScanDirection);
@@ -1325,9 +1326,9 @@ UpdateStats(Oid relid, double reltuples)
}
/*
- * Update statistics in pg_class, if they changed. (Avoiding an
- * unnecessary update is not just a tiny performance improvement;
- * it also reduces the window wherein concurrent CREATE INDEX commands
+ * Update statistics in pg_class, if they changed. (Avoiding an
+ * unnecessary update is not just a tiny performance improvement; it
+ * also reduces the window wherein concurrent CREATE INDEX commands
* may conflict.)
*/
rd_rel = (Form_pg_class) GETSTRUCT(tuple);
@@ -1338,8 +1339,9 @@ UpdateStats(Oid relid, double reltuples)
if (in_place_upd)
{
/*
- * At bootstrap time, we don't need to worry about concurrency or
- * visibility of changes, so we cheat. Also cheat if REINDEX.
+ * At bootstrap time, we don't need to worry about concurrency
+ * or visibility of changes, so we cheat. Also cheat if
+ * REINDEX.
*/
LockBuffer(pg_class_scan->rs_cbuf, BUFFER_LOCK_EXCLUSIVE);
rd_rel->relpages = (int32) relpages;
@@ -1367,7 +1369,7 @@ UpdateStats(Oid relid, double reltuples)
/*
* We shouldn't have to do this, but we do... Modify the reldesc in
* place with the new values so that the cache contains the latest
- * copy. (XXX is this really still necessary? The relcache will get
+ * copy. (XXX is this really still necessary? The relcache will get
* fixed at next CommandCounterIncrement, so why bother here?)
*/
whichRel->rd_rel->relpages = (int32) relpages;
@@ -1454,8 +1456,8 @@ IndexBuildHeapScan(Relation heapRelation,
heapDescriptor = RelationGetDescr(heapRelation);
/*
- * Need an EState for evaluation of index expressions
- * and partial-index predicates.
+ * Need an EState for evaluation of index expressions and
+ * partial-index predicates.
*/
estate = CreateExecutorState();
econtext = GetPerTupleExprContext(estate);
@@ -1463,7 +1465,8 @@ IndexBuildHeapScan(Relation heapRelation,
/*
* If this is a predicate (partial) index, we will need to evaluate
* the predicate using ExecQual, which requires the current tuple to
- * be in a slot of a TupleTable. Likewise if there are any expressions.
+ * be in a slot of a TupleTable. Likewise if there are any
+ * expressions.
*/
if (indexInfo->ii_Predicate != NIL || indexInfo->ii_Expressions != NIL)
{
@@ -1741,15 +1744,15 @@ reindex_index(Oid indexId, bool force, bool inplace)
* it's a nailed-in-cache index, we must do inplace processing because
* the relcache can't cope with changing its relfilenode.
*
- * In either of these cases, we are definitely processing a system
- * index, so we'd better be ignoring system indexes.
+ * In either of these cases, we are definitely processing a system index,
+ * so we'd better be ignoring system indexes.
*/
if (iRel->rd_rel->relisshared)
{
if (!IsIgnoringSystemIndexes())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("the target relation %u is shared", indexId)));
+ errmsg("the target relation %u is shared", indexId)));
inplace = true;
}
if (iRel->rd_isnailed)
@@ -1757,7 +1760,7 @@ reindex_index(Oid indexId, bool force, bool inplace)
if (!IsIgnoringSystemIndexes())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("the target relation %u is nailed", indexId)));
+ errmsg("the target relation %u is nailed", indexId)));
inplace = true;
}
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 28b9859677..6a39fc6901 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.55 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/namespace.c,v 1.56 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -164,7 +164,7 @@ RangeVarGetRelid(const RangeVar *relation, bool failOK)
if (strcmp(relation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ errmsg("cross-database references are not implemented")));
}
if (relation->schemaname)
@@ -217,7 +217,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (strcmp(newRelation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ errmsg("cross-database references are not implemented")));
}
if (newRelation->istemp)
@@ -226,7 +226,7 @@ RangeVarGetCreationNamespace(const RangeVar *newRelation)
if (newRelation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("TEMP tables may not specify a schema name")));
+ errmsg("TEMP tables may not specify a schema name")));
/* Initialize temp namespace if first time through */
if (!OidIsValid(myTempNamespace))
InitTempTableNamespace();
@@ -1057,7 +1057,7 @@ OpclassIsVisible(Oid opcid)
Oid
ConversionGetConid(const char *conname)
{
- Oid conid;
+ Oid conid;
List *lptr;
recomputeNamespacePath();
@@ -1115,11 +1115,11 @@ ConversionIsVisible(Oid conid)
/*
* If it is in the path, it might still not be visible; it could
* be hidden by another conversion of the same name earlier in the
- * path. So we must do a slow check to see if this conversion would
- * be found by ConversionGetConid.
+ * path. So we must do a slow check to see if this conversion
+ * would be found by ConversionGetConid.
*/
char *conname = NameStr(conform->conname);
-
+
visible = (ConversionGetConid(conname) == conid);
}
@@ -1164,13 +1164,13 @@ DeconstructQualifiedName(List *names,
if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented")));
+ errmsg("cross-database references are not implemented")));
break;
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper qualified name (too many dotted names): %s",
- NameListToString(names))));
+ errmsg("improper qualified name (too many dotted names): %s",
+ NameListToString(names))));
break;
}
@@ -1281,8 +1281,8 @@ makeRangeVarFromNameList(List *names)
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("improper relation name (too many dotted names): %s",
- NameListToString(names))));
+ errmsg("improper relation name (too many dotted names): %s",
+ NameListToString(names))));
break;
}
@@ -1720,8 +1720,8 @@ RemoveTempRelations(Oid tempNamespaceId)
/*
* We want to get rid of everything in the target namespace, but not
- * the namespace itself (deleting it only to recreate it later would be
- * a waste of cycles). We do this by finding everything that has a
+ * the namespace itself (deleting it only to recreate it later would
+ * be a waste of cycles). We do this by finding everything that has a
* dependency on the namespace.
*/
object.classId = get_system_catalog_relid(NamespaceRelationName);
@@ -1797,7 +1797,7 @@ assign_search_path(const char *newval, bool doit, bool interactive)
0, 0, 0))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
- errmsg("schema \"%s\" does not exist", curname)));
+ errmsg("schema \"%s\" does not exist", curname)));
}
}
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 6837f3b922..779468ce21 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.61 2003/07/21 01:59:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.62 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,8 +29,8 @@
#include "utils/syscache.h"
-static Oid lookup_agg_function(List *fnName, int nargs, Oid *input_types,
- Oid *rettype);
+static Oid lookup_agg_function(List *fnName, int nargs, Oid *input_types,
+ Oid *rettype);
/*
@@ -79,7 +79,7 @@ AggregateCreate(const char *aggName,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot determine transition datatype"),
errdetail("An aggregate using ANYARRAY or ANYELEMENT as "
- "trans type must have one of them as its base type.")));
+ "trans type must have one of them as its base type.")));
/* handle transfn */
MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -99,8 +99,8 @@ AggregateCreate(const char *aggName,
* enforce_generic_type_consistency, if transtype isn't polymorphic)
* must exactly match declared transtype.
*
- * In the non-polymorphic-transtype case, it might be okay to allow
- * a rettype that's binary-coercible to transtype, but I'm not quite
+ * In the non-polymorphic-transtype case, it might be okay to allow a
+ * rettype that's binary-coercible to transtype, but I'm not quite
* convinced that it's either safe or useful. When transtype is
* polymorphic we *must* demand exact equality.
*/
@@ -151,9 +151,9 @@ AggregateCreate(const char *aggName,
Assert(OidIsValid(finaltype));
/*
- * If finaltype (i.e. aggregate return type) is polymorphic,
- * basetype must be polymorphic also, else parser will fail to deduce
- * result type. (Note: given the previous test on transtype and basetype,
+ * If finaltype (i.e. aggregate return type) is polymorphic, basetype
+ * must be polymorphic also, else parser will fail to deduce result
+ * type. (Note: given the previous test on transtype and basetype,
* this cannot happen, unless someone has snuck a finalfn definition
* into the catalogs that itself violates the rule against polymorphic
* result with no polymorphic input.)
@@ -163,8 +163,8 @@ AggregateCreate(const char *aggName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot determine result datatype"),
- errdetail("An aggregate returning ANYARRAY or ANYELEMENT "
- "must have one of them as its base type.")));
+ errdetail("An aggregate returning ANYARRAY or ANYELEMENT "
+ "must have one of them as its base type.")));
/*
* Everything looks okay. Try to create the pg_proc entry for the
@@ -278,21 +278,21 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("function %s does not exist",
- func_signature_string(fnName, nargs, input_types))));
+ func_signature_string(fnName, nargs, input_types))));
if (retset)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s returns a set",
- func_signature_string(fnName, nargs, input_types))));
+ func_signature_string(fnName, nargs, input_types))));
/*
- * If the given type(s) are all polymorphic, there's nothing we
- * can check. Otherwise, enforce consistency, and possibly refine
- * the result type.
+ * If the given type(s) are all polymorphic, there's nothing we can
+ * check. Otherwise, enforce consistency, and possibly refine the
+ * result type.
*/
if ((input_types[0] == ANYARRAYOID || input_types[0] == ANYELEMENTOID) &&
(nargs == 1 ||
- (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
+ (input_types[1] == ANYARRAYOID || input_types[1] == ANYELEMENTOID)))
{
/* nothing to check here */
}
@@ -305,8 +305,8 @@ lookup_agg_function(List *fnName,
}
/*
- * func_get_detail will find functions requiring run-time argument type
- * coercion, but nodeAgg.c isn't prepared to deal with that
+ * func_get_detail will find functions requiring run-time argument
+ * type coercion, but nodeAgg.c isn't prepared to deal with that
*/
if (true_oid_array[0] != ANYARRAYOID &&
true_oid_array[0] != ANYELEMENTOID &&
@@ -314,7 +314,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
if (nargs == 2 &&
true_oid_array[1] != ANYARRAYOID &&
@@ -323,7 +323,7 @@ lookup_agg_function(List *fnName,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
return fnOid;
}
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 89adeb57b9..ae8b7dec03 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.14 2003/07/21 01:59:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_constraint.c,v 1.15 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -195,7 +195,7 @@ CreateConstraintEntry(const char *constraintName,
/*
* Register auto dependency from constraint to owning domain
*/
- ObjectAddress domobject;
+ ObjectAddress domobject;
domobject.classId = RelOid_pg_type;
domobject.objectId = domainId;
@@ -234,8 +234,8 @@ CreateConstraintEntry(const char *constraintName,
if (OidIsValid(indexRelId))
{
/*
- * Register normal dependency on the unique index that supports
- * a foreign-key constraint.
+ * Register normal dependency on the unique index that supports a
+ * foreign-key constraint.
*/
ObjectAddress relobject;
@@ -438,8 +438,8 @@ RemoveConstraintById(Oid conId)
Relation rel;
/*
- * If the constraint is for a relation, open and exclusive-lock the
- * relation it's for.
+ * If the constraint is for a relation, open and exclusive-lock
+ * the relation it's for.
*/
rel = heap_open(con->conrelid, AccessExclusiveLock);
@@ -463,7 +463,7 @@ RemoveConstraintById(Oid conId)
con->conrelid);
classForm = (Form_pg_class) GETSTRUCT(relTup);
- if (classForm->relchecks == 0) /* should not happen */
+ if (classForm->relchecks == 0) /* should not happen */
elog(ERROR, "relation \"%s\" has relchecks = 0",
RelationGetRelationName(rel));
classForm->relchecks--;
@@ -483,16 +483,15 @@ RemoveConstraintById(Oid conId)
else if (OidIsValid(con->contypid))
{
/*
- * XXX for now, do nothing special when dropping a domain constraint
+ * XXX for now, do nothing special when dropping a domain
+ * constraint
*
* Probably there should be some form of locking on the domain type,
* but we have no such concept at the moment.
*/
}
else
- {
elog(ERROR, "constraint %u is not of a known type", conId);
- }
/* Fry the constraint itself */
simple_heap_delete(conDesc, &tup->t_self);
diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c
index 70bd294297..5c10fa7b28 100644
--- a/src/backend/catalog/pg_conversion.c
+++ b/src/backend/catalog/pg_conversion.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.13 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_conversion.c,v 1.14 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -76,9 +76,9 @@ ConversionCreate(const char *conname, Oid connamespace,
contoencoding))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("default conversion for %s to %s already exists",
- pg_encoding_to_char(conforencoding),
- pg_encoding_to_char(contoencoding))));
+ errmsg("default conversion for %s to %s already exists",
+ pg_encoding_to_char(conforencoding),
+ pg_encoding_to_char(contoencoding))));
}
/* open pg_conversion */
@@ -147,7 +147,7 @@ ConversionDrop(Oid conversionOid, DropBehavior behavior)
if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tuple))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
- NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
+ NameStr(((Form_pg_conversion) GETSTRUCT(tuple))->conname));
ReleaseSysCache(tuple);
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index da3e2a4692..141d3a142a 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.81 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.82 2003/08/04 00:43:16 momjian Exp $
*
* NOTES
* these routines moved here from commands/define.c and somewhat cleaned up.
@@ -409,7 +409,7 @@ OperatorCreate(const char *operatorName,
if (!OidIsValid(leftTypeId) && !OidIsValid(rightTypeId))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("at least one of leftarg or rightarg must be specified")));
+ errmsg("at least one of leftarg or rightarg must be specified")));
if (!(OidIsValid(leftTypeId) && OidIsValid(rightTypeId)))
{
@@ -417,11 +417,11 @@ OperatorCreate(const char *operatorName,
if (commutatorName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have commutators")));
+ errmsg("only binary operators can have commutators")));
if (joinName)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("only binary operators can have join selectivity")));
+ errmsg("only binary operators can have join selectivity")));
if (canHash)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index d8ff4a5225..2c11a17db5 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.102 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.103 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -94,7 +94,7 @@ ProcedureCreate(const char *procedureName,
*/
if (returnType == ANYARRAYOID || returnType == ANYELEMENTOID)
{
- bool genericParam = false;
+ bool genericParam = false;
for (i = 0; i < parameterCount; i++)
{
@@ -231,7 +231,7 @@ ProcedureCreate(const char *procedureName,
returnsSet != oldproc->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("cannot change return type of existing function"),
+ errmsg("cannot change return type of existing function"),
errhint("Use DROP FUNCTION first.")));
/* Can't change aggregate status, either */
@@ -339,8 +339,8 @@ ProcedureCreate(const char *procedureName,
*
* This is normally applied during function definition, but in the case
* of a function with polymorphic arguments, we instead apply it during
- * function execution startup. The rettype is then the actual resolved
- * output type of the function, rather than the declared type. (Therefore,
+ * function execution startup. The rettype is then the actual resolved
+ * output type of the function, rather than the declared type. (Therefore,
* we should never see ANYARRAY or ANYELEMENT as rettype.)
*/
void
@@ -366,7 +366,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errdetail("Function's final statement must be a SELECT.")));
return;
}
@@ -395,9 +395,9 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
if (cmd != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type mismatch in function declared to return %s",
- format_type_be(rettype)),
- errdetail("Function's final statement must be a SELECT.")));
+ errmsg("return type mismatch in function declared to return %s",
+ format_type_be(rettype)),
+ errdetail("Function's final statement must be a SELECT.")));
/*
* Count the non-junk entries in the result targetlist.
@@ -421,7 +421,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Final SELECT must return exactly one column.")));
+ errdetail("Final SELECT must return exactly one column.")));
restype = ((TargetEntry *) lfirst(tlist))->resdom->restype;
if (!IsBinaryCoercible(restype, rettype))
@@ -481,7 +481,7 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("return type mismatch in function declared to return %s",
format_type_be(rettype)),
- errdetail("Final SELECT returns too many columns.")));
+ errdetail("Final SELECT returns too many columns.")));
attr = reln->rd_att->attrs[colindex - 1];
} while (attr->attisdropped);
rellogcols++;
@@ -538,8 +538,8 @@ check_sql_fn_retval(Oid rettype, char fn_typtype, List *queryTreeList)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("return type %s is not supported for SQL functions",
- format_type_be(rettype))));
+ errmsg("return type %s is not supported for SQL functions",
+ format_type_be(rettype))));
}
@@ -684,8 +684,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL functions cannot have arguments of type %s",
- format_type_be(proc->proargtypes[i]))));
+ errmsg("SQL functions cannot have arguments of type %s",
+ format_type_be(proc->proargtypes[i]))));
}
}
@@ -696,13 +696,13 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
prosrc = DatumGetCString(DirectFunctionCall1(textout, tmp));
/*
- * We can't do full prechecking of the function definition if there are
- * any polymorphic input types, because actual datatypes of expression
- * results will be unresolvable. The check will be done at runtime
- * instead.
+ * We can't do full prechecking of the function definition if there
+ * are any polymorphic input types, because actual datatypes of
+ * expression results will be unresolvable. The check will be done at
+ * runtime instead.
*
- * We can run the text through the raw parser though; this will at
- * least catch silly syntactic errors.
+ * We can run the text through the raw parser though; this will at least
+ * catch silly syntactic errors.
*/
if (!haspolyarg)
{
@@ -712,9 +712,7 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
check_sql_fn_retval(proc->prorettype, functyptype, querytree_list);
}
else
- {
querytree_list = pg_parse_query(prosrc);
- }
ReleaseSysCache(tuple);
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 7980e6afad..d578644e68 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.88 2003/07/21 01:59:11 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.89 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -359,7 +359,8 @@ TypeCreate(const char *typeName,
void
GenerateTypeDependencies(Oid typeNamespace,
Oid typeObjectId,
- Oid relationOid, /* only for 'c'atalog types */
+ Oid relationOid, /* only for 'c'atalog
+ * types */
char relationKind, /* ditto */
Oid inputProcedure,
Oid outputProcedure,
@@ -426,13 +427,13 @@ GenerateTypeDependencies(Oid typeNamespace,
/*
* If the type is a rowtype for a relation, mark it as internally
- * dependent on the relation, *unless* it is a stand-alone
- * composite type relation. For the latter case, we have to
- * reverse the dependency.
+ * dependent on the relation, *unless* it is a stand-alone composite
+ * type relation. For the latter case, we have to reverse the
+ * dependency.
*
- * In the former case, this allows the type to be auto-dropped when
- * the relation is, and not otherwise. And in the latter, of
- * course we get the opposite effect.
+ * In the former case, this allows the type to be auto-dropped when the
+ * relation is, and not otherwise. And in the latter, of course we get
+ * the opposite effect.
*/
if (OidIsValid(relationOid))
{
@@ -447,11 +448,11 @@ GenerateTypeDependencies(Oid typeNamespace,
}
/*
- * If the type is an array type, mark it auto-dependent on the
- * base type. (This is a compromise between the typical case
- * where the array type is automatically generated and the case
- * where it is manually created: we'd prefer INTERNAL for the
- * former case and NORMAL for the latter.)
+ * If the type is an array type, mark it auto-dependent on the base
+ * type. (This is a compromise between the typical case where the
+ * array type is automatically generated and the case where it is
+ * manually created: we'd prefer INTERNAL for the former case and
+ * NORMAL for the latter.)
*/
if (OidIsValid(elementType))
{
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 1d9b25b5b0..5a57d5c5c7 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.12 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/aggregatecmds.c,v 1.13 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -256,16 +256,16 @@ RenameAggregate(List *name, TypeName *basetype, const char *newname)
if (basetypeOid == ANYOID)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
- errmsg("function %s(*) already exists in schema \"%s\"",
- newname,
- get_namespace_name(namespaceOid))));
+ errmsg("function %s(*) already exists in schema \"%s\"",
+ newname,
+ get_namespace_name(namespaceOid))));
else
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_FUNCTION),
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes),
+ procForm->proargtypes),
get_namespace_name(namespaceOid))));
}
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index b377635099..4fd43871e9 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.4 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/alter.c,v 1.5 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,52 +79,52 @@ ExecRenameStmt(RenameStmt *stmt)
case OBJECT_TABLE:
case OBJECT_COLUMN:
case OBJECT_TRIGGER:
- {
- Oid relid;
+ {
+ Oid relid;
- CheckRelationOwnership(stmt->relation, true);
+ CheckRelationOwnership(stmt->relation, true);
- relid = RangeVarGetRelid(stmt->relation, false);
+ relid = RangeVarGetRelid(stmt->relation, false);
- switch (stmt->renameType)
- {
- case OBJECT_TABLE:
+ switch (stmt->renameType)
{
- /*
- * RENAME TABLE requires that we (still) hold
- * CREATE rights on the containing namespace, as
- * well as ownership of the table.
- */
- Oid namespaceId = get_rel_namespace(relid);
- AclResult aclresult;
-
- aclresult = pg_namespace_aclcheck(namespaceId,
- GetUserId(),
- ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
- get_namespace_name(namespaceId));
-
- renamerel(relid, stmt->newname);
- break;
- }
- case OBJECT_COLUMN:
- renameatt(relid,
- stmt->subname, /* old att name */
- stmt->newname, /* new att name */
+ case OBJECT_TABLE:
+ {
+ /*
+ * RENAME TABLE requires that we (still) hold
+ * CREATE rights on the containing namespace,
+ * as well as ownership of the table.
+ */
+ Oid namespaceId = get_rel_namespace(relid);
+ AclResult aclresult;
+
+ aclresult = pg_namespace_aclcheck(namespaceId,
+ GetUserId(),
+ ACL_CREATE);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_NAMESPACE,
+ get_namespace_name(namespaceId));
+
+ renamerel(relid, stmt->newname);
+ break;
+ }
+ case OBJECT_COLUMN:
+ renameatt(relid,
+ stmt->subname, /* old att name */
+ stmt->newname, /* new att name */
interpretInhOption(stmt->relation->inhOpt), /* recursive? */
- false); /* recursing already? */
- break;
- case OBJECT_TRIGGER:
- renametrig(relid,
- stmt->subname, /* old att name */
- stmt->newname); /* new att name */
- break;
- default:
- /*can't happen*/;
+ false); /* recursing already? */
+ break;
+ case OBJECT_TRIGGER:
+ renametrig(relid,
+ stmt->subname, /* old att name */
+ stmt->newname); /* new att name */
+ break;
+ default:
+ /* can't happen */ ;
+ }
+ break;
}
- break;
- }
default:
elog(ERROR, "unrecognized rename stmt type: %d",
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 5277884f1f..dac2d5d7bb 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.56 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.57 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -220,9 +220,9 @@ analyze_rel(Oid relid, VacuumStmt *vacstmt)
/*
* Silently ignore tables that are temp tables of other backends ---
- * trying to analyze these is rather pointless, since their
- * contents are probably not up-to-date on disk. (We don't throw a
- * warning here; it would just lead to chatter during a database-wide
+ * trying to analyze these is rather pointless, since their contents
+ * are probably not up-to-date on disk. (We don't throw a warning
+ * here; it would just lead to chatter during a database-wide
* ANALYZE.)
*/
if (isOtherTempNamespace(RelationGetNamespace(onerel)))
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index dafea7c869..69085740cc 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.96 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.97 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -603,10 +603,10 @@ Async_NotifyHandler(SIGNAL_ARGS)
bool save_ImmediateInterruptOK = ImmediateInterruptOK;
/*
- * We may be called while ImmediateInterruptOK is true; turn it off
- * while messing with the NOTIFY state. (We would have to save
- * and restore it anyway, because PGSemaphore operations inside
- * ProcessIncomingNotify() might reset it.)
+ * We may be called while ImmediateInterruptOK is true; turn it
+ * off while messing with the NOTIFY state. (We would have to
+ * save and restore it anyway, because PGSemaphore operations
+ * inside ProcessIncomingNotify() might reset it.)
*/
ImmediateInterruptOK = false;
@@ -639,7 +639,8 @@ Async_NotifyHandler(SIGNAL_ARGS)
}
/*
- * Restore ImmediateInterruptOK, and check for interrupts if needed.
+ * Restore ImmediateInterruptOK, and check for interrupts if
+ * needed.
*/
ImmediateInterruptOK = save_ImmediateInterruptOK;
if (save_ImmediateInterruptOK)
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 18f6bfcf6b..23e03443fc 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.112 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.113 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,12 +58,12 @@ typedef struct
*/
typedef struct
{
- Oid tableOid;
- Oid indexOid;
-} RelToCluster;
+ Oid tableOid;
+ Oid indexOid;
+} RelToCluster;
-static void cluster_rel(RelToCluster *rv, bool recheck);
+static void cluster_rel(RelToCluster * rv, bool recheck);
static Oid make_new_heap(Oid OIDOldHeap, const char *NewName);
static void copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex);
static List *get_indexattr_list(Relation OldHeap, Oid OldIndex);
@@ -74,7 +74,7 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
/*---------------------------------------------------------------------------
- * This cluster code allows for clustering multiple tables at once. Because
+ * This cluster code allows for clustering multiple tables at once. Because
* of this, we cannot just run everything on a single transaction, or we
* would be forced to acquire exclusive locks on all the tables being
* clustered, simultaneously --- very likely leading to deadlock.
@@ -82,17 +82,17 @@ static List *get_tables_to_cluster(MemoryContext cluster_context);
* To solve this we follow a similar strategy to VACUUM code,
* clustering each relation in a separate transaction. For this to work,
* we need to:
- * - provide a separate memory context so that we can pass information in
- * a way that survives across transactions
- * - start a new transaction every time a new relation is clustered
- * - check for validity of the information on to-be-clustered relations,
- * as someone might have deleted a relation behind our back, or
- * clustered one on a different index
- * - end the transaction
+ * - provide a separate memory context so that we can pass information in
+ * a way that survives across transactions
+ * - start a new transaction every time a new relation is clustered
+ * - check for validity of the information on to-be-clustered relations,
+ * as someone might have deleted a relation behind our back, or
+ * clustered one on a different index
+ * - end the transaction
*
* The single-relation case does not have any such overhead.
*
- * We also allow a relation being specified without index. In that case,
+ * We also allow a relation being specified without index. In that case,
* the indisclustered bit will be looked up, and an ERROR will be thrown
* if there is no index with the bit set.
*---------------------------------------------------------------------------
@@ -103,10 +103,10 @@ cluster(ClusterStmt *stmt)
if (stmt->relation != NULL)
{
/* This is the single-relation case. */
- Oid tableOid,
- indexOid = InvalidOid;
- Relation rel;
- RelToCluster rvtc;
+ Oid tableOid,
+ indexOid = InvalidOid;
+ Relation rel;
+ RelToCluster rvtc;
/* Find and lock the table */
rel = heap_openrv(stmt->relation, AccessExclusiveLock);
@@ -123,10 +123,10 @@ cluster(ClusterStmt *stmt)
List *index;
/* We need to find the index that has indisclustered set. */
- foreach (index, RelationGetIndexList(rel))
+ foreach(index, RelationGetIndexList(rel))
{
- HeapTuple idxtuple;
- Form_pg_index indexForm;
+ HeapTuple idxtuple;
+ Form_pg_index indexForm;
indexOid = lfirsto(index);
idxtuple = SearchSysCache(INDEXRELID,
@@ -152,14 +152,17 @@ cluster(ClusterStmt *stmt)
}
else
{
- /* The index is expected to be in the same namespace as the relation. */
+ /*
+ * The index is expected to be in the same namespace as the
+ * relation.
+ */
indexOid = get_relname_relid(stmt->indexname,
rel->rd_rel->relnamespace);
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("index \"%s\" for table \"%s\" does not exist",
- stmt->indexname, stmt->relation->relname)));
+ errmsg("index \"%s\" for table \"%s\" does not exist",
+ stmt->indexname, stmt->relation->relname)));
}
/* All other checks are done in cluster_rel() */
@@ -175,16 +178,16 @@ cluster(ClusterStmt *stmt)
else
{
/*
- * This is the "multi relation" case. We need to cluster all tables
- * that have some index with indisclustered set.
+ * This is the "multi relation" case. We need to cluster all
+ * tables that have some index with indisclustered set.
*/
- MemoryContext cluster_context;
- List *rv,
- *rvs;
+ MemoryContext cluster_context;
+ List *rv,
+ *rvs;
/*
- * We cannot run this form of CLUSTER inside a user transaction block;
- * we'd be holding locks way too long.
+ * We cannot run this form of CLUSTER inside a user transaction
+ * block; we'd be holding locks way too long.
*/
PreventTransactionChain((void *) stmt, "CLUSTER");
@@ -201,8 +204,8 @@ cluster(ClusterStmt *stmt)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Build the list of relations to cluster. Note that this lives in
- * cluster_context.
+ * Build the list of relations to cluster. Note that this lives
+ * in cluster_context.
*/
rvs = get_tables_to_cluster(cluster_context);
@@ -210,13 +213,14 @@ cluster(ClusterStmt *stmt)
CommitTransactionCommand();
/* Ok, now that we've got them all, cluster them one by one */
- foreach (rv, rvs)
+ foreach(rv, rvs)
{
- RelToCluster *rvtc = (RelToCluster *) lfirst(rv);
+ RelToCluster *rvtc = (RelToCluster *) lfirst(rv);
/* Start a new transaction for each relation. */
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
cluster_rel(rvtc, true);
CommitTransactionCommand();
}
@@ -244,7 +248,7 @@ cluster(ClusterStmt *stmt)
* them incrementally while we load the table.
*/
static void
-cluster_rel(RelToCluster *rvtc, bool recheck)
+cluster_rel(RelToCluster * rvtc, bool recheck)
{
Relation OldHeap,
OldIndex;
@@ -256,14 +260,14 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* Since we may open a new transaction for each relation, we have to
* check that the relation still is what we think it is.
*
- * If this is a single-transaction CLUSTER, we can skip these tests.
- * We *must* skip the one on indisclustered since it would reject an
+ * If this is a single-transaction CLUSTER, we can skip these tests. We
+ * *must* skip the one on indisclustered since it would reject an
* attempt to cluster a not-previously-clustered index.
*/
if (recheck)
{
- HeapTuple tuple;
- Form_pg_index indexForm;
+ HeapTuple tuple;
+ Form_pg_index indexForm;
/*
* Check if the relation and index still exist before opening them
@@ -319,10 +323,10 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
RelationGetRelationName(OldHeap))));
/*
- * Disallow clustering on incomplete indexes (those that might not index
- * every row of the relation). We could relax this by making a separate
- * seqscan pass over the table to copy the missing rows, but that seems
- * expensive and tedious.
+ * Disallow clustering on incomplete indexes (those that might not
+ * index every row of the relation). We could relax this by making a
+ * separate seqscan pass over the table to copy the missing rows, but
+ * that seems expensive and tedious.
*/
if (!heap_attisnull(OldIndex->rd_indextuple, Anum_pg_index_indpred))
ereport(ERROR,
@@ -334,7 +338,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
/*
* If the AM doesn't index nulls, then it's a partial index unless
- * we can prove all the rows are non-null. Note we only need look
+ * we can prove all the rows are non-null. Note we only need look
* at the first column; multicolumn-capable AMs are *required* to
* index nulls in columns after the first.
*/
@@ -347,7 +351,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster when index access method does not handle nulls"),
errhint("You may be able to work around this by marking column \"%s\" NOT NULL.",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
}
else if (colno < 0)
{
@@ -382,7 +386,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temp tables of other processes")));
+ errmsg("cannot cluster temp tables of other processes")));
/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
@@ -397,7 +401,7 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* rebuild_relation: rebuild an existing relation
*
* This is shared code between CLUSTER and TRUNCATE. In the TRUNCATE
- * case, the new relation is built and left empty. In the CLUSTER case,
+ * case, the new relation is built and left empty. In the CLUSTER case,
* it is filled with data read from the old relation in the order specified
* by the index.
*
@@ -432,6 +436,7 @@ rebuild_relation(Relation OldHeap, Oid indexOid)
snprintf(NewHeapName, sizeof(NewHeapName), "pg_temp_%u", tableOid);
OIDNewHeap = make_new_heap(tableOid, NewHeapName);
+
/*
* We don't need CommandCounterIncrement() because make_new_heap did
* it.
@@ -754,8 +759,8 @@ swap_relfilenodes(Oid r1, Oid r2)
/* swap size statistics too, since new rel has freshly-updated stats */
{
- int4 swap_pages;
- float4 swap_tuples;
+ int4 swap_pages;
+ float4 swap_tuples;
swap_pages = relform1->relpages;
relform1->relpages = relform2->relpages;
@@ -857,20 +862,20 @@ swap_relfilenodes(Oid r1, Oid r2)
static List *
get_tables_to_cluster(MemoryContext cluster_context)
{
- Relation indRelation;
- HeapScanDesc scan;
- ScanKeyData entry;
- HeapTuple indexTuple;
- Form_pg_index index;
- MemoryContext old_context;
- RelToCluster *rvtc;
- List *rvs = NIL;
+ Relation indRelation;
+ HeapScanDesc scan;
+ ScanKeyData entry;
+ HeapTuple indexTuple;
+ Form_pg_index index;
+ MemoryContext old_context;
+ RelToCluster *rvtc;
+ List *rvs = NIL;
/*
* Get all indexes that have indisclustered set and are owned by
- * appropriate user. System relations or nailed-in relations cannot ever
- * have indisclustered set, because CLUSTER will refuse to set it when
- * called with one of them as argument.
+ * appropriate user. System relations or nailed-in relations cannot
+ * ever have indisclustered set, because CLUSTER will refuse to set it
+ * when called with one of them as argument.
*/
indRelation = relation_openr(IndexRelationName, AccessShareLock);
ScanKeyEntryInitialize(&entry, 0,
@@ -886,8 +891,8 @@ get_tables_to_cluster(MemoryContext cluster_context)
continue;
/*
- * We have to build the list in a different memory context so
- * it will survive the cross-transaction processing
+ * We have to build the list in a different memory context so it
+ * will survive the cross-transaction processing
*/
old_context = MemoryContextSwitchTo(cluster_context);
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index ecd50bdb36..e0ebba0df9 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2001, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.67 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/comment.c,v 1.68 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -383,8 +383,8 @@ CommentAttribute(List *qualname, char *comment)
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- attrname, RelationGetRelationName(relation))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ attrname, RelationGetRelationName(relation))));
/* Create the comment using the relation's oid */
@@ -418,16 +418,17 @@ CommentDatabase(List *qualname, char *comment)
database = strVal(lfirst(qualname));
/*
- * We cannot currently support cross-database comments (since other DBs
- * cannot see pg_description of this database). So, we reject attempts
- * to comment on a database other than the current one. Someday this
- * might be improved, but it would take a redesigned infrastructure.
+ * We cannot currently support cross-database comments (since other
+ * DBs cannot see pg_description of this database). So, we reject
+ * attempts to comment on a database other than the current one.
+ * Someday this might be improved, but it would take a redesigned
+ * infrastructure.
*
* When loading a dump, we may see a COMMENT ON DATABASE for the old name
- * of the database. Erroring out would prevent pg_restore from completing
- * (which is really pg_restore's fault, but for now we will work around
- * the problem here). Consensus is that the best fix is to treat wrong
- * database name as a WARNING not an ERROR.
+ * of the database. Erroring out would prevent pg_restore from
+ * completing (which is really pg_restore's fault, but for now we will
+ * work around the problem here). Consensus is that the best fix is
+ * to treat wrong database name as a WARNING not an ERROR.
*/
/* First get the database OID */
@@ -569,7 +570,7 @@ CommentRule(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("there are multiple rules \"%s\"", rulename),
- errhint("Specify a relation name as well as a rule name.")));
+ errhint("Specify a relation name as well as a rule name.")));
heap_endscan(scanDesc);
heap_close(RewriteRelation, AccessShareLock);
@@ -811,8 +812,8 @@ CommentTrigger(List *qualname, char *comment)
if (!HeapTupleIsValid(triggertuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- trigname, RelationGetRelationName(relation))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ trigname, RelationGetRelationName(relation))));
oid = HeapTupleGetOid(triggertuple);
@@ -891,7 +892,7 @@ CommentConstraint(List *qualname, char *comment)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("relation \"%s\" has multiple constraints named \"%s\"",
- RelationGetRelationName(relation), conName)));
+ RelationGetRelationName(relation), conName)));
conOid = HeapTupleGetOid(tuple);
}
}
@@ -902,8 +903,8 @@ CommentConstraint(List *qualname, char *comment)
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" for relation \"%s\" does not exist",
- conName, RelationGetRelationName(relation))));
+ errmsg("constraint \"%s\" for relation \"%s\" does not exist",
+ conName, RelationGetRelationName(relation))));
/* Create the comment with the pg_constraint oid */
CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment);
diff --git a/src/backend/commands/conversioncmds.c b/src/backend/commands/conversioncmds.c
index b917c527ac..e9afb95624 100644
--- a/src/backend/commands/conversioncmds.c
+++ b/src/backend/commands/conversioncmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.9 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/conversioncmds.c,v 1.10 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -148,11 +148,11 @@ RenameConversion(List *name, const char *newname)
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("conversion \"%s\" already exists in schema \"%s\"",
- newname, get_namespace_name(namespaceOid))));
+ errmsg("conversion \"%s\" already exists in schema \"%s\"",
+ newname, get_namespace_name(namespaceOid))));
/* must be owner */
- if (!superuser() &&
+ if (!superuser() &&
((Form_pg_conversion) GETSTRUCT(tup))->conowner != GetUserId())
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CONVERSION,
NameListToString(name));
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index fa91439a57..5c7238de8d 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.205 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.206 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -61,7 +61,7 @@ typedef enum CopyDest
COPY_FILE, /* to/from file */
COPY_OLD_FE, /* to/from frontend (old protocol) */
COPY_NEW_FE /* to/from frontend (new protocol) */
-} CopyDest;
+} CopyDest;
/*
* Represents the type of data returned by CopyReadAttribute()
@@ -82,17 +82,17 @@ typedef enum EolType
EOL_NL,
EOL_CR,
EOL_CRNL
-} EolType;
+} EolType;
/* non-export function prototypes */
static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print);
+ char *delim, char *null_print);
static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print);
+ char *delim, char *null_print);
static char *CopyReadAttribute(const char *delim, CopyReadResult *result);
static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo,
- Oid typelem, bool *isnull);
+ Oid typelem, bool *isnull);
static void CopyAttributeOut(char *string, char *delim);
static List *CopyGetAttnums(Relation rel, List *attnamelist);
@@ -136,6 +136,7 @@ static void CopySendChar(char c);
static void CopySendEndOfRow(bool binary);
static void CopyGetData(void *databuf, int datasize);
static int CopyGetChar(void);
+
#define CopyGetEof() (fe_eof)
static int CopyPeekChar(void);
static void CopyDonePeek(int c, bool pickup);
@@ -155,14 +156,14 @@ SendCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
- int16 format = (binary ? 1 : 0);
- int i;
+ int16 format = (binary ? 1 : 0);
+ int i;
pq_beginmessage(&buf, 'H');
- pq_sendbyte(&buf, format); /* overall format */
+ pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
- pq_sendint(&buf, format, 2); /* per-column formats */
+ pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
@@ -200,14 +201,14 @@ ReceiveCopyBegin(bool binary, int natts)
{
/* new way */
StringInfoData buf;
- int16 format = (binary ? 1 : 0);
- int i;
+ int16 format = (binary ? 1 : 0);
+ int i;
pq_beginmessage(&buf, 'G');
- pq_sendbyte(&buf, format); /* overall format */
+ pq_sendbyte(&buf, format); /* overall format */
pq_sendint(&buf, natts, 2);
for (i = 0; i < natts; i++)
- pq_sendint(&buf, format, 2); /* per-column formats */
+ pq_sendint(&buf, format, 2); /* per-column formats */
pq_endmessage(&buf);
copy_dest = COPY_NEW_FE;
copy_msgbuf = makeStringInfo();
@@ -289,7 +290,7 @@ CopySendData(void *databuf, int datasize)
/* no hope of recovering connection sync, so FATAL */
ereport(FATAL,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("connection lost during COPY to stdout")));
+ errmsg("connection lost during COPY to stdout")));
}
break;
case COPY_NEW_FE:
@@ -378,7 +379,7 @@ CopyGetData(void *databuf, int datasize)
case COPY_NEW_FE:
while (datasize > 0 && !fe_eof)
{
- int avail;
+ int avail;
while (copy_msgbuf->cursor >= copy_msgbuf->len)
{
@@ -389,24 +390,24 @@ CopyGetData(void *databuf, int datasize)
if (mtype == EOF)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
if (pq_getmessage(copy_msgbuf, 0))
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on client connection")));
+ errmsg("unexpected EOF on client connection")));
switch (mtype)
{
- case 'd': /* CopyData */
+ case 'd': /* CopyData */
break;
- case 'c': /* CopyDone */
+ case 'c': /* CopyDone */
/* COPY IN correctly terminated by frontend */
fe_eof = true;
return;
- case 'f': /* CopyFail */
+ case 'f': /* CopyFail */
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("COPY from stdin failed: %s",
- pq_getmsgstring(copy_msgbuf))));
+ pq_getmsgstring(copy_msgbuf))));
break;
default:
ereport(ERROR,
@@ -421,7 +422,7 @@ CopyGetData(void *databuf, int datasize)
avail = datasize;
pq_copymsgbytes(copy_msgbuf, databuf, avail);
databuf = (void *) ((char *) databuf + avail);
- datasize =- avail;
+ datasize -= avail;
}
break;
}
@@ -430,7 +431,7 @@ CopyGetData(void *databuf, int datasize)
static int
CopyGetChar(void)
{
- int ch;
+ int ch;
switch (copy_dest)
{
@@ -448,16 +449,16 @@ CopyGetChar(void)
}
break;
case COPY_NEW_FE:
- {
- unsigned char cc;
+ {
+ unsigned char cc;
- CopyGetData(&cc, 1);
- if (fe_eof)
- ch = EOF;
- else
- ch = cc;
- break;
- }
+ CopyGetData(&cc, 1);
+ if (fe_eof)
+ ch = EOF;
+ else
+ ch = cc;
+ break;
+ }
default:
ch = EOF;
break;
@@ -479,7 +480,7 @@ CopyGetChar(void)
static int
CopyPeekChar(void)
{
- int ch;
+ int ch;
switch (copy_dest)
{
@@ -497,16 +498,16 @@ CopyPeekChar(void)
}
break;
case COPY_NEW_FE:
- {
- unsigned char cc;
+ {
+ unsigned char cc;
- CopyGetData(&cc, 1);
- if (fe_eof)
- ch = EOF;
- else
- ch = cc;
- break;
- }
+ CopyGetData(&cc, 1);
+ if (fe_eof)
+ ch = EOF;
+ else
+ ch = cc;
+ break;
+ }
default:
ch = EOF;
break;
@@ -524,7 +525,7 @@ CopyDonePeek(int c, bool pickup)
switch (copy_dest)
{
case COPY_FILE:
- if (!pickup)
+ if (!pickup)
{
/* We don't want to pick it up - so put it back in there */
ungetc(c, copy_file);
@@ -537,7 +538,11 @@ CopyDonePeek(int c, bool pickup)
/* We want to pick it up */
(void) pq_getbyte();
}
- /* If we didn't want to pick it up, just leave it where it sits */
+
+ /*
+ * If we didn't want to pick it up, just leave it where it
+ * sits
+ */
break;
case COPY_NEW_FE:
if (!pickup)
@@ -737,7 +742,7 @@ DoCopy(const CopyStmt *stmt)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to COPY to or from a file"),
errhint("Anyone can COPY to stdout or from stdin. "
- "psql's \\copy command also works for anyone.")));
+ "psql's \\copy command also works for anyone.")));
/*
* Presently, only single-character delimiter strings are supported.
@@ -791,8 +796,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy to non-table relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("cannot copy to non-table relation \"%s\"",
+ RelationGetRelationName(rel))));
}
if (pipe)
{
@@ -810,8 +815,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for reading: %m",
- filename)));
+ errmsg("could not open file \"%s\" for reading: %m",
+ filename)));
fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -841,8 +846,8 @@ DoCopy(const CopyStmt *stmt)
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot copy from non-table relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("cannot copy from non-table relation \"%s\"",
+ RelationGetRelationName(rel))));
}
if (pipe)
{
@@ -863,7 +868,7 @@ DoCopy(const CopyStmt *stmt)
if (!is_absolute_path(filename))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("relative path not allowed for COPY to file")));
+ errmsg("relative path not allowed for COPY to file")));
oumask = umask((mode_t) 022);
copy_file = AllocateFile(filename, PG_BINARY_W);
@@ -872,8 +877,8 @@ DoCopy(const CopyStmt *stmt)
if (copy_file == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" for writing: %m",
- filename)));
+ errmsg("could not open file \"%s\" for writing: %m",
+ filename)));
fstat(fileno(copy_file), &st);
if (S_ISDIR(st.st_mode))
@@ -955,8 +960,8 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
}
/*
- * Create a temporary memory context that we can reset once per row
- * to recover palloc'd memory. This avoids any problems with leaks
+ * Create a temporary memory context that we can reset once per row to
+ * recover palloc'd memory. This avoids any problems with leaks
* inside datatype output routines, and should be faster than retail
* pfree's anyway. (We don't need a whole econtext as CopyFrom does.)
*/
@@ -1040,9 +1045,9 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
if (isnull)
{
if (!binary)
- CopySendString(null_print); /* null indicator */
+ CopySendString(null_print); /* null indicator */
else
- CopySendInt32(-1); /* null marker */
+ CopySendInt32(-1); /* null marker */
}
else
{
@@ -1060,7 +1065,7 @@ CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1],
value,
- ObjectIdGetDatum(elements[attnum - 1])));
+ ObjectIdGetDatum(elements[attnum - 1])));
/* We assume the result will not have been toasted */
CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ);
CopySendData(VARDATA(outputbytes),
@@ -1199,7 +1204,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
/* attribute is NOT to be copied from input */
/* use default value if one exists */
- Node *defexpr = build_column_default(rel, i + 1);
+ Node *defexpr = build_column_default(rel, i + 1);
if (defexpr != NULL)
{
@@ -1219,10 +1224,10 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Easiest way to do this is to use parse_coerce.c to set up
* an expression that checks the constraints. (At present,
- * the expression might contain a length-coercion-function call
- * and/or CoerceToDomain nodes.) The bottom of the expression
- * is a Param node so that we can fill in the actual datum during
- * the data input loop.
+ * the expression might contain a length-coercion-function
+ * call and/or CoerceToDomain nodes.) The bottom of the
+ * expression is a Param node so that we can fill in the
+ * actual datum during the data input loop.
*/
prm = makeNode(Param);
prm->paramkind = PARAM_EXEC;
@@ -1241,11 +1246,11 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
}
/*
- * Check BEFORE STATEMENT insertion triggers. It's debateable
- * whether we should do this for COPY, since it's not really an
- * "INSERT" statement as such. However, executing these triggers
- * maintains consistency with the EACH ROW triggers that we already
- * fire on COPY.
+ * Check BEFORE STATEMENT insertion triggers. It's debateable whether
+ * we should do this for COPY, since it's not really an "INSERT"
+ * statement as such. However, executing these triggers maintains
+ * consistency with the EACH ROW triggers that we already fire on
+ * COPY.
*/
ExecBSInsertTriggers(estate, resultRelInfo);
@@ -1276,13 +1281,13 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if ((tmp >> 16) != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unrecognized critical flags in COPY file header")));
+ errmsg("unrecognized critical flags in COPY file header")));
/* Header extension length */
tmp = CopyGetInt32();
if (CopyGetEof() || tmp < 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (missing length)")));
+ errmsg("invalid COPY file header (missing length)")));
/* Skip extension header, if present */
while (tmp-- > 0)
{
@@ -1290,7 +1295,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (CopyGetEof())
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("invalid COPY file header (wrong length)")));
+ errmsg("invalid COPY file header (wrong length)")));
}
}
@@ -1418,9 +1423,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
/*
* Complain if there are more fields on the input line.
*
- * Special case: if we're reading a zero-column table, we
- * won't yet have called CopyReadAttribute() at all; so do that
- * and check we have an empty line. Fortunately we can keep that
+ * Special case: if we're reading a zero-column table, we won't
+ * yet have called CopyReadAttribute() at all; so do that and
+ * check we have an empty line. Fortunately we can keep that
* silly corner case out of the main line of execution.
*/
if (result == NORMAL_ATTR)
@@ -1431,7 +1436,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
if (result == NORMAL_ATTR || *string != '\0')
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
if (result == END_OF_FILE)
{
/* EOF at start of line: all is well */
@@ -1442,7 +1447,7 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
else
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
}
/*
@@ -1475,8 +1480,8 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
{
loaded_oid =
DatumGetObjectId(CopyReadBinaryAttribute(0,
- &oid_in_function,
- oid_in_element,
+ &oid_in_function,
+ oid_in_element,
&isnull));
if (isnull || loaded_oid == InvalidOid)
ereport(ERROR,
@@ -1531,9 +1536,9 @@ CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
prmdata->isnull = (nulls[i] == 'n');
/*
- * Execute the constraint expression. Allow the expression
- * to replace the value (consider e.g. a timestamp precision
- * restriction).
+ * Execute the constraint expression. Allow the
+ * expression to replace the value (consider e.g. a
+ * timestamp precision restriction).
*/
values[i] = ExecEvalExpr(exprstate, econtext,
&isnull, NULL);
@@ -1674,11 +1679,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("literal carriage return found in data"),
- errhint("Use \"\\r\" to represent carriage return.")));
- /* Check for \r\n on first line, _and_ handle \r\n. */
+ errhint("Use \"\\r\" to represent carriage return.")));
+ /* Check for \r\n on first line, _and_ handle \r\n. */
if (copy_lineno == 1 || eol_type == EOL_CRNL)
{
- int c2 = CopyPeekChar();
+ int c2 = CopyPeekChar();
+
if (c2 == '\n')
{
CopyDonePeek(c2, true); /* eat newline */
@@ -1690,9 +1696,13 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
if (eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("literal carriage return found in data"),
+ errmsg("literal carriage return found in data"),
errhint("Use \"\\r\" to represent carriage return.")));
- /* if we got here, it is the first line and we didn't get \n, so put it back */
+
+ /*
+ * if we got here, it is the first line and we didn't
+ * get \n, so put it back
+ */
CopyDonePeek(c2, false);
eol_type = EOL_CR;
}
@@ -1802,12 +1812,12 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
c = CopyGetChar();
if (c == '\n')
ereport(ERROR,
- (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("end-of-copy marker does not match previous newline style")));
+ (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
+ errmsg("end-of-copy marker does not match previous newline style")));
if (c != '\r')
ereport(ERROR,
- (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("end-of-copy marker corrupt")));
+ (errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
+ errmsg("end-of-copy marker corrupt")));
}
c = CopyGetChar();
if (c != '\r' && c != '\n')
@@ -1816,21 +1826,20 @@ CopyReadAttribute(const char *delim, CopyReadResult *result)
errmsg("end-of-copy marker corrupt")));
if ((eol_type == EOL_NL && c != '\n') ||
(eol_type == EOL_CRNL && c != '\n') ||
- (eol_type == EOL_CR && c != '\r'))
+ (eol_type == EOL_CR && c != '\r'))
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("end-of-copy marker does not match previous newline style")));
+
/*
- * In protocol version 3, we should ignore anything after
- * \. up to the protocol end of copy data. (XXX maybe
- * better not to treat \. as special?)
+ * In protocol version 3, we should ignore anything
+ * after \. up to the protocol end of copy data. (XXX
+ * maybe better not to treat \. as special?)
*/
if (copy_dest == COPY_NEW_FE)
{
while (c != EOF)
- {
c = CopyGetChar();
- }
}
*result = END_OF_FILE;
goto copy_eof;
@@ -2045,8 +2054,8 @@ CopyGetAttnums(Relation rel, List *attnamelist)
if (intMember(attnum, attnums))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" specified more than once",
- name)));
+ errmsg("attribute \"%s\" specified more than once",
+ name)));
attnums = lappendi(attnums, attnum);
}
}
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 70678b26b0..547f3fb2f3 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.119 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/dbcommands.c,v 1.120 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -200,7 +200,7 @@ createdb(const CreatedbStmt *stmt)
if (dbpath != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use an alternate location on this platform")));
+ errmsg("cannot use an alternate location on this platform")));
#endif
/*
@@ -260,8 +260,8 @@ createdb(const CreatedbStmt *stmt)
if (DatabaseHasActiveBackends(src_dboid, true))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("source database \"%s\" is being accessed by other users",
- dbtemplate)));
+ errmsg("source database \"%s\" is being accessed by other users",
+ dbtemplate)));
/* If encoding is defaulted, use source's encoding */
if (encoding < 0)
@@ -345,7 +345,7 @@ createdb(const CreatedbStmt *stmt)
/* Make the symlink, if needed */
if (alt_loc)
{
-#ifdef HAVE_SYMLINK /* already throws error above */
+#ifdef HAVE_SYMLINK /* already throws error above */
if (symlink(alt_loc, nominal_loc) != 0)
#endif
ereport(ERROR,
@@ -450,7 +450,7 @@ dropdb(const char *dbname)
char *nominal_loc;
char dbpath[MAXPGPATH];
Relation pgdbrel;
- SysScanDesc pgdbscan;
+ SysScanDesc pgdbscan;
ScanKeyData key;
HeapTuple tup;
@@ -503,8 +503,8 @@ dropdb(const char *dbname)
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- dbname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ dbname)));
/*
* Find the database's tuple by OID (should be unique).
@@ -577,10 +577,13 @@ dropdb(const char *dbname)
void
RenameDatabase(const char *oldname, const char *newname)
{
- HeapTuple tup, newtup;
+ HeapTuple tup,
+ newtup;
Relation rel;
- SysScanDesc scan, scan2;
- ScanKeyData key, key2;
+ SysScanDesc scan,
+ scan2;
+ ScanKeyData key,
+ key2;
/*
* Obtain AccessExclusiveLock so that no new session gets started
@@ -610,15 +613,14 @@ RenameDatabase(const char *oldname, const char *newname)
errmsg("current database may not be renamed")));
/*
- * Make sure the database does not have active sessions. Might
- * not be necessary, but it's consistent with other database
- * operations.
+ * Make sure the database does not have active sessions. Might not be
+ * necessary, but it's consistent with other database operations.
*/
if (DatabaseHasActiveBackends(HeapTupleGetOid(tup), false))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
- errmsg("database \"%s\" is being accessed by other users",
- oldname)));
+ errmsg("database \"%s\" is being accessed by other users",
+ oldname)));
/* make sure the new name doesn't exist */
ScanKeyEntryInitialize(&key2, 0, Anum_pg_database_datname,
@@ -651,10 +653,10 @@ RenameDatabase(const char *oldname, const char *newname)
heap_close(rel, NoLock);
/*
- * Force dirty buffers out to disk, so that newly-connecting
- * backends will see the renamed database in pg_database right
- * away. (They'll see an uncommitted tuple, but they don't care;
- * see GetRawDatabaseInfo.)
+ * Force dirty buffers out to disk, so that newly-connecting backends
+ * will see the renamed database in pg_database right away. (They'll
+ * see an uncommitted tuple, but they don't care; see
+ * GetRawDatabaseInfo.)
*/
BufferSync();
}
@@ -671,7 +673,7 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
newtuple;
Relation rel;
ScanKeyData scankey;
- SysScanDesc scan;
+ SysScanDesc scan;
Datum repl_val[Natts_pg_database];
char repl_null[Natts_pg_database];
char repl_repl[Natts_pg_database];
@@ -689,9 +691,9 @@ AlterDatabaseSet(AlterDatabaseSetStmt *stmt)
errmsg("database \"%s\" does not exist", stmt->dbname)));
if (!(superuser()
- || ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
+ || ((Form_pg_database) GETSTRUCT(tuple))->datdba == GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_DATABASE,
- stmt->dbname);
+ stmt->dbname);
MemSet(repl_repl, ' ', sizeof(repl_repl));
repl_repl[Anum_pg_database_datconfig - 1] = 'r';
@@ -750,7 +752,7 @@ get_db_info(const char *name, Oid *dbIdP, int4 *ownerIdP,
{
Relation relation;
ScanKeyData scanKey;
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple tuple;
bool gottuple;
@@ -862,7 +864,7 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
#ifndef ALLOW_ABSOLUTE_DBPATHS
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("absolute paths are not allowed as database locations")));
+ errmsg("absolute paths are not allowed as database locations")));
#endif
prefix = dbpath;
}
@@ -874,8 +876,8 @@ resolve_alt_dbpath(const char *dbpath, Oid dboid)
if (!var)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("postmaster environment variable \"%s\" not found",
- dbpath)));
+ errmsg("postmaster environment variable \"%s\" not found",
+ dbpath)));
if (!is_absolute_path(var))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
@@ -955,7 +957,7 @@ get_database_oid(const char *dbname)
{
Relation pg_database;
ScanKeyData entry[1];
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple dbtuple;
Oid oid;
@@ -993,7 +995,7 @@ get_database_name(Oid dbid)
{
Relation pg_database;
ScanKeyData entry[1];
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple dbtuple;
char *result;
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
index bf0c95a75e..c924dcc7b7 100644
--- a/src/backend/commands/define.c
+++ b/src/backend/commands/define.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.82 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.83 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -51,7 +51,8 @@ case_translate_language_name(const char *input, char *output)
{
int i;
- MemSet(output, 0, NAMEDATALEN); /* ensure result Name is zero-filled */
+ MemSet(output, 0, NAMEDATALEN); /* ensure result Name is
+ * zero-filled */
for (i = 0; i < NAMEDATALEN - 1 && input[i]; ++i)
output[i] = tolower((unsigned char) input[i]);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index bc137b0eac..916c1ff772 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.111 2003/07/20 21:56:32 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.112 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,11 +45,11 @@ typedef struct ExplainState
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
TupOutputState *tstate);
-static double elapsed_time(struct timeval *starttime);
+static double elapsed_time(struct timeval * starttime);
static void explain_outNode(StringInfo str,
- Plan *plan, PlanState *planstate,
- Plan *outer_plan,
- int indent, ExplainState *es);
+ Plan *plan, PlanState * planstate,
+ Plan *outer_plan,
+ int indent, ExplainState *es);
static void show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
int scanrelid, Plan *outer_plan,
StringInfo str, int indent, ExplainState *es);
@@ -58,8 +58,8 @@ static void show_upper_qual(List *qual, const char *qlabel,
const char *inner_name, int inner_varno, Plan *inner_plan,
StringInfo str, int indent, ExplainState *es);
static void show_sort_keys(List *tlist, int nkeys, AttrNumber *keycols,
- const char *qlabel,
- StringInfo str, int indent, ExplainState *es);
+ const char *qlabel,
+ StringInfo str, int indent, ExplainState *es);
static Node *make_ors_ands_explicit(List *orclauses);
/*
@@ -255,8 +255,8 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
}
/*
- * Close down the query and free resources. Include time for this
- * in the total runtime.
+ * Close down the query and free resources. Include time for this in
+ * the total runtime.
*/
gettimeofday(&starttime, NULL);
@@ -282,7 +282,7 @@ ExplainOnePlan(QueryDesc *queryDesc, ExplainStmt *stmt,
/* Compute elapsed time in seconds since given gettimeofday() timestamp */
static double
-elapsed_time(struct timeval *starttime)
+elapsed_time(struct timeval * starttime)
{
struct timeval endtime;
@@ -313,7 +313,7 @@ elapsed_time(struct timeval *starttime)
*/
static void
explain_outNode(StringInfo str,
- Plan *plan, PlanState *planstate,
+ Plan *plan, PlanState * planstate,
Plan *outer_plan,
int indent, ExplainState *es)
{
@@ -542,8 +542,8 @@ explain_outNode(StringInfo str,
/*
* If the expression is still a function call, we can get
* the real name of the function. Otherwise, punt (this
- * can happen if the optimizer simplified away the function
- * call, for example).
+ * can happen if the optimizer simplified away the
+ * function call, for example).
*/
if (rte->funcexpr && IsA(rte->funcexpr, FuncExpr))
{
@@ -583,15 +583,13 @@ explain_outNode(StringInfo str,
double nloops = planstate->instrument->nloops;
appendStringInfo(str, " (actual time=%.2f..%.2f rows=%.0f loops=%.0f)",
- 1000.0 * planstate->instrument->startup / nloops,
- 1000.0 * planstate->instrument->total / nloops,
+ 1000.0 * planstate->instrument->startup / nloops,
+ 1000.0 * planstate->instrument->total / nloops,
planstate->instrument->ntuples / nloops,
planstate->instrument->nloops);
}
else if (es->printAnalyze)
- {
appendStringInfo(str, " (never executed)");
- }
}
appendStringInfoChar(str, '\n');
@@ -709,7 +707,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->initPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
- SubPlan *sp = (SubPlan *) sps->xprstate.expr;
+ SubPlan *sp = (SubPlan *) sps->xprstate.expr;
es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@@ -807,7 +805,7 @@ explain_outNode(StringInfo str,
foreach(lst, planstate->subPlan)
{
SubPlanState *sps = (SubPlanState *) lfirst(lst);
- SubPlan *sp = (SubPlan *) sps->xprstate.expr;
+ SubPlan *sp = (SubPlan *) sps->xprstate.expr;
es->rtable = sp->rtable;
for (i = 0; i < indent; i++)
@@ -865,7 +863,7 @@ show_scan_qual(List *qual, bool is_or_qual, const char *qlabel,
*/
if (outer_plan)
{
- Relids varnos = pull_varnos(node);
+ Relids varnos = pull_varnos(node);
if (bms_is_member(OUTER, varnos))
outercontext = deparse_context_for_subplan("outer",
@@ -1037,9 +1035,7 @@ make_ors_ands_explicit(List *orclauses)
FastListInit(&args);
foreach(orptr, orclauses)
- {
FastAppend(&args, make_ands_explicit(lfirst(orptr)));
- }
return (Node *) make_orclause(FastListValue(&args));
}
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 7a6a3775d6..181f52e114 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.31 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/functioncmds.c,v 1.32 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
@@ -80,8 +80,8 @@ compute_return_type(TypeName *returnType, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot return shell type %s",
- TypeNameToString(returnType))));
+ errmsg("SQL function cannot return shell type %s",
+ TypeNameToString(returnType))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -147,8 +147,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (parameterCount >= FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("functions cannot have more than %d arguments",
- FUNC_MAX_ARGS)));
+ errmsg("functions cannot have more than %d arguments",
+ FUNC_MAX_ARGS)));
toid = LookupTypeName(t);
if (OidIsValid(toid))
@@ -159,8 +159,8 @@ compute_parameter_types(List *argTypes, Oid languageOid,
if (languageOid == SQLlanguageId)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("SQL function cannot accept shell type %s",
- TypeNameToString(t))));
+ errmsg("SQL function cannot accept shell type %s",
+ TypeNameToString(t))));
else
ereport(NOTICE,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
@@ -330,8 +330,8 @@ compute_attributes_with_style(List *parameters, bool *isStrict_p, char *volatili
else
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unrecognized function attribute \"%s\" ignored",
- param->defname)));
+ errmsg("unrecognized function attribute \"%s\" ignored",
+ param->defname)));
}
}
@@ -558,7 +558,7 @@ RemoveFunction(RemoveFuncStmt *stmt)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(functionName)),
- errhint("Use DROP AGGREGATE to drop aggregate functions.")));
+ errhint("Use DROP AGGREGATE to drop aggregate functions.")));
if (((Form_pg_proc) GETSTRUCT(tup))->prolang == INTERNALlanguageId)
{
@@ -664,7 +664,7 @@ RenameFunction(List *name, List *argtypes, const char *newname)
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
- errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
+ errhint("Use ALTER AGGREGATE to rename aggregate functions.")));
namespaceOid = procForm->pronamespace;
@@ -728,7 +728,7 @@ SetFunctionReturnType(Oid funcOid, Oid newRetType)
elog(ERROR, "cache lookup failed for function %u", funcOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);
- if (procForm->prorettype != OPAQUEOID) /* caller messed up */
+ if (procForm->prorettype != OPAQUEOID) /* caller messed up */
elog(ERROR, "function %u doesn't return OPAQUE", funcOid);
/* okay to overwrite copied tuple */
@@ -815,7 +815,7 @@ CreateCast(CreateCastStmt *stmt)
if (sourcetypeid == targettypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("source data type and target data type are the same")));
+ errmsg("source data type and target data type are the same")));
/* No shells, no pseudo-types allowed */
if (!get_typisdefined(sourcetypeid))
@@ -878,10 +878,11 @@ CreateCast(CreateCastStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("return data type of cast function must match target data type")));
+
/*
* Restricting the volatility of a cast function may or may not be
* a good idea in the abstract, but it definitely breaks many old
- * user-defined types. Disable this check --- tgl 2/1/03
+ * user-defined types. Disable this check --- tgl 2/1/03
*/
#ifdef NOT_USED
if (procstruct->provolatile == PROVOLATILE_VOLATILE)
@@ -892,7 +893,7 @@ CreateCast(CreateCastStmt *stmt)
if (procstruct->proisagg)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must not be an aggregate function")));
+ errmsg("cast function must not be an aggregate function")));
if (procstruct->proretset)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -902,12 +903,12 @@ CreateCast(CreateCastStmt *stmt)
}
else
{
- int16 typ1len;
- int16 typ2len;
- bool typ1byval;
- bool typ2byval;
- char typ1align;
- char typ2align;
+ int16 typ1len;
+ int16 typ2len;
+ bool typ1byval;
+ bool typ2byval;
+ char typ1align;
+ char typ2align;
/* indicates binary coercibility */
funcid = InvalidOid;
@@ -924,7 +925,7 @@ CreateCast(CreateCastStmt *stmt)
/*
* Also, insist that the types match as to size, alignment, and
* pass-by-value attributes; this provides at least a crude check
- * that they have similar representations. A pair of types that
+ * that they have similar representations. A pair of types that
* fail this test should certainly not be equated.
*/
get_typlenbyvalalign(sourcetypeid, &typ1len, &typ1byval, &typ1align);
@@ -958,9 +959,9 @@ CreateCast(CreateCastStmt *stmt)
relation = heap_openr(CastRelationName, RowExclusiveLock);
/*
- * Check for duplicate. This is just to give a friendly error message,
- * the unique index would catch it anyway (so no need to sweat about
- * race conditions).
+ * Check for duplicate. This is just to give a friendly error
+ * message, the unique index would catch it anyway (so no need to
+ * sweat about race conditions).
*/
tuple = SearchSysCache(CASTSOURCETARGET,
ObjectIdGetDatum(sourcetypeid),
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 4cd66fd1b5..5e3cec954d 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.103 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/indexcmds.c,v 1.104 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -44,11 +44,11 @@
/* non-export function prototypes */
static void CheckPredicate(List *predList);
static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId);
+ List *attList,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId);
static Oid GetIndexOpClass(List *opclass, Oid attrType,
- char *accessMethodName, Oid accessMethodId);
+ char *accessMethodName, Oid accessMethodId);
static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId);
/*
@@ -157,8 +157,8 @@ DefineIndex(RangeVar *heapRelation,
if (unique && !accessMethodForm->amcanunique)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("access method \"%s\" does not support UNIQUE indexes",
- accessMethodName)));
+ errmsg("access method \"%s\" does not support UNIQUE indexes",
+ accessMethodName)));
if (numberOfAttributes > 1 && !accessMethodForm->amcanmulticol)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -192,16 +192,16 @@ DefineIndex(RangeVar *heapRelation,
}
/*
- * Check that all of the attributes in a primary key are marked
- * as not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
+ * Check that all of the attributes in a primary key are marked as not
+ * null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
if (primary)
{
- List *keys;
+ List *keys;
foreach(keys, attributeList)
{
- IndexElem *key = (IndexElem *) lfirst(keys);
+ IndexElem *key = (IndexElem *) lfirst(keys);
HeapTuple atttuple;
if (!key->name)
@@ -216,15 +216,16 @@ DefineIndex(RangeVar *heapRelation,
atttuple = SearchSysCacheAttName(relationId, key->name);
if (HeapTupleIsValid(atttuple))
{
- if (! ((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
+ if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
{
/*
* Try to make it NOT NULL.
*
* XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade
* to child tables? Currently, since the PRIMARY KEY
- * itself doesn't cascade, we don't cascade the notnull
- * constraint either; but this is pretty debatable.
+ * itself doesn't cascade, we don't cascade the
+ * notnull constraint either; but this is pretty
+ * debatable.
*/
AlterTableAlterColumnSetNotNull(relationId, false,
key->name);
@@ -236,8 +237,8 @@ DefineIndex(RangeVar *heapRelation,
/* This shouldn't happen if parser did its job ... */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- key->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ key->name)));
}
}
}
@@ -248,7 +249,7 @@ DefineIndex(RangeVar *heapRelation,
*/
indexInfo = makeNode(IndexInfo);
indexInfo->ii_NumIndexAttrs = numberOfAttributes;
- indexInfo->ii_Expressions = NIL; /* for now */
+ indexInfo->ii_Expressions = NIL; /* for now */
indexInfo->ii_ExpressionsState = NIL;
indexInfo->ii_Predicate = cnfPred;
indexInfo->ii_PredicateState = NIL;
@@ -308,7 +309,7 @@ CheckPredicate(List *predList)
if (contain_mutable_functions((Node *) predList))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("functions in index predicate must be marked IMMUTABLE")));
+ errmsg("functions in index predicate must be marked IMMUTABLE")));
}
static void
@@ -351,7 +352,7 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
else if (attribute->expr && IsA(attribute->expr, Var))
{
/* Tricky tricky, he wrote (column) ... treat as simple attr */
- Var *var = (Var *) attribute->expr;
+ Var *var = (Var *) attribute->expr;
indexInfo->ii_KeyAttrNumbers[attn] = var->varattno;
atttype = get_atttype(relId, var->varattno);
@@ -360,30 +361,30 @@ ComputeIndexAttrs(IndexInfo *indexInfo,
{
/* Index expression */
Assert(attribute->expr != NULL);
- indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
+ indexInfo->ii_KeyAttrNumbers[attn] = 0; /* marks expression */
indexInfo->ii_Expressions = lappend(indexInfo->ii_Expressions,
attribute->expr);
atttype = exprType(attribute->expr);
/*
- * We don't currently support generation of an actual query plan
- * for an index expression, only simple scalar expressions;
- * hence these restrictions.
+ * We don't currently support generation of an actual query
+ * plan for an index expression, only simple scalar
+ * expressions; hence these restrictions.
*/
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use sub-select in index expression")));
+ errmsg("cannot use sub-select in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate in index expression")));
+ errmsg("cannot use aggregate in index expression")));
/*
* A expression using mutable functions is probably wrong,
- * since if you aren't going to get the same result for the same
- * data every time, it's not clear what the index entries mean at
- * all.
+ * since if you aren't going to get the same result for the
+ * same data every time, it's not clear what the index entries
+ * mean at all.
*/
if (contain_mutable_functions(attribute->expr))
ereport(ERROR,
@@ -413,21 +414,20 @@ GetIndexOpClass(List *opclass, Oid attrType,
opInputType;
/*
- * Release 7.0 removed network_ops, timespan_ops, and
- * datetime_ops, so we ignore those opclass names
- * so the default *_ops is used. This can be
- * removed in some later release. bjm 2000/02/07
+ * Release 7.0 removed network_ops, timespan_ops, and datetime_ops, so
+ * we ignore those opclass names so the default *_ops is used. This
+ * can be removed in some later release. bjm 2000/02/07
*
- * Release 7.1 removes lztext_ops, so suppress that too
- * for a while. tgl 2000/07/30
+ * Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
+ * 2000/07/30
*
- * Release 7.2 renames timestamp_ops to timestamptz_ops,
- * so suppress that too for awhile. I'm starting to
- * think we need a better approach. tgl 2000/10/01
+ * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
+ * too for awhile. I'm starting to think we need a better approach.
+ * tgl 2000/10/01
*/
if (length(opclass) == 1)
{
- char *claname = strVal(lfirst(opclass));
+ char *claname = strVal(lfirst(opclass));
if (strcmp(claname, "network_ops") == 0 ||
strcmp(claname, "timespan_ops") == 0 ||
@@ -499,8 +499,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
if (!IsBinaryCoercible(attrType, opInputType))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("operator class \"%s\" does not accept data type %s",
- NameListToString(opclass), format_type_be(attrType))));
+ errmsg("operator class \"%s\" does not accept data type %s",
+ NameListToString(opclass), format_type_be(attrType))));
ReleaseSysCache(tuple);
@@ -607,7 +607,7 @@ ReindexIndex(RangeVar *indexRelation, bool force /* currently unused */ )
tuple = SearchSysCache(RELOID,
ObjectIdGetDatum(indOid),
0, 0, 0);
- if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
+ if (!HeapTupleIsValid(tuple)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", indOid);
if (((Form_pg_class) GETSTRUCT(tuple))->relkind != RELKIND_INDEX)
@@ -785,7 +785,8 @@ ReindexDatabase(const char *dbname, bool force, bool all)
for (i = 0; i < relcnt; i++)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
if (reindex_relation(relids[i], force))
ereport(NOTICE,
(errmsg("relation %u was reindexed", relids[i])));
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 60b041466f..52792bc31a 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.15 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/opclasscmds.c,v 1.16 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -103,13 +103,13 @@ DefineOpClass(CreateOpClassStmt *stmt)
* Currently, we require superuser privileges to create an opclass.
* This seems necessary because we have no way to validate that the
* offered set of operators and functions are consistent with the AM's
- * expectations. It would be nice to provide such a check someday,
- * if it can be done without solving the halting problem :-(
+ * expectations. It would be nice to provide such a check someday, if
+ * it can be done without solving the halting problem :-(
*/
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create an operator class")));
+ errmsg("must be superuser to create an operator class")));
/* Look up the datatype */
typeoid = typenameTypeId(stmt->datatype);
@@ -157,8 +157,8 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (operators[item->number - 1] != InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("operator number %d appears more than once",
- item->number)));
+ errmsg("operator number %d appears more than once",
+ item->number)));
if (item->args != NIL)
{
TypeName *typeName1 = (TypeName *) lfirst(item->args);
@@ -211,7 +211,7 @@ DefineOpClass(CreateOpClassStmt *stmt)
if (OidIsValid(storageoid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("storage type specified more than once")));
+ errmsg("storage type specified more than once")));
storageoid = typenameTypeId(item->storedtype);
break;
default:
@@ -532,7 +532,7 @@ RemoveOpClass(RemoveOpClassStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
opcID = HeapTupleGetOid(tuple);
@@ -681,7 +681,7 @@ RenameOpClass(List *name, const char *access_method, const char *newname)
tup = SearchSysCacheCopy(CLAOID,
ObjectIdGetDatum(opcOid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
+ if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for opclass %u", opcOid);
namespaceOid = ((Form_pg_opclass) GETSTRUCT(tup))->opcnamespace;
diff --git a/src/backend/commands/operatorcmds.c b/src/backend/commands/operatorcmds.c
index 6a4d479c12..ddc088fe2f 100644
--- a/src/backend/commands/operatorcmds.c
+++ b/src/backend/commands/operatorcmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.10 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/operatorcmds.c,v 1.11 2003/08/04 00:43:16 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -103,7 +103,7 @@ DefineOperator(List *names, List *parameters)
if (typeName1->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "rightarg") == 0)
{
@@ -111,7 +111,7 @@ DefineOperator(List *names, List *parameters)
if (typeName2->setof)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("setof type not allowed for operator argument")));
+ errmsg("setof type not allowed for operator argument")));
}
else if (strcasecmp(defel->defname, "procedure") == 0)
functionName = defGetQualifiedName(defel);
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index cf4a063871..aa5a5b9ea6 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -4,17 +4,17 @@
* Utility commands affecting portals (that is, SQL cursor commands)
*
* Note: see also tcop/pquery.c, which implements portal operations for
- * the FE/BE protocol. This module uses pquery.c for some operations.
+ * the FE/BE protocol. This module uses pquery.c for some operations.
* And both modules depend on utils/mmgr/portalmem.c, which controls
* storage management for portals (but doesn't run any queries in them).
- *
+ *
*
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.19 2003/08/01 13:53:36 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/portalcmds.c,v 1.20 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -36,7 +36,7 @@
* Execute SQL DECLARE CURSOR command.
*/
void
-PerformCursorOpen(DeclareCursorStmt *stmt)
+PerformCursorOpen(DeclareCursorStmt * stmt)
{
List *rewritten;
Query *query;
@@ -64,7 +64,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* The query has been through parse analysis, but not rewriting or
* planning as yet. Note that the grammar ensured we have a SELECT
- * query, so we are not expecting rule rewriting to do anything strange.
+ * query, so we are not expecting rule rewriting to do anything
+ * strange.
*/
rewritten = QueryRewrite((Query *) stmt->query);
if (length(rewritten) != 1 || !IsA(lfirst(rewritten), Query))
@@ -86,8 +87,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
plan = planner(query, true, stmt->options);
/*
- * Create a portal and copy the query and plan into its memory context.
- * (If a duplicate cursor name already exists, warn and drop it.)
+ * Create a portal and copy the query and plan into its memory
+ * context. (If a duplicate cursor name already exists, warn and drop
+ * it.)
*/
portal = CreatePortal(stmt->portalname, true, false);
@@ -98,7 +100,7 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
PortalDefineQuery(portal,
NULL, /* unfortunately don't have sourceText */
- "SELECT", /* cursor's query is always a SELECT */
+ "SELECT", /* cursor's query is always a SELECT */
makeList1(query),
makeList1(plan),
PortalGetHeapMemory(portal));
@@ -108,9 +110,9 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
/*
* Set up options for portal.
*
- * If the user didn't specify a SCROLL type, allow or disallow
- * scrolling based on whether it would require any additional
- * runtime overhead to do so.
+ * If the user didn't specify a SCROLL type, allow or disallow scrolling
+ * based on whether it would require any additional runtime overhead
+ * to do so.
*/
portal->cursorOptions = stmt->options;
if (!(portal->cursorOptions & (CURSOR_OPT_SCROLL | CURSOR_OPT_NO_SCROLL)))
@@ -129,8 +131,8 @@ PerformCursorOpen(DeclareCursorStmt *stmt)
Assert(portal->strategy == PORTAL_ONE_SELECT);
/*
- * We're done; the query won't actually be run until PerformPortalFetch
- * is called.
+ * We're done; the query won't actually be run until
+ * PerformPortalFetch is called.
*/
}
@@ -169,7 +171,7 @@ PerformPortalFetch(FetchStmt *stmt,
/* FIXME: shouldn't this be an ERROR? */
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
- errmsg("portal \"%s\" does not exist", stmt->portalname)));
+ errmsg("portal \"%s\" does not exist", stmt->portalname)));
if (completionTag)
strcpy(completionTag, stmt->ismove ? "MOVE 0" : "FETCH 0");
return;
@@ -219,7 +221,7 @@ PerformPortalClose(const char *name)
ereport(WARNING,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("portal \"%s\" does not exist", name),
- errfunction("PerformPortalClose"))); /* for ecpg */
+ errfunction("PerformPortalClose"))); /* for ecpg */
return;
}
@@ -249,7 +251,8 @@ PortalCleanup(Portal portal, bool isError)
/*
* Shut down executor, if still running. We skip this during error
* abort, since other mechanisms will take care of releasing executor
- * resources, and we can't be sure that ExecutorEnd itself wouldn't fail.
+ * resources, and we can't be sure that ExecutorEnd itself wouldn't
+ * fail.
*/
queryDesc = PortalGetQueryDesc(portal);
if (queryDesc)
@@ -271,14 +274,14 @@ PortalCleanup(Portal portal, bool isError)
void
PersistHoldablePortal(Portal portal)
{
- QueryDesc *queryDesc = PortalGetQueryDesc(portal);
+ QueryDesc *queryDesc = PortalGetQueryDesc(portal);
MemoryContext savePortalContext;
MemoryContext saveQueryContext;
MemoryContext oldcxt;
/*
- * If we're preserving a holdable portal, we had better be
- * inside the transaction that originally created it.
+ * If we're preserving a holdable portal, we had better be inside the
+ * transaction that originally created it.
*/
Assert(portal->createXact == GetCurrentTransactionId());
Assert(queryDesc != NULL);
@@ -321,9 +324,8 @@ PersistHoldablePortal(Portal portal)
MemoryContextSwitchTo(PortalContext);
/*
- * Rewind the executor: we need to store the entire result set in
- * the tuplestore, so that subsequent backward FETCHs can be
- * processed.
+ * Rewind the executor: we need to store the entire result set in the
+ * tuplestore, so that subsequent backward FETCHs can be processed.
*/
ExecutorRewind(queryDesc);
@@ -351,17 +353,17 @@ PersistHoldablePortal(Portal portal)
/*
* Reset the position in the result set: ideally, this could be
* implemented by just skipping straight to the tuple # that we need
- * to be at, but the tuplestore API doesn't support that. So we
- * start at the beginning of the tuplestore and iterate through it
- * until we reach where we need to be. FIXME someday?
+ * to be at, but the tuplestore API doesn't support that. So we start
+ * at the beginning of the tuplestore and iterate through it until we
+ * reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);
if (!portal->atEnd)
{
- long store_pos;
+ long store_pos;
- if (portal->posOverflow) /* oops, cannot trust portalPos */
+ if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not reposition held cursor")));
@@ -370,8 +372,8 @@ PersistHoldablePortal(Portal portal)
for (store_pos = 0; store_pos < portal->portalPos; store_pos++)
{
- HeapTuple tup;
- bool should_free;
+ HeapTuple tup;
+ bool should_free;
tup = tuplestore_gettuple(portal->holdStore, true,
&should_free);
@@ -389,8 +391,8 @@ PersistHoldablePortal(Portal portal)
/*
* We can now release any subsidiary memory of the portal's heap
* context; we'll never use it again. The executor already dropped
- * its context, but this will clean up anything that glommed onto
- * the portal's heap via PortalContext.
+ * its context, but this will clean up anything that glommed onto the
+ * portal's heap via PortalContext.
*/
MemoryContextDeleteChildren(PortalGetHeapMemory(portal));
}
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index cd58d7fc7b..d0fabd1ad3 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -10,7 +10,7 @@
* Copyright (c) 2002-2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.21 2003/07/28 00:09:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/prepare.c,v 1.22 2003/08/04 00:43:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -39,7 +39,7 @@ static HTAB *prepared_queries = NULL;
static void InitQueryHashTable(void);
static ParamListInfo EvaluateParams(EState *estate,
- List *params, List *argtypes);
+ List *params, List *argtypes);
/*
* Implements the 'PREPARE' utility statement.
@@ -90,12 +90,12 @@ PrepareQuery(PrepareStmt *stmt)
/* Rewrite the query. The result could be 0, 1, or many queries. */
query_list = QueryRewrite(stmt->query);
- /* Generate plans for queries. Snapshot is already set. */
+ /* Generate plans for queries. Snapshot is already set. */
plan_list = pg_plan_queries(query_list, false);
/* Save the results. */
StorePreparedStatement(stmt->name,
- NULL, /* text form not available */
+ NULL, /* text form not available */
commandTag,
query_list,
plan_list,
@@ -131,8 +131,8 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it
- * till end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till
+ * end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, stmt->params, entry->argtype_list);
@@ -144,15 +144,15 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
portal = CreateNewPortal();
/*
- * For CREATE TABLE / AS EXECUTE, make a copy of the stored query
- * so that we can modify its destination (yech, but this has
- * always been ugly). For regular EXECUTE we can just use the
- * stored query where it sits, since the executor is read-only.
+ * For CREATE TABLE / AS EXECUTE, make a copy of the stored query so
+ * that we can modify its destination (yech, but this has always been
+ * ugly). For regular EXECUTE we can just use the stored query where
+ * it sits, since the executor is read-only.
*/
if (stmt->into)
{
MemoryContext oldContext;
- Query *query;
+ Query *query;
oldContext = MemoryContextSwitchTo(PortalGetHeapMemory(portal));
@@ -208,11 +208,11 @@ ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest)
static ParamListInfo
EvaluateParams(EState *estate, List *params, List *argtypes)
{
- int nargs = length(argtypes);
- ParamListInfo paramLI;
- List *exprstates;
- List *l;
- int i = 0;
+ int nargs = length(argtypes);
+ ParamListInfo paramLI;
+ List *exprstates;
+ List *l;
+ int i = 0;
/* Parser should have caught this error, but check for safety */
if (length(params) != nargs)
@@ -229,7 +229,7 @@ EvaluateParams(EState *estate, List *params, List *argtypes)
bool isNull;
paramLI[i].value = ExecEvalExprSwitchContext(n,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull,
NULL);
paramLI[i].kind = PARAM_NUM;
@@ -273,7 +273,7 @@ InitQueryHashTable(void)
* to the hash entry, so the caller can dispose of their copy.
*
* Exception: commandTag is presumed to be a pointer to a constant string,
- * or possibly NULL, so it need not be copied. Note that commandTag should
+ * or possibly NULL, so it need not be copied. Note that commandTag should
* be NULL only if the original query (before rewriting) was empty.
*/
void
@@ -367,9 +367,9 @@ FetchPreparedStatement(const char *stmt_name, bool throwError)
if (prepared_queries)
{
/*
- * We can't just use the statement name as supplied by the user: the
- * hash package is picky enough that it needs to be NULL-padded out to
- * the appropriate length to work correctly.
+ * We can't just use the statement name as supplied by the user:
+ * the hash package is picky enough that it needs to be
+ * NULL-padded out to the appropriate length to work correctly.
*/
MemSet(key, 0, sizeof(key));
strncpy(key, stmt_name, sizeof(key));
@@ -412,9 +412,9 @@ FetchPreparedStatementParams(const char *stmt_name)
* Note: the result is created or copied into current memory context.
*/
TupleDesc
-FetchPreparedStatementResultDesc(PreparedStatement *stmt)
+FetchPreparedStatementResultDesc(PreparedStatement * stmt)
{
- Query *query;
+ Query *query;
switch (ChoosePortalStrategy(stmt->query_list))
{
@@ -476,7 +476,7 @@ DropPreparedStatement(const char *stmt_name, bool showError)
void
ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
{
- ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
+ ExecuteStmt *execstmt = (ExecuteStmt *) stmt->query->utilityStmt;
PreparedStatement *entry;
List *l,
*query_list,
@@ -499,8 +499,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (entry->argtype_list != NIL)
{
/*
- * Need an EState to evaluate parameters; must not delete it
- * till end of query, in case parameters are pass-by-reference.
+ * Need an EState to evaluate parameters; must not delete it till
+ * end of query, in case parameters are pass-by-reference.
*/
estate = CreateExecutorState();
paramLI = EvaluateParams(estate, execstmt->params,
@@ -510,8 +510,8 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
/* Explain each query */
foreach(l, query_list)
{
- Query *query = (Query *) lfirst(l);
- Plan *plan = (Plan *) lfirst(plan_list);
+ Query *query = (Query *) lfirst(l);
+ Plan *plan = (Plan *) lfirst(plan_list);
bool is_last_query;
plan_list = lnext(plan_list);
@@ -533,7 +533,7 @@ ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate)
if (query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("prepared statement is not a SELECT")));
+ errmsg("prepared statement is not a SELECT")));
/* Copy the query so we can modify it */
query = copyObject(query);
diff --git a/src/backend/commands/proclang.c b/src/backend/commands/proclang.c
index 69000b29bc..b0a4702a71 100644
--- a/src/backend/commands/proclang.c
+++ b/src/backend/commands/proclang.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.47 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/proclang.c,v 1.48 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -60,7 +60,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to create procedural language")));
+ errmsg("must be superuser to create procedural language")));
/*
* Translate the language name and check that this language doesn't
@@ -85,7 +85,7 @@ CreateProceduralLanguage(CreatePLangStmt *stmt)
if (funcrettype != LANGUAGE_HANDLEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a handler function declared OPAQUE, change it to
* LANGUAGE_HANDLER.
*/
@@ -183,7 +183,7 @@ DropProceduralLanguage(DropPLangStmt *stmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to drop procedural language")));
+ errmsg("must be superuser to drop procedural language")));
/*
* Translate the language name, check that this language exist and is
@@ -225,7 +225,7 @@ DropProceduralLanguageById(Oid langOid)
langTup = SearchSysCache(LANGOID,
ObjectIdGetDatum(langOid),
0, 0, 0);
- if (!HeapTupleIsValid(langTup)) /* should not happen */
+ if (!HeapTupleIsValid(langTup)) /* should not happen */
elog(ERROR, "cache lookup failed for language %u", langOid);
simple_heap_delete(rel, &langTup->t_self);
@@ -266,7 +266,7 @@ RenameLanguage(const char *oldname, const char *newname)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to rename procedural language")));
+ errmsg("must be superuser to rename procedural language")));
/* rename */
namestrcpy(&(((Form_pg_language) GETSTRUCT(tup))->lanname), newname);
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 5ad81634f4..4eb285daa3 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.14 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/schemacmds.c,v 1.15 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,7 +98,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", schemaName),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* Create the schema's namespace */
namespaceId = NamespaceCreate(schemaName, owner_userid);
@@ -215,7 +215,7 @@ RemoveSchemaById(Oid schemaOid)
tup = SearchSysCache(NAMESPACEOID,
ObjectIdGetDatum(schemaOid),
0, 0, 0);
- if (!HeapTupleIsValid(tup)) /* should not happen */
+ if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for namespace %u", schemaOid);
simple_heap_delete(relation, &tup->t_self);
@@ -248,9 +248,9 @@ RenameSchema(const char *oldname, const char *newname)
/* make sure the new name doesn't exist */
if (HeapTupleIsValid(
- SearchSysCache(NAMESPACENAME,
- CStringGetDatum(newname),
- 0, 0, 0)))
+ SearchSysCache(NAMESPACENAME,
+ CStringGetDatum(newname),
+ 0, 0, 0)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_SCHEMA),
errmsg("schema \"%s\" already exists", newname)));
@@ -270,7 +270,7 @@ RenameSchema(const char *oldname, const char *newname)
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system schemas.")));
+ errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/* rename */
namestrcpy(&(((Form_pg_namespace) GETSTRUCT(tup))->nspname), newname);
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 7ce7810fbc..01544a015b 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.99 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.100 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,7 +68,7 @@ static SeqTable seqtab = NULL; /* Head of list of SeqTable items */
static void init_sequence(RangeVar *relation,
- SeqTable *p_elm, Relation *p_rel);
+ SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence read_info(SeqTable elm, Relation rel, Buffer *buf);
static void init_params(List *options, Form_pg_sequence new);
static void do_setval(RangeVar *sequence, int64 next, bool iscalled);
@@ -97,10 +97,10 @@ DefineSequence(CreateSeqStmt *seq)
/* Values are NULL (or false) by default */
new.last_value = 0;
new.increment_by = 0;
- new.max_value = 0;
+ new.max_value = 0;
new.min_value = 0;
new.cache_value = 0;
- new.is_cycled = false;
+ new.is_cycled = false;
/* Check and set values */
init_params(seq->options, &new);
@@ -299,10 +299,10 @@ DefineSequence(CreateSeqStmt *seq)
/*
* AlterSequence
*
- * Modify the defition of a sequence relation
+ * Modify the defition of a sequence relation
*/
void
-AlterSequence(AlterSeqStmt *stmt)
+AlterSequence(AlterSeqStmt * stmt)
{
SeqTable elm;
Relation seqrel;
@@ -324,7 +324,7 @@ AlterSequence(AlterSeqStmt *stmt)
page = BufferGetPage(buf);
new.increment_by = seq->increment_by;
- new.max_value = seq->max_value;
+ new.max_value = seq->max_value;
new.min_value = seq->min_value;
new.cache_value = seq->cache_value;
new.is_cycled = seq->is_cycled;
@@ -346,9 +346,9 @@ AlterSequence(AlterSeqStmt *stmt)
}
/* save info in local cache */
- elm->last = new.last_value; /* last returned number */
- elm->cached = new.last_value; /* last cached number (forget cached
- * values) */
+ elm->last = new.last_value; /* last returned number */
+ elm->cached = new.last_value; /* last cached number (forget
+ * cached values) */
START_CRIT_SECTION();
@@ -494,9 +494,9 @@ nextval(PG_FUNCTION_ARGS)
snprintf(buf, sizeof(buf), INT64_FORMAT, maxv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("%s.nextval: reached MAXVALUE (%s)",
- sequence->relname, buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("%s.nextval: reached MAXVALUE (%s)",
+ sequence->relname, buf)));
}
next = minv;
}
@@ -517,9 +517,9 @@ nextval(PG_FUNCTION_ARGS)
snprintf(buf, sizeof(buf), INT64_FORMAT, minv);
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("%s.nextval: reached MINVALUE (%s)",
- sequence->relname, buf)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("%s.nextval: reached MINVALUE (%s)",
+ sequence->relname, buf)));
}
next = maxv;
}
@@ -895,9 +895,9 @@ init_params(List *options, Form_pg_sequence new)
errmsg("conflicting or redundant options")));
increment_by = defel;
}
+
/*
- * start is for a new sequence
- * restart is for alter
+ * start is for a new sequence restart is for alter
*/
else if (strcmp(defel->defname, "start") == 0 ||
strcmp(defel->defname, "restart") == 0)
@@ -963,9 +963,9 @@ init_params(List *options, Form_pg_sequence new)
|| (max_value != (DefElem *) NULL && !max_value->arg))
{
if (new->increment_by > 0)
- new->max_value = SEQ_MAXVALUE; /* ascending seq */
+ new->max_value = SEQ_MAXVALUE; /* ascending seq */
else
- new->max_value = -1; /* descending seq */
+ new->max_value = -1; /* descending seq */
}
else if (max_value != (DefElem *) NULL)
new->max_value = defGetInt64(max_value);
@@ -975,9 +975,9 @@ init_params(List *options, Form_pg_sequence new)
|| (min_value != (DefElem *) NULL && !min_value->arg))
{
if (new->increment_by > 0)
- new->min_value = 1; /* ascending seq */
+ new->min_value = 1; /* ascending seq */
else
- new->min_value = SEQ_MINVALUE; /* descending seq */
+ new->min_value = SEQ_MINVALUE; /* descending seq */
}
else if (min_value != (DefElem *) NULL)
new->min_value = defGetInt64(min_value);
@@ -996,7 +996,7 @@ init_params(List *options, Form_pg_sequence new)
}
/* START WITH */
- if (new->last_value == 0 && last_value == (DefElem *) NULL)
+ if (new->last_value == 0 && last_value == (DefElem *) NULL)
{
if (new->increment_by > 0)
new->last_value = new->min_value; /* ascending seq */
@@ -1015,8 +1015,8 @@ init_params(List *options, Form_pg_sequence new)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->min_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be less than MINVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be less than MINVALUE (%s)",
+ bufs, bufm)));
}
if (new->last_value > new->max_value)
{
@@ -1027,8 +1027,8 @@ init_params(List *options, Form_pg_sequence new)
snprintf(bufm, sizeof(bufm), INT64_FORMAT, new->max_value);
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
- bufs, bufm)));
+ errmsg("START value (%s) can't be greater than MAXVALUE (%s)",
+ bufs, bufm)));
}
/* CACHE */
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index b3108053d9..6e503fdac5 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.76 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/tablecmds.c,v 1.77 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -57,18 +57,19 @@
*/
typedef struct OnCommitItem
{
- Oid relid; /* relid of relation */
- OnCommitAction oncommit; /* what to do at end of xact */
+ Oid relid; /* relid of relation */
+ OnCommitAction oncommit; /* what to do at end of xact */
/*
* If this entry was created during this xact, it should be deleted at
* xact abort. Conversely, if this entry was deleted during this
* xact, it should be removed at xact commit. We leave deleted
- * entries in the list until commit so that we can roll back if needed.
+ * entries in the list until commit so that we can roll back if
+ * needed.
*/
bool created_in_cur_xact;
bool deleted_in_cur_xact;
-} OnCommitItem;
+} OnCommitItem;
static List *on_commits = NIL;
@@ -82,14 +83,14 @@ static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static bool needs_toast_table(Relation rel);
static void AlterTableAddCheckConstraint(Relation rel, Constraint *constr);
static void AlterTableAddForeignKeyConstraint(Relation rel,
- FkConstraint *fkconstraint);
+ FkConstraint *fkconstraint);
static int transformColumnNameList(Oid relId, List *colList,
- int16 *attnums, Oid *atttypids);
+ int16 *attnums, Oid *atttypids);
static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
- List **attnamelist,
- int16 *attnums, Oid *atttypids);
-static Oid transformFkeyCheckAttrs(Relation pkrel,
- int numattrs, int16 *attnums);
+ List **attnamelist,
+ int16 *attnums, Oid *atttypids);
+static Oid transformFkeyCheckAttrs(Relation pkrel,
+ int numattrs, int16 *attnums);
static void validateForeignKeyConstraint(FkConstraint *fkconstraint,
Relation rel, Relation pkrel);
static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
@@ -206,8 +207,8 @@ DefineRelation(CreateStmt *stmt, char relkind)
if (strcmp(check[i].ccname, cdef->name) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("duplicate CHECK constraint name \"%s\"",
- cdef->name)));
+ errmsg("duplicate CHECK constraint name \"%s\"",
+ cdef->name)));
}
check[ncheck].ccname = cdef->name;
}
@@ -399,7 +400,7 @@ TruncateRelation(const RangeVar *relation)
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temp tables of other processes")));
+ errmsg("cannot truncate temp tables of other processes")));
/*
* Don't allow truncate on tables which are referenced by foreign keys
@@ -435,8 +436,8 @@ TruncateRelation(const RangeVar *relation)
heap_close(fkeyRel, AccessShareLock);
/*
- * Do the real work using the same technique as cluster, but
- * without the data-copying portion
+ * Do the real work using the same technique as cluster, but without
+ * the data-copying portion
*/
rebuild_relation(rel, InvalidOid);
@@ -570,8 +571,8 @@ MergeAttributes(List *schema, List *supers, bool istemp,
if (!istemp && isTempNamespace(RelationGetNamespace(relation)))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("cannot inherit from temporary relation \"%s\"",
- parent->relname)));
+ errmsg("cannot inherit from temporary relation \"%s\"",
+ parent->relname)));
/*
* We should have an UNDER permission flag for this, but for now,
@@ -652,7 +653,7 @@ MergeAttributes(List *schema, List *supers, bool istemp,
attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- format_type_be(attribute->atttypid))));
+ format_type_be(attribute->atttypid))));
def->inhcount++;
/* Merge of NOT NULL constraints = OR 'em together */
def->is_not_null |= attribute->attnotnull;
@@ -803,11 +804,11 @@ MergeAttributes(List *schema, List *supers, bool istemp,
def->typename->typmod != newdef->typename->typmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("attribute \"%s\" has a type conflict",
- attributeName),
+ errmsg("attribute \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
- TypeNameToString(newdef->typename))));
+ TypeNameToString(newdef->typename))));
/* Mark the column as locally defined */
def->is_local = true;
/* Merge of NOT NULL constraints = OR 'em together */
@@ -1230,8 +1231,8 @@ renameatt(Oid myrelid,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" already exists",
- newattname, RelationGetRelationName(targetrelation))));
+ errmsg("attribute \"%s\" of relation \"%s\" already exists",
+ newattname, RelationGetRelationName(targetrelation))));
namestrcpy(&(attform->attname), newattname);
@@ -1257,7 +1258,7 @@ renameatt(Oid myrelid,
/*
* Scan through index columns to see if there's any simple index
- * entries for this attribute. We ignore expressional entries.
+ * entries for this attribute. We ignore expressional entries.
*/
indextup = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(indexoid),
@@ -1270,6 +1271,7 @@ renameatt(Oid myrelid,
{
if (attnum != indexform->indkey[i])
continue;
+
/*
* Found one, rename it.
*/
@@ -1279,6 +1281,7 @@ renameatt(Oid myrelid,
0, 0);
if (!HeapTupleIsValid(atttup))
continue; /* should we raise an error? */
+
/*
* Update the (copied) attribute tuple.
*/
@@ -1366,7 +1369,7 @@ renamerel(Oid myrelid, const char *newrelname)
reltup = SearchSysCacheCopy(RELOID,
PointerGetDatum(myrelid),
0, 0, 0);
- if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
+ if (!HeapTupleIsValid(reltup)) /* shouldn't happen */
elog(ERROR, "cache lookup failed for relation %u", myrelid);
if (get_relname_relid(newrelname, namespaceId) != InvalidOid)
@@ -1743,7 +1746,7 @@ AlterTableAddColumn(Oid myrelid,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- get_rel_name(childrelid), colDef->colname)));
+ get_rel_name(childrelid), colDef->colname)));
/*
* XXX if we supported NOT NULL or defaults, would need to do
@@ -1782,7 +1785,7 @@ AlterTableAddColumn(Oid myrelid,
if (find_inheritance_children(myrelid) != NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("attribute must be added to child tables too")));
+ errmsg("attribute must be added to child tables too")));
}
/*
@@ -1801,14 +1804,14 @@ AlterTableAddColumn(Oid myrelid,
if (colDef->raw_default || colDef->cooked_default)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("adding columns with defaults is not implemented"),
- errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));
+ errmsg("adding columns with defaults is not implemented"),
+ errhint("Add the column, then use ALTER TABLE SET DEFAULT.")));
if (colDef->is_not_null)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("adding NOT NULL columns is not implemented"),
- errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));
+ errhint("Add the column, then use ALTER TABLE SET NOT NULL.")));
pgclass = heap_openr(RelationRelationName, RowExclusiveLock);
@@ -1829,8 +1832,8 @@ AlterTableAddColumn(Oid myrelid,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" already exists",
- colDef->colname, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" already exists",
+ colDef->colname, RelationGetRelationName(rel))));
minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
maxatts = minattnum + 1;
@@ -2014,8 +2017,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2057,8 +2060,8 @@ AlterTableAlterColumnDropNotNull(Oid myrelid, bool recurse,
if (indexStruct->indkey[i] == attnum)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("attribute \"%s\" is in a primary key",
- colName)));
+ errmsg("attribute \"%s\" is in a primary key",
+ colName)));
}
}
@@ -2158,8 +2161,8 @@ AlterTableAlterColumnSetNotNull(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2286,8 +2289,8 @@ AlterTableAlterColumnDefault(Oid myrelid, bool recurse,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum < 0)
@@ -2450,8 +2453,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);
if (attrtuple->attnum < 0)
@@ -2476,8 +2479,8 @@ AlterTableAlterColumnFlags(Oid myrelid, bool recurse,
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("column datatype %s can only have storage \"plain\"",
- format_type_be(attrtuple->atttypid))));
+ errmsg("column datatype %s can only have storage \"plain\"",
+ format_type_be(attrtuple->atttypid))));
}
simple_heap_update(attrelation, &tuple->t_self, tuple);
@@ -2573,7 +2576,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
(errmsg("table \"%s\" is already WITHOUT OIDS",
RelationGetRelationName(rel))));
heap_close(class_rel, RowExclusiveLock);
- heap_close(rel, NoLock); /* close rel, but keep lock! */
+ heap_close(rel, NoLock); /* close rel, but keep lock! */
return;
}
@@ -2601,8 +2604,8 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
attrel = heap_open(RelOid_pg_attribute, RowExclusiveLock);
/*
- * Oids are being removed from the relation, so we need
- * to remove the oid pg_attribute record relating.
+ * Oids are being removed from the relation, so we need to remove
+ * the oid pg_attribute record relating.
*/
atttup = SearchSysCache(ATTNUM,
ObjectIdGetDatum(myrelid),
@@ -2621,7 +2624,7 @@ AlterTableAlterOids(Oid myrelid, bool recurse, bool setOid)
heap_close(class_rel, RowExclusiveLock);
- heap_close(rel, NoLock); /* close rel, but keep lock! */
+ heap_close(rel, NoLock); /* close rel, but keep lock! */
}
/*
@@ -2663,8 +2666,8 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("attribute \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("attribute \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Can't drop a system attribute */
/* XXX perhaps someday allow dropping OID? */
@@ -2712,7 +2715,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName, childrelid);
childatt = (Form_pg_attribute) GETSTRUCT(tuple);
- if (childatt->attinhcount <= 0) /* shouldn't happen */
+ if (childatt->attinhcount <= 0) /* shouldn't happen */
elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
childrelid, colName);
childatt->attinhcount--;
@@ -2731,9 +2734,9 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
}
/*
- * Propagate to children if desired. Unlike most other ALTER routines,
- * we have to do this one level of recursion at a time; we can't use
- * find_all_inheritors to do it in one pass.
+ * Propagate to children if desired. Unlike most other ALTER
+ * routines, we have to do this one level of recursion at a time; we
+ * can't use find_all_inheritors to do it in one pass.
*/
if (recurse)
{
@@ -2763,7 +2766,7 @@ AlterTableDropColumn(Oid myrelid, bool recurse, bool recursing,
colName, childrelid);
childatt = (Form_pg_attribute) GETSTRUCT(tuple);
- if (childatt->attinhcount <= 0) /* shouldn't happen */
+ if (childatt->attinhcount <= 0) /* shouldn't happen */
elog(ERROR, "relation %u has non-inherited attribute \"%s\"",
childrelid, colName);
@@ -2882,18 +2885,18 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
{
if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
RelationGetRelid(rel),
- RelationGetNamespace(rel),
+ RelationGetNamespace(rel),
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("constraint \"%s\" for relation \"%s\" already exists",
constr->name,
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
}
else
constr->name = GenerateConstraintName(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
- RelationGetNamespace(rel),
+ RelationGetRelid(rel),
+ RelationGetNamespace(rel),
&counter);
/*
@@ -2923,14 +2926,14 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
if (fkconstraint->constr_name)
{
if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
+ RelationGetRelid(rel),
RelationGetNamespace(rel),
fkconstraint->constr_name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("constraint \"%s\" for relation \"%s\" already exists",
fkconstraint->constr_name,
- RelationGetRelationName(rel))));
+ RelationGetRelationName(rel))));
}
else
fkconstraint->constr_name = GenerateConstraintName(CONSTRAINT_RELATION,
@@ -2959,7 +2962,7 @@ AlterTableAddConstraint(Oid myrelid, bool recurse,
/*
* Add a check constraint to a single table
*
- * Subroutine for AlterTableAddConstraint. Must already hold exclusive
+ * Subroutine for AlterTableAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity/permissions checks
* for it.
*/
@@ -2979,13 +2982,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
Node *expr;
/*
- * We need to make a parse state and range
- * table to allow us to do transformExpr()
+ * We need to make a parse state and range table to allow us to do
+ * transformExpr()
*/
pstate = make_parsestate(NULL);
rte = addRangeTableEntryForRelation(pstate,
RelationGetRelid(rel),
- makeAlias(RelationGetRelationName(rel), NIL),
+ makeAlias(RelationGetRelationName(rel), NIL),
false,
true);
addRTEtoQuery(pstate, rte, true, true);
@@ -3006,8 +3009,8 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
if (length(pstate->p_rtable) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("CHECK constraint may only reference relation \"%s\"",
- RelationGetRelationName(rel))));
+ errmsg("CHECK constraint may only reference relation \"%s\"",
+ RelationGetRelationName(rel))));
/*
* No subplans or aggregates, either...
@@ -3070,15 +3073,13 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
if (!successful)
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
- errmsg("CHECK constraint \"%s\" is violated at some row(s)",
- constr->name)));
+ errmsg("CHECK constraint \"%s\" is violated at some row(s)",
+ constr->name)));
/*
- * Call AddRelationRawConstraints to do
- * the real adding -- It duplicates some
- * of the above, but does not check the
- * validity of the constraint against
- * tuples already in the table.
+ * Call AddRelationRawConstraints to do the real adding -- It
+ * duplicates some of the above, but does not check the validity of
+ * the constraint against tuples already in the table.
*/
AddRelationRawConstraints(rel, NIL, makeList1(constr));
}
@@ -3086,7 +3087,7 @@ AlterTableAddCheckConstraint(Relation rel, Constraint *constr)
/*
* Add a foreign-key constraint to a single table
*
- * Subroutine for AlterTableAddConstraint. Must already hold exclusive
+ * Subroutine for AlterTableAddConstraint. Must already hold exclusive
* lock on the rel, and have done appropriate validity/permissions checks
* for it.
*/
@@ -3106,12 +3107,11 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
Oid constrOid;
/*
- * Grab an exclusive lock on the pk table, so that
- * someone doesn't delete rows out from under us.
- * (Although a lesser lock would do for that purpose,
- * we'll need exclusive lock anyway to add triggers to
- * the pk table; trying to start with a lesser lock
- * will just create a risk of deadlock.)
+ * Grab an exclusive lock on the pk table, so that someone doesn't
+ * delete rows out from under us. (Although a lesser lock would do for
+ * that purpose, we'll need exclusive lock anyway to add triggers to
+ * the pk table; trying to start with a lesser lock will just create a
+ * risk of deadlock.)
*/
pkrel = heap_openrv(fkconstraint->pktable, AccessExclusiveLock);
@@ -3152,8 +3152,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
errmsg("cannot reference temporary table from permanent table constraint")));
/*
- * Look up the referencing attributes to make sure they
- * exist, and record their attnums and type OIDs.
+ * Look up the referencing attributes to make sure they exist, and
+ * record their attnums and type OIDs.
*/
for (i = 0; i < INDEX_MAX_KEYS; i++)
{
@@ -3166,10 +3166,10 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
fkattnum, fktypoid);
/*
- * If the attribute list for the referenced table was omitted,
- * lookup the definition of the primary key and use it. Otherwise,
- * validate the supplied attribute list. In either case, discover
- * the index OID and the attnums and type OIDs of the attributes.
+ * If the attribute list for the referenced table was omitted, lookup
+ * the definition of the primary key and use it. Otherwise, validate
+ * the supplied attribute list. In either case, discover the index
+ * OID and the attnums and type OIDs of the attributes.
*/
if (fkconstraint->pk_attrs == NIL)
{
@@ -3208,8 +3208,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
}
/*
- * Check that the constraint is satisfied by existing
- * rows (we can skip this during table creation).
+ * Check that the constraint is satisfied by existing rows (we can
+ * skip this during table creation).
*/
if (!fkconstraint->skip_validation)
validateForeignKeyConstraint(fkconstraint, rel, pkrel);
@@ -3225,7 +3225,8 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
RelationGetRelid(rel),
fkattnum,
numfks,
- InvalidOid, /* not a domain constraint */
+ InvalidOid, /* not a domain
+ * constraint */
RelationGetRelid(pkrel),
pkattnum,
numpks,
@@ -3233,7 +3234,7 @@ AlterTableAddForeignKeyConstraint(Relation rel, FkConstraint *fkconstraint)
fkconstraint->fk_del_action,
fkconstraint->fk_matchtype,
indexOid,
- NULL, /* no check constraint */
+ NULL, /* no check constraint */
NULL,
NULL);
@@ -3276,8 +3277,8 @@ transformColumnNameList(Oid relId, List *colList,
if (attnum >= INDEX_MAX_KEYS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_COLUMNS),
- errmsg("cannot have more than %d keys in a foreign key",
- INDEX_MAX_KEYS)));
+ errmsg("cannot have more than %d keys in a foreign key",
+ INDEX_MAX_KEYS)));
attnums[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->attnum;
atttypids[attnum] = ((Form_pg_attribute) GETSTRUCT(atttuple))->atttypid;
ReleaseSysCache(atttuple);
@@ -3291,7 +3292,7 @@ transformColumnNameList(Oid relId, List *colList,
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Used when the column list in the REFERENCES specification
+ * for the pkrel. Used when the column list in the REFERENCES specification
* is omitted.
*/
static int
@@ -3339,12 +3340,12 @@ transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
if (indexStruct == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ errmsg("there is no PRIMARY KEY for referenced table \"%s\"",
+ RelationGetRelationName(pkrel))));
/*
- * Now build the list of PK attributes from the indkey definition
- * (we assume a primary key cannot have expressional elements)
+ * Now build the list of PK attributes from the indkey definition (we
+ * assume a primary key cannot have expressional elements)
*/
*attnamelist = NIL;
for (i = 0; i < indexStruct->indnatts; i++)
@@ -3389,7 +3390,8 @@ transformFkeyCheckAttrs(Relation pkrel,
{
HeapTuple indexTuple;
Form_pg_index indexStruct;
- int i, j;
+ int i,
+ j;
indexoid = lfirsto(indexoidscan);
indexTuple = SearchSysCache(INDEXRELID,
@@ -3453,7 +3455,7 @@ transformFkeyCheckAttrs(Relation pkrel,
ereport(ERROR,
(errcode(ERRCODE_INVALID_FOREIGN_KEY),
errmsg("there is no UNIQUE constraint matching given keys for referenced table \"%s\"",
- RelationGetRelationName(pkrel))));
+ RelationGetRelationName(pkrel))));
freeList(indexoidlist);
@@ -3969,17 +3971,17 @@ AlterTableOwner(Oid relationOid, int32 newOwnerSysId)
void
AlterTableClusterOn(Oid relOid, const char *indexName)
{
- Relation rel,
- pg_index;
- List *index;
- Oid indexOid;
- HeapTuple indexTuple;
- Form_pg_index indexForm;
-
+ Relation rel,
+ pg_index;
+ List *index;
+ Oid indexOid;
+ HeapTuple indexTuple;
+ Form_pg_index indexForm;
+
rel = heap_open(relOid, AccessExclusiveLock);
indexOid = get_relname_relid(indexName, rel->rd_rel->relnamespace);
-
+
if (!OidIsValid(indexOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -3994,36 +3996,37 @@ AlterTableClusterOn(Oid relOid, const char *indexName)
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
/*
- * If this is the same index the relation was previously
- * clustered on, no need to do anything.
+ * If this is the same index the relation was previously clustered on,
+ * no need to do anything.
*/
if (indexForm->indisclustered)
{
ereport(NOTICE,
- (errmsg("table \"%s\" is already being clustered on index \"%s\"",
- NameStr(rel->rd_rel->relname), indexName)));
+ (errmsg("table \"%s\" is already being clustered on index \"%s\"",
+ NameStr(rel->rd_rel->relname), indexName)));
ReleaseSysCache(indexTuple);
heap_close(rel, NoLock);
return;
}
pg_index = heap_openr(IndexRelationName, RowExclusiveLock);
-
+
/*
* Now check each index in the relation and set the bit where needed.
*/
- foreach (index, RelationGetIndexList(rel))
+ foreach(index, RelationGetIndexList(rel))
{
- HeapTuple idxtuple;
- Form_pg_index idxForm;
-
+ HeapTuple idxtuple;
+ Form_pg_index idxForm;
+
indexOid = lfirsto(index);
idxtuple = SearchSysCacheCopy(INDEXRELID,
- ObjectIdGetDatum(indexOid),
+ ObjectIdGetDatum(indexOid),
0, 0, 0);
if (!HeapTupleIsValid(idxtuple))
elog(ERROR, "cache lookup failed for index %u", indexOid);
idxForm = (Form_pg_index) GETSTRUCT(idxtuple);
+
/*
* Unset the bit if set. We know it's wrong because we checked
* this earlier.
@@ -4100,7 +4103,7 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared relations cannot be toasted after initdb")));
+ errmsg("shared relations cannot be toasted after initdb")));
/*
* Is it already toasted?
@@ -4331,12 +4334,12 @@ needs_toast_table(Relation rel)
void
register_on_commit_action(Oid relid, OnCommitAction action)
{
- OnCommitItem *oc;
+ OnCommitItem *oc;
MemoryContext oldcxt;
/*
- * We needn't bother registering the relation unless there is an ON COMMIT
- * action we need to take.
+ * We needn't bother registering the relation unless there is an ON
+ * COMMIT action we need to take.
*/
if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
return;
@@ -4366,7 +4369,7 @@ remove_on_commit_action(Oid relid)
foreach(l, on_commits)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
if (oc->relid == relid)
{
@@ -4389,7 +4392,7 @@ PreCommit_on_commit_actions(void)
foreach(l, on_commits)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
/* Ignore entry if already dropped in this xact */
if (oc->deleted_in_cur_xact)
@@ -4403,23 +4406,25 @@ PreCommit_on_commit_actions(void)
break;
case ONCOMMIT_DELETE_ROWS:
heap_truncate(oc->relid);
- CommandCounterIncrement(); /* XXX needed? */
+ CommandCounterIncrement(); /* XXX needed? */
break;
case ONCOMMIT_DROP:
- {
- ObjectAddress object;
+ {
+ ObjectAddress object;
- object.classId = RelOid_pg_class;
- object.objectId = oc->relid;
- object.objectSubId = 0;
- performDeletion(&object, DROP_CASCADE);
- /*
- * Note that table deletion will call remove_on_commit_action,
- * so the entry should get marked as deleted.
- */
- Assert(oc->deleted_in_cur_xact);
- break;
- }
+ object.classId = RelOid_pg_class;
+ object.objectId = oc->relid;
+ object.objectSubId = 0;
+ performDeletion(&object, DROP_CASCADE);
+
+ /*
+ * Note that table deletion will call
+ * remove_on_commit_action, so the entry should get
+ * marked as deleted.
+ */
+ Assert(oc->deleted_in_cur_xact);
+ break;
+ }
}
}
}
@@ -4442,7 +4447,7 @@ AtEOXact_on_commit_actions(bool isCommit)
l = on_commits;
while (l != NIL)
{
- OnCommitItem *oc = (OnCommitItem *) lfirst(l);
+ OnCommitItem *oc = (OnCommitItem *) lfirst(l);
if (isCommit ? oc->deleted_in_cur_xact :
oc->created_in_cur_xact)
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 6e5b38804f..d3e969c7e4 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.153 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.154 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,17 +41,17 @@
static void InsertTrigger(TriggerDesc *trigdesc, Trigger *trigger, int indx);
static HeapTuple GetTupleForTrigger(EState *estate,
- ResultRelInfo *relinfo,
- ItemPointer tid,
- CommandId cid,
- TupleTableSlot **newSlot);
+ ResultRelInfo *relinfo,
+ ItemPointer tid,
+ CommandId cid,
+ TupleTableSlot **newSlot);
static HeapTuple ExecCallTriggerFunc(TriggerData *trigdata,
FmgrInfo *finfo,
MemoryContext per_tuple_context);
static void DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event,
- bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
+ bool row_trigger, HeapTuple oldtup, HeapTuple newtup);
static void DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
+ Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
MemoryContext per_tuple_context);
@@ -97,18 +97,19 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
else if (stmt->isconstraint)
{
/*
- * If this trigger is a constraint (and a foreign key one)
- * then we really need a constrrelid. Since we don't have one,
- * we'll try to generate one from the argument information.
+ * If this trigger is a constraint (and a foreign key one) then we
+ * really need a constrrelid. Since we don't have one, we'll try
+ * to generate one from the argument information.
*
- * This is really just a workaround for a long-ago pg_dump bug
- * that omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
- * commands. We don't want to bomb out completely here if we can't
- * determine the correct relation, because that would prevent loading
- * the dump file. Instead, NOTICE here and ERROR in the trigger.
+ * This is really just a workaround for a long-ago pg_dump bug that
+ * omitted the FROM clause in dumped CREATE CONSTRAINT TRIGGER
+ * commands. We don't want to bomb out completely here if we
+ * can't determine the correct relation, because that would
+ * prevent loading the dump file. Instead, NOTICE here and ERROR
+ * in the trigger.
*/
- bool needconstrrelid = false;
- void *elem = NULL;
+ bool needconstrrelid = false;
+ void *elem = NULL;
if (strncmp(strVal(llast(stmt->funcname)), "RI_FKey_check_", 14) == 0)
{
@@ -265,8 +266,8 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (namestrcmp(&(pg_trigger->tgname), trigname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- trigname, stmt->relation->relname)));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ trigname, stmt->relation->relname)));
found++;
}
systable_endscan(tgscan);
@@ -280,7 +281,7 @@ CreateTrigger(CreateTrigStmt *stmt, bool forConstraint)
if (funcrettype != TRIGGEROID)
{
/*
- * We allow OPAQUE just so we can load old dump files. When we
+ * We allow OPAQUE just so we can load old dump files. When we
* see a trigger function declared OPAQUE, change it to TRIGGER.
*/
if (funcrettype == OPAQUEOID)
@@ -480,8 +481,8 @@ DropTrigger(Oid relid, const char *trigname, DropBehavior behavior)
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- trigname, get_rel_name(relid))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ trigname, get_rel_name(relid))));
if (!pg_class_ownercheck(relid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
@@ -576,7 +577,7 @@ RemoveTriggerById(Oid trigOid)
elog(ERROR, "cache lookup failed for relation %u", relid);
classForm = (Form_pg_class) GETSTRUCT(tuple);
- if (classForm->reltriggers == 0) /* should not happen */
+ if (classForm->reltriggers == 0) /* should not happen */
elog(ERROR, "relation \"%s\" has reltriggers = 0",
RelationGetRelationName(rel));
classForm->reltriggers--;
@@ -650,8 +651,8 @@ renametrig(Oid relid,
if (HeapTupleIsValid(tuple = systable_getnext(tgscan)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" already exists",
- newname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" already exists",
+ newname, RelationGetRelationName(targetrel))));
systable_endscan(tgscan);
/*
@@ -693,8 +694,8 @@ renametrig(Oid relid,
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for relation \"%s\" does not exist",
- oldname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for relation \"%s\" does not exist",
+ oldname, RelationGetRelationName(targetrel))));
}
systable_endscan(tgscan);
@@ -762,7 +763,7 @@ RelationBuildTriggers(Relation relation)
build->tgoid = HeapTupleGetOid(htup);
build->tgname = DatumGetCString(DirectFunctionCall1(nameout,
- NameGetDatum(&pg_trigger->tgname)));
+ NameGetDatum(&pg_trigger->tgname)));
build->tgfoid = pg_trigger->tgfoid;
build->tgtype = pg_trigger->tgtype;
build->tgenabled = pg_trigger->tgenabled;
@@ -927,8 +928,8 @@ CopyTriggerDesc(TriggerDesc *trigdesc)
trigger->tgname = pstrdup(trigger->tgname);
if (trigger->tgnargs > 0)
{
- char **newargs;
- int16 j;
+ char **newargs;
+ int16 j;
newargs = (char **) palloc(trigger->tgnargs * sizeof(char *));
for (j = 0; j < trigger->tgnargs; j++)
@@ -1101,7 +1102,7 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
return false;
return true;
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/*
* Call a trigger function.
@@ -1166,10 +1167,10 @@ ExecCallTriggerFunc(TriggerData *trigdata,
void
ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1190,10 +1191,10 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1209,7 +1210,7 @@ ExecBSInsertTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1242,8 +1243,8 @@ ExecBRInsertTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_INSERT |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
for (i = 0; i < ntrigs; i++)
@@ -1279,10 +1280,10 @@ ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1303,10 +1304,10 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1322,7 +1323,7 @@ ExecBSDeleteTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1361,8 +1362,8 @@ ExecBRDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_DELETE |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
LocTriggerData.tg_newtuple = NULL;
for (i = 0; i < ntrigs; i++)
@@ -1408,10 +1409,10 @@ ExecARDeleteTriggers(EState *estate, ResultRelInfo *relinfo,
void
ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
{
- TriggerDesc *trigdesc;
- int ntrigs;
- int *tgindx;
- int i;
+ TriggerDesc *trigdesc;
+ int ntrigs;
+ int *tgindx;
+ int i;
TriggerData LocTriggerData;
trigdesc = relinfo->ri_TrigDesc;
@@ -1432,10 +1433,10 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
- TRIGGER_EVENT_BEFORE;
- LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
- LocTriggerData.tg_newtuple = NULL;
- LocTriggerData.tg_trigtuple = NULL;
+ TRIGGER_EVENT_BEFORE;
+ LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
+ LocTriggerData.tg_newtuple = NULL;
+ LocTriggerData.tg_trigtuple = NULL;
for (i = 0; i < ntrigs; i++)
{
Trigger *trigger = &trigdesc->triggers[tgindx[i]];
@@ -1451,7 +1452,7 @@ ExecBSUpdateTriggers(EState *estate, ResultRelInfo *relinfo)
if (newtuple)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("BEFORE STATEMENT trigger cannot return a value")));
+ errmsg("BEFORE STATEMENT trigger cannot return a value")));
}
}
@@ -1498,8 +1499,8 @@ ExecBRUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = TRIGGER_EVENT_UPDATE |
- TRIGGER_EVENT_ROW |
- TRIGGER_EVENT_BEFORE;
+ TRIGGER_EVENT_ROW |
+ TRIGGER_EVENT_BEFORE;
LocTriggerData.tg_relation = relinfo->ri_RelationDesc;
for (i = 0; i < ntrigs; i++)
{
@@ -1639,19 +1640,20 @@ ltrmark:;
* ----------
*/
-typedef struct DeferredTriggersData {
- /* Internal data is held in a per-transaction memory context */
- MemoryContext deftrig_cxt;
- /* ALL DEFERRED or ALL IMMEDIATE */
- bool deftrig_all_isset;
- bool deftrig_all_isdeferred;
- /* Per trigger state */
- List *deftrig_trigstates;
- /* List of pending deferred triggers. Previous comment below */
- DeferredTriggerEvent deftrig_events;
- DeferredTriggerEvent deftrig_events_imm;
- DeferredTriggerEvent deftrig_event_tail;
-} DeferredTriggersData;
+typedef struct DeferredTriggersData
+{
+ /* Internal data is held in a per-transaction memory context */
+ MemoryContext deftrig_cxt;
+ /* ALL DEFERRED or ALL IMMEDIATE */
+ bool deftrig_all_isset;
+ bool deftrig_all_isdeferred;
+ /* Per trigger state */
+ List *deftrig_trigstates;
+ /* List of pending deferred triggers. Previous comment below */
+ DeferredTriggerEvent deftrig_events;
+ DeferredTriggerEvent deftrig_events_imm;
+ DeferredTriggerEvent deftrig_event_tail;
+} DeferredTriggersData;
/* ----------
* deftrig_events, deftrig_event_tail:
@@ -1661,8 +1663,8 @@ typedef struct DeferredTriggersData {
* Because this can grow pretty large, we don't use separate List nodes,
* but instead thread the list through the dte_next fields of the member
* nodes. Saves just a few bytes per entry, but that adds up.
- *
- * deftrig_events_imm holds the tail pointer as of the last
+ *
+ * deftrig_events_imm holds the tail pointer as of the last
* deferredTriggerInvokeEvents call; we can use this to avoid rescanning
* entries unnecessarily. It is NULL if deferredTriggerInvokeEvents
* hasn't run since the last state change.
@@ -1674,7 +1676,7 @@ typedef struct DeferredTriggersData {
typedef DeferredTriggersData *DeferredTriggers;
-static DeferredTriggers deferredTriggers;
+static DeferredTriggers deferredTriggers;
/* ----------
* deferredTriggerCheckState()
@@ -1783,7 +1785,7 @@ deferredTriggerAddEvent(DeferredTriggerEvent event)
*/
static void
DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
- Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
+ Relation rel, TriggerDesc *trigdesc, FmgrInfo *finfo,
MemoryContext per_tuple_context)
{
Oid tgoid = event->dte_item[itemno].dti_tgoid;
@@ -1817,7 +1819,7 @@ DeferredTriggerExecute(DeferredTriggerEvent event, int itemno,
*/
LocTriggerData.type = T_TriggerData;
LocTriggerData.tg_event = (event->dte_event & TRIGGER_EVENT_OPMASK) |
- (event->dte_event & TRIGGER_EVENT_ROW);
+ (event->dte_event & TRIGGER_EVENT_ROW);
LocTriggerData.tg_relation = rel;
LocTriggerData.tg_trigger = NULL;
@@ -1899,12 +1901,12 @@ deferredTriggerInvokeEvents(bool immediate_only)
* are going to discard the whole event queue on return anyway, so no
* need to bother with "retail" pfree's.
*
- * If immediate_only is true, we need only scan from where the end of
- * the queue was at the previous deferredTriggerInvokeEvents call;
- * any non-deferred events before that point are already fired.
- * (But if the deferral state changes, we must reset the saved position
- * to the beginning of the queue, so as to process all events once with
- * the new states. See DeferredTriggerSetState.)
+ * If immediate_only is true, we need only scan from where the end of the
+ * queue was at the previous deferredTriggerInvokeEvents call; any
+ * non-deferred events before that point are already fired. (But if
+ * the deferral state changes, we must reset the saved position to the
+ * beginning of the queue, so as to process all events once with the
+ * new states. See DeferredTriggerSetState.)
*/
/* Make a per-tuple memory context for trigger function calls */
@@ -1916,9 +1918,9 @@ deferredTriggerInvokeEvents(bool immediate_only)
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * If immediate_only is true, then the only events that could need firing
- * are those since deftrig_events_imm. (But if deftrig_events_imm is
- * NULL, we must scan the entire list.)
+ * If immediate_only is true, then the only events that could need
+ * firing are those since deftrig_events_imm. (But if
+ * deftrig_events_imm is NULL, we must scan the entire list.)
*/
if (immediate_only && deferredTriggers->deftrig_events_imm != NULL)
{
@@ -1984,17 +1986,18 @@ deferredTriggerInvokeEvents(bool immediate_only)
rel = heap_open(event->dte_relid, NoLock);
/*
- * Copy relation's trigger info so that we have a stable
- * copy no matter what the called triggers do.
+ * Copy relation's trigger info so that we have a
+ * stable copy no matter what the called triggers do.
*/
trigdesc = CopyTriggerDesc(rel->trigdesc);
- if (trigdesc == NULL) /* should not happen */
+ if (trigdesc == NULL) /* should not happen */
elog(ERROR, "relation %u has no triggers",
event->dte_relid);
/*
- * Allocate space to cache fmgr lookup info for triggers.
+ * Allocate space to cache fmgr lookup info for
+ * triggers.
*/
finfo = (FmgrInfo *)
palloc0(trigdesc->numtriggers * sizeof(FmgrInfo));
@@ -2089,21 +2092,23 @@ void
DeferredTriggerBeginXact(void)
{
/*
- * This will be changed to a special context when
- * the nested transactions project moves forward.
+ * This will be changed to a special context when the nested
+ * transactions project moves forward.
*/
MemoryContext cxt = TopTransactionContext;
+
deferredTriggers = (DeferredTriggers) MemoryContextAlloc(TopTransactionContext,
- sizeof(DeferredTriggersData));
+ sizeof(DeferredTriggersData));
/*
* Create the per transaction memory context
*/
deferredTriggers->deftrig_cxt = AllocSetContextCreate(cxt,
- "DeferredTriggerXact",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ "DeferredTriggerXact",
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
+
/*
* If unspecified, constraints default to IMMEDIATE, per SQL
*/
@@ -2174,7 +2179,7 @@ DeferredTriggerAbortXact(void)
* Ignore call if we aren't in a transaction.
*/
if (deferredTriggers == NULL)
- return;
+ return;
/*
* Forget everything we know about deferred triggers.
@@ -2255,7 +2260,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
if (strlen(cname) == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("unnamed constraints cannot be set explicitly")));
+ errmsg("unnamed constraints cannot be set explicitly")));
/*
* Setup to scan pg_trigger by tgconstrname ...
@@ -2304,7 +2309,7 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
if (!found)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" does not exist", cname)));
+ errmsg("constraint \"%s\" does not exist", cname)));
}
heap_close(tgrel, AccessShareLock);
@@ -2349,9 +2354,10 @@ DeferredTriggerSetState(ConstraintsSetStmt *stmt)
* CONSTRAINTS command applies retroactively. This happens "for free"
* since we have already made the necessary modifications to the
* constraints, and deferredTriggerEndQuery() is called by
- * finish_xact_command(). But we must reset deferredTriggerInvokeEvents'
- * tail pointer to make it rescan the entire list, in case some deferred
- * events are now immediately invokable.
+ * finish_xact_command(). But we must reset
+ * deferredTriggerInvokeEvents' tail pointer to make it rescan the
+ * entire list, in case some deferred events are now immediately
+ * invokable.
*/
deferredTriggers->deftrig_events_imm = NULL;
}
@@ -2416,7 +2422,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
*/
for (i = 0; i < ntriggers; i++)
{
- Trigger *trigger = &trigdesc->triggers[tgindx[i]];
+ Trigger *trigger = &trigdesc->triggers[tgindx[i]];
if (trigger->tgenabled)
n_enabled_triggers++;
@@ -2455,7 +2461,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
ev_item = &(new_event->dte_item[i]);
ev_item->dti_tgoid = trigger->tgoid;
- ev_item->dti_state =
+ ev_item->dti_state =
((trigger->tgdeferrable) ?
TRIGGER_DEFERRED_DEFERRABLE : 0) |
((trigger->tginitdeferred) ?
@@ -2464,9 +2470,7 @@ DeferredTriggerSaveEvent(ResultRelInfo *relinfo, int event, bool row_trigger,
if (row_trigger && (trigdesc->n_before_row[event] > 0))
ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
else if (!row_trigger && (trigdesc->n_before_statement[event] > 0))
- {
ev_item->dti_state |= TRIGGER_DEFERRED_HAS_BEFORE;
- }
}
MemoryContextSwitchTo(oldcxt);
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 275143c151..57bc7c5f71 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.40 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/typecmds.c,v 1.41 2003/08/04 00:43:17 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -66,11 +66,11 @@
/* result structure for get_rels_with_domain() */
typedef struct
{
- Relation rel; /* opened and locked relation */
- int natts; /* number of attributes of interest */
- int *atts; /* attribute numbers */
+ Relation rel; /* opened and locked relation */
+ int natts; /* number of attributes of interest */
+ int *atts; /* attribute numbers */
/* atts[] is of allocated length RelationGetNumberOfAttributes(rel) */
-} RelToCheck;
+} RelToCheck;
static Oid findTypeInputFunction(List *procname, Oid typeOid);
@@ -80,9 +80,9 @@ static Oid findTypeSendFunction(List *procname, Oid typeOid);
static List *get_rels_with_domain(Oid domainOid, LOCKMODE lockmode);
static void domainOwnerCheck(HeapTuple tup, TypeName *typename);
static char *domainAddConstraint(Oid domainOid, Oid domainNamespace,
- Oid baseTypeOid,
- int typMod, Constraint *constr,
- int *counter, char *domainName);
+ Oid baseTypeOid,
+ int typMod, Constraint *constr,
+ int *counter, char *domainName);
/*
@@ -105,7 +105,7 @@ DefineType(List *names, List *parameters)
bool byValue = false;
char delimiter = DEFAULT_TYPDELIM;
char alignment = 'i'; /* default alignment */
- char storage = 'p'; /* default TOAST storage method */
+ char storage = 'p'; /* default TOAST storage method */
Oid inputOid;
Oid outputOid;
Oid receiveOid = InvalidOid;
@@ -237,8 +237,8 @@ DefineType(List *names, List *parameters)
/*
* Look to see if type already exists (presumably as a shell; if not,
- * TypeCreate will complain). If it doesn't, create it as a shell,
- * so that the OID is known for use in the I/O function definitions.
+ * TypeCreate will complain). If it doesn't, create it as a shell, so
+ * that the OID is known for use in the I/O function definitions.
*/
typoid = GetSysCacheOid(TYPENAMENSP,
CStringGetDatum(typeName),
@@ -492,7 +492,7 @@ DefineDomain(CreateDomainStmt *stmt)
List *listptr;
Oid basetypeoid;
Oid domainoid;
- Form_pg_type baseType;
+ Form_pg_type baseType;
int counter = 0;
/* Convert list of names to a name and namespace */
@@ -508,10 +508,11 @@ DefineDomain(CreateDomainStmt *stmt)
/*
* Domainnames, unlike typenames don't need to account for the '_'
- * prefix. So they can be one character longer. (This test is presently
- * useless since the parser will have truncated the name to fit. But
- * leave it here since we may someday support arrays of domains, in
- * which case we'll be back to needing to enforce NAMEDATALEN-2.)
+ * prefix. So they can be one character longer. (This test is
+ * presently useless since the parser will have truncated the name to
+ * fit. But leave it here since we may someday support arrays of
+ * domains, in which case we'll be back to needing to enforce
+ * NAMEDATALEN-2.)
*/
if (strlen(domainName) > (NAMEDATALEN - 1))
ereport(ERROR,
@@ -581,8 +582,8 @@ DefineDomain(CreateDomainStmt *stmt)
basetypelem = baseType->typelem;
/*
- * Run through constraints manually to avoid the additional
- * processing conducted by DefineRelation() and friends.
+ * Run through constraints manually to avoid the additional processing
+ * conducted by DefineRelation() and friends.
*/
foreach(listptr, schema)
{
@@ -594,7 +595,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("FOREIGN KEY constraints not supported for domains")));
+ errmsg("FOREIGN KEY constraints not supported for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -606,6 +607,7 @@ DefineDomain(CreateDomainStmt *stmt)
switch (constr->contype)
{
case CONSTR_DEFAULT:
+
/*
* The inherited default value may be overridden by the
* user with the DEFAULT <expr> statement.
@@ -643,7 +645,7 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && !typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = true;
nullDefined = true;
break;
@@ -652,41 +654,42 @@ DefineDomain(CreateDomainStmt *stmt)
if (nullDefined && typNotNull)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints")));
+ errmsg("conflicting NULL/NOT NULL constraints")));
typNotNull = false;
nullDefined = true;
- break;
+ break;
+
+ case CONSTR_CHECK:
- case CONSTR_CHECK:
/*
- * Check constraints are handled after domain creation, as they
- * require the Oid of the domain
+ * Check constraints are handled after domain creation, as
+ * they require the Oid of the domain
*/
- break;
+ break;
/*
* All else are error cases
*/
- case CONSTR_UNIQUE:
- ereport(ERROR,
+ case CONSTR_UNIQUE:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("UNIQUE constraints not supported for domains")));
- break;
+ errmsg("UNIQUE constraints not supported for domains")));
+ break;
- case CONSTR_PRIMARY:
- ereport(ERROR,
+ case CONSTR_PRIMARY:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("PRIMARY KEY constraints not supported for domains")));
- break;
+ break;
- case CONSTR_ATTR_DEFERRABLE:
- case CONSTR_ATTR_NOT_DEFERRABLE:
- case CONSTR_ATTR_DEFERRED:
- case CONSTR_ATTR_IMMEDIATE:
- ereport(ERROR,
+ case CONSTR_ATTR_DEFERRABLE:
+ case CONSTR_ATTR_NOT_DEFERRABLE:
+ case CONSTR_ATTR_DEFERRED:
+ case CONSTR_ATTR_IMMEDIATE:
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("deferrability constraints not supported for domains")));
- break;
+ break;
default:
elog(ERROR, "unrecognized constraint subtype: %d",
@@ -715,15 +718,16 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeoid, /* base type ID */
defaultValue, /* default type value (text) */
defaultValueBin, /* default type value (binary) */
- byValue, /* passed by value */
- alignment, /* required alignment */
- storage, /* TOAST strategy */
- stmt->typename->typmod, /* typeMod value */
- typNDims, /* Array dimensions for base type */
- typNotNull); /* Type NOT NULL */
+ byValue, /* passed by value */
+ alignment, /* required alignment */
+ storage, /* TOAST strategy */
+ stmt->typename->typmod, /* typeMod value */
+ typNDims, /* Array dimensions for base type */
+ typNotNull); /* Type NOT NULL */
/*
- * Process constraints which refer to the domain ID returned by TypeCreate
+ * Process constraints which refer to the domain ID returned by
+ * TypeCreate
*/
foreach(listptr, schema)
{
@@ -733,16 +737,16 @@ DefineDomain(CreateDomainStmt *stmt)
switch (constr->contype)
{
- case CONSTR_CHECK:
+ case CONSTR_CHECK:
domainAddConstraint(domainoid, domainNamespace,
basetypeoid, stmt->typename->typmod,
constr, &counter, domainName);
- break;
+ break;
- /* Other constraint types were fully processed above */
+ /* Other constraint types were fully processed above */
default:
- break;
+ break;
}
}
@@ -834,8 +838,8 @@ findTypeInputFunction(List *procname, Oid typeOid)
* Input functions can take a single argument of type CSTRING, or
* three arguments (string, element OID, typmod).
*
- * For backwards compatibility we allow OPAQUE in place of CSTRING;
- * if we see this, we issue a NOTICE and fix up the pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of CSTRING; if we
+ * see this, we issue a NOTICE and fix up the pg_proc entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -874,9 +878,10 @@ findTypeInputFunction(List *procname, Oid typeOid)
(errmsg("changing argument type of function %s from OPAQUE to CSTRING",
NameListToString(procname))));
SetFunctionArgType(procOid, 0, CSTRINGOID);
+
/*
- * Need CommandCounterIncrement since DefineType will likely
- * try to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try
+ * to alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -905,8 +910,8 @@ findTypeOutputFunction(List *procname, Oid typeOid)
* arguments (data value, element OID).
*
* For backwards compatibility we allow OPAQUE in place of the actual
- * type name; if we see this, we issue a NOTICE and fix up the
- * pg_proc entry.
+ * type name; if we see this, we issue a NOTICE and fix up the pg_proc
+ * entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
@@ -940,12 +945,13 @@ findTypeOutputFunction(List *procname, Oid typeOid)
{
/* Found, but must complain and fix the pg_proc entry */
ereport(NOTICE,
- (errmsg("changing argument type of function %s from OPAQUE to %s",
- NameListToString(procname), format_type_be(typeOid))));
+ (errmsg("changing argument type of function %s from OPAQUE to %s",
+ NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);
+
/*
- * Need CommandCounterIncrement since DefineType will likely
- * try to alter the pg_proc tuple again.
+ * Need CommandCounterIncrement since DefineType will likely try
+ * to alter the pg_proc tuple again.
*/
CommandCounterIncrement();
@@ -1050,7 +1056,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
if (coldeflist == NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("composite type must have at least one attribute")));
+ errmsg("composite type must have at least one attribute")));
/*
* now create the parameters for keys/inheritance etc. All of them are
@@ -1072,7 +1078,7 @@ DefineCompositeType(const RangeVar *typevar, List *coldeflist)
/*
* AlterDomainDefault
*
- * Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
+ * Routine implementing ALTER DOMAIN SET/DROP DEFAULT statements.
*/
void
AlterDomainDefault(List *names, Node *defaultRaw)
@@ -1083,12 +1089,12 @@ AlterDomainDefault(List *names, Node *defaultRaw)
ParseState *pstate;
Relation rel;
char *defaultValue;
- Node *defaultExpr = NULL; /* NULL if no default specified */
+ Node *defaultExpr = NULL; /* NULL if no default specified */
Datum new_record[Natts_pg_type];
char new_record_nulls[Natts_pg_type];
char new_record_repl[Natts_pg_type];
HeapTuple newtuple;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@@ -1113,7 +1119,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Setup new tuple */
@@ -1129,9 +1135,10 @@ AlterDomainDefault(List *names, Node *defaultRaw)
{
/* Create a dummy ParseState for transformExpr */
pstate = make_parsestate(NULL);
+
/*
- * Cook the colDef->raw_expr into an expression. Note:
- * Name is strictly for error message
+ * Cook the colDef->raw_expr into an expression. Note: Name is
+ * strictly for error message
*/
defaultExpr = cookDefault(pstate, defaultRaw,
typTup->typbasetype,
@@ -1139,27 +1146,29 @@ AlterDomainDefault(List *names, Node *defaultRaw)
NameStr(typTup->typname));
/*
- * Expression must be stored as a nodeToString result, but
- * we also require a valid textual representation (mainly
- * to make life easier for pg_dump).
+ * Expression must be stored as a nodeToString result, but we also
+ * require a valid textual representation (mainly to make life
+ * easier for pg_dump).
*/
defaultValue = deparse_expression(defaultExpr,
- deparse_context_for(NameStr(typTup->typname),
- InvalidOid),
+ deparse_context_for(NameStr(typTup->typname),
+ InvalidOid),
false, false);
+
/*
* Form an updated tuple with the new default and write it back.
*/
new_record[Anum_pg_type_typdefaultbin - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(
- nodeToString(defaultExpr)));
+ CStringGetDatum(
+ nodeToString(defaultExpr)));
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
new_record[Anum_pg_type_typdefault - 1] = DirectFunctionCall1(textin,
- CStringGetDatum(defaultValue));
+ CStringGetDatum(defaultValue));
new_record_repl[Anum_pg_type_typdefault - 1] = 'r';
}
- else /* Default is NULL, drop it */
+ else
+/* Default is NULL, drop it */
{
new_record_nulls[Anum_pg_type_typdefaultbin - 1] = 'n';
new_record_repl[Anum_pg_type_typdefaultbin - 1] = 'r';
@@ -1168,7 +1177,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
}
newtuple = heap_modifytuple(tup, rel,
- new_record, new_record_nulls, new_record_repl);
+ new_record, new_record_nulls, new_record_repl);
simple_heap_update(rel, &tup->t_self, newtuple);
@@ -1178,7 +1187,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
GenerateTypeDependencies(typTup->typnamespace,
domainoid,
typTup->typrelid,
- 0, /* relation kind is n/a */
+ 0, /* relation kind is n/a */
typTup->typinput,
typTup->typoutput,
typTup->typreceive,
@@ -1186,7 +1195,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
typTup->typelem,
typTup->typbasetype,
defaultExpr,
- true); /* Rebuild is true */
+ true); /* Rebuild is true */
/* Clean up */
heap_close(rel, NoLock);
@@ -1196,7 +1205,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
/*
* AlterDomainNotNull
*
- * Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
+ * Routine implementing ALTER DOMAIN SET/DROP NOT NULL statements.
*/
void
AlterDomainNotNull(List *names, bool notNull)
@@ -1205,7 +1214,7 @@ AlterDomainNotNull(List *names, bool notNull)
Oid domainoid;
Relation typrel;
HeapTuple tup;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
@@ -1231,7 +1240,7 @@ AlterDomainNotNull(List *names, bool notNull)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Is the domain already set to the desired constraint? */
@@ -1248,15 +1257,15 @@ AlterDomainNotNull(List *names, bool notNull)
/* Adding a NOT NULL constraint requires checking existing columns */
if (notNull)
{
- List *rels;
- List *rt;
+ List *rels;
+ List *rt;
/* Fetch relation list with attributes based on this domain */
/* ShareLock is sufficient to prevent concurrent data changes */
rels = get_rels_with_domain(domainoid, ShareLock);
- foreach (rt, rels)
+ foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@@ -1268,14 +1277,14 @@ AlterDomainNotNull(List *names, bool notNull)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- int i;
+ int i;
/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
- int attnum = rtc->atts[i];
- Datum d;
- bool isNull;
+ int attnum = rtc->atts[i];
+ Datum d;
+ bool isNull;
d = heap_getattr(tuple, attnum, tupdesc, &isNull);
@@ -1284,7 +1293,7 @@ AlterDomainNotNull(List *names, bool notNull)
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains NULL values",
RelationGetRelationName(testrel),
- NameStr(tupdesc->attrs[attnum - 1]->attname))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname))));
}
}
heap_endscan(scan);
@@ -1295,7 +1304,7 @@ AlterDomainNotNull(List *names, bool notNull)
}
/*
- * Okay to update pg_type row. We can scribble on typTup because it's
+ * Okay to update pg_type row. We can scribble on typTup because it's
* a copy.
*/
typTup->typnotnull = notNull;
@@ -1321,7 +1330,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
Oid domainoid;
HeapTuple tup;
Relation rel;
- Form_pg_type typTup;
+ Form_pg_type typTup;
Relation conrel;
SysScanDesc conscan;
ScanKeyData key[1];
@@ -1350,7 +1359,7 @@ AlterDomainDropConstraint(List *names, const char *constrName, DropBehavior beha
if (!HeapTupleIsValid(tup))
elog(ERROR, "cache lookup failed for type %u", domainoid);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Grab an appropriate lock on the pg_constraint relation */
@@ -1403,15 +1412,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
Oid domainoid;
Relation typrel;
HeapTuple tup;
- Form_pg_type typTup;
- List *rels;
- List *rt;
- EState *estate;
+ Form_pg_type typTup;
+ List *rels;
+ List *rt;
+ EState *estate;
ExprContext *econtext;
- char *ccbin;
- Expr *expr;
- ExprState *exprstate;
- int counter = 0;
+ char *ccbin;
+ Expr *expr;
+ ExprState *exprstate;
+ int counter = 0;
Constraint *constr;
/* Make a TypeName so we can use standard type lookup machinery */
@@ -1438,14 +1447,14 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
elog(ERROR, "cache lookup failed for type %u", domainoid);
typTup = (Form_pg_type) GETSTRUCT(tup);
- /* Doesn't return if user isn't allowed to alter the domain */
+ /* Doesn't return if user isn't allowed to alter the domain */
domainOwnerCheck(tup, typename);
/* Check for unsupported constraint types */
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("FOREIGN KEY constraints not supported for domains")));
+ errmsg("FOREIGN KEY constraints not supported for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
@@ -1469,20 +1478,20 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
errmsg("use ALTER DOMAIN .. [ SET | DROP ] NOT NULL instead")));
break;
- case CONSTR_CHECK:
+ case CONSTR_CHECK:
/* processed below */
- break;
+ break;
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("UNIQUE constraints not supported for domains")));
+ errmsg("UNIQUE constraints not supported for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("PRIMARY KEY constraints not supported for domains")));
+ errmsg("PRIMARY KEY constraints not supported for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
@@ -1501,18 +1510,18 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
}
/*
- * Since all other constraint types throw errors, this must be
- * a check constraint. First, process the constraint expression
- * and add an entry to pg_constraint.
+ * Since all other constraint types throw errors, this must be a check
+ * constraint. First, process the constraint expression and add an
+ * entry to pg_constraint.
*/
ccbin = domainAddConstraint(HeapTupleGetOid(tup), typTup->typnamespace,
typTup->typbasetype, typTup->typtypmod,
- constr, &counter, NameStr(typTup->typname));
+ constr, &counter, NameStr(typTup->typname));
/*
- * Test all values stored in the attributes based on the domain
- * the constraint is being added to.
+ * Test all values stored in the attributes based on the domain the
+ * constraint is being added to.
*/
expr = (Expr *) stringToNode(ccbin);
@@ -1528,7 +1537,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
rels = get_rels_with_domain(domainoid, ShareLock);
- foreach (rt, rels)
+ foreach(rt, rels)
{
RelToCheck *rtc = (RelToCheck *) lfirst(rt);
Relation testrel = rtc->rel;
@@ -1540,15 +1549,15 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
scan = heap_beginscan(testrel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- int i;
+ int i;
/* Test attributes that are of the domain */
for (i = 0; i < rtc->natts; i++)
{
- int attnum = rtc->atts[i];
- Datum d;
- bool isNull;
- Datum conResult;
+ int attnum = rtc->atts[i];
+ Datum d;
+ bool isNull;
+ Datum conResult;
d = heap_getattr(tuple, attnum, tupdesc, &isNull);
@@ -1564,7 +1573,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("relation \"%s\" attribute \"%s\" contains values that violate the new constraint",
RelationGetRelationName(testrel),
- NameStr(tupdesc->attrs[attnum - 1]->attname))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname))));
}
ResetExprContext(econtext);
@@ -1610,7 +1619,7 @@ AlterDomainAddConstraint(List *names, Node *newConstraint)
static List *
get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
{
- List *result = NIL;
+ List *result = NIL;
Relation depRel;
ScanKeyData key[2];
SysScanDesc depScan;
@@ -1634,10 +1643,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
while (HeapTupleIsValid(depTup = systable_getnext(depScan)))
{
- Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
+ Form_pg_depend pg_depend = (Form_pg_depend) GETSTRUCT(depTup);
RelToCheck *rtc = NULL;
List *rellist;
- Form_pg_attribute pg_att;
+ Form_pg_attribute pg_att;
int ptr;
/* Ignore dependees that aren't user columns of tables */
@@ -1675,10 +1684,10 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
}
/*
- * Confirm column has not been dropped, and is of the expected type.
- * This defends against an ALTER DROP COLUMN occuring just before
- * we acquired lock ... but if the whole table were dropped, we'd
- * still have a problem.
+ * Confirm column has not been dropped, and is of the expected
+ * type. This defends against an ALTER DROP COLUMN occuring just
+ * before we acquired lock ... but if the whole table were
+ * dropped, we'd still have a problem.
*/
if (pg_depend->objsubid > RelationGetNumberOfAttributes(rtc->rel))
continue;
@@ -1687,16 +1696,16 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
continue;
/*
- * Okay, add column to result. We store the columns in column-number
- * order; this is just a hack to improve predictability of regression
- * test output ...
+ * Okay, add column to result. We store the columns in
+ * column-number order; this is just a hack to improve
+ * predictability of regression test output ...
*/
Assert(rtc->natts < RelationGetNumberOfAttributes(rtc->rel));
ptr = rtc->natts++;
- while (ptr > 0 && rtc->atts[ptr-1] > pg_depend->objsubid)
+ while (ptr > 0 && rtc->atts[ptr - 1] > pg_depend->objsubid)
{
- rtc->atts[ptr] = rtc->atts[ptr-1];
+ rtc->atts[ptr] = rtc->atts[ptr - 1];
ptr--;
}
rtc->atts[ptr] = pg_depend->objsubid;
@@ -1719,7 +1728,7 @@ get_rels_with_domain(Oid domainOid, LOCKMODE lockmode)
static void
domainOwnerCheck(HeapTuple tup, TypeName *typename)
{
- Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
+ Form_pg_type typTup = (Form_pg_type) GETSTRUCT(tup);
/* Check that this is actually a domain */
if (typTup->typtype != 'd')
@@ -1746,7 +1755,7 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
char *ccsrc;
char *ccbin;
ParseState *pstate;
- CoerceToDomainValue *domVal;
+ CoerceToDomainValue *domVal;
/*
* Assign or validate constraint name
@@ -1759,8 +1768,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
constr->name))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("constraint \"%s\" for domain \"%s\" already exists",
- constr->name, domainName)));
+ errmsg("constraint \"%s\" for domain \"%s\" already exists",
+ constr->name, domainName)));
}
else
constr->name = GenerateConstraintName(CONSTRAINT_DOMAIN,
@@ -1775,10 +1784,10 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Set up a CoerceToDomainValue to represent the occurrence of VALUE
- * in the expression. Note that it will appear to have the type of the
- * base type, not the domain. This seems correct since within the
- * check expression, we should not assume the input value can be considered
- * a member of the domain.
+ * in the expression. Note that it will appear to have the type of
+ * the base type, not the domain. This seems correct since within the
+ * check expression, we should not assume the input value can be
+ * considered a member of the domain.
*/
domVal = makeNode(CoerceToDomainValue);
domVal->typeId = baseTypeOid;
@@ -1841,13 +1850,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Store the constraint in pg_constraint
*/
- CreateConstraintEntry(constr->name, /* Constraint Name */
- domainNamespace, /* namespace */
+ CreateConstraintEntry(constr->name, /* Constraint Name */
+ domainNamespace, /* namespace */
CONSTRAINT_CHECK, /* Constraint Type */
false, /* Is Deferrable */
false, /* Is Deferred */
- InvalidOid, /* not a relation constraint */
- NULL,
+ InvalidOid, /* not a relation constraint */
+ NULL,
0,
domainOid, /* domain constraint */
InvalidOid, /* Foreign key fields */
@@ -1857,13 +1866,13 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
' ',
' ',
InvalidOid,
- expr, /* Tree form check constraint */
+ expr, /* Tree form check constraint */
ccbin, /* Binary form check constraint */
- ccsrc); /* Source form check constraint */
+ ccsrc); /* Source form check constraint */
/*
- * Return the compiled constraint expression so the calling routine can
- * perform any additional required tests.
+ * Return the compiled constraint expression so the calling routine
+ * can perform any additional required tests.
*/
return ccbin;
}
@@ -1893,7 +1902,7 @@ GetDomainConstraints(Oid typeOid)
Form_pg_type typTup;
ScanKeyData key[1];
SysScanDesc scan;
-
+
tup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(typeOid),
0, 0, 0);
@@ -1915,17 +1924,20 @@ GetDomainConstraints(Oid typeOid)
while (HeapTupleIsValid(conTup = systable_getnext(scan)))
{
- Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
- Datum val;
- bool isNull;
- Expr *check_expr;
+ Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
+ Datum val;
+ bool isNull;
+ Expr *check_expr;
DomainConstraintState *r;
/* Ignore non-CHECK constraints (presently, shouldn't be any) */
if (c->contype != CONSTRAINT_CHECK)
continue;
- /* Not expecting conbin to be NULL, but we'll test for it anyway */
+ /*
+ * Not expecting conbin to be NULL, but we'll test for it
+ * anyway
+ */
val = fastgetattr(conTup, Anum_pg_constraint_conbin,
conRel->rd_att, &isNull);
if (isNull)
@@ -1945,8 +1957,8 @@ GetDomainConstraints(Oid typeOid)
r->check_expr = ExecInitExpr(check_expr, NULL);
/*
- * use lcons() here because constraints of lower domains should
- * be applied earlier.
+ * use lcons() here because constraints of lower domains
+ * should be applied earlier.
*/
result = lcons(r, result);
}
@@ -2003,7 +2015,7 @@ AlterTypeOwner(List *names, AclId newOwnerSysId)
Oid typeOid;
Relation rel;
HeapTuple tup;
- Form_pg_type typTup;
+ Form_pg_type typTup;
/* Make a TypeName so we can use standard type lookup machinery */
typename = makeNode(TypeName);
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 36416a5232..117eef1e75 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.122 2003/08/01 00:15:19 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.123 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -146,12 +146,12 @@ write_group_file(Relation grel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
/*
- * Read pg_group and write the file. Note we use SnapshotSelf to ensure
- * we see all effects of current transaction. (Perhaps could do a
- * CommandCounterIncrement beforehand, instead?)
+ * Read pg_group and write the file. Note we use SnapshotSelf to
+ * ensure we see all effects of current transaction. (Perhaps could
+ * do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(grel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -212,7 +212,7 @@ write_group_file(Relation grel)
if (usename[j] != '\0')
{
ereport(LOG,
- (errmsg("invalid user name \"%s\"", usename)));
+ (errmsg("invalid user name \"%s\"", usename)));
continue;
}
@@ -245,7 +245,7 @@ write_group_file(Relation grel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);
/*
@@ -294,12 +294,12 @@ write_user_file(Relation urel)
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
/*
- * Read pg_shadow and write the file. Note we use SnapshotSelf to ensure
- * we see all effects of current transaction. (Perhaps could do a
- * CommandCounterIncrement beforehand, instead?)
+ * Read pg_shadow and write the file. Note we use SnapshotSelf to
+ * ensure we see all effects of current transaction. (Perhaps could
+ * do a CommandCounterIncrement beforehand, instead?)
*/
scan = heap_beginscan(urel, SnapshotSelf, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -376,7 +376,7 @@ write_user_file(Relation urel)
if (ferror(fp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write temp file \"%s\": %m", tempname)));
+ errmsg("could not write temp file \"%s\": %m", tempname)));
FreeFile(fp);
/*
@@ -430,10 +430,10 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
Relation urel = NULL;
Relation grel = NULL;
- if (! (user_file_update_needed || group_file_update_needed))
+ if (!(user_file_update_needed || group_file_update_needed))
return;
- if (! isCommit)
+ if (!isCommit)
{
user_file_update_needed = false;
group_file_update_needed = false;
@@ -441,12 +441,12 @@ AtEOXact_UpdatePasswordFile(bool isCommit)
}
/*
- * We use ExclusiveLock to ensure that only one backend writes the flat
- * file(s) at a time. That's sufficient because it's okay to allow plain
- * reads of the tables in parallel. There is some chance of a deadlock
- * here (if we were triggered by a user update of pg_shadow or pg_group,
- * which likely won't have gotten a strong enough lock), so get the locks
- * we need before writing anything.
+ * We use ExclusiveLock to ensure that only one backend writes the
+ * flat file(s) at a time. That's sufficient because it's okay to
+ * allow plain reads of the tables in parallel. There is some chance
+ * of a deadlock here (if we were triggered by a user update of
+ * pg_shadow or pg_group, which likely won't have gotten a strong
+ * enough lock), so get the locks we need before writing anything.
*/
if (user_file_update_needed)
urel = heap_openr(ShadowRelationName, ExclusiveLock);
@@ -1088,7 +1088,7 @@ DropUser(DropUserStmt *stmt)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_IN_USE),
errmsg("user \"%s\" cannot be dropped", user),
- errdetail("The user owns database \"%s\".", dbname)));
+ errdetail("The user owns database \"%s\".", dbname)));
}
heap_endscan(scan);
@@ -1172,10 +1172,10 @@ RenameUser(const char *oldname, const char *newname)
errmsg("user \"%s\" does not exist", oldname)));
/*
- * XXX Client applications probably store the session user
- * somewhere, so renaming it could cause confusion. On the other
- * hand, there may not be an actual problem besides a little
- * confusion, so think about this and decide.
+ * XXX Client applications probably store the session user somewhere,
+ * so renaming it could cause confusion. On the other hand, there may
+ * not be an actual problem besides a little confusion, so think about
+ * this and decide.
*/
if (((Form_pg_shadow) GETSTRUCT(tup))->usesysid == GetSessionUserId())
ereport(ERROR,
@@ -1221,14 +1221,14 @@ CheckPgUserAclNotNull(void)
htup = SearchSysCache(RELOID,
ObjectIdGetDatum(RelOid_pg_shadow),
0, 0, 0);
- if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
+ if (!HeapTupleIsValid(htup)) /* should not happen, we hope */
elog(ERROR, "cache lookup failed for relation %u", RelOid_pg_shadow);
if (heap_attisnull(htup, Anum_pg_class_relacl))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("before using passwords you must revoke permissions on %s",
- ShadowRelationName),
+ errmsg("before using passwords you must revoke permissions on %s",
+ ShadowRelationName),
errdetail("This restriction is to prevent unprivileged users from reading the passwords."),
errhint("Try 'REVOKE ALL ON \"%s\" FROM PUBLIC'.",
ShadowRelationName)));
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index e73ace27c2..9dc0d9a899 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.257 2003/07/20 21:56:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.258 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -287,24 +287,25 @@ vacuum(VacuumStmt *vacstmt)
if (vacstmt->vacuum)
{
- if (! vacuum_rel(relid, vacstmt, RELKIND_RELATION))
- all_rels = false; /* forget about updating dbstats */
+ if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
+ all_rels = false; /* forget about updating dbstats */
}
if (vacstmt->analyze)
{
MemoryContext old_context = NULL;
/*
- * If we vacuumed, use new transaction for analyze.
- * Otherwise, we can use the outer transaction, but we still
- * need to call analyze_rel in a memory context that will be
- * cleaned up on return (else we leak memory while processing
- * multiple tables).
+ * If we vacuumed, use new transaction for analyze. Otherwise,
+ * we can use the outer transaction, but we still need to call
+ * analyze_rel in a memory context that will be cleaned up on
+ * return (else we leak memory while processing multiple
+ * tables).
*/
if (vacstmt->vacuum)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions
+ * in indexes */
}
else
old_context = MemoryContextSwitchTo(anl_context);
@@ -734,7 +735,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/* Begin a transaction for vacuuming this relation */
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions in indexes */
+ SetQuerySnapshot(); /* might be needed for functions in
+ * indexes */
/*
* Check for user-requested abort. Note we want this to be inside a
@@ -812,7 +814,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
{
relation_close(onerel, lmode);
CommitTransactionCommand();
- return true; /* assume no long-lived data in temp tables */
+ return true; /* assume no long-lived data in temp
+ * tables */
}
/*
@@ -860,7 +863,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
*/
if (toast_relid != InvalidOid)
{
- if (! vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
+ if (!vacuum_rel(toast_relid, vacstmt, RELKIND_TOASTVALUE))
result = false; /* failed to vacuum the TOAST table? */
}
@@ -1087,8 +1090,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
if (PageIsNew(page))
{
ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
@@ -1314,7 +1317,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Include the page in empty_end_pages if it will be empty after
- * vacuuming; this is to keep us from using it as a move destination.
+ * vacuuming; this is to keep us from using it as a move
+ * destination.
*/
if (notup)
{
@@ -1382,9 +1386,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead tuples cannot be removed yet.\n"
- "Nonremovable tuples range from %lu to %lu bytes long.\n"
+ "Nonremovable tuples range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
- "Total free space (including removable tuples) is %.0f bytes.\n"
+ "Total free space (including removable tuples) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
"%u pages containing %.0f free bytes are potential move destinations.\n"
"%s",
@@ -2380,8 +2384,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* It'd be cleaner to make this report at the bottom of this routine,
* but then the rusage would double-count the second pass of index
- * vacuuming. So do it here and ignore the relatively small amount
- * of processing that occurs below.
+ * vacuuming. So do it here and ignore the relatively small amount of
+ * processing that occurs below.
*/
ereport(elevel,
(errmsg("\"%s\": moved %u tuples, truncated %u to %u pages",
@@ -2735,7 +2739,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index tuples were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
@@ -2752,7 +2756,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
ereport(WARNING,
(errmsg("index \"%s\" contains %.0f tuples, but table contains %.0f tuples",
RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples + keep_tuples),
+ stats->num_index_tuples, num_tuples + keep_tuples),
errhint("Rebuild the index with REINDEX.")));
}
@@ -2837,13 +2841,14 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
/*
* We only report pages with free space at least equal to the average
- * request size --- this avoids cluttering FSM with uselessly-small bits
- * of space. Although FSM would discard pages with little free space
- * anyway, it's important to do this prefiltering because (a) it reduces
- * the time spent holding the FSM lock in RecordRelationFreeSpace, and
- * (b) FSM uses the number of pages reported as a statistic for guiding
- * space management. If we didn't threshold our reports the same way
- * vacuumlazy.c does, we'd be skewing that statistic.
+ * request size --- this avoids cluttering FSM with uselessly-small
+ * bits of space. Although FSM would discard pages with little free
+ * space anyway, it's important to do this prefiltering because (a) it
+ * reduces the time spent holding the FSM lock in
+ * RecordRelationFreeSpace, and (b) FSM uses the number of pages
+ * reported as a statistic for guiding space management. If we didn't
+ * threshold our reports the same way vacuumlazy.c does, we'd be
+ * skewing that statistic.
*/
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index f0be98a23e..65af960be8 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.29 2003/07/20 21:56:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.30 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,7 +79,7 @@ typedef struct LVRelStats
bool fs_is_heap; /* are we using heap organization? */
int num_free_pages; /* current # of entries */
int max_free_pages; /* # slots allocated in array */
- PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
+ PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
} LVRelStats;
@@ -162,7 +162,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
*/
possibly_freeable = vacrelstats->rel_pages - vacrelstats->nonempty_pages;
if (possibly_freeable >= REL_TRUNCATE_MINIMUM ||
- possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
+ possibly_freeable >= vacrelstats->rel_pages / REL_TRUNCATE_FRACTION)
lazy_truncate_heap(onerel, vacrelstats);
/* Update shared free space map with final free space info */
@@ -659,7 +659,7 @@ lazy_vacuum_index(Relation indrel, LVRelStats *vacrelstats)
stats->num_index_tuples,
stats->num_pages),
errdetail("%.0f index tuples were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
"%s",
stats->tuples_removed,
stats->pages_deleted, stats->pages_free,
@@ -966,16 +966,18 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*
* A page with less than stats->threshold free space will be forgotten
* immediately, and never passed to the free space map. Removing the
- * uselessly small entries early saves cycles, and in particular reduces
- * the amount of time we spend holding the FSM lock when we finally call
- * RecordRelationFreeSpace. Since the FSM will probably drop pages with
- * little free space anyway, there's no point in making this really small.
+ * uselessly small entries early saves cycles, and in particular
+ * reduces the amount of time we spend holding the FSM lock when we
+ * finally call RecordRelationFreeSpace. Since the FSM will probably
+ * drop pages with little free space anyway, there's no point in
+ * making this really small.
*
- * XXX Is it worth trying to measure average tuple size, and using that to
- * adjust the threshold? Would be worthwhile if FSM has no stats yet
- * for this relation. But changing the threshold as we scan the rel
- * might lead to bizarre behavior, too. Also, it's probably better if
- * vacuum.c has the same thresholding behavior as we do here.
+ * XXX Is it worth trying to measure average tuple size, and using that
+ * to adjust the threshold? Would be worthwhile if FSM has no stats
+ * yet for this relation. But changing the threshold as we scan the
+ * rel might lead to bizarre behavior, too. Also, it's probably
+ * better if vacuum.c has the same thresholding behavior as we do
+ * here.
*/
if (avail < vacrelstats->threshold)
return;
@@ -996,7 +998,7 @@ lazy_record_free_space(LVRelStats *vacrelstats,
/*----------
* The rest of this routine works with "heap" organization of the
* free space arrays, wherein we maintain the heap property
- * avail[(j-1) div 2] <= avail[j] for 0 < j < n.
+ * avail[(j-1) div 2] <= avail[j] for 0 < j < n.
* In particular, the zero'th element always has the smallest available
* space and can be discarded to make room for a new page with more space.
* See Knuth's discussion of heap-based priority queues, sec 5.2.3;
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index e0b041636e..07dfca13c8 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.85 2003/07/29 00:03:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.86 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
* to duplicate the test in AC_STRUCT_TIMEZONE.
*/
#ifdef HAVE_TZNAME
-#ifndef tzname /* For SGI. */
+#ifndef tzname /* For SGI. */
extern char *tzname[];
#endif
#endif
@@ -273,12 +273,11 @@ static void
clear_tz(void)
{
/*
- * unsetenv() works fine, but is BSD, not POSIX, and is not
- * available under Solaris, among others. Apparently putenv()
- * called as below clears the process-specific environment
- * variables. Other reasonable arguments to putenv() (e.g.
- * "TZ=", "TZ", "") result in a core dump (under Linux
- * anyway). - thomas 1998-01-26
+ * unsetenv() works fine, but is BSD, not POSIX, and is not available
+ * under Solaris, among others. Apparently putenv() called as below
+ * clears the process-specific environment variables. Other
+ * reasonable arguments to putenv() (e.g. "TZ=", "TZ", "") result in a
+ * core dump (under Linux anyway). - thomas 1998-01-26
*/
if (tzbuf[0] == 'T')
{
@@ -298,14 +297,14 @@ clear_tz(void)
*
* If tzname[1] is a nonempty string, *or* the global timezone variable is
* not zero, then tzset must have recognized the TZ value as something
- * different from UTC. Return true.
+ * different from UTC. Return true.
*
* Otherwise, check to see if the TZ name is a known spelling of "UTC"
* (ie, appears in our internal tables as a timezone equivalent to UTC).
* If so, accept it.
*
* This will reject nonstandard spellings of UTC unless tzset() chose to
- * set tzname[1] as well as tzname[0]. The glibc version of tzset() will
+ * set tzname[1] as well as tzname[0]. The glibc version of tzset() will
* do so, but on other systems we may be tightening the spec a little.
*
* Another problem is that on some platforms (eg HPUX), if tzset thinks the
@@ -337,8 +336,8 @@ tzset_succeeded(const char *tz)
return true;
/*
- * Check for known spellings of "UTC". Note we must downcase the input
- * before passing it to DecodePosixTimezone().
+ * Check for known spellings of "UTC". Note we must downcase the
+ * input before passing it to DecodePosixTimezone().
*/
StrNCpy(tztmp, tz, sizeof(tztmp));
for (cp = tztmp; *cp; cp++)
@@ -368,7 +367,7 @@ tz_acceptable(void)
/*
* To detect leap-second timekeeping, compute the time_t value for
- * local midnight, 2000-01-01. Insist that this be a multiple of 60;
+ * local midnight, 2000-01-01. Insist that this be a multiple of 60;
* any partial-minute offset has to be due to leap seconds.
*/
MemSet(&tt, 0, sizeof(tt));
@@ -399,7 +398,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
*/
if (!have_saved_tz)
{
- char *orig_tz = getenv("TZ");
+ char *orig_tz = getenv("TZ");
if (orig_tz)
StrNCpy(orig_tzbuf, orig_tz, sizeof(orig_tzbuf));
@@ -434,9 +433,9 @@ assign_timezone(const char *value, bool doit, bool interactive)
/*
* Try to parse it. XXX an invalid interval format will result in
- * ereport, which is not desirable for GUC. We did what we could to
- * guard against this in flatten_set_variable_args, but a string
- * coming in from postgresql.conf might contain anything.
+ * ereport, which is not desirable for GUC. We did what we could
+ * to guard against this in flatten_set_variable_args, but a
+ * string coming in from postgresql.conf might contain anything.
*/
interval = DatumGetIntervalP(DirectFunctionCall3(interval_in,
CStringGetDatum(val),
@@ -455,7 +454,7 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
- CTimeZone = - interval->time;
+ CTimeZone = -interval->time;
HasCTZSet = true;
}
pfree(interval);
@@ -471,22 +470,22 @@ assign_timezone(const char *value, bool doit, bool interactive)
if (doit)
{
/* Here we change from SQL to Unix sign convention */
- CTimeZone = - hours * 3600;
+ CTimeZone = -hours * 3600;
HasCTZSet = true;
}
}
else if (strcasecmp(value, "UNKNOWN") == 0)
{
/*
- * UNKNOWN is the value shown as the "default" for TimeZone
- * in guc.c. We interpret it as meaning the original TZ
- * inherited from the environment. Note that if there is an
- * original TZ setting, we will return that rather than UNKNOWN
- * as the canonical spelling.
+ * UNKNOWN is the value shown as the "default" for TimeZone in
+ * guc.c. We interpret it as meaning the original TZ
+ * inherited from the environment. Note that if there is an
+ * original TZ setting, we will return that rather than
+ * UNKNOWN as the canonical spelling.
*/
if (doit)
{
- bool ok;
+ bool ok;
/* Revert to original setting of TZ, whatever it was */
if (orig_tzbuf[0])
@@ -516,14 +515,14 @@ assign_timezone(const char *value, bool doit, bool interactive)
* Otherwise assume it is a timezone name.
*
* We have to actually apply the change before we can have any
- * hope of checking it. So, save the old value in case we have
- * to back out. Note that it's possible the old setting is in
- * tzbuf, so we'd better copy it.
+ * hope of checking it. So, save the old value in case we
+ * have to back out. Note that it's possible the old setting
+ * is in tzbuf, so we'd better copy it.
*/
- char save_tzbuf[TZBUF_LEN];
- char *save_tz;
- bool known,
- acceptable;
+ char save_tzbuf[TZBUF_LEN];
+ char *save_tz;
+ bool known,
+ acceptable;
save_tz = getenv("TZ");
if (save_tz)
@@ -563,8 +562,8 @@ assign_timezone(const char *value, bool doit, bool interactive)
{
ereport(interactive ? ERROR : LOG,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timezone \"%s\" appears to use leap seconds",
- value),
+ errmsg("timezone \"%s\" appears to use leap seconds",
+ value),
errdetail("PostgreSQL does not support leap seconds")));
return NULL;
}
@@ -609,7 +608,7 @@ show_timezone(void)
Interval interval;
interval.month = 0;
- interval.time = - CTimeZone;
+ interval.time = -CTimeZone;
tzn = DatumGetCString(DirectFunctionCall1(interval_out,
IntervalPGetDatum(&interval)));
@@ -703,16 +702,16 @@ assign_client_encoding(const char *value, bool doit, bool interactive)
/*
* Note: if we are in startup phase then SetClientEncoding may not be
* able to really set the encoding. In this case we will assume that
- * the encoding is okay, and InitializeClientEncoding() will fix things
- * once initialization is complete.
+ * the encoding is okay, and InitializeClientEncoding() will fix
+ * things once initialization is complete.
*/
if (SetClientEncoding(encoding, doit) < 0)
{
if (interactive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("conversion between %s and %s is not supported",
- value, GetDatabaseEncodingName())));
+ errmsg("conversion between %s and %s is not supported",
+ value, GetDatabaseEncodingName())));
return NULL;
}
return value;
@@ -758,12 +757,12 @@ assign_session_authorization(const char *value, bool doit, bool interactive)
/* not a saved ID, so look it up */
HeapTuple userTup;
- if (! IsTransactionState())
+ if (!IsTransactionState())
{
/*
* Can't do catalog lookups, so fail. The upshot of this is
- * that session_authorization cannot be set in postgresql.conf,
- * which seems like a good thing anyway.
+ * that session_authorization cannot be set in
+ * postgresql.conf, which seems like a good thing anyway.
*/
return NULL;
}
@@ -782,7 +781,7 @@ assign_session_authorization(const char *value, bool doit, bool interactive)
usesysid = ((Form_pg_shadow) GETSTRUCT(userTup))->usesysid;
is_superuser = ((Form_pg_shadow) GETSTRUCT(userTup))->usesuper;
-
+
ReleaseSysCache(userTup);
}
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index e12ae0af68..9c3b372b3f 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.75 2003/08/01 00:15:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.76 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -190,8 +190,8 @@ checkViewTupleDesc(TupleDesc newdesc, TupleDesc olddesc)
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("cannot change datatype of view column \"%s\"",
- NameStr(oldattr->attname))));
+ errmsg("cannot change datatype of view column \"%s\"",
+ NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 61974827b3..9267d362dd 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.72 2003/07/21 17:05:00 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.73 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -48,7 +48,7 @@
* ----------------------------------------------------------------
*/
void
-ExecReScan(PlanState *node, ExprContext *exprCtxt)
+ExecReScan(PlanState * node, ExprContext *exprCtxt)
{
/* If collecting timing stats, update them */
if (node->instrument)
@@ -61,7 +61,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
foreach(lst, node->initPlan)
{
- SubPlanState *sstate = (SubPlanState *) lfirst(lst);
+ SubPlanState *sstate = (SubPlanState *) lfirst(lst);
PlanState *splan = sstate->planstate;
if (splan->plan->extParam != NULL) /* don't care about child
@@ -72,7 +72,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
}
foreach(lst, node->subPlan)
{
- SubPlanState *sstate = (SubPlanState *) lfirst(lst);
+ SubPlanState *sstate = (SubPlanState *) lfirst(lst);
PlanState *splan = sstate->planstate;
if (splan->plan->extParam != NULL)
@@ -177,7 +177,7 @@ ExecReScan(PlanState *node, ExprContext *exprCtxt)
* Marks the current scan position.
*/
void
-ExecMarkPos(PlanState *node)
+ExecMarkPos(PlanState * node)
{
switch (nodeTag(node))
{
@@ -218,7 +218,7 @@ ExecMarkPos(PlanState *node)
* restores the scan position previously saved with ExecMarkPos()
*/
void
-ExecRestrPos(PlanState *node)
+ExecRestrPos(PlanState * node)
{
switch (nodeTag(node))
{
@@ -302,16 +302,16 @@ ExecSupportsBackwardScan(Plan *node)
return false;
case T_Append:
- {
- List *l;
-
- foreach(l, ((Append *) node)->appendplans)
{
- if (!ExecSupportsBackwardScan((Plan *) lfirst(l)))
- return false;
+ List *l;
+
+ foreach(l, ((Append *) node)->appendplans)
+ {
+ if (!ExecSupportsBackwardScan((Plan *) lfirst(l)))
+ return false;
+ }
+ return true;
}
- return true;
- }
case T_SeqScan:
case T_IndexScan:
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 3f9c6d0d47..8b0962ba9b 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.4 2003/07/21 17:05:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execGrouping.c,v 1.5 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -249,7 +249,7 @@ execTuplesHashPrepare(TupleDesc tupdesc,
eq_function = oprfuncid(optup);
ReleaseSysCache(optup);
hash_function = get_op_hash_function(eq_opr);
- if (!OidIsValid(hash_function)) /* should not happen */
+ if (!OidIsValid(hash_function)) /* should not happen */
elog(ERROR, "could not find hash function for hash operator %u",
eq_opr);
fmgr_info(eq_function, &(*eqfunctions)[i]);
@@ -289,8 +289,8 @@ BuildTupleHashTable(int numCols, AttrNumber *keyColIdx,
int nbuckets, Size entrysize,
MemoryContext tablecxt, MemoryContext tempcxt)
{
- TupleHashTable hashtable;
- Size tabsize;
+ TupleHashTable hashtable;
+ Size tabsize;
Assert(nbuckets > 0);
Assert(entrysize >= sizeof(TupleHashEntryData));
@@ -411,9 +411,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
* Iterator state must be initialized with ResetTupleHashIterator() macro.
*/
TupleHashEntry
-ScanTupleHashTable(TupleHashTable hashtable, TupleHashIterator *state)
+ScanTupleHashTable(TupleHashTable hashtable, TupleHashIterator * state)
{
- TupleHashEntry entry;
+ TupleHashEntry entry;
entry = state->next_entry;
while (entry == NULL)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index ded748d5bf..ae58bb130f 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.212 2003/08/01 00:15:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.213 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -68,7 +68,7 @@ static void initResultRelInfo(ResultRelInfo *resultRelInfo,
Index resultRelationIndex,
List *rangeTable,
CmdType operation);
-static TupleTableSlot *ExecutePlan(EState *estate, PlanState *planstate,
+static TupleTableSlot *ExecutePlan(EState *estate, PlanState * planstate,
CmdType operation,
long numberTuples,
ScanDirection direction,
@@ -87,7 +87,7 @@ static void EndEvalPlanQual(EState *estate);
static void ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation);
static void ExecCheckXactReadOnly(Query *parsetree, CmdType operation);
static void EvalPlanQualStart(evalPlanQual *epq, EState *estate,
- evalPlanQual *priorepq);
+ evalPlanQual *priorepq);
static void EvalPlanQualStop(evalPlanQual *epq);
/* end of local decls */
@@ -100,7 +100,7 @@ static void EvalPlanQualStop(evalPlanQual *epq);
* query plan
*
* Takes a QueryDesc previously created by CreateQueryDesc (it's not real
- * clear why we bother to separate the two functions, but...). The tupDesc
+ * clear why we bother to separate the two functions, but...). The tupDesc
* field of the QueryDesc is filled in to describe the tuples that will be
* returned, and the internal fields (estate and planstate) are set up.
*
@@ -122,8 +122,8 @@ ExecutorStart(QueryDesc *queryDesc, bool explainOnly)
Assert(queryDesc->estate == NULL);
/*
- * If the transaction is read-only, we need to check if any writes
- * are planned to non-temporary tables.
+ * If the transaction is read-only, we need to check if any writes are
+ * planned to non-temporary tables.
*/
if (!explainOnly)
ExecCheckXactReadOnly(queryDesc->parsetree, queryDesc->operation);
@@ -362,8 +362,8 @@ ExecCheckRTEPerms(RangeTblEntry *rte, CmdType operation)
/*
* Otherwise, only plain-relation RTEs need to be checked here.
- * Function RTEs are checked by init_fcache when the function is prepared
- * for execution. Join and special RTEs need no checks.
+ * Function RTEs are checked by init_fcache when the function is
+ * prepared for execution. Join and special RTEs need no checks.
*/
if (rte->rtekind != RTE_RELATION)
return;
@@ -435,7 +435,7 @@ ExecCheckXactReadOnly(Query *parsetree, CmdType operation)
if (operation == CMD_DELETE || operation == CMD_INSERT
|| operation == CMD_UPDATE)
{
- List *lp;
+ List *lp;
foreach(lp, parsetree->rtable)
{
@@ -474,9 +474,9 @@ static void
InitPlan(QueryDesc *queryDesc, bool explainOnly)
{
CmdType operation = queryDesc->operation;
- Query *parseTree = queryDesc->parsetree;
- Plan *plan = queryDesc->plantree;
- EState *estate = queryDesc->estate;
+ Query *parseTree = queryDesc->parsetree;
+ Plan *plan = queryDesc->plantree;
+ EState *estate = queryDesc->estate;
PlanState *planstate;
List *rangeTable;
Relation intoRelationDesc;
@@ -484,8 +484,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
TupleDesc tupType;
/*
- * Do permissions checks. It's sufficient to examine the query's
- * top rangetable here --- subplan RTEs will be checked during
+ * Do permissions checks. It's sufficient to examine the query's top
+ * rangetable here --- subplan RTEs will be checked during
* ExecInitSubPlan().
*/
ExecCheckRTPerms(parseTree->rtable, operation);
@@ -570,10 +570,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
if (operation == CMD_SELECT && parseTree->into != NULL)
{
do_select_into = true;
+
/*
- * For now, always create OIDs in SELECT INTO; this is for backwards
- * compatibility with pre-7.3 behavior. Eventually we might want
- * to allow the user to choose.
+ * For now, always create OIDs in SELECT INTO; this is for
+ * backwards compatibility with pre-7.3 behavior. Eventually we
+ * might want to allow the user to choose.
*/
estate->es_force_oids = true;
}
@@ -640,12 +641,12 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
tupType = ExecGetResultType(planstate);
/*
- * Initialize the junk filter if needed. SELECT and INSERT queries need a
- * filter if there are any junk attrs in the tlist. INSERT and SELECT
- * INTO also need a filter if the top plan node is a scan node that's not
- * doing projection (else we'll be scribbling on the scan tuple!) UPDATE
- * and DELETE always need a filter, since there's always a junk 'ctid'
- * attribute present --- no need to look first.
+ * Initialize the junk filter if needed. SELECT and INSERT queries
+ * need a filter if there are any junk attrs in the tlist. INSERT and
+ * SELECT INTO also need a filter if the top plan node is a scan node
+ * that's not doing projection (else we'll be scribbling on the scan
+ * tuple!) UPDATE and DELETE always need a filter, since there's
+ * always a junk 'ctid' attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
@@ -752,8 +753,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* If doing SELECT INTO, initialize the "into" relation. We must wait
- * till now so we have the "clean" result tuple type to create the
- * new table from.
+ * till now so we have the "clean" result tuple type to create the new
+ * table from.
*
* If EXPLAIN, skip creating the "into" relation.
*/
@@ -795,16 +796,16 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
FreeTupleDesc(tupdesc);
/*
- * Advance command counter so that the newly-created
- * relation's catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's
+ * catalog tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
- * If necessary, create a TOAST table for the into
- * relation. Note that AlterTableCreateToastTable ends
- * with CommandCounterIncrement(), so that the TOAST table
- * will be visible for insertion.
+ * If necessary, create a TOAST table for the into relation. Note
+ * that AlterTableCreateToastTable ends with
+ * CommandCounterIncrement(), so that the TOAST table will be
+ * visible for insertion.
*/
AlterTableCreateToastTable(intoRelationId, true);
@@ -841,19 +842,19 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change sequence relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_TOASTVALUE:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change toast relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
case RELKIND_VIEW:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot change view relation \"%s\"",
- RelationGetRelationName(resultRelationDesc))));
+ RelationGetRelationName(resultRelationDesc))));
break;
}
@@ -894,7 +895,7 @@ initResultRelInfo(ResultRelInfo *resultRelInfo,
* ----------------------------------------------------------------
*/
void
-ExecEndPlan(PlanState *planstate, EState *estate)
+ExecEndPlan(PlanState * planstate, EState *estate)
{
ResultRelInfo *resultRelInfo;
int i;
@@ -964,18 +965,18 @@ ExecEndPlan(PlanState *planstate, EState *estate)
*/
static TupleTableSlot *
ExecutePlan(EState *estate,
- PlanState *planstate,
+ PlanState * planstate,
CmdType operation,
long numberTuples,
ScanDirection direction,
DestReceiver *dest)
{
- JunkFilter *junkfilter;
- TupleTableSlot *slot;
- ItemPointer tupleid = NULL;
- ItemPointerData tuple_ctid;
- long current_tuple_count;
- TupleTableSlot *result;
+ JunkFilter *junkfilter;
+ TupleTableSlot *slot;
+ ItemPointer tupleid = NULL;
+ ItemPointerData tuple_ctid;
+ long current_tuple_count;
+ TupleTableSlot *result;
/*
* initialize local variables
@@ -1199,7 +1200,7 @@ lnext: ;
/*
* check our tuple count.. if we've processed the proper number
- * then quit, else loop again and process more tuples. Zero
+ * then quit, else loop again and process more tuples. Zero
* numberTuples means no limit.
*/
current_tuple_count++;
@@ -1309,7 +1310,7 @@ ExecInsert(TupleTableSlot *slot,
/* BEFORE ROW INSERT Triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
+ resultRelInfo->ri_TrigDesc->n_before_row[TRIGGER_EVENT_INSERT] > 0)
{
HeapTuple newtuple;
@@ -1686,13 +1687,13 @@ ExecConstraints(ResultRelInfo *resultRelInfo,
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("null value for attribute \"%s\" violates NOT NULL constraint",
- NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
+ NameStr(rel->rd_att->attrs[attrChk - 1]->attname))));
}
}
if (constr->num_check > 0)
{
- const char *failed;
+ const char *failed;
if ((failed = ExecRelCheck(resultRelInfo, slot, estate)) != NULL)
ereport(ERROR,
@@ -1884,10 +1885,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
* integrated with the Param mechanism somehow, so that the upper plan
* nodes know that their children's outputs have changed.
*
- * Note that the stack of free evalPlanQual nodes is quite useless at
- * the moment, since it only saves us from pallocing/releasing the
+ * Note that the stack of free evalPlanQual nodes is quite useless at the
+ * moment, since it only saves us from pallocing/releasing the
* evalPlanQual nodes themselves. But it will be useful once we
- * implement ReScan instead of end/restart for re-using PlanQual nodes.
+ * implement ReScan instead of end/restart for re-using PlanQual
+ * nodes.
*/
if (endNode)
{
@@ -1898,10 +1900,11 @@ EvalPlanQual(EState *estate, Index rti, ItemPointer tid)
/*
* Initialize new recheck query.
*
- * Note: if we were re-using PlanQual plans via ExecReScan, we'd need
- * to instead copy down changeable state from the top plan (including
- * es_result_relation_info, es_junkFilter) and reset locally changeable
- * state in the epq (including es_param_exec_vals, es_evTupleNull).
+ * Note: if we were re-using PlanQual plans via ExecReScan, we'd need to
+ * instead copy down changeable state from the top plan (including
+ * es_result_relation_info, es_junkFilter) and reset locally
+ * changeable state in the epq (including es_param_exec_vals,
+ * es_evTupleNull).
*/
EvalPlanQualStart(epq, estate, epq->next);
@@ -2016,9 +2019,9 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
/*
* The epqstates share the top query's copy of unchanging state such
- * as the snapshot, rangetable, result-rel info, and external Param info.
- * They need their own copies of local state, including a tuple table,
- * es_param_exec_vals, etc.
+ * as the snapshot, rangetable, result-rel info, and external Param
+ * info. They need their own copies of local state, including a tuple
+ * table, es_param_exec_vals, etc.
*/
epqstate->es_direction = ForwardScanDirection;
epqstate->es_snapshot = estate->es_snapshot;
@@ -2036,11 +2039,11 @@ EvalPlanQualStart(evalPlanQual *epq, EState *estate, evalPlanQual *priorepq)
epqstate->es_instrument = estate->es_instrument;
epqstate->es_force_oids = estate->es_force_oids;
epqstate->es_topPlan = estate->es_topPlan;
+
/*
- * Each epqstate must have its own es_evTupleNull state, but
- * all the stack entries share es_evTuple state. This allows
- * sub-rechecks to inherit the value being examined by an
- * outer recheck.
+ * Each epqstate must have its own es_evTupleNull state, but all the
+ * stack entries share es_evTuple state. This allows sub-rechecks to
+ * inherit the value being examined by an outer recheck.
*/
epqstate->es_evTupleNull = (bool *) palloc0(rtsize * sizeof(bool));
if (priorepq == NULL)
diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c
index f73f2d7185..1c34e1d1a4 100644
--- a/src/backend/executor/execProcnode.c
+++ b/src/backend/executor/execProcnode.c
@@ -12,7 +12,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.37 2003/07/21 17:05:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.38 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,7 @@
* * ExecInitNode() notices that it is looking at a nest loop and
* as the code below demonstrates, it calls ExecInitNestLoop().
* Eventually this calls ExecInitNode() on the right and left subplans
- * and so forth until the entire plan is initialized. The result
+ * and so forth until the entire plan is initialized. The result
* of ExecInitNode() is a plan state tree built with the same structure
* as the underlying plan tree.
*
@@ -226,7 +226,7 @@ ExecInitNode(Plan *node, EState *estate)
subps = NIL;
foreach(subp, node->initPlan)
{
- SubPlan *subplan = (SubPlan *) lfirst(subp);
+ SubPlan *subplan = (SubPlan *) lfirst(subp);
SubPlanState *sstate;
Assert(IsA(subplan, SubPlan));
@@ -237,9 +237,9 @@ ExecInitNode(Plan *node, EState *estate)
result->initPlan = subps;
/*
- * Initialize any subPlans present in this node. These were found
- * by ExecInitExpr during initialization of the PlanState. Note we
- * must do this after initializing initPlans, in case their arguments
+ * Initialize any subPlans present in this node. These were found by
+ * ExecInitExpr during initialization of the PlanState. Note we must
+ * do this after initializing initPlans, in case their arguments
* contain subPlans (is that actually possible? perhaps not).
*/
subps = NIL;
@@ -268,7 +268,7 @@ ExecInitNode(Plan *node, EState *estate)
* ----------------------------------------------------------------
*/
TupleTableSlot *
-ExecProcNode(PlanState *node)
+ExecProcNode(PlanState * node)
{
TupleTableSlot *result;
@@ -280,7 +280,7 @@ ExecProcNode(PlanState *node)
if (node == NULL)
return NULL;
- if (node->chgParam != NULL) /* something changed */
+ if (node->chgParam != NULL) /* something changed */
ExecReScan(node, NULL); /* let ReScan handle this */
if (node->instrument)
@@ -484,7 +484,7 @@ ExecCountSlotsNode(Plan *node)
* ----------------------------------------------------------------
*/
void
-ExecEndNode(PlanState *node)
+ExecEndNode(PlanState * node)
{
List *subp;
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 891019f0ae..d509122f29 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.138 2003/08/01 00:15:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.139 2003/08/04 00:43:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -50,55 +50,55 @@
/* static function decls */
-static Datum ExecEvalAggref(AggrefExprState *aggref,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalArrayRef(ArrayRefExprState *astate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+static Datum ExecEvalAggref(AggrefExprState * aggref,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalArrayRef(ArrayRefExprState * astate,
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull);
static Datum ExecEvalParam(Param *expression, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalFunc(FuncExprState * fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalOper(FuncExprState *fcache, ExprContext *econtext,
+static Datum ExecEvalOper(FuncExprState * fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalDistinct(FuncExprState *fcache, ExprContext *econtext,
+static Datum ExecEvalDistinct(FuncExprState * fcache, ExprContext *econtext,
bool *isNull);
-static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
- ExprContext *econtext, bool *isNull);
+static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState * sstate,
+ ExprContext *econtext, bool *isNull);
static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo,
List *argList, ExprContext *econtext);
-static Datum ExecEvalNot(BoolExprState *notclause, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
+static Datum ExecEvalNot(BoolExprState * notclause, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalOr(BoolExprState * orExpr, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalAnd(BoolExprState * andExpr, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalCase(CaseExprState * caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalArray(ArrayExprState *astate,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
- ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalNullIf(FuncExprState *nullIfExpr, ExprContext *econtext,
- bool *isNull);
-static Datum ExecEvalNullTest(GenericExprState *nstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalBooleanTest(GenericExprState *bstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
-static Datum ExecEvalCoerceToDomain(CoerceToDomainState *cstate,
+static Datum ExecEvalArray(ArrayExprState * astate,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalCoalesce(CoalesceExprState * coalesceExpr,
+ ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalNullIf(FuncExprState * nullIfExpr, ExprContext *econtext,
+ bool *isNull);
+static Datum ExecEvalNullTest(GenericExprState * nstate,