From d31084e9d1118b25fd16580d9d8c2924b5740dff Mon Sep 17 00:00:00 2001
From: "Marc G. Fournier"
Date: Tue, 9 Jul 1996 06:22:35 +0000
Subject: [PATCH] Postgres95 1.01 Distribution - Virgin Sources

---
 src/Makefile | 48 + src/Makefile.global | 306 + src/backend/Makefile | 289 + src/backend/access/Makefile.inc | 35 + src/backend/access/attnum.h | 61 + src/backend/access/common/Makefile.inc | 16 + src/backend/access/common/heaptuple.c | 1011 ++ src/backend/access/common/heapvalid.c | 134 + src/backend/access/common/indextuple.c | 427 + src/backend/access/common/indexvalid.c | 84 + src/backend/access/common/printtup.c | 306 + src/backend/access/common/scankey.c | 68 + src/backend/access/common/tupdesc.c | 398 + src/backend/access/funcindex.h | 43 + src/backend/access/genam.h | 60 + src/backend/access/hash.h | 336 + src/backend/access/hash/Makefile.inc | 18 + src/backend/access/hash/hash.c | 467 + src/backend/access/hash/hashfunc.c | 276 + src/backend/access/hash/hashinsert.c | 239 + src/backend/access/hash/hashovfl.c | 614 + src/backend/access/hash/hashpage.c | 669 ++ src/backend/access/hash/hashscan.c | 172 + src/backend/access/hash/hashsearch.c | 425 + src/backend/access/hash/hashstrat.c | 104 + src/backend/access/hash/hashutil.c | 147 + src/backend/access/heap/Makefile.inc | 14 + src/backend/access/heap/heapam.c | 1507 +++ src/backend/access/heap/hio.c | 195 + src/backend/access/heap/stats.c | 329 + src/backend/access/heapam.h | 149 + src/backend/access/hio.h | 26 + src/backend/access/htup.h | 115 + src/backend/access/ibit.h | 34 + src/backend/access/index/Makefile.inc | 14 + src/backend/access/index/genam.c | 275 + src/backend/access/index/indexam.c | 411 + src/backend/access/index/istrat.c | 679 ++ src/backend/access/iqual.h | 32 + src/backend/access/istrat.h | 80 + src/backend/access/itup.h | 104 + src/backend/access/nbtree.h | 264 + src/backend/access/nbtree/Makefile.inc | 15 + src/backend/access/nbtree/README | 68 + src/backend/access/nbtree/nbtcompare.c | 173 + src/backend/access/nbtree/nbtinsert.c | 831 ++ src/backend/access/nbtree/nbtpage.c | 523 + src/backend/access/nbtree/nbtree.c | 516 + src/backend/access/nbtree/nbtscan.c | 164 + src/backend/access/nbtree/nbtsearch.c | 1133 ++ src/backend/access/nbtree/nbtsort.c | 1196 ++ src/backend/access/nbtree/nbtstrat.c | 134 + src/backend/access/nbtree/nbtutils.c | 239 + src/backend/access/printtup.h | 26 + src/backend/access/relscan.h | 87 + src/backend/access/rtree.h | 98 + src/backend/access/rtree/Makefile.inc | 14 + src/backend/access/rtree/rtget.c | 320 + src/backend/access/rtree/rtproc.c | 150 + src/backend/access/rtree/rtree.c | 955 ++ src/backend/access/rtree/rtscan.c | 392 + src/backend/access/rtree/rtstrat.c | 239 + src/backend/access/rtscan.h | 17 + src/backend/access/rtstrat.h | 18 + src/backend/access/sdir.h | 57 + src/backend/access/skey.h | 52 + src/backend/access/strat.h | 86 + src/backend/access/transam.h | 213 + src/backend/access/transam/Makefile.inc | 14 + src/backend/access/transam/transam.c | 675 ++ src/backend/access/transam/transsup.c | 663 + src/backend/access/transam/varsup.c | 606 + src/backend/access/transam/xact.c | 1314 ++ src/backend/access/transam/xid.c | 156 + src/backend/access/tupdesc.h | 53 + src/backend/access/tupmacs.h | 43 + src/backend/access/valid.h | 37 + src/backend/access/xact.h | 115 + src/backend/bootstrap/Makefile.inc | 63 + src/backend/bootstrap/boot.sed | 9 + src/backend/bootstrap/bootparse.y | 293 + src/backend/bootstrap/bootscanner.l | 108 + src/backend/bootstrap/bootstrap.c | 1049 ++ 
src/backend/bootstrap/bootstrap.h | 78 + src/backend/catalog/Makefile.inc | 69 + src/backend/catalog/README | 66 + src/backend/catalog/catalog.c | 205 + src/backend/catalog/catalog.h | 24 + src/backend/catalog/catname.h | 52 + src/backend/catalog/genbki.sh | 218 + src/backend/catalog/heap.c | 1428 +++ src/backend/catalog/heap.h | 42 + src/backend/catalog/index.c | 1655 +++ src/backend/catalog/index.h | 59 + src/backend/catalog/indexing.c | 561 + src/backend/catalog/indexing.h | 103 + src/backend/catalog/pg_aggregate.c | 325 + src/backend/catalog/pg_aggregate.h | 132 + src/backend/catalog/pg_am.h | 115 + src/backend/catalog/pg_amop.h | 546 + src/backend/catalog/pg_amproc.h | 134 + src/backend/catalog/pg_attribute.h | 512 + src/backend/catalog/pg_class.h | 162 + src/backend/catalog/pg_database.h | 57 + src/backend/catalog/pg_defaults.h | 55 + src/backend/catalog/pg_demon.h | 58 + src/backend/catalog/pg_group.h | 42 + src/backend/catalog/pg_hosts.h | 44 + src/backend/catalog/pg_index.h | 71 + src/backend/catalog/pg_inheritproc.h | 59 + src/backend/catalog/pg_inherits.h | 57 + src/backend/catalog/pg_ipl.h | 57 + src/backend/catalog/pg_language.h | 75 + src/backend/catalog/pg_listener.h | 56 + src/backend/catalog/pg_log.h | 40 + src/backend/catalog/pg_magic.h | 54 + src/backend/catalog/pg_opclass.h | 85 + src/backend/catalog/pg_operator.c | 1077 ++ src/backend/catalog/pg_operator.h | 480 + src/backend/catalog/pg_parg.h | 116 + src/backend/catalog/pg_proc.c | 265 + src/backend/catalog/pg_proc.h | 769 ++ src/backend/catalog/pg_rewrite.h | 64 + src/backend/catalog/pg_server.h | 56 + src/backend/catalog/pg_statistic.h | 60 + src/backend/catalog/pg_time.h | 41 + src/backend/catalog/pg_type.c | 595 + src/backend/catalog/pg_type.h | 267 + src/backend/catalog/pg_user.h | 99 + src/backend/catalog/pg_variable.h | 40 + src/backend/catalog/pg_version.h | 58 + src/backend/catalog/unused_oids | 41 + src/backend/commands/Makefile.inc | 25 + src/backend/commands/_deadcode/version.c | 336 + src/backend/commands/async.c | 605 + src/backend/commands/async.h | 33 + src/backend/commands/cluster.c | 370 + src/backend/commands/cluster.h | 30 + src/backend/commands/command.c | 511 + src/backend/commands/command.h | 56 + src/backend/commands/copy.c | 782 ++ src/backend/commands/copy.h | 21 + src/backend/commands/creatinh.c | 564 + src/backend/commands/creatinh.h | 20 + src/backend/commands/defind.c | 505 + src/backend/commands/define.c | 564 + src/backend/commands/defrem.h | 53 + src/backend/commands/explain.c | 219 + src/backend/commands/explain.h | 17 + src/backend/commands/purge.c | 168 + src/backend/commands/purge.h | 20 + src/backend/commands/recipe.c | 1181 ++ src/backend/commands/recipe.h | 17 + src/backend/commands/remove.c | 435 + src/backend/commands/rename.c | 275 + src/backend/commands/rename.h | 24 + src/backend/commands/vacuum.c | 853 ++ src/backend/commands/vacuum.h | 48 + src/backend/commands/version.h | 26 + src/backend/commands/view.c | 325 + src/backend/commands/view.h | 20 + src/backend/executor/Makefile.inc | 29 + src/backend/executor/execAmi.c | 439 + src/backend/executor/execFlatten.c | 236 + src/backend/executor/execFlatten.h | 26 + src/backend/executor/execJunk.c | 389 + src/backend/executor/execMain.c | 1023 ++ src/backend/executor/execProcnode.c | 477 + src/backend/executor/execQual.c | 1504 +++ src/backend/executor/execScan.c | 136 + src/backend/executor/execTuples.c | 1013 ++ src/backend/executor/execUtils.c | 1092 ++ src/backend/executor/execdebug.h | 377 + src/backend/executor/execdefs.h 
| 54 + src/backend/executor/execdesc.h | 38 + src/backend/executor/executor.h | 229 + src/backend/executor/functions.c | 388 + src/backend/executor/functions.h | 22 + src/backend/executor/hashjoin.h | 82 + src/backend/executor/nodeAgg.c | 558 + src/backend/executor/nodeAgg.h | 21 + src/backend/executor/nodeAppend.c | 483 + src/backend/executor/nodeAppend.h | 22 + src/backend/executor/nodeGroup.c | 407 + src/backend/executor/nodeGroup.h | 21 + src/backend/executor/nodeHash.c | 828 ++ src/backend/executor/nodeHash.h | 35 + src/backend/executor/nodeHashjoin.c | 792 ++ src/backend/executor/nodeHashjoin.h | 33 + src/backend/executor/nodeIndexscan.c | 902 ++ src/backend/executor/nodeIndexscan.h | 32 + src/backend/executor/nodeMaterial.c | 392 + src/backend/executor/nodeMaterial.h | 23 + src/backend/executor/nodeMergejoin.c | 1194 ++ src/backend/executor/nodeMergejoin.h | 40 + src/backend/executor/nodeNestloop.c | 370 + src/backend/executor/nodeNestloop.h | 21 + src/backend/executor/nodeResult.c | 288 + src/backend/executor/nodeResult.h | 21 + src/backend/executor/nodeSeqscan.c | 449 + src/backend/executor/nodeSeqscan.h | 27 + src/backend/executor/nodeSort.c | 523 + src/backend/executor/nodeSort.h | 23 + src/backend/executor/nodeTee.c | 503 + src/backend/executor/nodeTee.h | 22 + src/backend/executor/nodeUnique.c | 316 + src/backend/executor/nodeUnique.h | 21 + src/backend/executor/tuptable.h | 72 + src/backend/include/Makefile.inc | 16 + src/backend/include/c.h | 768 ++ src/backend/include/miscadmin.h | 193 + src/backend/include/postgres.h | 224 + src/backend/lib/Makefile.inc | 20 + src/backend/lib/bit.c | 45 + src/backend/lib/dllist.c | 204 + src/backend/lib/dllist.h | 72 + src/backend/lib/fstack.c | 153 + src/backend/lib/fstack.h | 113 + src/backend/lib/hasht.c | 47 + src/backend/lib/hasht.h | 23 + src/backend/lib/lispsort.c | 56 + src/backend/lib/lispsort.h | 18 + src/backend/lib/qsort.c | 281 + src/backend/lib/qsort.h | 24 + src/backend/lib/stringinfo.c | 116 + src/backend/lib/stringinfo.h | 47 + src/backend/libpq/Makefile.inc | 26 + src/backend/libpq/auth.c | 668 ++ src/backend/libpq/auth.h | 49 + src/backend/libpq/be-dumpdata.c | 323 + src/backend/libpq/be-fsstubs.c | 351 + src/backend/libpq/be-fsstubs.h | 32 + src/backend/libpq/be-pqexec.c | 382 + src/backend/libpq/libpq-be.h | 51 + src/backend/libpq/libpq-fs.h | 119 + src/backend/libpq/libpq.h | 261 + src/backend/libpq/portal.c | 783 ++ src/backend/libpq/portalbuf.c | 511 + src/backend/libpq/pqcomm.c | 724 ++ src/backend/libpq/pqcomm.h | 124 + src/backend/libpq/pqpacket.c | 283 + src/backend/libpq/pqsignal.c | 40 + src/backend/libpq/pqsignal.h | 32 + src/backend/main/Makefile.inc | 16 + src/backend/main/main.c | 45 + src/backend/makeID | 17 + src/backend/nodes/Makefile.inc | 33 + src/backend/nodes/README | 65 + src/backend/nodes/copyfuncs.c | 1675 +++ src/backend/nodes/equalfuncs.c | 703 ++ src/backend/nodes/execnodes.h | 689 ++ src/backend/nodes/list.c | 438 + src/backend/nodes/makefuncs.c | 117 + src/backend/nodes/makefuncs.h | 48 + src/backend/nodes/memnodes.h | 101 + src/backend/nodes/nodeFuncs.c | 116 + src/backend/nodes/nodeFuncs.h | 23 + src/backend/nodes/nodes.c | 45 + src/backend/nodes/nodes.h | 299 + src/backend/nodes/outfuncs.c | 1670 +++ src/backend/nodes/params.h | 90 + src/backend/nodes/parsenodes.h | 731 ++ src/backend/nodes/pg_list.h | 112 + src/backend/nodes/plannodes.h | 330 + src/backend/nodes/primnodes.h | 318 + src/backend/nodes/print.c | 377 + src/backend/nodes/read.c | 270 + src/backend/nodes/readfuncs.c | 1948 +++ 
src/backend/nodes/readfuncs.h | 27 + src/backend/nodes/relation.h | 279 + src/backend/optimizer/Makefile.inc | 29 + src/backend/optimizer/clauseinfo.h | 24 + src/backend/optimizer/clauses.h | 54 + src/backend/optimizer/cost.h | 59 + src/backend/optimizer/internal.h | 92 + src/backend/optimizer/joininfo.h | 20 + src/backend/optimizer/keys.h | 22 + src/backend/optimizer/ordering.h | 24 + src/backend/optimizer/path/Makefile.inc | 21 + src/backend/optimizer/path/allpaths.c | 351 + src/backend/optimizer/path/clausesel.c | 331 + src/backend/optimizer/path/costsize.c | 456 + src/backend/optimizer/path/hashutils.c | 120 + src/backend/optimizer/path/indxpath.c | 1206 ++ src/backend/optimizer/path/joinpath.c | 623 + src/backend/optimizer/path/joinrels.c | 528 + src/backend/optimizer/path/joinutils.c | 432 + src/backend/optimizer/path/mergeutils.c | 122 + src/backend/optimizer/path/orindxpath.c | 271 + src/backend/optimizer/path/predmig.c | 773 ++ src/backend/optimizer/path/prune.c | 203 + src/backend/optimizer/path/xfunc.c | 1360 +++ src/backend/optimizer/pathnode.h | 50 + src/backend/optimizer/paths.h | 89 + src/backend/optimizer/plan/Makefile.inc | 15 + src/backend/optimizer/plan/createplan.c | 1097 ++ src/backend/optimizer/plan/initsplan.c | 391 + src/backend/optimizer/plan/planmain.c | 422 + src/backend/optimizer/plan/planner.c | 408 + src/backend/optimizer/plan/setrefs.c | 706 ++ src/backend/optimizer/plancat.h | 65 + src/backend/optimizer/planmain.h | 60 + src/backend/optimizer/planner.h | 24 + src/backend/optimizer/prep.h | 51 + src/backend/optimizer/prep/Makefile.inc | 14 + src/backend/optimizer/prep/archive.c | 66 + src/backend/optimizer/prep/prepqual.c | 582 + src/backend/optimizer/prep/preptlist.c | 322 + src/backend/optimizer/prep/prepunion.c | 400 + src/backend/optimizer/tlist.h | 36 + src/backend/optimizer/util/Makefile.inc | 15 + src/backend/optimizer/util/clauseinfo.c | 187 + src/backend/optimizer/util/clauses.c | 736 ++ src/backend/optimizer/util/indexnode.c | 92 + src/backend/optimizer/util/internal.c | 61 + src/backend/optimizer/util/joininfo.c | 107 + src/backend/optimizer/util/keys.c | 193 + src/backend/optimizer/util/ordering.c | 117 + src/backend/optimizer/util/pathnode.c | 566 + src/backend/optimizer/util/plancat.c | 582 + src/backend/optimizer/util/relnode.c | 123 + src/backend/optimizer/util/tlist.c | 577 + src/backend/optimizer/util/var.c | 189 + src/backend/optimizer/var.h | 21 + src/backend/optimizer/xfunc.h | 84 + src/backend/parser/Makefile.inc | 46 + src/backend/parser/analyze.c | 2467 ++++ src/backend/parser/catalog_utils.c | 1470 +++ src/backend/parser/catalog_utils.h | 64 + src/backend/parser/dbcommands.c | 259 + src/backend/parser/dbcommands.h | 28 + src/backend/parser/gram.y | 2113 ++++ src/backend/parser/keywords.c | 179 + src/backend/parser/keywords.h | 25 + src/backend/parser/parse_query.c | 653 + src/backend/parser/parse_query.h | 72 + src/backend/parser/parse_state.h | 27 + src/backend/parser/parser.c | 449 + src/backend/parser/parsetree.h | 80 + src/backend/parser/scan.l | 255 + src/backend/parser/scansup.c | 148 + src/backend/parser/scansup.h | 17 + src/backend/port/BSD44_derived/Makefile.inc | 28 + src/backend/port/BSD44_derived/README | 4 + src/backend/port/BSD44_derived/dl.c | 88 + src/backend/port/BSD44_derived/float.h | 30 + src/backend/port/BSD44_derived/machine.h | 19 + src/backend/port/BSD44_derived/port-protos.h | 41 + src/backend/port/Makefile.inc | 21 + src/backend/port/aix/Makefile.inc | 40 + src/backend/port/aix/README.dlfcn | 167 + 
src/backend/port/aix/dlfcn.c | 528 + src/backend/port/aix/dlfcn.h | 46 + src/backend/port/aix/machine.h | 19 + src/backend/port/aix/mkldexport.sh | 42 + src/backend/port/aix/port-protos.h | 25 + src/backend/port/alpha/Makefile.inc | 27 + src/backend/port/alpha/machine.h | 19 + src/backend/port/alpha/port-protos.h | 39 + src/backend/port/alpha/port.c | 34 + src/backend/port/bsdi/Makefile.inc | 15 + src/backend/port/bsdi/dynloader.c | 93 + src/backend/port/bsdi/machine.h | 18 + src/backend/port/bsdi/port-protos.h | 33 + src/backend/port/bsdi/port.c | 13 + src/backend/port/hpux/Makefile.inc | 68 + src/backend/port/hpux/dynloader.c | 57 + src/backend/port/hpux/fixade.h | 63 + src/backend/port/hpux/machine.h | 18 + src/backend/port/hpux/port-protos.h | 34 + src/backend/port/hpux/port.c | 47 + src/backend/port/hpux/tas.c.template | 36 + src/backend/port/hpux/tas.s | 28 + src/backend/port/irix5/Makefile.inc | 20 + src/backend/port/irix5/README | 2 + src/backend/port/irix5/machine.h | 19 + src/backend/port/irix5/port-protos.h | 36 + src/backend/port/irix5/port.c | 16 + src/backend/port/linux/Makefile.inc | 36 + src/backend/port/linux/dynloader.c | 93 + src/backend/port/linux/machine.h | 18 + src/backend/port/linux/port-protos.h | 37 + src/backend/port/linux/port.c | 13 + src/backend/port/sparc/Makefile.inc | 23 + src/backend/port/sparc/float.h | 30 + src/backend/port/sparc/machine.h | 19 + src/backend/port/sparc/port-protos.h | 34 + src/backend/port/sparc/strtol.c | 130 + src/backend/port/sparc_solaris/Makefile.inc | 20 + src/backend/port/sparc_solaris/machine.h | 19 + src/backend/port/sparc_solaris/port-protos.h | 38 + src/backend/port/sparc_solaris/port.c | 66 + src/backend/port/sparc_solaris/rusagestub.h | 30 + src/backend/port/sparc_solaris/tas.s | 50 + src/backend/port/ultrix4/Makefile.inc | 27 + src/backend/port/ultrix4/dl.h | 117 + src/backend/port/ultrix4/dynloader.c | 68 + src/backend/port/ultrix4/machine.h | 19 + src/backend/port/ultrix4/port-protos.h | 36 + src/backend/port/ultrix4/port.c | 25 + src/backend/port/ultrix4/strdup.c | 23 + src/backend/port/win32/machine.h | 2 + src/backend/port/win32/nt.c | 625 + src/backend/port/win32/nt.h | 54 + src/backend/port/win32/pglite.mak | 3323 +++++ src/backend/port/win32/port-protos.h | 1 + src/backend/port/win32/pwd.h | 1 + src/backend/port/win32/regex/COPYRIGHT | 56 + src/backend/port/win32/regex/Makefile.inc | 14 + src/backend/port/win32/regex/WHATSNEW | 94 + src/backend/port/win32/regex/cclass.h | 70 + src/backend/port/win32/regex/cname.h | 141 + src/backend/port/win32/regex/engine.c | 1091 ++ src/backend/port/win32/regex/re_format.7 | 269 + src/backend/port/win32/regex/regcomp.c | 1698 +++ src/backend/port/win32/regex/regerror.c | 180 + src/backend/port/win32/regex/regex.3 | 538 + src/backend/port/win32/regex/regex.h | 106 + src/backend/port/win32/regex/regex2.h | 175 + src/backend/port/win32/regex/regexec.c | 181 + src/backend/port/win32/regex/regexp.h | 69 + src/backend/port/win32/regex/regfree.c | 80 + src/backend/port/win32/regex/utils.h | 57 + src/backend/port/win32/rusagestub.h | 29 + src/backend/port/win32/sys/cdefs.h | 124 + src/backend/port/win32/sys/file.h | 1 + src/backend/port/win32/sys/ipc.h | 1 + src/backend/port/win32/sys/param.h | 1 + src/backend/port/win32/sys/sem.h | 1 + src/backend/port/win32/sys/shm.h | 1 + src/backend/port/win32/sys/time.h | 1 + src/backend/postmaster/Makefile.inc | 16 + src/backend/postmaster/postmaster.c | 1122 ++ src/backend/regex/COPYRIGHT | 56 + src/backend/regex/Makefile.inc | 14 + 
src/backend/regex/WHATSNEW | 94 + src/backend/regex/cclass.h | 70 + src/backend/regex/cdefs.h | 144 + src/backend/regex/cname.h | 141 + src/backend/regex/engine.c | 1092 ++ src/backend/regex/re_format.7 | 269 + src/backend/regex/regcomp.c | 1701 +++ src/backend/regex/regerror.c | 178 + src/backend/regex/regex.3 | 538 + src/backend/regex/regex.h | 108 + src/backend/regex/regex2.h | 175 + src/backend/regex/regexec.c | 181 + src/backend/regex/regexp.h | 71 + src/backend/regex/regfree.c | 80 + src/backend/regex/utils.h | 57 + src/backend/rewrite/Makefile.inc | 22 + src/backend/rewrite/locks.c | 131 + src/backend/rewrite/locks.h | 21 + src/backend/rewrite/prs2lock.h | 43 + src/backend/rewrite/rewriteDefine.c | 255 + src/backend/rewrite/rewriteDefine.h | 18 + src/backend/rewrite/rewriteHandler.c | 622 + src/backend/rewrite/rewriteHandler.h | 35 + src/backend/rewrite/rewriteManip.c | 435 + src/backend/rewrite/rewriteManip.h | 31 + src/backend/rewrite/rewriteRemove.c | 181 + src/backend/rewrite/rewriteRemove.h | 20 + src/backend/rewrite/rewriteSupport.c | 270 + src/backend/rewrite/rewriteSupport.h | 27 + src/backend/storage/Makefile.inc | 31 + src/backend/storage/backendid.h | 32 + src/backend/storage/block.h | 114 + src/backend/storage/buf.h | 47 + src/backend/storage/buf_internals.h | 220 + src/backend/storage/buffer/Makefile.inc | 16 + src/backend/storage/buffer/buf_init.c | 280 + src/backend/storage/buffer/buf_table.c | 162 + src/backend/storage/buffer/bufmgr.c | 1581 +++ src/backend/storage/buffer/freelist.c | 285 + src/backend/storage/buffer/localbuf.c | 284 + src/backend/storage/bufmgr.h | 112 + src/backend/storage/bufpage.h | 256 + src/backend/storage/fd.h | 96 + src/backend/storage/file/Makefile.inc | 14 + src/backend/storage/file/fd.c | 888 ++ src/backend/storage/ipc.h | 285 + src/backend/storage/ipc/Makefile.inc | 15 + src/backend/storage/ipc/README | 31 + src/backend/storage/ipc/ipc.c | 718 ++ src/backend/storage/ipc/ipci.c | 149 + src/backend/storage/ipc/s_lock.c | 440 + src/backend/storage/ipc/shmem.c | 561 + src/backend/storage/ipc/shmqueue.c | 251 + src/backend/storage/ipc/sinval.c | 169 + src/backend/storage/ipc/sinvaladt.c | 797 ++ src/backend/storage/ipc/spin.c | 247 + src/backend/storage/item.h | 20 + src/backend/storage/itemid.h | 75 + src/backend/storage/itempos.h | 44 + src/backend/storage/itemptr.h | 115 + src/backend/storage/large_object.h | 58 + src/backend/storage/large_object/Makefile.inc | 14 + src/backend/storage/large_object/inv_api.c | 1165 ++ src/backend/storage/lmgr.h | 84 + src/backend/storage/lmgr/Makefile.inc | 14 + src/backend/storage/lmgr/README | 93 + src/backend/storage/lmgr/lmgr.c | 933 ++ src/backend/storage/lmgr/lock.c | 1020 ++ src/backend/storage/lmgr/multi.c | 415 + src/backend/storage/lmgr/proc.c | 826 ++ src/backend/storage/lmgr/single.c | 86 + src/backend/storage/lock.h | 218 + src/backend/storage/multilev.h | 64 + src/backend/storage/off.h | 60 + src/backend/storage/page.h | 26 + src/backend/storage/page/Makefile.inc | 16 + src/backend/storage/page/bufpage.c | 519 + src/backend/storage/page/itemptr.c | 40 + src/backend/storage/pagenum.h | 33 + src/backend/storage/pos.h | 64 + src/backend/storage/proc.h | 127 + src/backend/storage/shmem.h | 104 + src/backend/storage/sinval.h | 33 + src/backend/storage/sinvaladt.h | 126 + src/backend/storage/smgr.h | 84 + src/backend/storage/smgr/Makefile.inc | 14 + src/backend/storage/smgr/README | 40 + src/backend/storage/smgr/md.c | 697 ++ src/backend/storage/smgr/mm.c | 586 + src/backend/storage/smgr/smgr.c | 
371 + src/backend/storage/smgr/smgrtype.c | 82 + src/backend/storage/spin.h | 38 + src/backend/tcop/Makefile.inc | 18 + src/backend/tcop/aclchk.c | 555 + src/backend/tcop/dest.c | 354 + src/backend/tcop/dest.h | 78 + src/backend/tcop/fastpath.c | 353 + src/backend/tcop/fastpath.h | 31 + src/backend/tcop/postgres.c | 1500 +++ src/backend/tcop/pquery.c | 362 + src/backend/tcop/pquery.h | 36 + src/backend/tcop/tcopdebug.h | 43 + src/backend/tcop/tcopprot.h | 40 + src/backend/tcop/utility.c | 646 + src/backend/tcop/utility.h | 18 + src/backend/tioga/Arr_TgRecipe.h | 120 + src/backend/tioga/Makefile.inc | 21 + src/backend/tioga/Varray.c | 48 + src/backend/tioga/Varray.h | 45 + src/backend/tioga/tgRecipe.c | 694 ++ src/backend/tioga/tgRecipe.h | 121 + src/backend/utils/Gen_fmgrtab.sh | 265 + src/backend/utils/Makefile.inc | 62 + src/backend/utils/acl.h | 163 + src/backend/utils/adt/Makefile.inc | 20 + src/backend/utils/adt/acl.c | 618 + src/backend/utils/adt/arrayfuncs.c | 1375 +++ src/backend/utils/adt/arrayutils.c | 111 + src/backend/utils/adt/bool.c | 65 + src/backend/utils/adt/char.c | 392 + src/backend/utils/adt/chunk.c | 587 + src/backend/utils/adt/date.c | 891 ++ src/backend/utils/adt/datetimes.c | 350 + src/backend/utils/adt/datum.c | 201 + src/backend/utils/adt/dt.c | 58 + src/backend/utils/adt/filename.c | 120 + src/backend/utils/adt/float.c | 1320 ++ src/backend/utils/adt/geo-ops.c | 1947 +++ src/backend/utils/adt/geo-selfuncs.c | 124 + src/backend/utils/adt/int.c | 343 + src/backend/utils/adt/like.c | 225 + src/backend/utils/adt/misc.c | 96 + src/backend/utils/adt/nabstime.c | 866 ++ src/backend/utils/adt/name.c | 198 + src/backend/utils/adt/not_in.c | 124 + src/backend/utils/adt/numutils.c | 401 + src/backend/utils/adt/oid.c | 127 + src/backend/utils/adt/oidint2.c | 120 + src/backend/utils/adt/oidint4.c | 111 + src/backend/utils/adt/oidname.c | 123 + src/backend/utils/adt/regexp.c | 343 + src/backend/utils/adt/regproc.c | 159 + src/backend/utils/adt/selfuncs.c | 585 + src/backend/utils/adt/sets.c | 164 + src/backend/utils/adt/tid.c | 92 + src/backend/utils/adt/varchar.c | 496 + src/backend/utils/adt/varlena.c | 488 + src/backend/utils/array.h | 166 + src/backend/utils/bit.h | 39 + src/backend/utils/builtins.h | 433 + src/backend/utils/cache/Makefile.inc | 15 + src/backend/utils/cache/catcache.c | 1023 ++ src/backend/utils/cache/fcache.c | 297 + src/backend/utils/cache/inval.c | 612 + src/backend/utils/cache/lsyscache.c | 484 + src/backend/utils/cache/rel.c | 77 + src/backend/utils/cache/relcache.c | 1795 +++ src/backend/utils/cache/syscache.c | 630 + src/backend/utils/catcache.h | 85 + src/backend/utils/datum.h | 64 + src/backend/utils/dynamic_loader.h | 53 + src/backend/utils/elog.h | 38 + src/backend/utils/error/Makefile.inc | 14 + src/backend/utils/error/assert.c | 64 + src/backend/utils/error/elog.c | 237 + src/backend/utils/error/exc.c | 183 + src/backend/utils/error/excabort.c | 28 + src/backend/utils/error/excid.c | 64 + src/backend/utils/error/format.c | 40 + src/backend/utils/exc.h | 101 + src/backend/utils/excid.h | 31 + src/backend/utils/fcache.h | 55 + src/backend/utils/fcache2.h | 19 + src/backend/utils/fmgr/Makefile.inc | 15 + src/backend/utils/fmgr/dfmgr.c | 269 + src/backend/utils/fmgr/fmgr.c | 254 + src/backend/utils/fmgrtab.h | 29 + src/backend/utils/geo-decls.h | 248 + src/backend/utils/hash/Makefile.inc | 14 + src/backend/utils/hash/dynahash.c | 868 ++ src/backend/utils/hash/hashfn.c | 156 + src/backend/utils/hsearch.h | 141 + src/backend/utils/init/Makefile.inc 
| 14 + src/backend/utils/init/enbl.c | 45 + src/backend/utils/init/findbe.c | 251 + src/backend/utils/init/globals.c | 108 + src/backend/utils/init/magic.c | 167 + src/backend/utils/init/miscinit.c | 378 + src/backend/utils/init/postinit.c | 648 + src/backend/utils/inval.h | 56 + src/backend/utils/lselect.h | 40 + src/backend/utils/lsyscache.h | 45 + src/backend/utils/mcxt.h | 56 + src/backend/utils/memutils.h | 281 + src/backend/utils/mmgr/Makefile.inc | 15 + src/backend/utils/mmgr/aset.c | 381 + src/backend/utils/mmgr/mcxt.c | 510 + src/backend/utils/mmgr/oset.c | 173 + src/backend/utils/mmgr/palloc.c | 117 + src/backend/utils/mmgr/portalmem.c | 980 ++ src/backend/utils/module.h | 25 + src/backend/utils/nabstime.h | 165 + src/backend/utils/oidcompos.h | 52 + src/backend/utils/palloc.h | 26 + src/backend/utils/portal.h | 97 + src/backend/utils/psort.h | 86 + src/backend/utils/rel.h | 170 + src/backend/utils/rel2.h | 23 + src/backend/utils/relcache.h | 47 + src/backend/utils/sets.h | 22 + src/backend/utils/sort/Makefile.inc | 14 + src/backend/utils/sort/lselect.c | 365 + src/backend/utils/sort/psort.c | 617 + src/backend/utils/syscache.h | 89 + src/backend/utils/time/Makefile.inc | 14 + src/backend/utils/time/tqual.c | 815 ++ src/backend/utils/tqual.h | 55 + src/bin/Makefile | 30 + src/bin/Makefile.global | 33 + src/bin/cleardbdir/Makefile | 21 + src/bin/cleardbdir/cleardbdir.sh | 37 + src/bin/createdb/Makefile | 21 + src/bin/createdb/createdb.sh | 66 + src/bin/createuser/Makefile | 21 + src/bin/createuser/createuser.sh | 225 + src/bin/destroydb/Makefile | 21 + src/bin/destroydb/destroydb.sh | 69 + src/bin/destroyuser/Makefile | 21 + src/bin/destroyuser/destroyuser.sh | 192 + src/bin/initdb/Makefile | 21 + src/bin/initdb/initdb.sh | 222 + src/bin/ipcclean/Makefile | 21 + src/bin/ipcclean/ipcclean.sh | 8 + src/bin/monitor/Makefile | 23 + src/bin/monitor/monitor.c | 1058 ++ src/bin/pg4_dump/Makefile | 13 + src/bin/pg4_dump/README | 87 + src/bin/pg4_dump/common.c | 417 + src/bin/pg4_dump/pg4_dump.c | 1602 +++ src/bin/pg4_dump/pg_dump.h | 195 + src/bin/pg_dump/Makefile | 23 + src/bin/pg_dump/README | 73 + src/bin/pg_dump/common.c | 397 + src/bin/pg_dump/pg_dump.c | 1443 +++ src/bin/pg_dump/pg_dump.h | 195 + src/bin/pg_id/Makefile | 23 + src/bin/pg_id/pg_id.c | 52 + src/bin/pg_version/Makefile | 26 + src/bin/pg_version/pg_version.c | 35 + src/bin/pgtclsh/Makefile | 46 + src/bin/pgtclsh/README | 21 + src/bin/pgtclsh/pgtclAppInit.c | 114 + src/bin/pgtclsh/pgtclUtils.tcl | 16 + src/bin/pgtclsh/pgtkAppInit.c | 117 + src/bin/pgtclsh/updateStats.tcl | 71 + src/bin/psql/Makefile | 65 + src/bin/psql/psql.c | 1230 ++ src/bin/psql/psqlHelp.h | 168 + src/bin/psql/rlstubs.c | 41 + src/bin/psql/stringutils.c | 104 + src/bin/psql/stringutils.h | 51 + src/interfaces/libpgtcl/Makefile | 38 + src/interfaces/libpgtcl/README | 7 + src/interfaces/libpgtcl/libpgtcl.h | 21 + src/interfaces/libpgtcl/pgtcl.c | 105 + src/interfaces/libpgtcl/pgtclCmds.c | 812 ++ src/interfaces/libpgtcl/pgtclCmds.h | 52 + src/interfaces/libpgtcl/pgtclId.c | 51 + src/interfaces/libpgtcl/pgtclId.h | 18 + src/interfaces/libpq++/Makefile | 54 + src/interfaces/libpq++/README | 22 + src/interfaces/libpq++/examples/Makefile | 70 + src/interfaces/libpq++/examples/testlibpq0.cc | 49 + src/interfaces/libpq++/examples/testlibpq1.cc | 84 + src/interfaces/libpq++/examples/testlibpq2.cc | 71 + .../libpq++/examples/testlibpq2.sql | 5 + src/interfaces/libpq++/examples/testlibpq3.cc | 131 + .../libpq++/examples/testlibpq3.sql | 6 + 
src/interfaces/libpq++/examples/testlibpq4.cc | 69 + src/interfaces/libpq++/examples/testlo.cc | 63 + src/interfaces/libpq++/libpq++.H | 173 + src/interfaces/libpq++/man/libpq++.3 | 434 + src/interfaces/libpq++/pgconnection.cc | 94 + src/interfaces/libpq++/pgenv.cc | 109 + src/interfaces/libpq++/pglobject.cc | 152 + src/interfaces/libpq/Makefile | 98 + src/interfaces/libpq/README | 1 + src/interfaces/libpq/fe-auth.c | 544 + src/interfaces/libpq/fe-auth.h | 38 + src/interfaces/libpq/fe-connect.c | 460 + src/interfaces/libpq/fe-exec.c | 1061 ++ src/interfaces/libpq/fe-lobj.c | 381 + src/interfaces/libpq/fe-misc.c | 193 + src/interfaces/libpq/libpq-fe.h | 251 + src/interfaces/libpq/pg_hba | 13 + src/interfaces/libpq/pqsignal.c | 40 + src/interfaces/libpq/pqsignal.h | 32 + src/mk/port/postgres.mk.BSD44_derived | 40 + src/mk/port/postgres.mk.aix | 55 + src/mk/port/postgres.mk.alpha | 52 + src/mk/port/postgres.mk.bsdi | 40 + src/mk/port/postgres.mk.hpux | 64 + src/mk/port/postgres.mk.irix5 | 49 + src/mk/port/postgres.mk.linux | 54 + src/mk/port/postgres.mk.sparc | 38 + src/mk/port/postgres.mk.sparc_solaris | 57 + src/mk/port/postgres.mk.svr4 | 35 + src/mk/port/postgres.mk.ultrix4 | 34 + src/mk/postgres.lib.mk | 51 + src/mk/postgres.mk | 150 + src/mk/postgres.prog.mk | 26 + src/mk/postgres.shell.mk | 62 + src/mk/postgres.subdir.mk | 21 + src/mk/postgres.user.mk | 79 + src/test/Makefile | 18 + src/test/bench/Makefile | 62 + src/test/bench/WISC-README | 28 + src/test/bench/create.sh | 24 + src/test/bench/create.source | 17 + src/test/bench/perquery | 12 + src/test/bench/query01 | 4 + src/test/bench/query02 | 4 + src/test/bench/query03 | 4 + src/test/bench/query04 | 4 + src/test/bench/query05 | 4 + src/test/bench/query06 | 4 + src/test/bench/query07 | 2 + src/test/bench/query08 | 2 + src/test/bench/query09 | 4 + src/test/bench/query10 | 4 + src/test/bench/query11 | 4 + src/test/bench/query12 | 4 + src/test/bench/query13 | 4 + src/test/bench/query14 | 4 + src/test/bench/query15 | 4 + src/test/bench/query16 | 4 + src/test/bench/query17 | 4 + src/test/bench/query18 | 4 + src/test/bench/query19 | 4 + src/test/bench/query20 | 4 + src/test/bench/query21 | 0 src/test/bench/query22 | 0 src/test/bench/query23 | 4 + src/test/bench/query24 | 0 src/test/bench/query25 | 0 src/test/bench/query26 | 2 + src/test/bench/query27 | 2 + src/test/bench/query28 | 2 + src/test/bench/query29 | 2 + src/test/bench/query30 | 2 + src/test/bench/query31 | 2 + src/test/bench/query32 | 2 + src/test/bench/runwisc.sh | 17 + src/test/bench/wholebench.sh | 5 + src/test/examples/Makefile | 74 + src/test/examples/testlibpq.c | 118 + src/test/examples/testlibpq2.c | 93 + src/test/examples/testlibpq2.sql | 5 + src/test/examples/testlibpq3.c | 154 + src/test/examples/testlibpq3.sql | 6 + src/test/examples/testlibpq4.c | 127 + src/test/examples/testlo.c | 232 + src/test/examples/testlo2.c | 233 + src/test/regress/Makefile | 62 + src/test/regress/create.source | 765 ++ src/test/regress/data/dept.data | 2 + src/test/regress/data/desc.data | 10000 ++++++++++++++++ src/test/regress/data/emp.data | 3 + src/test/regress/data/hash.data | 10000 ++++++++++++++++ src/test/regress/data/onek.data | 1000 ++ src/test/regress/data/person.data | 50 + src/test/regress/data/real_city.data | 5 + src/test/regress/data/rect.data | 3100 +++++ src/test/regress/data/streets.data | 5124 ++++++++ src/test/regress/data/stud_emp.data | 3 + src/test/regress/data/student.data | 2 + src/test/regress/data/tenk.data | 10000 ++++++++++++++++ src/test/regress/destroy.source 
| 285 + src/test/regress/errors.source | 275 + src/test/regress/queries.source | 2614 ++++ src/test/regress/regress.c | 271 + src/test/regress/regress.sh | 64 + src/test/regress/sample.regress.out | 6362 ++++++++++ src/test/regress/security.source | 64 + src/test/suite/README | 5 + src/test/suite/agg.sql | 76 + src/test/suite/date.sql | 30 + src/test/suite/float.sql | 113 + src/test/suite/group.sql | 100 + src/test/suite/group_err.sql | 29 + src/test/suite/inh.sql | 73 + src/test/suite/join.sql | 40 + src/test/suite/oper.sql | 27 + src/test/suite/parse.sql | 45 + src/test/suite/quote.sql | 18 + src/test/suite/results/agg.sql.out | 147 + src/test/suite/results/date.sql.out | 72 + src/test/suite/results/float.sql.out | 330 + src/test/suite/results/group.sql.out | 262 + src/test/suite/results/group_err.sql.out | 18 + src/test/suite/results/inh.sql.out | 86 + src/test/suite/results/join.sql.out | 40 + src/test/suite/results/oper.sql.out | 18 + src/test/suite/results/parse.sql.out | 60 + src/test/suite/results/quote.sql.out | 32 + src/test/suite/results/rules.sql.out | 22 + src/test/suite/results/select.sql.out | 31 + src/test/suite/results/sort.sql.out | 229 + src/test/suite/results/sqlcompat.sql.out | 100 + src/test/suite/results/time.sql.out | 72 + src/test/suite/results/varchar.sql.out | 226 + src/test/suite/results/views.sql.out | 125 + src/test/suite/rules.sql | 29 + src/test/suite/runall | 8 + src/test/suite/select.sql | 31 + src/test/suite/sort.sql | 57 + src/test/suite/sqlcompat.sql | 57 + src/test/suite/time.sql | 30 + src/test/suite/varchar.sql | 64 + src/test/suite/views.sql | 77 + src/tools/mkldexport/Makefile | 20 + src/tools/mkldexport/README | 12 + src/tools/mkldexport/mkldexport.sh | 36 + src/tutorial/C-code/beard.c | 64 + src/tutorial/C-code/complex.c | 150 + src/tutorial/C-code/funcs.c | 56 + src/tutorial/Makefile | 39 + src/tutorial/README | 24 + src/tutorial/advanced.source | 125 + src/tutorial/basics.source | 188 + src/tutorial/complex.source | 251 + src/tutorial/funcs.source | 158 + src/tutorial/syscat.source | 151 + 868 files changed, 242656 insertions(+) create mode 100644 src/Makefile create mode 100644 src/Makefile.global create mode 100644 src/backend/Makefile create mode 100644 src/backend/access/Makefile.inc create mode 100644 src/backend/access/attnum.h create mode 100644 src/backend/access/common/Makefile.inc create mode 100644 src/backend/access/common/heaptuple.c create mode 100644 src/backend/access/common/heapvalid.c create mode 100644 src/backend/access/common/indextuple.c create mode 100644 src/backend/access/common/indexvalid.c create mode 100644 src/backend/access/common/printtup.c create mode 100644 src/backend/access/common/scankey.c create mode 100644 src/backend/access/common/tupdesc.c create mode 100644 src/backend/access/funcindex.h create mode 100644 src/backend/access/genam.h create mode 100644 src/backend/access/hash.h create mode 100644 src/backend/access/hash/Makefile.inc create mode 100644 src/backend/access/hash/hash.c create mode 100644 src/backend/access/hash/hashfunc.c create mode 100644 src/backend/access/hash/hashinsert.c create mode 100644 src/backend/access/hash/hashovfl.c create mode 100644 src/backend/access/hash/hashpage.c create mode 100644 src/backend/access/hash/hashscan.c create mode 100644 src/backend/access/hash/hashsearch.c create mode 100644 src/backend/access/hash/hashstrat.c create mode 100644 src/backend/access/hash/hashutil.c create mode 100644 src/backend/access/heap/Makefile.inc create mode 100644 
src/backend/access/heap/heapam.c create mode 100644 src/backend/access/heap/hio.c create mode 100644 src/backend/access/heap/stats.c create mode 100644 src/backend/access/heapam.h create mode 100644 src/backend/access/hio.h create mode 100644 src/backend/access/htup.h create mode 100644 src/backend/access/ibit.h create mode 100644 src/backend/access/index/Makefile.inc create mode 100644 src/backend/access/index/genam.c create mode 100644 src/backend/access/index/indexam.c create mode 100644 src/backend/access/index/istrat.c create mode 100644 src/backend/access/iqual.h create mode 100644 src/backend/access/istrat.h create mode 100644 src/backend/access/itup.h create mode 100644 src/backend/access/nbtree.h create mode 100644 src/backend/access/nbtree/Makefile.inc create mode 100644 src/backend/access/nbtree/README create mode 100644 src/backend/access/nbtree/nbtcompare.c create mode 100644 src/backend/access/nbtree/nbtinsert.c create mode 100644 src/backend/access/nbtree/nbtpage.c create mode 100644 src/backend/access/nbtree/nbtree.c create mode 100644 src/backend/access/nbtree/nbtscan.c create mode 100644 src/backend/access/nbtree/nbtsearch.c create mode 100644 src/backend/access/nbtree/nbtsort.c create mode 100644 src/backend/access/nbtree/nbtstrat.c create mode 100644 src/backend/access/nbtree/nbtutils.c create mode 100644 src/backend/access/printtup.h create mode 100644 src/backend/access/relscan.h create mode 100644 src/backend/access/rtree.h create mode 100644 src/backend/access/rtree/Makefile.inc create mode 100644 src/backend/access/rtree/rtget.c create mode 100644 src/backend/access/rtree/rtproc.c create mode 100644 src/backend/access/rtree/rtree.c create mode 100644 src/backend/access/rtree/rtscan.c create mode 100644 src/backend/access/rtree/rtstrat.c create mode 100644 src/backend/access/rtscan.h create mode 100644 src/backend/access/rtstrat.h create mode 100644 src/backend/access/sdir.h create mode 100644 src/backend/access/skey.h create mode 100644 src/backend/access/strat.h create mode 100644 src/backend/access/transam.h create mode 100644 src/backend/access/transam/Makefile.inc create mode 100644 src/backend/access/transam/transam.c create mode 100644 src/backend/access/transam/transsup.c create mode 100644 src/backend/access/transam/varsup.c create mode 100644 src/backend/access/transam/xact.c create mode 100644 src/backend/access/transam/xid.c create mode 100644 src/backend/access/tupdesc.h create mode 100644 src/backend/access/tupmacs.h create mode 100644 src/backend/access/valid.h create mode 100644 src/backend/access/xact.h create mode 100644 src/backend/bootstrap/Makefile.inc create mode 100644 src/backend/bootstrap/boot.sed create mode 100644 src/backend/bootstrap/bootparse.y create mode 100644 src/backend/bootstrap/bootscanner.l create mode 100644 src/backend/bootstrap/bootstrap.c create mode 100644 src/backend/bootstrap/bootstrap.h create mode 100644 src/backend/catalog/Makefile.inc create mode 100644 src/backend/catalog/README create mode 100644 src/backend/catalog/catalog.c create mode 100644 src/backend/catalog/catalog.h create mode 100644 src/backend/catalog/catname.h create mode 100644 src/backend/catalog/genbki.sh create mode 100644 src/backend/catalog/heap.c create mode 100644 src/backend/catalog/heap.h create mode 100644 src/backend/catalog/index.c create mode 100644 src/backend/catalog/index.h create mode 100644 src/backend/catalog/indexing.c create mode 100644 src/backend/catalog/indexing.h create mode 100644 src/backend/catalog/pg_aggregate.c create mode 
100644 src/backend/catalog/pg_aggregate.h create mode 100644 src/backend/catalog/pg_am.h create mode 100644 src/backend/catalog/pg_amop.h create mode 100644 src/backend/catalog/pg_amproc.h create mode 100644 src/backend/catalog/pg_attribute.h create mode 100644 src/backend/catalog/pg_class.h create mode 100644 src/backend/catalog/pg_database.h create mode 100644 src/backend/catalog/pg_defaults.h create mode 100644 src/backend/catalog/pg_demon.h create mode 100644 src/backend/catalog/pg_group.h create mode 100644 src/backend/catalog/pg_hosts.h create mode 100644 src/backend/catalog/pg_index.h create mode 100644 src/backend/catalog/pg_inheritproc.h create mode 100644 src/backend/catalog/pg_inherits.h create mode 100644 src/backend/catalog/pg_ipl.h create mode 100644 src/backend/catalog/pg_language.h create mode 100644 src/backend/catalog/pg_listener.h create mode 100644 src/backend/catalog/pg_log.h create mode 100644 src/backend/catalog/pg_magic.h create mode 100644 src/backend/catalog/pg_opclass.h create mode 100644 src/backend/catalog/pg_operator.c create mode 100644 src/backend/catalog/pg_operator.h create mode 100644 src/backend/catalog/pg_parg.h create mode 100644 src/backend/catalog/pg_proc.c create mode 100644 src/backend/catalog/pg_proc.h create mode 100644 src/backend/catalog/pg_rewrite.h create mode 100644 src/backend/catalog/pg_server.h create mode 100644 src/backend/catalog/pg_statistic.h create mode 100644 src/backend/catalog/pg_time.h create mode 100644 src/backend/catalog/pg_type.c create mode 100644 src/backend/catalog/pg_type.h create mode 100644 src/backend/catalog/pg_user.h create mode 100644 src/backend/catalog/pg_variable.h create mode 100644 src/backend/catalog/pg_version.h create mode 100644 src/backend/catalog/unused_oids create mode 100644 src/backend/commands/Makefile.inc create mode 100644 src/backend/commands/_deadcode/version.c create mode 100644 src/backend/commands/async.c create mode 100644 src/backend/commands/async.h create mode 100644 src/backend/commands/cluster.c create mode 100644 src/backend/commands/cluster.h create mode 100644 src/backend/commands/command.c create mode 100644 src/backend/commands/command.h create mode 100644 src/backend/commands/copy.c create mode 100644 src/backend/commands/copy.h create mode 100644 src/backend/commands/creatinh.c create mode 100644 src/backend/commands/creatinh.h create mode 100644 src/backend/commands/defind.c create mode 100644 src/backend/commands/define.c create mode 100644 src/backend/commands/defrem.h create mode 100644 src/backend/commands/explain.c create mode 100644 src/backend/commands/explain.h create mode 100644 src/backend/commands/purge.c create mode 100644 src/backend/commands/purge.h create mode 100644 src/backend/commands/recipe.c create mode 100644 src/backend/commands/recipe.h create mode 100644 src/backend/commands/remove.c create mode 100644 src/backend/commands/rename.c create mode 100644 src/backend/commands/rename.h create mode 100644 src/backend/commands/vacuum.c create mode 100644 src/backend/commands/vacuum.h create mode 100644 src/backend/commands/version.h create mode 100644 src/backend/commands/view.c create mode 100644 src/backend/commands/view.h create mode 100644 src/backend/executor/Makefile.inc create mode 100644 src/backend/executor/execAmi.c create mode 100644 src/backend/executor/execFlatten.c create mode 100644 src/backend/executor/execFlatten.h create mode 100644 src/backend/executor/execJunk.c create mode 100644 src/backend/executor/execMain.c create mode 100644 
src/backend/executor/execProcnode.c create mode 100644 src/backend/executor/execQual.c create mode 100644 src/backend/executor/execScan.c create mode 100644 src/backend/executor/execTuples.c create mode 100644 src/backend/executor/execUtils.c create mode 100644 src/backend/executor/execdebug.h create mode 100644 src/backend/executor/execdefs.h create mode 100644 src/backend/executor/execdesc.h create mode 100644 src/backend/executor/executor.h create mode 100644 src/backend/executor/functions.c create mode 100644 src/backend/executor/functions.h create mode 100644 src/backend/executor/hashjoin.h create mode 100644 src/backend/executor/nodeAgg.c create mode 100644 src/backend/executor/nodeAgg.h create mode 100644 src/backend/executor/nodeAppend.c create mode 100644 src/backend/executor/nodeAppend.h create mode 100644 src/backend/executor/nodeGroup.c create mode 100644 src/backend/executor/nodeGroup.h create mode 100644 src/backend/executor/nodeHash.c create mode 100644 src/backend/executor/nodeHash.h create mode 100644 src/backend/executor/nodeHashjoin.c create mode 100644 src/backend/executor/nodeHashjoin.h create mode 100644 src/backend/executor/nodeIndexscan.c create mode 100644 src/backend/executor/nodeIndexscan.h create mode 100644 src/backend/executor/nodeMaterial.c create mode 100644 src/backend/executor/nodeMaterial.h create mode 100644 src/backend/executor/nodeMergejoin.c create mode 100644 src/backend/executor/nodeMergejoin.h create mode 100644 src/backend/executor/nodeNestloop.c create mode 100644 src/backend/executor/nodeNestloop.h create mode 100644 src/backend/executor/nodeResult.c create mode 100644 src/backend/executor/nodeResult.h create mode 100644 src/backend/executor/nodeSeqscan.c create mode 100644 src/backend/executor/nodeSeqscan.h create mode 100644 src/backend/executor/nodeSort.c create mode 100644 src/backend/executor/nodeSort.h create mode 100644 src/backend/executor/nodeTee.c create mode 100644 src/backend/executor/nodeTee.h create mode 100644 src/backend/executor/nodeUnique.c create mode 100644 src/backend/executor/nodeUnique.h create mode 100644 src/backend/executor/tuptable.h create mode 100644 src/backend/include/Makefile.inc create mode 100644 src/backend/include/c.h create mode 100644 src/backend/include/miscadmin.h create mode 100644 src/backend/include/postgres.h create mode 100644 src/backend/lib/Makefile.inc create mode 100644 src/backend/lib/bit.c create mode 100644 src/backend/lib/dllist.c create mode 100644 src/backend/lib/dllist.h create mode 100644 src/backend/lib/fstack.c create mode 100644 src/backend/lib/fstack.h create mode 100644 src/backend/lib/hasht.c create mode 100644 src/backend/lib/hasht.h create mode 100644 src/backend/lib/lispsort.c create mode 100644 src/backend/lib/lispsort.h create mode 100644 src/backend/lib/qsort.c create mode 100644 src/backend/lib/qsort.h create mode 100644 src/backend/lib/stringinfo.c create mode 100644 src/backend/lib/stringinfo.h create mode 100644 src/backend/libpq/Makefile.inc create mode 100644 src/backend/libpq/auth.c create mode 100644 src/backend/libpq/auth.h create mode 100644 src/backend/libpq/be-dumpdata.c create mode 100644 src/backend/libpq/be-fsstubs.c create mode 100644 src/backend/libpq/be-fsstubs.h create mode 100644 src/backend/libpq/be-pqexec.c create mode 100644 src/backend/libpq/libpq-be.h create mode 100644 src/backend/libpq/libpq-fs.h create mode 100644 src/backend/libpq/libpq.h create mode 100644 src/backend/libpq/portal.c create mode 100644 src/backend/libpq/portalbuf.c create mode 
100644 src/backend/libpq/pqcomm.c create mode 100644 src/backend/libpq/pqcomm.h create mode 100644 src/backend/libpq/pqpacket.c create mode 100644 src/backend/libpq/pqsignal.c create mode 100644 src/backend/libpq/pqsignal.h create mode 100644 src/backend/main/Makefile.inc create mode 100644 src/backend/main/main.c create mode 100644 src/backend/makeID create mode 100644 src/backend/nodes/Makefile.inc create mode 100644 src/backend/nodes/README create mode 100644 src/backend/nodes/copyfuncs.c create mode 100644 src/backend/nodes/equalfuncs.c create mode 100644 src/backend/nodes/execnodes.h create mode 100644 src/backend/nodes/list.c create mode 100644 src/backend/nodes/makefuncs.c create mode 100644 src/backend/nodes/makefuncs.h create mode 100644 src/backend/nodes/memnodes.h create mode 100644 src/backend/nodes/nodeFuncs.c create mode 100644 src/backend/nodes/nodeFuncs.h create mode 100644 src/backend/nodes/nodes.c create mode 100644 src/backend/nodes/nodes.h create mode 100644 src/backend/nodes/outfuncs.c create mode 100644 src/backend/nodes/params.h create mode 100644 src/backend/nodes/parsenodes.h create mode 100644 src/backend/nodes/pg_list.h create mode 100644 src/backend/nodes/plannodes.h create mode 100644 src/backend/nodes/primnodes.h create mode 100644 src/backend/nodes/print.c create mode 100644 src/backend/nodes/read.c create mode 100644 src/backend/nodes/readfuncs.c create mode 100644 src/backend/nodes/readfuncs.h create mode 100644 src/backend/nodes/relation.h create mode 100644 src/backend/optimizer/Makefile.inc create mode 100644 src/backend/optimizer/clauseinfo.h create mode 100644 src/backend/optimizer/clauses.h create mode 100644 src/backend/optimizer/cost.h create mode 100644 src/backend/optimizer/internal.h create mode 100644 src/backend/optimizer/joininfo.h create mode 100644 src/backend/optimizer/keys.h create mode 100644 src/backend/optimizer/ordering.h create mode 100644 src/backend/optimizer/path/Makefile.inc create mode 100644 src/backend/optimizer/path/allpaths.c create mode 100644 src/backend/optimizer/path/clausesel.c create mode 100644 src/backend/optimizer/path/costsize.c create mode 100644 src/backend/optimizer/path/hashutils.c create mode 100644 src/backend/optimizer/path/indxpath.c create mode 100644 src/backend/optimizer/path/joinpath.c create mode 100644 src/backend/optimizer/path/joinrels.c create mode 100644 src/backend/optimizer/path/joinutils.c create mode 100644 src/backend/optimizer/path/mergeutils.c create mode 100644 src/backend/optimizer/path/orindxpath.c create mode 100644 src/backend/optimizer/path/predmig.c create mode 100644 src/backend/optimizer/path/prune.c create mode 100644 src/backend/optimizer/path/xfunc.c create mode 100644 src/backend/optimizer/pathnode.h create mode 100644 src/backend/optimizer/paths.h create mode 100644 src/backend/optimizer/plan/Makefile.inc create mode 100644 src/backend/optimizer/plan/createplan.c create mode 100644 src/backend/optimizer/plan/initsplan.c create mode 100644 src/backend/optimizer/plan/planmain.c create mode 100644 src/backend/optimizer/plan/planner.c create mode 100644 src/backend/optimizer/plan/setrefs.c create mode 100644 src/backend/optimizer/plancat.h create mode 100644 src/backend/optimizer/planmain.h create mode 100644 src/backend/optimizer/planner.h create mode 100644 src/backend/optimizer/prep.h create mode 100644 src/backend/optimizer/prep/Makefile.inc create mode 100644 src/backend/optimizer/prep/archive.c create mode 100644 src/backend/optimizer/prep/prepqual.c create mode 100644 
src/backend/optimizer/prep/preptlist.c create mode 100644 src/backend/optimizer/prep/prepunion.c create mode 100644 src/backend/optimizer/tlist.h create mode 100644 src/backend/optimizer/util/Makefile.inc create mode 100644 src/backend/optimizer/util/clauseinfo.c create mode 100644 src/backend/optimizer/util/clauses.c create mode 100644 src/backend/optimizer/util/indexnode.c create mode 100644 src/backend/optimizer/util/internal.c create mode 100644 src/backend/optimizer/util/joininfo.c create mode 100644 src/backend/optimizer/util/keys.c create mode 100644 src/backend/optimizer/util/ordering.c create mode 100644 src/backend/optimizer/util/pathnode.c create mode 100644 src/backend/optimizer/util/plancat.c create mode 100644 src/backend/optimizer/util/relnode.c create mode 100644 src/backend/optimizer/util/tlist.c create mode 100644 src/backend/optimizer/util/var.c create mode 100644 src/backend/optimizer/var.h create mode 100644 src/backend/optimizer/xfunc.h create mode 100644 src/backend/parser/Makefile.inc create mode 100644 src/backend/parser/analyze.c create mode 100644 src/backend/parser/catalog_utils.c create mode 100644 src/backend/parser/catalog_utils.h create mode 100644 src/backend/parser/dbcommands.c create mode 100644 src/backend/parser/dbcommands.h create mode 100644 src/backend/parser/gram.y create mode 100644 src/backend/parser/keywords.c create mode 100644 src/backend/parser/keywords.h create mode 100644 src/backend/parser/parse_query.c create mode 100644 src/backend/parser/parse_query.h create mode 100644 src/backend/parser/parse_state.h create mode 100644 src/backend/parser/parser.c create mode 100644 src/backend/parser/parsetree.h create mode 100644 src/backend/parser/scan.l create mode 100644 src/backend/parser/scansup.c create mode 100644 src/backend/parser/scansup.h create mode 100644 src/backend/port/BSD44_derived/Makefile.inc create mode 100644 src/backend/port/BSD44_derived/README create mode 100644 src/backend/port/BSD44_derived/dl.c create mode 100644 src/backend/port/BSD44_derived/float.h create mode 100644 src/backend/port/BSD44_derived/machine.h create mode 100644 src/backend/port/BSD44_derived/port-protos.h create mode 100644 src/backend/port/Makefile.inc create mode 100644 src/backend/port/aix/Makefile.inc create mode 100644 src/backend/port/aix/README.dlfcn create mode 100644 src/backend/port/aix/dlfcn.c create mode 100644 src/backend/port/aix/dlfcn.h create mode 100644 src/backend/port/aix/machine.h create mode 100755 src/backend/port/aix/mkldexport.sh create mode 100644 src/backend/port/aix/port-protos.h create mode 100644 src/backend/port/alpha/Makefile.inc create mode 100644 src/backend/port/alpha/machine.h create mode 100644 src/backend/port/alpha/port-protos.h create mode 100644 src/backend/port/alpha/port.c create mode 100644 src/backend/port/bsdi/Makefile.inc create mode 100644 src/backend/port/bsdi/dynloader.c create mode 100644 src/backend/port/bsdi/machine.h create mode 100644 src/backend/port/bsdi/port-protos.h create mode 100644 src/backend/port/bsdi/port.c create mode 100644 src/backend/port/hpux/Makefile.inc create mode 100644 src/backend/port/hpux/dynloader.c create mode 100644 src/backend/port/hpux/fixade.h create mode 100644 src/backend/port/hpux/machine.h create mode 100644 src/backend/port/hpux/port-protos.h create mode 100644 src/backend/port/hpux/port.c create mode 100644 src/backend/port/hpux/tas.c.template create mode 100644 src/backend/port/hpux/tas.s create mode 100644 src/backend/port/irix5/Makefile.inc create mode 100644 
src/backend/port/irix5/README create mode 100644 src/backend/port/irix5/machine.h create mode 100644 src/backend/port/irix5/port-protos.h create mode 100644 src/backend/port/irix5/port.c create mode 100644 src/backend/port/linux/Makefile.inc create mode 100644 src/backend/port/linux/dynloader.c create mode 100644 src/backend/port/linux/machine.h create mode 100644 src/backend/port/linux/port-protos.h create mode 100644 src/backend/port/linux/port.c create mode 100644 src/backend/port/sparc/Makefile.inc create mode 100644 src/backend/port/sparc/float.h create mode 100644 src/backend/port/sparc/machine.h create mode 100644 src/backend/port/sparc/port-protos.h create mode 100644 src/backend/port/sparc/strtol.c create mode 100644 src/backend/port/sparc_solaris/Makefile.inc create mode 100644 src/backend/port/sparc_solaris/machine.h create mode 100644 src/backend/port/sparc_solaris/port-protos.h create mode 100644 src/backend/port/sparc_solaris/port.c create mode 100644 src/backend/port/sparc_solaris/rusagestub.h create mode 100644 src/backend/port/sparc_solaris/tas.s create mode 100644 src/backend/port/ultrix4/Makefile.inc create mode 100644 src/backend/port/ultrix4/dl.h create mode 100644 src/backend/port/ultrix4/dynloader.c create mode 100644 src/backend/port/ultrix4/machine.h create mode 100644 src/backend/port/ultrix4/port-protos.h create mode 100644 src/backend/port/ultrix4/port.c create mode 100644 src/backend/port/ultrix4/strdup.c create mode 100644 src/backend/port/win32/machine.h create mode 100644 src/backend/port/win32/nt.c create mode 100644 src/backend/port/win32/nt.h create mode 100644 src/backend/port/win32/pglite.mak create mode 100644 src/backend/port/win32/port-protos.h create mode 100644 src/backend/port/win32/pwd.h create mode 100644 src/backend/port/win32/regex/COPYRIGHT create mode 100644 src/backend/port/win32/regex/Makefile.inc create mode 100644 src/backend/port/win32/regex/WHATSNEW create mode 100644 src/backend/port/win32/regex/cclass.h create mode 100644 src/backend/port/win32/regex/cname.h create mode 100644 src/backend/port/win32/regex/engine.c create mode 100644 src/backend/port/win32/regex/re_format.7 create mode 100644 src/backend/port/win32/regex/regcomp.c create mode 100644 src/backend/port/win32/regex/regerror.c create mode 100644 src/backend/port/win32/regex/regex.3 create mode 100644 src/backend/port/win32/regex/regex.h create mode 100644 src/backend/port/win32/regex/regex2.h create mode 100644 src/backend/port/win32/regex/regexec.c create mode 100644 src/backend/port/win32/regex/regexp.h create mode 100644 src/backend/port/win32/regex/regfree.c create mode 100644 src/backend/port/win32/regex/utils.h create mode 100644 src/backend/port/win32/rusagestub.h create mode 100644 src/backend/port/win32/sys/cdefs.h create mode 100644 src/backend/port/win32/sys/file.h create mode 100644 src/backend/port/win32/sys/ipc.h create mode 100644 src/backend/port/win32/sys/param.h create mode 100644 src/backend/port/win32/sys/sem.h create mode 100644 src/backend/port/win32/sys/shm.h create mode 100644 src/backend/port/win32/sys/time.h create mode 100644 src/backend/postmaster/Makefile.inc create mode 100644 src/backend/postmaster/postmaster.c create mode 100644 src/backend/regex/COPYRIGHT create mode 100644 src/backend/regex/Makefile.inc create mode 100644 src/backend/regex/WHATSNEW create mode 100644 src/backend/regex/cclass.h create mode 100644 src/backend/regex/cdefs.h create mode 100644 src/backend/regex/cname.h create mode 100644 src/backend/regex/engine.c create mode 
100644 src/backend/regex/re_format.7 create mode 100644 src/backend/regex/regcomp.c create mode 100644 src/backend/regex/regerror.c create mode 100644 src/backend/regex/regex.3 create mode 100644 src/backend/regex/regex.h create mode 100644 src/backend/regex/regex2.h create mode 100644 src/backend/regex/regexec.c create mode 100644 src/backend/regex/regexp.h create mode 100644 src/backend/regex/regfree.c create mode 100644 src/backend/regex/utils.h create mode 100644 src/backend/rewrite/Makefile.inc create mode 100644 src/backend/rewrite/locks.c create mode 100644 src/backend/rewrite/locks.h create mode 100644 src/backend/rewrite/prs2lock.h create mode 100644 src/backend/rewrite/rewriteDefine.c create mode 100644 src/backend/rewrite/rewriteDefine.h create mode 100644 src/backend/rewrite/rewriteHandler.c create mode 100644 src/backend/rewrite/rewriteHandler.h create mode 100644 src/backend/rewrite/rewriteManip.c create mode 100644 src/backend/rewrite/rewriteManip.h create mode 100644 src/backend/rewrite/rewriteRemove.c create mode 100644 src/backend/rewrite/rewriteRemove.h create mode 100644 src/backend/rewrite/rewriteSupport.c create mode 100644 src/backend/rewrite/rewriteSupport.h create mode 100644 src/backend/storage/Makefile.inc create mode 100644 src/backend/storage/backendid.h create mode 100644 src/backend/storage/block.h create mode 100644 src/backend/storage/buf.h create mode 100644 src/backend/storage/buf_internals.h create mode 100644 src/backend/storage/buffer/Makefile.inc create mode 100644 src/backend/storage/buffer/buf_init.c create mode 100644 src/backend/storage/buffer/buf_table.c create mode 100644 src/backend/storage/buffer/bufmgr.c create mode 100644 src/backend/storage/buffer/freelist.c create mode 100644 src/backend/storage/buffer/localbuf.c create mode 100644 src/backend/storage/bufmgr.h create mode 100644 src/backend/storage/bufpage.h create mode 100644 src/backend/storage/fd.h create mode 100644 src/backend/storage/file/Makefile.inc create mode 100644 src/backend/storage/file/fd.c create mode 100644 src/backend/storage/ipc.h create mode 100644 src/backend/storage/ipc/Makefile.inc create mode 100644 src/backend/storage/ipc/README create mode 100644 src/backend/storage/ipc/ipc.c create mode 100644 src/backend/storage/ipc/ipci.c create mode 100644 src/backend/storage/ipc/s_lock.c create mode 100644 src/backend/storage/ipc/shmem.c create mode 100644 src/backend/storage/ipc/shmqueue.c create mode 100644 src/backend/storage/ipc/sinval.c create mode 100644 src/backend/storage/ipc/sinvaladt.c create mode 100644 src/backend/storage/ipc/spin.c create mode 100644 src/backend/storage/item.h create mode 100644 src/backend/storage/itemid.h create mode 100644 src/backend/storage/itempos.h create mode 100644 src/backend/storage/itemptr.h create mode 100644 src/backend/storage/large_object.h create mode 100644 src/backend/storage/large_object/Makefile.inc create mode 100644 src/backend/storage/large_object/inv_api.c create mode 100644 src/backend/storage/lmgr.h create mode 100644 src/backend/storage/lmgr/Makefile.inc create mode 100644 src/backend/storage/lmgr/README create mode 100644 src/backend/storage/lmgr/lmgr.c create mode 100644 src/backend/storage/lmgr/lock.c create mode 100644 src/backend/storage/lmgr/multi.c create mode 100644 src/backend/storage/lmgr/proc.c create mode 100644 src/backend/storage/lmgr/single.c create mode 100644 src/backend/storage/lock.h create mode 100644 src/backend/storage/multilev.h create mode 100644 src/backend/storage/off.h create mode 100644 
src/backend/storage/page.h create mode 100644 src/backend/storage/page/Makefile.inc create mode 100644 src/backend/storage/page/bufpage.c create mode 100644 src/backend/storage/page/itemptr.c create mode 100644 src/backend/storage/pagenum.h create mode 100644 src/backend/storage/pos.h create mode 100644 src/backend/storage/proc.h create mode 100644 src/backend/storage/shmem.h create mode 100644 src/backend/storage/sinval.h create mode 100644 src/backend/storage/sinvaladt.h create mode 100644 src/backend/storage/smgr.h create mode 100644 src/backend/storage/smgr/Makefile.inc create mode 100644 src/backend/storage/smgr/README create mode 100644 src/backend/storage/smgr/md.c create mode 100644 src/backend/storage/smgr/mm.c create mode 100644 src/backend/storage/smgr/smgr.c create mode 100644 src/backend/storage/smgr/smgrtype.c create mode 100644 src/backend/storage/spin.h create mode 100644 src/backend/tcop/Makefile.inc create mode 100644 src/backend/tcop/aclchk.c create mode 100644 src/backend/tcop/dest.c create mode 100644 src/backend/tcop/dest.h create mode 100644 src/backend/tcop/fastpath.c create mode 100644 src/backend/tcop/fastpath.h create mode 100644 src/backend/tcop/postgres.c create mode 100644 src/backend/tcop/pquery.c create mode 100644 src/backend/tcop/pquery.h create mode 100644 src/backend/tcop/tcopdebug.h create mode 100644 src/backend/tcop/tcopprot.h create mode 100644 src/backend/tcop/utility.c create mode 100644 src/backend/tcop/utility.h create mode 100644 src/backend/tioga/Arr_TgRecipe.h create mode 100644 src/backend/tioga/Makefile.inc create mode 100644 src/backend/tioga/Varray.c create mode 100644 src/backend/tioga/Varray.h create mode 100644 src/backend/tioga/tgRecipe.c create mode 100644 src/backend/tioga/tgRecipe.h create mode 100644 src/backend/utils/Gen_fmgrtab.sh create mode 100644 src/backend/utils/Makefile.inc create mode 100644 src/backend/utils/acl.h create mode 100644 src/backend/utils/adt/Makefile.inc create mode 100644 src/backend/utils/adt/acl.c create mode 100644 src/backend/utils/adt/arrayfuncs.c create mode 100644 src/backend/utils/adt/arrayutils.c create mode 100644 src/backend/utils/adt/bool.c create mode 100644 src/backend/utils/adt/char.c create mode 100644 src/backend/utils/adt/chunk.c create mode 100644 src/backend/utils/adt/date.c create mode 100644 src/backend/utils/adt/datetimes.c create mode 100644 src/backend/utils/adt/datum.c create mode 100644 src/backend/utils/adt/dt.c create mode 100644 src/backend/utils/adt/filename.c create mode 100644 src/backend/utils/adt/float.c create mode 100644 src/backend/utils/adt/geo-ops.c create mode 100644 src/backend/utils/adt/geo-selfuncs.c create mode 100644 src/backend/utils/adt/int.c create mode 100644 src/backend/utils/adt/like.c create mode 100644 src/backend/utils/adt/misc.c create mode 100644 src/backend/utils/adt/nabstime.c create mode 100644 src/backend/utils/adt/name.c create mode 100644 src/backend/utils/adt/not_in.c create mode 100644 src/backend/utils/adt/numutils.c create mode 100644 src/backend/utils/adt/oid.c create mode 100644 src/backend/utils/adt/oidint2.c create mode 100644 src/backend/utils/adt/oidint4.c create mode 100644 src/backend/utils/adt/oidname.c create mode 100644 src/backend/utils/adt/regexp.c create mode 100644 src/backend/utils/adt/regproc.c create mode 100644 src/backend/utils/adt/selfuncs.c create mode 100644 src/backend/utils/adt/sets.c create mode 100644 src/backend/utils/adt/tid.c create mode 100644 src/backend/utils/adt/varchar.c create mode 100644 
src/backend/utils/adt/varlena.c create mode 100644 src/backend/utils/array.h create mode 100644 src/backend/utils/bit.h create mode 100644 src/backend/utils/builtins.h create mode 100644 src/backend/utils/cache/Makefile.inc create mode 100644 src/backend/utils/cache/catcache.c create mode 100644 src/backend/utils/cache/fcache.c create mode 100644 src/backend/utils/cache/inval.c create mode 100644 src/backend/utils/cache/lsyscache.c create mode 100644 src/backend/utils/cache/rel.c create mode 100644 src/backend/utils/cache/relcache.c create mode 100644 src/backend/utils/cache/syscache.c create mode 100644 src/backend/utils/catcache.h create mode 100644 src/backend/utils/datum.h create mode 100644 src/backend/utils/dynamic_loader.h create mode 100644 src/backend/utils/elog.h create mode 100644 src/backend/utils/error/Makefile.inc create mode 100644 src/backend/utils/error/assert.c create mode 100644 src/backend/utils/error/elog.c create mode 100644 src/backend/utils/error/exc.c create mode 100644 src/backend/utils/error/excabort.c create mode 100644 src/backend/utils/error/excid.c create mode 100644 src/backend/utils/error/format.c create mode 100644 src/backend/utils/exc.h create mode 100644 src/backend/utils/excid.h create mode 100644 src/backend/utils/fcache.h create mode 100644 src/backend/utils/fcache2.h create mode 100644 src/backend/utils/fmgr/Makefile.inc create mode 100644 src/backend/utils/fmgr/dfmgr.c create mode 100644 src/backend/utils/fmgr/fmgr.c create mode 100644 src/backend/utils/fmgrtab.h create mode 100644 src/backend/utils/geo-decls.h create mode 100644 src/backend/utils/hash/Makefile.inc create mode 100644 src/backend/utils/hash/dynahash.c create mode 100644 src/backend/utils/hash/hashfn.c create mode 100644 src/backend/utils/hsearch.h create mode 100644 src/backend/utils/init/Makefile.inc create mode 100644 src/backend/utils/init/enbl.c create mode 100644 src/backend/utils/init/findbe.c create mode 100644 src/backend/utils/init/globals.c create mode 100644 src/backend/utils/init/magic.c create mode 100644 src/backend/utils/init/miscinit.c create mode 100644 src/backend/utils/init/postinit.c create mode 100644 src/backend/utils/inval.h create mode 100644 src/backend/utils/lselect.h create mode 100644 src/backend/utils/lsyscache.h create mode 100644 src/backend/utils/mcxt.h create mode 100644 src/backend/utils/memutils.h create mode 100644 src/backend/utils/mmgr/Makefile.inc create mode 100644 src/backend/utils/mmgr/aset.c create mode 100644 src/backend/utils/mmgr/mcxt.c create mode 100644 src/backend/utils/mmgr/oset.c create mode 100644 src/backend/utils/mmgr/palloc.c create mode 100644 src/backend/utils/mmgr/portalmem.c create mode 100644 src/backend/utils/module.h create mode 100644 src/backend/utils/nabstime.h create mode 100644 src/backend/utils/oidcompos.h create mode 100644 src/backend/utils/palloc.h create mode 100644 src/backend/utils/portal.h create mode 100644 src/backend/utils/psort.h create mode 100644 src/backend/utils/rel.h create mode 100644 src/backend/utils/rel2.h create mode 100644 src/backend/utils/relcache.h create mode 100644 src/backend/utils/sets.h create mode 100644 src/backend/utils/sort/Makefile.inc create mode 100644 src/backend/utils/sort/lselect.c create mode 100644 src/backend/utils/sort/psort.c create mode 100644 src/backend/utils/syscache.h create mode 100644 src/backend/utils/time/Makefile.inc create mode 100644 src/backend/utils/time/tqual.c create mode 100644 src/backend/utils/tqual.h create mode 100644 src/bin/Makefile create mode 
100644 src/bin/Makefile.global create mode 100644 src/bin/cleardbdir/Makefile create mode 100644 src/bin/cleardbdir/cleardbdir.sh create mode 100644 src/bin/createdb/Makefile create mode 100644 src/bin/createdb/createdb.sh create mode 100644 src/bin/createuser/Makefile create mode 100644 src/bin/createuser/createuser.sh create mode 100644 src/bin/destroydb/Makefile create mode 100644 src/bin/destroydb/destroydb.sh create mode 100644 src/bin/destroyuser/Makefile create mode 100644 src/bin/destroyuser/destroyuser.sh create mode 100644 src/bin/initdb/Makefile create mode 100644 src/bin/initdb/initdb.sh create mode 100644 src/bin/ipcclean/Makefile create mode 100644 src/bin/ipcclean/ipcclean.sh create mode 100644 src/bin/monitor/Makefile create mode 100644 src/bin/monitor/monitor.c create mode 100644 src/bin/pg4_dump/Makefile create mode 100644 src/bin/pg4_dump/README create mode 100644 src/bin/pg4_dump/common.c create mode 100644 src/bin/pg4_dump/pg4_dump.c create mode 100644 src/bin/pg4_dump/pg_dump.h create mode 100644 src/bin/pg_dump/Makefile create mode 100644 src/bin/pg_dump/README create mode 100644 src/bin/pg_dump/common.c create mode 100644 src/bin/pg_dump/pg_dump.c create mode 100644 src/bin/pg_dump/pg_dump.h create mode 100644 src/bin/pg_id/Makefile create mode 100644 src/bin/pg_id/pg_id.c create mode 100644 src/bin/pg_version/Makefile create mode 100644 src/bin/pg_version/pg_version.c create mode 100644 src/bin/pgtclsh/Makefile create mode 100644 src/bin/pgtclsh/README create mode 100644 src/bin/pgtclsh/pgtclAppInit.c create mode 100644 src/bin/pgtclsh/pgtclUtils.tcl create mode 100644 src/bin/pgtclsh/pgtkAppInit.c create mode 100644 src/bin/pgtclsh/updateStats.tcl create mode 100644 src/bin/psql/Makefile create mode 100644 src/bin/psql/psql.c create mode 100644 src/bin/psql/psqlHelp.h create mode 100644 src/bin/psql/rlstubs.c create mode 100644 src/bin/psql/stringutils.c create mode 100644 src/bin/psql/stringutils.h create mode 100644 src/interfaces/libpgtcl/Makefile create mode 100644 src/interfaces/libpgtcl/README create mode 100644 src/interfaces/libpgtcl/libpgtcl.h create mode 100644 src/interfaces/libpgtcl/pgtcl.c create mode 100644 src/interfaces/libpgtcl/pgtclCmds.c create mode 100644 src/interfaces/libpgtcl/pgtclCmds.h create mode 100644 src/interfaces/libpgtcl/pgtclId.c create mode 100644 src/interfaces/libpgtcl/pgtclId.h create mode 100644 src/interfaces/libpq++/Makefile create mode 100644 src/interfaces/libpq++/README create mode 100644 src/interfaces/libpq++/examples/Makefile create mode 100644 src/interfaces/libpq++/examples/testlibpq0.cc create mode 100644 src/interfaces/libpq++/examples/testlibpq1.cc create mode 100644 src/interfaces/libpq++/examples/testlibpq2.cc create mode 100644 src/interfaces/libpq++/examples/testlibpq2.sql create mode 100644 src/interfaces/libpq++/examples/testlibpq3.cc create mode 100644 src/interfaces/libpq++/examples/testlibpq3.sql create mode 100644 src/interfaces/libpq++/examples/testlibpq4.cc create mode 100644 src/interfaces/libpq++/examples/testlo.cc create mode 100644 src/interfaces/libpq++/libpq++.H create mode 100644 src/interfaces/libpq++/man/libpq++.3 create mode 100644 src/interfaces/libpq++/pgconnection.cc create mode 100644 src/interfaces/libpq++/pgenv.cc create mode 100644 src/interfaces/libpq++/pglobject.cc create mode 100644 src/interfaces/libpq/Makefile create mode 100644 src/interfaces/libpq/README create mode 100644 src/interfaces/libpq/fe-auth.c create mode 100644 src/interfaces/libpq/fe-auth.h create mode 100644 
src/interfaces/libpq/fe-connect.c create mode 100644 src/interfaces/libpq/fe-exec.c create mode 100644 src/interfaces/libpq/fe-lobj.c create mode 100644 src/interfaces/libpq/fe-misc.c create mode 100644 src/interfaces/libpq/libpq-fe.h create mode 100644 src/interfaces/libpq/pg_hba create mode 100644 src/interfaces/libpq/pqsignal.c create mode 100644 src/interfaces/libpq/pqsignal.h create mode 100644 src/mk/port/postgres.mk.BSD44_derived create mode 100644 src/mk/port/postgres.mk.aix create mode 100644 src/mk/port/postgres.mk.alpha create mode 100644 src/mk/port/postgres.mk.bsdi create mode 100644 src/mk/port/postgres.mk.hpux create mode 100644 src/mk/port/postgres.mk.irix5 create mode 100644 src/mk/port/postgres.mk.linux create mode 100644 src/mk/port/postgres.mk.sparc create mode 100644 src/mk/port/postgres.mk.sparc_solaris create mode 100644 src/mk/port/postgres.mk.svr4 create mode 100644 src/mk/port/postgres.mk.ultrix4 create mode 100644 src/mk/postgres.lib.mk create mode 100644 src/mk/postgres.mk create mode 100644 src/mk/postgres.prog.mk create mode 100644 src/mk/postgres.shell.mk create mode 100644 src/mk/postgres.subdir.mk create mode 100644 src/mk/postgres.user.mk create mode 100644 src/test/Makefile create mode 100644 src/test/bench/Makefile create mode 100644 src/test/bench/WISC-README create mode 100755 src/test/bench/create.sh create mode 100644 src/test/bench/create.source create mode 100644 src/test/bench/perquery create mode 100644 src/test/bench/query01 create mode 100644 src/test/bench/query02 create mode 100644 src/test/bench/query03 create mode 100644 src/test/bench/query04 create mode 100644 src/test/bench/query05 create mode 100644 src/test/bench/query06 create mode 100644 src/test/bench/query07 create mode 100644 src/test/bench/query08 create mode 100644 src/test/bench/query09 create mode 100644 src/test/bench/query10 create mode 100644 src/test/bench/query11 create mode 100644 src/test/bench/query12 create mode 100644 src/test/bench/query13 create mode 100644 src/test/bench/query14 create mode 100644 src/test/bench/query15 create mode 100644 src/test/bench/query16 create mode 100644 src/test/bench/query17 create mode 100644 src/test/bench/query18 create mode 100644 src/test/bench/query19 create mode 100644 src/test/bench/query20 create mode 100644 src/test/bench/query21 create mode 100644 src/test/bench/query22 create mode 100644 src/test/bench/query23 create mode 100644 src/test/bench/query24 create mode 100644 src/test/bench/query25 create mode 100644 src/test/bench/query26 create mode 100644 src/test/bench/query27 create mode 100644 src/test/bench/query28 create mode 100644 src/test/bench/query29 create mode 100644 src/test/bench/query30 create mode 100644 src/test/bench/query31 create mode 100644 src/test/bench/query32 create mode 100755 src/test/bench/runwisc.sh create mode 100755 src/test/bench/wholebench.sh create mode 100644 src/test/examples/Makefile create mode 100644 src/test/examples/testlibpq.c create mode 100644 src/test/examples/testlibpq2.c create mode 100644 src/test/examples/testlibpq2.sql create mode 100644 src/test/examples/testlibpq3.c create mode 100644 src/test/examples/testlibpq3.sql create mode 100644 src/test/examples/testlibpq4.c create mode 100644 src/test/examples/testlo.c create mode 100644 src/test/examples/testlo2.c create mode 100644 src/test/regress/Makefile create mode 100644 src/test/regress/create.source create mode 100644 src/test/regress/data/dept.data create mode 100644 src/test/regress/data/desc.data create mode 100644 
src/test/regress/data/emp.data create mode 100644 src/test/regress/data/hash.data create mode 100644 src/test/regress/data/onek.data create mode 100644 src/test/regress/data/person.data create mode 100644 src/test/regress/data/real_city.data create mode 100644 src/test/regress/data/rect.data create mode 100644 src/test/regress/data/streets.data create mode 100644 src/test/regress/data/stud_emp.data create mode 100644 src/test/regress/data/student.data create mode 100644 src/test/regress/data/tenk.data create mode 100644 src/test/regress/destroy.source create mode 100644 src/test/regress/errors.source create mode 100644 src/test/regress/queries.source create mode 100644 src/test/regress/regress.c create mode 100755 src/test/regress/regress.sh create mode 100644 src/test/regress/sample.regress.out create mode 100644 src/test/regress/security.source create mode 100644 src/test/suite/README create mode 100644 src/test/suite/agg.sql create mode 100644 src/test/suite/date.sql create mode 100644 src/test/suite/float.sql create mode 100644 src/test/suite/group.sql create mode 100644 src/test/suite/group_err.sql create mode 100644 src/test/suite/inh.sql create mode 100644 src/test/suite/join.sql create mode 100644 src/test/suite/oper.sql create mode 100644 src/test/suite/parse.sql create mode 100644 src/test/suite/quote.sql create mode 100644 src/test/suite/results/agg.sql.out create mode 100644 src/test/suite/results/date.sql.out create mode 100644 src/test/suite/results/float.sql.out create mode 100644 src/test/suite/results/group.sql.out create mode 100644 src/test/suite/results/group_err.sql.out create mode 100644 src/test/suite/results/inh.sql.out create mode 100644 src/test/suite/results/join.sql.out create mode 100644 src/test/suite/results/oper.sql.out create mode 100644 src/test/suite/results/parse.sql.out create mode 100644 src/test/suite/results/quote.sql.out create mode 100644 src/test/suite/results/rules.sql.out create mode 100644 src/test/suite/results/select.sql.out create mode 100644 src/test/suite/results/sort.sql.out create mode 100644 src/test/suite/results/sqlcompat.sql.out create mode 100644 src/test/suite/results/time.sql.out create mode 100644 src/test/suite/results/varchar.sql.out create mode 100644 src/test/suite/results/views.sql.out create mode 100644 src/test/suite/rules.sql create mode 100755 src/test/suite/runall create mode 100644 src/test/suite/select.sql create mode 100644 src/test/suite/sort.sql create mode 100644 src/test/suite/sqlcompat.sql create mode 100644 src/test/suite/time.sql create mode 100644 src/test/suite/varchar.sql create mode 100644 src/test/suite/views.sql create mode 100644 src/tools/mkldexport/Makefile create mode 100644 src/tools/mkldexport/README create mode 100644 src/tools/mkldexport/mkldexport.sh create mode 100644 src/tutorial/C-code/beard.c create mode 100644 src/tutorial/C-code/complex.c create mode 100644 src/tutorial/C-code/funcs.c create mode 100644 src/tutorial/Makefile create mode 100644 src/tutorial/README create mode 100644 src/tutorial/advanced.source create mode 100644 src/tutorial/basics.source create mode 100644 src/tutorial/complex.source create mode 100644 src/tutorial/funcs.source create mode 100644 src/tutorial/syscat.source diff --git a/src/Makefile b/src/Makefile new file mode 100644 index 00000000000..7e047c0cce8 --- /dev/null +++ b/src/Makefile @@ -0,0 +1,48 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Build and install postgres. 
+# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/Makefile,v 1.1.1.1 1996/07/09 06:21:07 scrappy Exp $ +# +# NOTES +# objdir - location of the objects and generated files (eg. obj) +# +#------------------------------------------------------------------------- + +SUBDIR= backend libpq bin + +FIND = find +# assuming gnu tar and split here +TAR = tar +SPLIT = split + +ETAGS = etags +XARGS = xargs + +ifeq ($(USE_TCL), true) +SUBDIR += libpgtcl +endif + +include mk/postgres.subdir.mk + +TAGS: + rm -f TAGS; \ + for i in backend libpq bin; do \ + $(FIND) $$i -name '*.[chyl]' -print | $(XARGS) $(ETAGS) -a ; \ + done + +# target to generate a backup tar file and split files that can be +# saved to 1.44M floppy +BACKUP: + rm -f BACKUP.filelist BACKUP.tgz; \ + $(FIND) . -not -path '*obj/*' -not -path '*data/*' -type f -print > BACKUP.filelist; \ + $(TAR) --files-from BACKUP.filelist -c -z -v -f BACKUP.tgz + $(SPLIT) --bytes=1400k BACKUP.tgz pgBACKUP. + +.PHONY: TAGS +.PHONY: BACKUP diff --git a/src/Makefile.global b/src/Makefile.global new file mode 100644 index 00000000000..1ecd62accef --- /dev/null +++ b/src/Makefile.global @@ -0,0 +1,306 @@ +#------------------------------------------------------------------------- +# +# Makefile.global-- +# global configuration for the Makefiles +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/Attic/Makefile.global,v 1.1.1.1 1996/07/09 06:21:07 scrappy Exp $ +# +# NOTES +# This is seen by any Makefiles that include mk/postgres.mk. To +# override the default setting, create a Makefile.custom in this +# directory and put your defines there. (Makefile.custom is included +# at the end of this file.) +# +# If you change any of these defines you probably have to +# gmake clean; gmake +# since no dependecies are created for these. (of course you can +# be crafty and check what files really depend on them and just remake +# those). +# +#------------------------------------------------------------------------- + + +############################################################################## +# +# CONFIGURATION SECTION +# +# Following are settings pertaining to the postgres build and +# installation. The most important one is obviously the name +# of the port. + +# The name of the port. Valid choices are: +# alpha - DEC Alpha AXP on OSF/1 2.0 +# hpux - HP PA-RISC on HP-UX 9.0 +# sparc_solaris - SUN SPARC on Solaris 2.4 +# sparc - SUN SPARC on SunOS 4.1.3 +# ultrix4 - DEC MIPS on Ultrix 4.4 +# linux - Intel x86 on Linux 1.2 and Linux ELF +# (For non-ELF Linux, you need to comment out +# "LINUX_ELF=1" in src/mk/port/postgres.mk.linux) +# BSD44_derived - OSs derived from 4.4-lite BSD (NetBSD, FreeBSD) +# bsdi - BSD/OS 2.0 and 2.01 +# aix - IBM on AIX 3.2.5 +# irix5 - SGI MIPS on IRIX 5.3 +# Some hooks are provided for +# svr4 - Intel x86 on Intel SVR4 +# next - Motorola MC68K or Intel x86 on NeXTSTEP 3.2 +# but these are guaranteed not to work as of yet. +# +# XXX Note that you MUST set PORTNAME here (or on the command line) so +# that port-dependent variables are correctly set within this file. +# Makefile.custom does not take effect (for ifeq purposes) +# until after this file is processed! +# make sure that you have no whitespaces after the PORTNAME setting +# or the makefiles can get confused +PORTNAME= alpha + +# POSTGRESLOGIN is the login name of the user who gets special +# privileges within the database. 
By default it is "postgres", but +# you can change it to any existing login name (such as your own +# login if you are compiling a private version or don't have root +# access). +POSTGRESLOGIN= postgres + +# For convenience, POSTGRESDIR is where DATADIR, BINDIR, and LIBDIR +# and other target destinations are rooted. Of course, each of these is +# changable separately. +POSTGRESDIR= /private/postgres95 + +# SRCDIR specifies where the source files are. +SRCDIR= $(POSTGRESDIR)/src + +# DATADIR specifies where the postmaster expects to find its database. +# This may be overridden by command line options or the PGDATA environment +# variable. +DATADIR= $(POSTGRESDIR)/data + +# Where the postgres executables live (changeable by just putting them +# somewhere else and putting that directory in your shell PATH) +BINDIR= $(POSTGRESDIR)/bin + +# Where libpq.a gets installed. You must put it where your loader will +# look for it if you wish to use the -lpq convention. Otherwise you +# can just put the absolute pathname to the library at the end of your +# command line. +LIBDIR= $(POSTGRESDIR)/lib + +# This is the directory where IPC utilities ipcs and ipcrm are located +# +IPCSDIR= /usr/bin + +# Where the man pages (suitable for use with "man") get installed. +POSTMANDIR= $(POSTGRESDIR)/man + +# Where the formatted documents (e.g., the reference manual) get installed. +POSTDOCDIR= $(POSTGRESDIR)/doc + +# Where the header files necessary to build frontend programs get installed. +HEADERDIR= $(POSTGRESDIR)/include + +# NAMEDATALEN is the max length for system identifiers (e.g. table names, +# attribute names, function names, etc.) +# +# These MUST be set here. DO NOT COMMENT THESE OUT +# Setting these too high will result in excess space usage for system catalogs +# Setting them too low will make the system unusable. +# values between 16 and 64 that are multiples of four are recommended. +# +# NOTE also that databases with different NAMEDATALEN's cannot interoperate! +# +NAMEDATALEN = 32 +# OIDNAMELEN should be set to NAMEDATALEN + sizeof(Oid) +OIDNAMELEN = 36 + +CFLAGS+= -DNAMEDATALEN=$(NAMEDATALEN) -DOIDNAMELEN=$(OIDNAMELEN) + +############################################################################## +# +# FEATURES +# +# To disable a feature, comment out the entire definition +# (that is, prepend '#', don't set it to "0" or "no"). + +# Comment out ENFORCE_ALIGNMENT if you do NOT want unaligned access to +# multi-byte types to generate a bus error. +ENFORCE_ALIGNMENT= true + +# Comment out CDEBUG to turn off debugging and sanity-checking. +# +# XXX on MIPS, use -g3 if you want to compile with -O +CDEBUG= -g + +# turn this on if you prefer European style dates instead of American +# style dates +# EUROPEAN_DATES = 1 + +# Comment out PROFILE to disable profiling. +# +# XXX define on MIPS if you want to be able to use pixie. +# note that this disables dynamic loading! +#PROFILE= -p -non_shared + +# About the use of readline in psql: +# psql does not require the GNU readline and history libraries. Hence, we +# do not compile with them by default. However, there are hooks in the +# program which supports the use of GNU readline and history. Should you +# decide to use them, change USE_READLINE to true and change READLINE_INCDIR +# and READLINE_LIBDIR to reflect the location of the readline and histroy +# headers and libraries. +# +#USE_READLINE= true + +# directories for the readline and history libraries. 
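The NAMEDATALEN/OIDNAMELEN note above fixes a simple arithmetic relationship between the two settings. A minimal, self-contained C check of that relationship, assuming Oid is the 4-byte unsigned integer used throughout this tree (the typedef here exists only for the sketch):

#include <assert.h>
#include <stdio.h>

typedef unsigned int Oid;      /* assumed 4 bytes, as on the ports listed above */

#define NAMEDATALEN 32         /* value chosen in Makefile.global */
#define OIDNAMELEN  36         /* must equal NAMEDATALEN + sizeof(Oid) */

int
main(void)
{
    char relname[NAMEDATALEN];          /* system identifiers are padded to this size */

    /* the rule stated in the comment: OIDNAMELEN = NAMEDATALEN + sizeof(Oid) */
    assert(OIDNAMELEN == NAMEDATALEN + (int) sizeof(Oid));
    printf("identifiers use %d bytes, oid+name keys use %d bytes\n",
           (int) sizeof(relname), OIDNAMELEN);
    return 0;
}

Keeping the two values in step matters because, as the comment warns, databases built with different NAMEDATALEN values cannot interoperate.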
+READLINE_INCDIR= /usr/local/include +HISTORY_INCDIR= /usr/local/include +READLINE_LIBDIR= /usr/local/lib +HISTORY_LIBDIR= /usr/local/lib + +# If you do not plan to use Host based authentication, +# comment out the following line +HBA = 1 + +ifdef HBA +HBAFLAGS= -DHBA +endif + + + +# If you plan to use Kerberos for authentication... +# +# Comment out KRBVERS if you do not use Kerberos. +# Set KRBVERS to "4" for Kerberos v4, "5" for Kerberos v5. +# XXX Edit the default Kerberos variables below! +# +#KRBVERS= 5 + + +# Globally pass Kerberos file locations. +# these are used in the postmaster and all libpq applications. +# +# Adjust KRBINCS and KRBLIBS to reflect where you have Kerberos +# include files and libraries installed. +# PG_KRB_SRVNAM is the name under which POSTGRES is registered in +# the Kerberos database (KDC). +# PG_KRB_SRVTAB is the location of the server's keytab file. +# +ifdef KRBVERS +KRBINCS= -I/usr/athena/include +KRBLIBS= -L/usr/athena/lib +KRBFLAGS+= $(KRBINCS) -DPG_KRB_SRVNAM='"postgres_dbms"' + ifeq ($(KRBVERS), 4) +KRBFLAGS+= -DKRB4 +KRBFLAGS+= -DPG_KRB_SRVTAB='"/etc/srvtab"' +KRBLIBS+= -lkrb -ldes + else + ifeq ($(KRBVERS), 5) +KRBFLAGS+= -DKRB5 +KRBFLAGS+= -DPG_KRB_SRVTAB='"FILE:/krb5/srvtab.postgres"' +KRBLIBS+= -lkrb5 -lcrypto -lcom_err -lisode + endif + endif +endif + +# +# location of Tcl/Tk headers and libraries +# +# Uncomment this to build the tcl utilities. +USE_TCL= true +# customize these to your site's needs +# +TCL_INCDIR= /usr/local/devel/tcl7.4/include +TCL_LIBDIR= /usr/local/devel/tcl7.4/lib +TCL_LIB = -ltcl7.4 +TK_INCDIR= /usr/local/devel/tk4.0/include +TK_LIBDIR= /usr/local/devel/tk4.0/lib +TK_LIB = -ltk4.0 + +# +# include port specific rules and variables. For instance: +# +# signal(2) handling - this is here because it affects some of +# the frontend commands as well as the backend server. +# +# Ultrix and SunOS provide BSD signal(2) semantics by default. +# +# SVID2 and POSIX signal(2) semantics differ from BSD signal(2) +# semantics. We can use the POSIX sigaction(2) on systems that +# allow us to request restartable signals (SA_RESTART). +# +# Some systems don't allow restartable signals at all unless we +# link to a special BSD library. +# +# We devoutly hope that there aren't any systems that provide +# neither POSIX signals nor BSD signals. The alternative +# is to do signal-handler reinstallation, which doesn't work well +# at all. +# +-include $(MKDIR)/port/postgres.mk.$(PORTNAME) + +############################################################################## +# +# Flags for CC and LD. (depend on CDEBUG and PROFILE) +# + +# Globally pass debugging/optimization/profiling flags based +# on the options selected above. +ifdef CDEBUG + CFLAGS+= $(CDEBUG) + LDFLAGS+= $(CDEBUG) +else + ifndef CFLAGS_OPT + CFLAGS_OPT= -O + endif + CFLAGS+= $(CFLAGS_OPT) +# +# Uncommenting this will make things go a LOT faster, but you will +# also lose a lot of useful error-checking. +# + CFLAGS+= -DNO_ASSERT_CHECKING +endif + +ifdef PROFILE +CFLAGS+= $(PROFILE) +LDFLAGS+= $(PROFILE) +endif + +# Globally pass PORTNAME +CFLAGS+= -DPORTNAME_$(PORTNAME) + +# Globally pass the default TCP port for postmaster(1). 
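The signal-handling discussion above (BSD versus SVID2/POSIX signal(2) semantics, and requesting restartable signals with SA_RESTART) is why the port-specific makefiles included here differ. A small sketch of the usual POSIX route, using only standard sigaction(2); it illustrates the approach the comment describes rather than reproducing the tree's own wrapper in src/interfaces/libpq/pqsignal.c:

#include <signal.h>

typedef void (*sig_func)(int);

/* Install a handler with BSD-style "restartable" semantics by asking
 * sigaction(2) for SA_RESTART where the platform provides it. */
static sig_func
install_restartable(int signo, sig_func handler)
{
    struct sigaction act, oact;

    act.sa_handler = handler;
    sigemptyset(&act.sa_mask);
    act.sa_flags = 0;
#ifdef SA_RESTART
    act.sa_flags |= SA_RESTART;        /* restart interrupted system calls */
#endif
    if (sigaction(signo, &act, &oact) < 0)
        return SIG_ERR;
    return oact.sa_handler;
}

int
main(void)
{
    /* example: ignore SIGPIPE so a write to a closed connection returns an
     * error instead of killing the process */
    install_restartable(SIGPIPE, SIG_IGN);
    return 0;
}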
+CFLAGS+= -DPOSTPORT='"5432"' + +# include flags from mk/port/postgres.mk.$(PORTNAME) +CFLAGS+= $(CFLAGS_BE) +LDADD+= $(LDADD_BE) +LDFLAGS+= $(LDFLAGS_BE) + + +############################################################################## +# +# Miscellaneous configuration +# + +# This is the time, in seconds, at which a given backend server +# will wait on a lock before deciding to abort the transaction +# (this is what we do in lieu of deadlock detection). +# +# Low numbers are not recommended as they will tend to cause +# false aborts if many transactions are long-lived. +CFLAGS+= -DDEADLOCK_TIMEOUT=60 + +srcdir= $(SRCDIR) +includedir= $(HEADERDIR) +objdir= obj + + +############################################################################## +# +# Customization. +# +-include $(MKDIR)/../Makefile.custom + + diff --git a/src/backend/Makefile b/src/backend/Makefile new file mode 100644 index 00000000000..4cdc7adaf43 --- /dev/null +++ b/src/backend/Makefile @@ -0,0 +1,289 @@ +#------------------------------------------------------------------------- +# +# Makefile-- +# Makefile for the postgres backend (and the postmaster) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/Makefile,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ +# +#------------------------------------------------------------------------- + +# +# The following turns on intermediate linking of partial objects to speed +# the link cycle during development. (To turn this off, put "BIGOBJS=false" +# in your custom makefile, ../Makefile.custom.) +BIGOBJS= true + + +PROG= postgres + +MKDIR= ../mk +include $(MKDIR)/postgres.mk + + +include $(CURDIR)/access/Makefile.inc +include $(CURDIR)/bootstrap/Makefile.inc +include $(CURDIR)/catalog/Makefile.inc +include $(CURDIR)/commands/Makefile.inc +include $(CURDIR)/executor/Makefile.inc +include $(CURDIR)/include/Makefile.inc +include $(CURDIR)/lib/Makefile.inc +include $(CURDIR)/libpq/Makefile.inc +include $(CURDIR)/main/Makefile.inc +include $(CURDIR)/nodes/Makefile.inc +include $(CURDIR)/optimizer/Makefile.inc +include $(CURDIR)/parser/Makefile.inc +include $(CURDIR)/port/Makefile.inc +include $(CURDIR)/postmaster/Makefile.inc +include $(CURDIR)/regex/Makefile.inc +include $(CURDIR)/rewrite/Makefile.inc +include $(CURDIR)/storage/Makefile.inc +include $(CURDIR)/tcop/Makefile.inc +include $(CURDIR)/tioga/Makefile.inc +include $(CURDIR)/utils/Makefile.inc + +SRCS:= ${SRCS_ACCESS} ${SRCS_BOOTSTRAP} $(SRCS_CATALOG) ${SRCS_COMMANDS} \ + ${SRCS_EXECUTOR} $(SRCS_LIB) $(SRCS_LIBPQ) ${SRCS_MAIN} \ + ${SRCS_NODES} ${SRCS_OPTIMIZER} ${SRCS_PARSER} ${SRCS_PORT} \ + $(SRCS_POSTMASTER) ${SRCS_REGEX} ${SRCS_REWRITE} ${SRCS_STORAGE} \ + ${SRCS_TCOP} ${SRCS_UTILS} + +ifeq ($(BIGOBJS), true) +OBJS= ACCESS.o BOOTSTRAP.o COMMANDS.o EXECUTOR.o MAIN.o MISC.o NODES.o \ + PARSER.o OPTIMIZER.o REGEX.o REWRITE.o STORAGE.o TCOP.o UTILS.o +CLEANFILES+= $(subst .s,.o,$(SRCS:.c=.o)) $(OBJS) +else +OBJS:= $(subst .s,.o,$(SRCS:%.c=$(objdir)/%.o)) +CLEANFILES+= $(notdir $(OBJS)) +endif + +############################################################################# +# +# TIOGA stuff +# +ifdef TIOGA +SRCS+= $(SRCS_TIOGA) + ifeq ($(BIGOBJS), true) +TIOGA.o: $(SRCS_TIOGA:%.c=$(objdir)/%.o) + $(make_partial) +OBJS+= TIOGA.o +CLEANFILES+= $(SRCS_TIOGA:%.c=%.o) TIOGA.o + else +OBJS+= $(SRCS_TIOGA:%.c=$(objdir)/%.o) + endif +endif + + +############################################################################# +# +# Compiling the postgres backend. 
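Several of the settings above reach the C sources as preprocessor macros: -DPOSTPORT passes the default TCP port as a quoted string literal, and -DDEADLOCK_TIMEOUT passes the lock-wait limit as an integer. A minimal illustration of how command-line defines of that shape are consumed (hypothetical consumer code, not the backend's own):

#include <stdio.h>
#include <stdlib.h>

/* Normally supplied on the compiler command line, e.g.
 *   cc -DPOSTPORT='"5432"' -DDEADLOCK_TIMEOUT=60 ...
 * The fallbacks below exist only so this sketch builds on its own. */
#ifndef POSTPORT
#define POSTPORT "5432"
#endif
#ifndef DEADLOCK_TIMEOUT
#define DEADLOCK_TIMEOUT 60
#endif

int
main(void)
{
    int port = atoi(POSTPORT);   /* POSTPORT expands to a string literal */

    printf("default postmaster port: %d\n", port);
    printf("lock wait limit before aborting: %d seconds\n", DEADLOCK_TIMEOUT);
    return 0;
}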
+# +CFLAGS+= -DPOSTGRESDIR='"$(POSTGRESDIR)"' \ + -DPGDATADIR='"$(DATADIR)"' \ + -I$(CURDIR)/. -I$(CURDIR)/$(objdir) \ + -I$(CURDIR)/include \ + -I$(CURDIR)/port/$(PORTNAME) + +# turn this on if you prefer European style dates instead of American +# style dates +ifdef EUROPEAN_DATES +CFLAGS += -DEUROPEAN_STYLE +endif + +# kerberos flags +ifdef KRBVERS +CFLAGS+= $(KRBFLAGS) +LDADD+= $(KRBLIBS) +endif + +# host based access flags +ifdef HBA +CFLAGS+= $(HBAFLAGS) +endif + + + +# +# All systems except NEXTSTEP require the math library. +# Loader flags for system-dependent libraries are appended in +# src/backend/port/$(PORTNAME)/Makefile.inc +# +ifneq ($(PORTNAME), next) +LDADD+= -lm +endif + +# statically link in libc for linux +ifeq ($(PORTNAME), linux) +LDADD+= -lc +endif + +postgres: $(POSTGRES_DEPEND) $(OBJS) $(EXPORTS) + $(CC) $(LDFLAGS) -o $(objdir)/$(@F) $(addprefix $(objdir)/,$(notdir $(OBJS))) $(LDADD) + +# Make this target first if you are doing a parallel make. +# The targets in 'first' need to be made sequentially because of dependencies. +# Then, you can make 'all' with parallelism turned on. +first: $(POSTGRES_DEPEND) + + +############################################################################# +# +# Partial objects for platforms with slow linkers. +# +ifeq ($(BIGOBJS), true) + +OBJS_ACCESS:= $(SRCS_ACCESS:%.c=$(objdir)/%.o) +OBJS_BOOTSTRAP:= $(SRCS_BOOTSTRAP:%.c=$(objdir)/%.o) +OBJS_CATALOG:= $(SRCS_CATALOG:%.c=$(objdir)/%.o) +OBJS_COMMANDS:= $(SRCS_COMMANDS:%.c=$(objdir)/%.o) +OBJS_EXECUTOR:= $(SRCS_EXECUTOR:%.c=$(objdir)/%.o) +OBJS_MAIN:= $(SRCS_MAIN:%.c=$(objdir)/%.o) +OBJS_POSTMASTER:= $(SRCS_POSTMASTER:%.c=$(objdir)/%.o) +OBJS_LIB:= $(SRCS_LIB:%.c=$(objdir)/%.o) +OBJS_LIBPQ:= $(SRCS_LIBPQ:%.c=$(objdir)/%.o) +OBJS_PORT:= $(addprefix $(objdir)/,$(subst .s,.o,$(SRCS_PORT:.c=.o))) +OBJS_NODES:= $(SRCS_NODES:%.c=$(objdir)/%.o) +OBJS_PARSER:= $(SRCS_PARSER:%.c=$(objdir)/%.o) +OBJS_OPTIMIZER:= $(SRCS_OPTIMIZER:%.c=$(objdir)/%.o) +OBJS_REGEX:= $(SRCS_REGEX:%.c=$(objdir)/%.o) +OBJS_REWRITE:= $(SRCS_REWRITE:%.c=$(objdir)/%.o) +OBJS_STORAGE:= $(SRCS_STORAGE:%.c=$(objdir)/%.o) +OBJS_TCOP:= $(SRCS_TCOP:%.c=$(objdir)/%.o) +OBJS_UTILS:= $(SRCS_UTILS:%.c=$(objdir)/%.o) + +ACCESS.o: $(OBJS_ACCESS) + $(make_partial) +BOOTSTRAP.o: $(OBJS_BOOTSTRAP) + $(make_partial) +COMMANDS.o: $(OBJS_COMMANDS) + $(make_partial) +EXECUTOR.o: $(OBJS_EXECUTOR) + $(make_partial) +MAIN.o: $(OBJS_MAIN) $(OBJS_POSTMASTER) + $(make_partial) +MISC.o: $(OBJS_CATALOG) $(OBJS_LIB) $(OBJS_LIBPQ) $(OBJS_PORT) + $(make_partial) +NODES.o: $(OBJS_NODES) + $(make_partial) +PARSER.o: $(OBJS_PARSER) + $(make_partial) +OPTIMIZER.o: $(OBJS_OPTIMIZER) + $(make_partial) +REGEX.o: $(OBJS_REGEX) + $(make_partial) +REWRITE.o: $(OBJS_REWRITE) + $(make_partial) +STORAGE.o: $(OBJS_STORAGE) + $(make_partial) +TCOP.o: $(OBJS_TCOP) + $(make_partial) +UTILS.o: $(OBJS_UTILS) + $(make_partial) +endif + +############################################################################# +# +# Installation. +# +# Install the bki files to the data directory. We also copy a version +# of them that has "PGUID" intact, so one can change the value of the +# postgres userid before running initdb in the case of customizing the +# binary release (i.e., fixing up PGUID w/o recompiling the system). +# Those files are copied out as foo.source. The program newbki(1) can +# be run later to reset the postgres login id (but it must be run before +# initdb is run, or after clearing the data directory with +# cleardbdir(1)). 
[newbki distributed with v4r2 but not with Postgres95.] +# + +# NAMEDATALEN=`egrep "^#define NAMEDATALEN" $(CURDIR)/include/postgres.h | awk '{print $$3}'`; \ +# OIDNAMELEN=`egrep "^#define OIDNAMELEN" $(CURDIR)/include/postgres.h | awk '{print $$3}'`; \ + +install: beforeinstall pg_id $(BKIFILES) postgres + $(INSTALL) $(INSTL_EXE_OPTS) $(objdir)/postgres $(DESTDIR)$(BINDIR)/postgres + @rm -f $(DESTDIR)$(BINDIR)/postmaster + cd $(DESTDIR)$(BINDIR); ln -s postgres postmaster + @cd $(objdir); \ + PG_UID=`./pg_id $(POSTGRESLOGIN)`; \ + POSTGRESLOGIN=$(POSTGRESLOGIN);\ + echo "NAMEDATALEN = $(NAMEDATALEN)"; \ + echo "OIDNAMELEN = $(OIDNAMELEN)"; \ + case $$PG_UID in "NOUSER") \ + echo "Warning: no account named $(POSTGRESLOGIN), using yours";\ + POSTGRESLOGIN=`whoami`; \ + PG_UID=`./pg_id`;; \ + esac ;\ + for bki in $(BKIFILES); do \ + sed \ + -e "s/postgres PGUID/$$POSTGRESLOGIN $$PG_UID/" \ + -e "s/NAMEDATALEN/$(NAMEDATALEN)/g" \ + -e "s/OIDNAMELEN/$(OIDNAMELEN)/g" \ + -e "s/PGUID/$$PG_UID/" \ + < $$bki > $$bki.sed ; \ + echo "Installing $(DESTDIR)$(DATADIR)/files/$$bki."; \ + $(INSTALL) $(INSTLOPTS) \ + $$bki.sed $(DESTDIR)$(DATADIR)/files/$$bki; \ + rm -f $$bki.sed; \ + echo "Installing $(DESTDIR)$(DATADIR)/files/$$bki.source."; \ + $(INSTALL) $(INSTLOPTS) \ + $$bki $(DESTDIR)$(DATADIR)/files/$$bki.source; \ + done; + @echo "Installing $(DATADIR)/pg_hba"; + @cp $(srcdir)/libpq/pg_hba $(DATADIR) + @chmod 644 $(DATADIR)/pg_hba + + +# so we can get the UID of the postgres owner (w/o moving pg_id to +# src/tools). We just want the vanilla LDFLAGS for pg_id +IDLDFLAGS:= $(LDFLAGS) +ifeq ($(PORTNAME), hpux) +ifeq ($(CC), cc) +IDLDFLAGS+= -Aa -D_HPUX_SOURCE +endif +endif +pg_id: $(srcdir)/bin/pg_id/pg_id.c + $(CC) $(IDLDFLAGS) -o $(objdir)/$(@F) $< + +CLEANFILES+= pg_id postgres + + +############################################################################# +# +# Support for code development. +# + +# +# Build the file, "./ID", used by the "gid" (grep-for-identifier) tool +# +IDFILE= ID +.PHONY: $(IDFILE) +$(IDFILE): + $(CURDIR)/makeID $(PORTNAME) + +# +# Special rule to generate cpp'd version of a .c file. This is +# especially useful given all the hellish macro processing going on. +# The cpp'd version has a .C suffix. To create foo.C from foo.c, just +# type +# bmake foo.C +# +%.cpp: %.c + $(CC) -E $(CFLAGS) $(<:.C=.c) | cat -s | cb | tr -s '\012*' '\012' > $(objdir)/$(@F) + +cppall: $(SRCS:.c=.cpp) + +# +# To use Purify (SunOS only), define PURIFY to be the path (and +# options) with which to invoke the Purify loader. Only the executable +# needs to be loaded with Purify. 
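The install rule above shells out to ./pg_id to turn $(POSTGRESLOGIN) into a numeric uid, and falls back to the invoking user when the sed loop sees the token "NOUSER". The authoritative program is src/bin/pg_id/pg_id.c elsewhere in this patch; the following is only a sketch of the behaviour that install rule depends on:

#include <stdio.h>
#include <sys/types.h>
#include <pwd.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
    if (argc > 1)
    {
        struct passwd *pw = getpwnam(argv[1]);

        if (pw == NULL)
        {
            printf("NOUSER\n");             /* the token the install rule tests for */
            return 1;
        }
        printf("%ld\n", (long) pw->pw_uid);
    }
    else
        printf("%ld\n", (long) getuid());   /* no argument: report the caller's uid */
    return 0;
}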
+# +# PURIFY = /usr/sww/bin/purify -cache-dir=/usr/local/postgres/src/backend/purify-cache +#.if defined(PURIFY) +#${PROG}: $(POSTGRES_DEPEND) $(OBJS) $(EXPORTS) +# ${PURIFY} ${CC} ${LDFLAGS} -o $(objdir)/$(@F) $(addprefix $(objdir)/,$(notdir $(OBJS))) $(LDADD) +# +#CLEANFILES+= .purify* .pure .lock.*.o *_pure_*.o *.pure_*link* +#.endif + diff --git a/src/backend/access/Makefile.inc b/src/backend/access/Makefile.inc new file mode 100644 index 00000000000..6adc2c692b5 --- /dev/null +++ b/src/backend/access/Makefile.inc @@ -0,0 +1,35 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the access methods module +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/access/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ +# +#------------------------------------------------------------------------- + +accdir=$(CURDIR)/access +VPATH:=$(VPATH):$(accdir):\ + $(accdir)/common:$(accdir)/hash:$(accdir)/heap:$(accdir)/index:\ + $(accdir)/rtree:$(accdir)/nbtree:$(accdir)/transam + + +SUBSRCS= +include $(accdir)/common/Makefile.inc +include $(accdir)/hash/Makefile.inc +include $(accdir)/heap/Makefile.inc +include $(accdir)/index/Makefile.inc +include $(accdir)/rtree/Makefile.inc +include $(accdir)/nbtree/Makefile.inc +include $(accdir)/transam/Makefile.inc +SRCS_ACCESS:= $(SUBSRCS) + +HEADERS+= attnum.h funcindex.h genam.h hash.h \ + heapam.h hio.h htup.h ibit.h iqual.h istrat.h \ + itup.h nbtree.h printtup.h relscan.h rtree.h \ + sdir.h skey.h strat.h transam.h tupdesc.h tupmacs.h \ + valid.h xact.h + diff --git a/src/backend/access/attnum.h b/src/backend/access/attnum.h new file mode 100644 index 00000000000..7c999e58e9d --- /dev/null +++ b/src/backend/access/attnum.h @@ -0,0 +1,61 @@ +/*------------------------------------------------------------------------- + * + * attnum.h-- + * POSTGRES attribute number definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: attnum.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef ATTNUM_H +#define ATTNUM_H + +#include "c.h" + +/* + * user defined attribute numbers start at 1. -ay 2/95 + */ +typedef int16 AttrNumber; + +#define InvalidAttrNumber 0 + +/* ---------------- + * support macros + * ---------------- + */ +/* + * AttributeNumberIsValid -- + * True iff the attribute number is valid. + */ +#define AttributeNumberIsValid(attributeNumber) \ + ((bool) ((attributeNumber) != InvalidAttrNumber)) + +/* + * AttrNumberIsForUserDefinedAttr -- + * True iff the attribute number corresponds to an user defined attribute. + */ +#define AttrNumberIsForUserDefinedAttr(attributeNumber) \ + ((bool) ((attributeNumber) > 0)) + +/* + * AttrNumberGetAttrOffset -- + * Returns the attribute offset for an attribute number. + * + * Note: + * Assumes the attribute number is for an user defined attribute. + */ +#define AttrNumberGetAttrOffset(attNum) \ + (AssertMacro(AttrNumberIsForUserDefinedAttr(attNum)) ? \ + ((attNum - 1)) : 0) + +/* + * AttributeOffsetGetAttributeNumber -- + * Returns the attribute number for an attribute offset. 
+ */ +#define AttrOffsetGetAttrNumber(attributeOffset) \ + ((AttrNumber) (1 + attributeOffset)) + +#endif /* ATTNUM_H */ diff --git a/src/backend/access/common/Makefile.inc b/src/backend/access/common/Makefile.inc new file mode 100644 index 00000000000..5d5dd476274 --- /dev/null +++ b/src/backend/access/common/Makefile.inc @@ -0,0 +1,16 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for access/common +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/access/common/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= heaptuple.c heapvalid.c indextuple.c indexvalid.c printtup.c \ + scankey.c tupdesc.c + diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c new file mode 100644 index 00000000000..c3e72fb97e8 --- /dev/null +++ b/src/backend/access/common/heaptuple.c @@ -0,0 +1,1011 @@ +/*------------------------------------------------------------------------- + * + * heaptuple.c-- + * This file contains heap tuple accessor and mutator routines, as well + * as a few various tuple utilities. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + * NOTES + * The old interface functions have been converted to macros + * and moved to heapam.h + * + *------------------------------------------------------------------------- + */ +#include + +#include "postgres.h" + +#include "access/htup.h" +#include "access/itup.h" +#include "access/tupmacs.h" +#include "access/skey.h" +#include "storage/ipc.h" +#include "storage/buf.h" +#include "storage/bufmgr.h" +#include "access/transam.h" +#include "storage/bufpage.h" /* for MAXTUPLEN */ +#include "storage/itemptr.h" +#include "utils/memutils.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/nabstime.h" + +/* this is so the sparcstation debugger works */ + +#ifndef NO_ASSERT_CHECKING +#ifdef sparc +#define register +#endif /* sparc */ +#endif /* NO_ASSERT_CHECKING */ + +/* ---------------------------------------------------------------- + * misc support routines + * ---------------------------------------------------------------- + */ + +/* ---------------- + * ComputeDataSize + * ---------------- + */ +Size +ComputeDataSize(TupleDesc tupleDesc, + Datum value[], + char nulls[]) +{ + uint32 length; + int i; + int numberOfAttributes = tupleDesc->natts; + AttributeTupleForm *att = tupleDesc->attrs; + + for (length = 0, i = 0; i < numberOfAttributes; i++) { + if (nulls[i] != ' ') continue; + + switch (att[i]->attlen) { + case -1: + /* + * This is the size of the disk representation and so + * must include the additional sizeof long. 
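ComputeDataSize (begun just above) and DataFill (further below) size and place each attribute according to its attlen and attalign, with 'd' requesting double alignment. The padding arithmetic itself is simple; here is a standalone sketch of it, with macro behaviour inferred from the surrounding code and alignments assumed to be powers of two:

#include <stdio.h>
#include <stddef.h>

/* Round 'len' up to a multiple of 'align': the operation behind the
 * SHORTALIGN/INTALIGN/LONGALIGN/DOUBLEALIGN macros used in the heap
 * tuple code (illustrative reimplementation, not the real macros). */
static size_t
align_up(size_t len, size_t align)
{
    return (len + align - 1) & ~(align - 1);
}

int
main(void)
{
    size_t off = 0;

    off += sizeof(char);                                   /* 1-byte attribute: no padding */
    off = align_up(off, sizeof(short)) + sizeof(short);    /* int2 */
    off = align_up(off, sizeof(int)) + sizeof(int);        /* int4 */
    off = align_up(off, sizeof(double)) + sizeof(double);  /* 'd'-aligned attribute */
    printf("data area size for (char, int2, int4, float8): %lu bytes\n",
           (unsigned long) off);
    return 0;
}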
+ */ + if (att[i]->attalign == 'd') { + length = DOUBLEALIGN(length) + + VARSIZE(DatumGetPointer(value[i])); + } else { + length = INTALIGN(length) + + VARSIZE(DatumGetPointer(value[i])); + } + break; + case sizeof(char): + length++; + break; + case sizeof(short): + length = SHORTALIGN(length + sizeof(short)); + break; + case sizeof(int32): + length = INTALIGN(length + sizeof(int32)); + break; + default: + if (att[i]->attlen < sizeof(int32)) + elog(WARN, "ComputeDataSize: attribute %d has len %d", + i, att[i]->attlen); + if (att[i]->attalign == 'd') + length = DOUBLEALIGN(length) + att[i]->attlen; + else + length = LONGALIGN(length) + att[i]->attlen; + break; + } + } + + return length; +} + +/* ---------------- + * DataFill + * ---------------- + */ +void +DataFill(char *data, + TupleDesc tupleDesc, + Datum value[], + char nulls[], + char *infomask, + bits8 bit[]) +{ + bits8 *bitP; + int bitmask; + uint32 length; + int i; + int numberOfAttributes = tupleDesc->natts; + AttributeTupleForm* att = tupleDesc->attrs; + + if (bit != NULL) { + bitP = &bit[-1]; + bitmask = CSIGNBIT; + } + + *infomask = 0; + + for (i = 0; i < numberOfAttributes; i++) { + if (bit != NULL) { + if (bitmask != CSIGNBIT) { + bitmask <<= 1; + } else { + bitP += 1; + *bitP = 0x0; + bitmask = 1; + } + + if (nulls[i] == 'n') { + *infomask |= HEAP_HASNULL; + continue; + } + + *bitP |= bitmask; + } + + switch (att[i]->attlen) { + case -1: + *infomask |= HEAP_HASVARLENA; + if (att[i]->attalign=='d') { + data = (char *) DOUBLEALIGN(data); + } else { + data = (char *) INTALIGN(data); + } + length = VARSIZE(DatumGetPointer(value[i])); + memmove(data, DatumGetPointer(value[i]),length); + data += length; + break; + case sizeof(char): + *data = att[i]->attbyval ? + DatumGetChar(value[i]) : *((char *) value[i]); + data += sizeof(char); + break; + case sizeof(int16): + data = (char *) SHORTALIGN(data); + * (short *) data = (att[i]->attbyval ? + DatumGetInt16(value[i]) : + *((short *) value[i])); + data += sizeof(short); + break; + case sizeof(int32): + data = (char *) INTALIGN(data); + * (int32 *) data = (att[i]->attbyval ? 
+ DatumGetInt32(value[i]) : + *((int32 *) value[i])); + data += sizeof(int32); + break; + default: + if (att[i]->attlen < sizeof(int32)) + elog(WARN, "DataFill: attribute %d has len %d", + i, att[i]->attlen); + if (att[i]->attalign == 'd') { + data = (char *) DOUBLEALIGN(data); + memmove(data, DatumGetPointer(value[i]), + att[i]->attlen); + data += att[i]->attlen; + } else { + data = (char *) LONGALIGN(data); + memmove(data, DatumGetPointer(value[i]), + att[i]->attlen); + data += att[i]->attlen; + } + + } + } +} + +/* ---------------------------------------------------------------- + * heap tuple interface + * ---------------------------------------------------------------- + */ + +/* ---------------- + * heap_attisnull - returns 1 iff tuple attribute is not present + * ---------------- + */ +int +heap_attisnull(HeapTuple tup, int attnum) +{ + if (attnum > (int)tup->t_natts) + return (1); + + if (HeapTupleNoNulls(tup)) return(0); + + if (attnum > 0) { + return(att_isnull(attnum - 1, tup->t_bits)); + } else + switch (attnum) { + case SelfItemPointerAttributeNumber: + case ObjectIdAttributeNumber: + case MinTransactionIdAttributeNumber: + case MinCommandIdAttributeNumber: + case MaxTransactionIdAttributeNumber: + case MaxCommandIdAttributeNumber: + case ChainItemPointerAttributeNumber: + case AnchorItemPointerAttributeNumber: + case MinAbsoluteTimeAttributeNumber: + case MaxAbsoluteTimeAttributeNumber: + case VersionTypeAttributeNumber: + break; + + case 0: + elog(WARN, "heap_attisnull: zero attnum disallowed"); + + default: + elog(WARN, "heap_attisnull: undefined negative attnum"); + } + + return (0); +} + +/* ---------------------------------------------------------------- + * system attribute heap tuple support + * ---------------------------------------------------------------- + */ + +/* ---------------- + * heap_sysattrlen + * + * This routine returns the length of a system attribute. + * ---------------- + */ +int +heap_sysattrlen(AttrNumber attno) +{ + HeapTupleData *f = NULL; + int len; + + switch (attno) { + case SelfItemPointerAttributeNumber: + len = sizeof f->t_ctid; + break; + case ObjectIdAttributeNumber: + len = sizeof f->t_oid; + break; + case MinTransactionIdAttributeNumber: + len = sizeof f->t_xmin; + break; + case MinCommandIdAttributeNumber: + len = sizeof f->t_cmin; + break; + case MaxTransactionIdAttributeNumber: + len = sizeof f->t_xmax; + break; + case MaxCommandIdAttributeNumber: + len = sizeof f->t_cmax; + break; + case ChainItemPointerAttributeNumber: + len = sizeof f->t_chain; + break; + case AnchorItemPointerAttributeNumber: + elog(WARN, "heap_sysattrlen: field t_anchor does not exist!"); + break; + case MinAbsoluteTimeAttributeNumber: + len = sizeof f->t_tmin; + break; + case MaxAbsoluteTimeAttributeNumber: + len = sizeof f->t_tmax; + break; + case VersionTypeAttributeNumber: + len = sizeof f->t_vtype; + break; + default: + elog(WARN, "sysattrlen: System attribute number %d unknown.", + attno); + len = 0; + break; + } + return (len); +} + +/* ---------------- + * heap_sysattrbyval + * + * This routine returns the "by-value" property of a system attribute. 
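heap_attisnull above and DataFill before it both rely on the tuple's null bitmap: one bit per user attribute, a set bit meaning the value is present. A tiny standalone illustration of that convention, which also shows the 1-based attribute number versus 0-based offset distinction from attnum.h (the test is re-implemented here purely for demonstration; the real macro is att_isnull in access/htup.h):

#include <stdio.h>

typedef unsigned char bits8;

/* A bit is set when the attribute at that 0-based offset is NOT null. */
static int
attr_is_null(int attoff, const bits8 *bits)
{
    return !(bits[attoff >> 3] & (1 << (attoff & 0x07)));
}

int
main(void)
{
    bits8 bitmap[1] = { 0x05 };   /* offsets 0 and 2 present, 1 and 3 null */
    int off;

    for (off = 0; off < 4; off++)
        printf("offset %d (attnum %d): %s\n",
               off, off + 1, attr_is_null(off, bitmap) ? "null" : "not null");
    return 0;
}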
+ * ---------------- + */ +bool +heap_sysattrbyval(AttrNumber attno) +{ + bool byval; + + switch (attno) { + case SelfItemPointerAttributeNumber: + byval = false; + break; + case ObjectIdAttributeNumber: + byval = true; + break; + case MinTransactionIdAttributeNumber: + byval = true; + break; + case MinCommandIdAttributeNumber: + byval = true; + break; + case MaxTransactionIdAttributeNumber: + byval = true; + break; + case MaxCommandIdAttributeNumber: + byval = true; + break; + case ChainItemPointerAttributeNumber: + byval = false; + break; + case AnchorItemPointerAttributeNumber: + byval = false; + break; + case MinAbsoluteTimeAttributeNumber: + byval = true; + break; + case MaxAbsoluteTimeAttributeNumber: + byval = true; + break; + case VersionTypeAttributeNumber: + byval = true; + break; + default: + byval = true; + elog(WARN, "sysattrbyval: System attribute number %d unknown.", + attno); + break; + } + + return byval; +} + +/* ---------------- + * heap_getsysattr + * ---------------- + */ +char * +heap_getsysattr(HeapTuple tup, Buffer b, int attnum) +{ + switch (attnum) { + case SelfItemPointerAttributeNumber: + return ((char *)&tup->t_ctid); + case ObjectIdAttributeNumber: + return ((char *) (long) tup->t_oid); + case MinTransactionIdAttributeNumber: + return ((char *) (long) tup->t_xmin); + case MinCommandIdAttributeNumber: + return ((char *) (long) tup->t_cmin); + case MaxTransactionIdAttributeNumber: + return ((char *) (long) tup->t_xmax); + case MaxCommandIdAttributeNumber: + return ((char *) (long) tup->t_cmax); + case ChainItemPointerAttributeNumber: + return ((char *) &tup->t_chain); + case AnchorItemPointerAttributeNumber: + elog(WARN, "heap_getsysattr: t_anchor does not exist!"); + break; + + /* + * For tmin and tmax, we need to do some extra work. These don't + * get filled in until the vacuum cleaner runs (or we manage to flush + * a page after setting the value correctly below). If the vacuum + * cleaner hasn't run yet, then the times stored in the tuple are + * wrong, and we need to look up the commit time of the transaction. + * We cache this value in the tuple to avoid doing the work more than + * once. + */ + + case MinAbsoluteTimeAttributeNumber: + if (!AbsoluteTimeIsBackwardCompatiblyValid(tup->t_tmin) && + TransactionIdDidCommit(tup->t_xmin)) + tup->t_tmin = TransactionIdGetCommitTime(tup->t_xmin); + return ((char *) (long) tup->t_tmin); + case MaxAbsoluteTimeAttributeNumber: + if (!AbsoluteTimeIsBackwardCompatiblyReal(tup->t_tmax)) { + if (TransactionIdDidCommit(tup->t_xmax)) + tup->t_tmax = TransactionIdGetCommitTime(tup->t_xmax); + else + tup->t_tmax = CURRENT_ABSTIME; + } + return ((char *) (long) tup->t_tmax); + case VersionTypeAttributeNumber: + return ((char *) (long) tup->t_vtype); + default: + elog(WARN, "heap_getsysattr: undefined attnum %d", attnum); + } + return(NULL); +} + +/* ---------------- + * fastgetattr + * + * This is a newer version of fastgetattr which attempts to be + * faster by caching attribute offsets in the attribute descriptor. + * + * an alternate way to speed things up would be to cache offsets + * with the tuple, but that seems more difficult unless you take + * the storage hit of actually putting those offsets into the + * tuple you send to disk. Yuck. + * + * This scheme will be slightly slower than that, but should + * preform well for queries which hit large #'s of tuples. After + * you cache the offsets once, examining all the other tuples using + * the same attribute descriptor will go much quicker. 
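The attcacheoff scheme described in the comment above trades one careful walk per attribute descriptor for fast lookups on every later tuple with the same fixed layout. A sketch of that caching idea, deliberately omitting alignment, nulls and varlena columns; the names are illustrative stand-ins, not the real TupleDesc/AttributeTupleForm structures:

#include <stdio.h>

typedef struct
{
    int attlen;        /* fixed length in bytes */
    int attcacheoff;   /* -1 until the offset has been computed once */
} DemoColumn;

static int
column_offset(DemoColumn *cols, int target)
{
    int off = 0;
    int i;

    if (cols[target].attcacheoff >= 0)
        return cols[target].attcacheoff;      /* fast path: already cached */

    for (i = 0; i < target; i++)              /* slow path: walk and cache */
    {
        if (cols[i].attcacheoff < 0)
            cols[i].attcacheoff = off;
        off += cols[i].attlen;
    }
    cols[target].attcacheoff = off;
    return off;
}

int
main(void)
{
    DemoColumn cols[3] = { {4, -1}, {2, -1}, {8, -1} };

    printf("first lookup of column 2: offset %d\n", column_offset(cols, 2));
    printf("second lookup (cached):   offset %d\n", column_offset(cols, 2));
    return 0;
}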
-cim 5/4/91 + * ---------------- + */ +char * +fastgetattr(HeapTuple tup, + int attnum, + TupleDesc tupleDesc, + bool *isnull) +{ + char *tp; /* ptr to att in tuple */ + bits8 *bp; /* ptr to att in tuple */ + int slow; /* do we have to walk nulls? */ + AttributeTupleForm *att = tupleDesc->attrs; + + /* ---------------- + * sanity checks + * ---------------- + */ + + Assert(PointerIsValid(isnull)); + Assert(attnum > 0); + + /* ---------------- + * Three cases: + * + * 1: No nulls and no variable length attributes. + * 2: Has a null or a varlena AFTER att. + * 3: Has nulls or varlenas BEFORE att. + * ---------------- + */ + + *isnull = false; + + if (HeapTupleNoNulls(tup)) { + attnum--; + if (att[attnum]->attcacheoff > 0) { + return (char *) + fetchatt( &(att[attnum]), + (char *)tup + tup->t_hoff + att[attnum]->attcacheoff); + } else if (attnum == 0) { + /* + * first attribute is always at position zero + */ + return((char *) fetchatt(&(att[0]), (char *) tup + tup->t_hoff)); + } + + tp = (char *) tup + tup->t_hoff; + + slow = 0; + } else { + /* + * there's a null somewhere in the tuple + */ + + bp = tup->t_bits; + tp = (char *) tup + tup->t_hoff; + slow = 0; + attnum--; + + /* ---------------- + * check to see if desired att is null + * ---------------- + */ + + if (att_isnull(attnum, bp)) { + *isnull = true; + return NULL; + } + + /* ---------------- + * Now check to see if any preceeding bits are null... + * ---------------- + */ + + { + register int i = 0; /* current offset in bp */ + + for (i = 0; i < attnum && !slow; i++) { + if (att_isnull(i, bp)) slow = 1; + } + } + } + + /* + * now check for any non-fixed length attrs before our attribute + */ + if (!slow) { + if (att[attnum]->attcacheoff > 0) { + return (char *) + fetchatt(&(att[attnum]), + tp + att[attnum]->attcacheoff); + } else if (attnum == 0) { + return (char *) + fetchatt(&(att[0]), (char *) tup + tup->t_hoff); + } else if (!HeapTupleAllFixed(tup)) { + register int j = 0; + + for (j = 0; j < attnum && !slow; j++) + if (att[j]->attlen < 1) slow = 1; + } + } + + /* + * if slow is zero, and we got here, we know that we have a tuple with + * no nulls. We also have to initialize the remainder of + * the attribute cached offset values. + */ + if (!slow) { + register int j = 1; + register long off; + + /* + * need to set cache for some atts + */ + + att[0]->attcacheoff = 0; + + while (att[j]->attcacheoff > 0) j++; + + off = att[j-1]->attcacheoff + att[j-1]->attlen; + + for (; j < attnum + 1; j++) { + switch(att[j]->attlen) { + case -1: + off = (att[j]->attalign=='d') ? + DOUBLEALIGN(off) : INTALIGN(off); + break; + case sizeof(char): + break; + case sizeof(short): + off = SHORTALIGN(off); + break; + case sizeof(int32): + off = INTALIGN(off); + break; + default: + if (att[j]->attlen < sizeof(int32)) { + elog(WARN, + "fastgetattr: attribute %d has len %d", + j, att[j]->attlen); + } + if (att[j]->attalign == 'd') + off = DOUBLEALIGN(off); + else + off = LONGALIGN(off); + break; + } + + att[j]->attcacheoff = off; + off += att[j]->attlen; + } + + return + (char *)fetchatt(&(att[attnum]), tp + att[attnum]->attcacheoff); + } else { + register bool usecache = true; + register int off = 0; + register int i; + + /* + * Now we know that we have to walk the tuple CAREFULLY. + * + * Note - This loop is a little tricky. On iteration i we + * first set the offset for attribute i and figure out how much + * the offset should be incremented. 
Finally, we need to align the + * offset based on the size of attribute i+1 (for which the offset + * has been computed). -mer 12 Dec 1991 + */ + + for (i = 0; i < attnum; i++) { + if (!HeapTupleNoNulls(tup)) { + if (att_isnull(i, bp)) { + usecache = false; + continue; + } + } + switch (att[i]->attlen) { + case -1: + off = (att[i]->attalign=='d') ? + DOUBLEALIGN(off) : INTALIGN(off); + break; + case sizeof(char): + break; + case sizeof(short): + off = SHORTALIGN(off); + break; + case sizeof(int32): + off = INTALIGN(off); + break; + default: + if (att[i]->attlen < sizeof(int32)) + elog(WARN, + "fastgetattr2: attribute %d has len %d", + i, att[i]->attlen); + if (att[i]->attalign == 'd') + off = DOUBLEALIGN(off); + else + off = LONGALIGN(off); + break; + } + if (usecache && att[i]->attcacheoff > 0) { + off = att[i]->attcacheoff; + if (att[i]->attlen == -1) { + usecache = false; + } + } else { + if (usecache) att[i]->attcacheoff = off; + } + + switch(att[i]->attlen) { + case sizeof(char): + off++; + break; + case sizeof(int16): + off += sizeof(int16); + break; + case sizeof(int32): + off += sizeof(int32); + break; + case -1: + usecache = false; + off += VARSIZE(tp + off); + break; + default: + off += att[i]->attlen; + break; + } + } + switch (att[attnum]->attlen) { + case -1: + off = (att[attnum]->attalign=='d')? + DOUBLEALIGN(off) : INTALIGN(off); + break; + case sizeof(char): + break; + case sizeof(short): + off = SHORTALIGN(off); + break; + case sizeof(int32): + off = INTALIGN(off); + break; + default: + if (att[attnum]->attlen < sizeof(int32)) + elog(WARN, "fastgetattr3: attribute %d has len %d", + attnum, att[attnum]->attlen); + if (att[attnum]->attalign == 'd') + off = DOUBLEALIGN(off); + else + off = LONGALIGN(off); + break; + } + return((char *) fetchatt(&(att[attnum]), tp + off)); + } +} + +/* ---------------- + * heap_getattr + * + * returns an attribute from a heap tuple. uses + * ---------------- + */ +char * +heap_getattr(HeapTuple tup, + Buffer b, + int attnum, + TupleDesc tupleDesc, + bool *isnull) +{ + bool localIsNull; + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(tup != NULL); + + if (! PointerIsValid(isnull)) + isnull = &localIsNull; + + if (attnum > (int) tup->t_natts) { + *isnull = true; + return ((char *) NULL); + } + + /* ---------------- + * take care of user defined attributes + * ---------------- + */ + if (attnum > 0) { + char *datum; + datum = fastgetattr(tup, attnum, tupleDesc, isnull); + + return (datum); + } + + /* ---------------- + * take care of system attributes + * ---------------- + */ + *isnull = false; + return + heap_getsysattr(tup, b, attnum); +} + +/* ---------------- + * heap_copytuple + * + * returns a copy of an entire tuple + * ---------------- + */ +HeapTuple +heap_copytuple(HeapTuple tuple) +{ + HeapTuple newTuple; + + if (! 
HeapTupleIsValid(tuple)) + return (NULL); + + /* XXX For now, just prevent an undetectable executor related error */ + if (tuple->t_len > MAXTUPLEN) { + elog(WARN, "palloctup: cannot handle length %d tuples", + tuple->t_len); + } + + newTuple = (HeapTuple) palloc(tuple->t_len); + memmove((char *) newTuple, (char *) tuple, (int) tuple->t_len); + return(newTuple); +} + +/* ---------------- + * heap_deformtuple + * + * the inverse of heap_formtuple (see below) + * ---------------- + */ +void +heap_deformtuple(HeapTuple tuple, + TupleDesc tdesc, + Datum values[], + char nulls[]) +{ + int i; + int natts; + + Assert(HeapTupleIsValid(tuple)); + + natts = tuple->t_natts; + for (i = 0; inatts; + + len = sizeof *tuple - sizeof tuple->t_bits; + + for (i = 0; i < numberOfAttributes && !hasnull; i++) { + if (nulls[i] != ' ') hasnull = true; + } + + if (numberOfAttributes > MaxHeapAttributeNumber) + elog(WARN, "heap_formtuple: numberOfAttributes of %d > %d", + numberOfAttributes, MaxHeapAttributeNumber); + + if (hasnull) { + bitmaplen = BITMAPLEN(numberOfAttributes); + len += bitmaplen; + } + + hoff = len = DOUBLEALIGN(len); /* be conservative here */ + + len += ComputeDataSize(tupleDescriptor, value, nulls); + + tp = (char *) palloc(len); + tuple = (HeapTuple) tp; + + memset(tp, 0, (int)len); + + tuple->t_len = len; + tuple->t_natts = numberOfAttributes; + tuple->t_hoff = hoff; + tuple->t_tmin = INVALID_ABSTIME; + tuple->t_tmax = CURRENT_ABSTIME; + + DataFill((char *)tuple + tuple->t_hoff, + tupleDescriptor, + value, + nulls, + &tuple->t_infomask, + (hasnull ? tuple->t_bits : NULL)); + + return (tuple); +} + +/* ---------------- + * heap_modifytuple + * + * forms a new tuple from an old tuple and a set of replacement values. + * ---------------- + */ +HeapTuple +heap_modifytuple(HeapTuple tuple, + Buffer buffer, + Relation relation, + Datum replValue[], + char replNull[], + char repl[]) +{ + int attoff; + int numberOfAttributes; + Datum *value; + char *nulls; + bool isNull; + HeapTuple newTuple; + int madecopy; + uint8 infomask; + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(HeapTupleIsValid(tuple)); + Assert(BufferIsValid(buffer) || RelationIsValid(relation)); + Assert(HeapTupleIsValid(tuple)); + Assert(PointerIsValid(replValue)); + Assert(PointerIsValid(replNull)); + Assert(PointerIsValid(repl)); + + /* ---------------- + * if we're pointing to a disk page, then first + * make a copy of our tuple so that all the attributes + * are available. XXX this is inefficient -cim + * ---------------- + */ + madecopy = 0; + if (BufferIsValid(buffer) == true) { + relation = (Relation) BufferGetRelation(buffer); + tuple = heap_copytuple(tuple); + madecopy = 1; + } + + numberOfAttributes = RelationGetRelationTupleForm(relation)->relnatts; + + /* ---------------- + * allocate and fill value[] and nulls[] arrays from either + * the tuple or the repl information, as appropriate. + * ---------------- + */ + value = (Datum *) palloc(numberOfAttributes * sizeof *value); + nulls = (char *) palloc(numberOfAttributes * sizeof *nulls); + + for (attoff = 0; + attoff < numberOfAttributes; + attoff += 1) { + + if (repl[attoff] == ' ') { + char *attr; + + attr = + heap_getattr(tuple, + InvalidBuffer, + AttrOffsetGetAttrNumber(attoff), + RelationGetTupleDescriptor(relation), + &isNull) ; + value[attoff] = PointerGetDatum(attr); + nulls[attoff] = (isNull) ? 
'n' : ' '; + + } else if (repl[attoff] != 'r') { + elog(WARN, "heap_modifytuple: repl is \\%3d", repl[attoff]); + + } else { /* == 'r' */ + value[attoff] = replValue[attoff]; + nulls[attoff] = replNull[attoff]; + } + } + + /* ---------------- + * create a new tuple from the values[] and nulls[] arrays + * ---------------- + */ + newTuple = heap_formtuple(RelationGetTupleDescriptor(relation), + value, + nulls); + + /* ---------------- + * copy the header except for t_len, t_natts, t_hoff, t_bits, t_infomask + * ---------------- + */ + infomask = newTuple->t_infomask; + memmove((char *) &newTuple->t_ctid, /*XXX*/ + (char *) &tuple->t_ctid, + ((char *) &tuple->t_hoff - (char *) &tuple->t_ctid)); /*XXX*/ + newTuple->t_infomask = infomask; + newTuple->t_natts = numberOfAttributes; /* fix t_natts just in case */ + + /* ---------------- + * if we made a copy of the tuple, then free it. + * ---------------- + */ + if (madecopy) + pfree(tuple); + + return + newTuple; +} + +/* ---------------------------------------------------------------- + * other misc functions + * ---------------------------------------------------------------- + */ + +HeapTuple +heap_addheader(uint32 natts, /* max domain index */ + int structlen, /* its length */ + char *structure) /* pointer to the struct */ +{ + register char *tp; /* tuple data pointer */ + HeapTuple tup; + long len; + int hoff; + + AssertArg(natts > 0); + + len = sizeof (HeapTupleData) - sizeof (tup->t_bits); + + hoff = len = DOUBLEALIGN(len); /* be conservative */ + len += structlen; + tp = (char *) palloc(len); + tup = (HeapTuple) tp; + memset((char*)tup, 0, len); + + tup->t_len = (short) len; /* XXX */ + tp += tup->t_hoff = hoff; + tup->t_natts = natts; + tup->t_infomask = 0; + + memmove(tp, structure, structlen); + + return (tup); +} diff --git a/src/backend/access/common/heapvalid.c b/src/backend/access/common/heapvalid.c new file mode 100644 index 00000000000..b80c5dd9eb0 --- /dev/null +++ b/src/backend/access/common/heapvalid.c @@ -0,0 +1,134 @@ +/*------------------------------------------------------------------------- + * + * heapvalid.c-- + * heap tuple qualification validity checking code + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/common/Attic/heapvalid.c,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "c.h" + +#include "access/htup.h" +#include "access/skey.h" +#include "access/heapam.h" +#include "utils/tqual.h" +#include "access/valid.h" /* where the declarations go */ +#include "access/xact.h" + +#include "storage/buf.h" +#include "storage/bufmgr.h" +#include "storage/bufpage.h" +#include "storage/itemid.h" +#include "fmgr.h" +#include "utils/elog.h" +#include "utils/rel.h" + +/* ---------------- + * heap_keytest + * + * Test a heap tuple with respect to a scan key. 
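/*
 * Illustration only (not from the original source): a hedged sketch of how
 * a caller might test a heap tuple against a single-key qualification with
 * heap_keytest() below, building the key with ScanKeyEntryInitialize() from
 * access/common/scankey.c.  The attribute number, comparison procedure and
 * constant are assumptions made up for the example.
 */
static bool
tuple_matches_constant(HeapTuple tup, TupleDesc tupdesc,
                       AttrNumber attno, RegProcedure cmpproc, Datum constant)
{
    ScanKeyData key;

    ScanKeyEntryInitialize(&key, (bits16) 0, attno, cmpproc, constant);
    return heap_keytest(tup, tupdesc, 1, &key);
}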
+ * ---------------- + */ +bool +heap_keytest(HeapTuple t, + TupleDesc tupdesc, + int nkeys, + ScanKey keys) +{ + bool isnull; + Datum atp; + int test; + + for (; nkeys--; keys++) { + atp = (Datum)heap_getattr(t, InvalidBuffer, + keys->sk_attno, + tupdesc, + &isnull); + + if (isnull) + /* XXX eventually should check if SK_ISNULL */ + return false; + + if (keys->sk_flags & SK_COMMUTE) + test = (long) FMGR_PTR2(keys->sk_func, keys->sk_procedure, + keys->sk_argument, atp); + else + test = (long) FMGR_PTR2(keys->sk_func, keys->sk_procedure, + atp, keys->sk_argument); + + if (!test == !(keys->sk_flags & SK_NEGATE)) + return false; + } + + return true; +} + +/* ---------------- + * heap_tuple_satisfies + * + * Returns a valid HeapTuple if it satisfies the timequal and keytest. + * Returns NULL otherwise. Used to be heap_satisifies (sic) which + * returned a boolean. It now returns a tuple so that we can avoid doing two + * PageGetItem's per tuple. + * + * Complete check of validity including LP_CTUP and keytest. + * This should perhaps be combined with valid somehow in the + * future. (Also, additional rule tests/time range tests.) + * + * on 8/21/92 mao says: i rearranged the tests here to do keytest before + * SatisfiesTimeQual. profiling indicated that even for vacuumed relations, + * time qual checking was more expensive than key testing. time qual is + * least likely to fail, too. we should really add the time qual test to + * the restriction and optimize it in the normal way. this has interactions + * with joey's expensive function work. + * ---------------- + */ +HeapTuple +heap_tuple_satisfies(ItemId itemId, + Relation relation, + PageHeader disk_page, + TimeQual qual, + int nKeys, + ScanKey key) +{ + HeapTuple tuple; + bool res; + + if (! ItemIdIsUsed(itemId)) + return NULL; + + tuple = (HeapTuple) PageGetItem((Page) disk_page, itemId); + + if (key != NULL) + res = heap_keytest(tuple, RelationGetTupleDescriptor(relation), + nKeys, key); + else + res = TRUE; + + if (res && (relation->rd_rel->relkind == RELKIND_UNCATALOGED + || HeapTupleSatisfiesTimeQual(tuple,qual))) + return tuple; + + return (HeapTuple) NULL; +} + +/* + * TupleUpdatedByCurXactAndCmd() -- Returns true if this tuple has + * already been updated once by the current transaction/command + * pair. + */ +bool +TupleUpdatedByCurXactAndCmd(HeapTuple t) +{ + if (TransactionIdEquals(t->t_xmax, + GetCurrentTransactionId()) && + t->t_cmax == GetCurrentCommandId()) + return true; + + return false; +} diff --git a/src/backend/access/common/indextuple.c b/src/backend/access/common/indextuple.c new file mode 100644 index 00000000000..be5d2ccbd96 --- /dev/null +++ b/src/backend/access/common/indextuple.c @@ -0,0 +1,427 @@ +/*------------------------------------------------------------------------- + * + * indextuple.c-- + * This file contains index tuple accessor and mutator routines, + * as well as a few various tuple utilities. 
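/*
 * Illustration only: a hedged sketch of forming a one-column index tuple
 * with index_formtuple() (defined below).  As with heap_formtuple(), a ' '
 * in the null[] array means "not null" and anything else marks the
 * attribute as null; itupdesc and key are assumptions for the example.
 */
static IndexTuple
form_single_key_itup(TupleDesc itupdesc, Datum key)
{
    Datum  value[1];
    char   null[1];

    value[0] = key;
    null[0] = ' ';                     /* not null */
    return index_formtuple(itupdesc, value, null);
}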
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include + +#include "c.h" +#include "access/ibit.h" +#include "access/itup.h" /* where the declarations go */ +#include "access/heapam.h" +#include "access/genam.h" +#include "access/tupdesc.h" +#include "access/tupmacs.h" + +#include "storage/itemptr.h" +#include "utils/elog.h" +#include "utils/palloc.h" + +static Size IndexInfoFindDataOffset(unsigned short t_info); + +/* ---------------------------------------------------------------- + * index_ tuple interface routines + * ---------------------------------------------------------------- + */ + +/* ---------------- + * index_formtuple + * ---------------- + */ +IndexTuple +index_formtuple(TupleDesc tupleDescriptor, + Datum value[], + char null[]) +{ + register char *tp; /* tuple pointer */ + IndexTuple tuple; /* return tuple */ + Size size, hoff; + int i; + unsigned short infomask = 0; + bool hasnull = false; + char tupmask = 0; + int numberOfAttributes = tupleDescriptor->natts; + + if (numberOfAttributes > MaxIndexAttributeNumber) + elog(WARN, "index_formtuple: numberOfAttributes of %d > %d", + numberOfAttributes, MaxIndexAttributeNumber); + + + for (i = 0; i < numberOfAttributes && !hasnull; i++) { + if (null[i] != ' ') hasnull = true; + } + + if (hasnull) infomask |= INDEX_NULL_MASK; + + hoff = IndexInfoFindDataOffset(infomask); + size = hoff + + ComputeDataSize(tupleDescriptor, + value, null); + size = DOUBLEALIGN(size); /* be conservative */ + + tp = (char *) palloc(size); + tuple = (IndexTuple) tp; + memset(tp,0,(int)size); + + DataFill((char *)tp + hoff, + tupleDescriptor, + value, + null, + &tupmask, + (hasnull ? (bits8*)tp + sizeof(*tuple) : NULL)); + + /* + * We do this because DataFill wants to initialize a "tupmask" which + * is used for HeapTuples, but we want an indextuple infomask. The only + * "relevent" info is the "has variable attributes" field, which is in + * mask position 0x02. We have already set the null mask above. + */ + + if (tupmask & 0x02) infomask |= INDEX_VAR_MASK; + + /* + * Here we make sure that we can actually hold the size. We also want + * to make sure that size is not aligned oddly. This actually is a + * rather odd way to make sure the size is not too large overall. + */ + + if (size & 0xE000) + elog(WARN, "index_formtuple: data takes %d bytes: too big", size); + + + infomask |= size; + + /* ---------------- + * initialize metadata + * ---------------- + */ + tuple->t_info = infomask; + return (tuple); +} + +/* ---------------- + * fastgetiattr + * + * This is a newer version of fastgetiattr which attempts to be + * faster by caching attribute offsets in the attribute descriptor. + * + * an alternate way to speed things up would be to cache offsets + * with the tuple, but that seems more difficult unless you take + * the storage hit of actually putting those offsets into the + * tuple you send to disk. Yuck. + * + * This scheme will be slightly slower than that, but should + * preform well for queries which hit large #'s of tuples. After + * you cache the offsets once, examining all the other tuples using + * the same attribute descriptor will go much quicker. 
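/*
 * Illustration only: the attcacheoff caching described above pays off when
 * many tuples are decoded through the same tuple descriptor.  A hedged
 * sketch -- the loop keeps calling index_getattr() (a thin wrapper around
 * fastgetiattr(), see below) with one shared TupleDesc, so the offsets
 * cached while decoding the first tuple are reused for all the others.
 * itups[] and ntups are assumptions for the example.
 */
static void
decode_first_column(IndexTuple itups[], int ntups, TupleDesc itupdesc)
{
    int    i;
    bool   isnull;
    Datum  d;

    for (i = 0; i < ntups; i++) {
        d = index_getattr(itups[i], 1, itupdesc, &isnull);
        /* ... use d here unless isnull is set ... */
    }
}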
-cim 5/4/91 + * ---------------- + */ +char * +fastgetiattr(IndexTuple tup, + int attnum, + TupleDesc tupleDesc, + bool *isnull) +{ + register char *tp; /* ptr to att in tuple */ + register char *bp; /* ptr to att in tuple */ + int slow; /* do we have to walk nulls? */ + register int data_off; /* tuple data offset */ + + /* ---------------- + * sanity checks + * ---------------- + */ + + Assert(PointerIsValid(isnull)); + Assert(attnum > 0); + + /* ---------------- + * Three cases: + * + * 1: No nulls and no variable length attributes. + * 2: Has a null or a varlena AFTER att. + * 3: Has nulls or varlenas BEFORE att. + * ---------------- + */ + + *isnull = false; + data_off = IndexTupleHasMinHeader(tup) ? sizeof *tup : + IndexInfoFindDataOffset(tup->t_info); + + if (IndexTupleNoNulls(tup)) { + + /* first attribute is always at position zero */ + + if (attnum == 1) { + return(fetchatt(&(tupleDesc->attrs[0]), (char *) tup + data_off)); + } + attnum--; + + if (tupleDesc->attrs[attnum]->attcacheoff > 0) { + return(fetchatt(&(tupleDesc->attrs[attnum]), + (char *) tup + data_off + + tupleDesc->attrs[attnum]->attcacheoff)); + } + + tp = (char *) tup + data_off; + + slow = 0; + }else { /* there's a null somewhere in the tuple */ + + bp = (char *) tup + sizeof(*tup); /* "knows" t_bits are here! */ + slow = 0; + /* ---------------- + * check to see if desired att is null + * ---------------- + */ + + attnum--; + { + if (att_isnull(attnum, bp)) { + *isnull = true; + return NULL; + } + } + /* ---------------- + * Now check to see if any preceeding bits are null... + * ---------------- + */ + { + register int i = 0; /* current offset in bp */ + register int mask; /* bit in byte we're looking at */ + register char n; /* current byte in bp */ + register int byte, finalbit; + + byte = attnum >> 3; + finalbit = attnum & 0x07; + + for (; i <= byte; i++) { + n = bp[i]; + if (i < byte) { + /* check for nulls in any "earlier" bytes */ + if ((~n) != 0) { + slow++; + break; + } + } else { + /* check for nulls "before" final bit of last byte*/ + mask = (finalbit << 1) - 1; + if ((~n) & mask) + slow++; + } + } + } + tp = (char *) tup + data_off; + } + + /* now check for any non-fixed length attrs before our attribute */ + + if (!slow) { + if (tupleDesc->attrs[attnum]->attcacheoff > 0) { + return(fetchatt(&(tupleDesc->attrs[attnum]), + tp + tupleDesc->attrs[attnum]->attcacheoff)); + }else if (!IndexTupleAllFixed(tup)) { + register int j = 0; + + for (j = 0; j < attnum && !slow; j++) + if (tupleDesc->attrs[j]->attlen < 1) slow = 1; + } + } + + /* + * if slow is zero, and we got here, we know that we have a tuple with + * no nulls. We also know that we have to initialize the remainder of + * the attribute cached offset values. + */ + + if (!slow) { + register int j = 1; + register long off; + + /* + * need to set cache for some atts + */ + + tupleDesc->attrs[0]->attcacheoff = 0; + + while (tupleDesc->attrs[j]->attcacheoff > 0) j++; + + off = tupleDesc->attrs[j-1]->attcacheoff + + tupleDesc->attrs[j-1]->attlen; + + for (; j < attnum + 1; j++) { + /* + * Fix me when going to a machine with more than a four-byte + * word! + */ + + switch(tupleDesc->attrs[j]->attlen) + { + case -1: + off = (tupleDesc->attrs[j]->attalign=='d')? + DOUBLEALIGN(off):INTALIGN(off); + break; + case sizeof(char): + break; + case sizeof(short): + off = SHORTALIGN(off); + break; + case sizeof(int32): + off = INTALIGN(off); + break; + default: + if (tupleDesc->attrs[j]->attlen > sizeof(int32)) + off = (tupleDesc->attrs[j]->attalign=='d')? 
+ DOUBLEALIGN(off) : LONGALIGN(off); + else + elog(WARN, "fastgetiattr: attribute %d has len %d", + j, tupleDesc->attrs[j]->attlen); + break; + + } + + tupleDesc->attrs[j]->attcacheoff = off; + off += tupleDesc->attrs[j]->attlen; + } + + return(fetchatt( &(tupleDesc->attrs[attnum]), + tp + tupleDesc->attrs[attnum]->attcacheoff)); + }else { + register bool usecache = true; + register int off = 0; + register int i; + + /* + * Now we know that we have to walk the tuple CAREFULLY. + */ + + for (i = 0; i < attnum; i++) { + if (!IndexTupleNoNulls(tup)) { + if (att_isnull(i, bp)) { + usecache = false; + continue; + } + } + + if (usecache && tupleDesc->attrs[i]->attcacheoff > 0) { + off = tupleDesc->attrs[i]->attcacheoff; + if (tupleDesc->attrs[i]->attlen == -1) + usecache = false; + else + continue; + } + + if (usecache) tupleDesc->attrs[i]->attcacheoff = off; + switch(tupleDesc->attrs[i]->attlen) + { + case sizeof(char): + off++; + break; + case sizeof(short): + off = SHORTALIGN(off) + sizeof(short); + break; + case -1: + usecache = false; + off = (tupleDesc->attrs[i]->attalign=='d')? + DOUBLEALIGN(off):INTALIGN(off); + off += VARSIZE(tp + off); + break; + default: + if (tupleDesc->attrs[i]->attlen > sizeof(int32)) + off = (tupleDesc->attrs[i]->attalign=='d') ? + DOUBLEALIGN(off) + tupleDesc->attrs[i]->attlen : + LONGALIGN(off) + tupleDesc->attrs[i]->attlen; + else + elog(WARN, "fastgetiattr2: attribute %d has len %d", + i, tupleDesc->attrs[i]->attlen); + + break; + } + } + + return(fetchatt(&tupleDesc->attrs[attnum], tp + off)); + } +} + +/* ---------------- + * index_getattr + * ---------------- + */ +Datum +index_getattr(IndexTuple tuple, + AttrNumber attNum, + TupleDesc tupDesc, + bool *isNullOutP) +{ + Assert (attNum > 0); + + return (Datum) + fastgetiattr(tuple, attNum, tupDesc, isNullOutP); +} + +RetrieveIndexResult +FormRetrieveIndexResult(ItemPointer indexItemPointer, + ItemPointer heapItemPointer) +{ + RetrieveIndexResult result; + + Assert(ItemPointerIsValid(indexItemPointer)); + Assert(ItemPointerIsValid(heapItemPointer)); + + result = (RetrieveIndexResult) palloc(sizeof *result); + + result->index_iptr = *indexItemPointer; + result->heap_iptr = *heapItemPointer; + + return (result); +} + +/* + * Takes an infomask as argument (primarily because this needs to be usable + * at index_formtuple time so enough space is allocated). + * + * Change me if adding an attribute to IndexTuples!!!!!!!!!!! + */ +static Size +IndexInfoFindDataOffset(unsigned short t_info) +{ + if (!(t_info & INDEX_NULL_MASK)) + return((Size) sizeof(IndexTupleData)); + else { + Size size = sizeof(IndexTupleData); + + if (t_info & INDEX_NULL_MASK) { + size += sizeof(IndexAttributeBitMapData); + } + return DOUBLEALIGN(size); /* be conservative */ + } +} + +/* + * Copies source into target. If *target == NULL, we palloc space; otherwise + * we assume we have space that is already palloc'ed. 
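/*
 * Illustration only: typical use of CopyIndexTuple() below.  Passing a NULL
 * target makes the routine palloc the space itself; passing an existing
 * buffer reuses it (the caller must be sure it is large enough).
 */
static IndexTuple
copy_itup_into_fresh_palloc(IndexTuple src)
{
    IndexTuple  copy = NULL;

    CopyIndexTuple(src, &copy);        /* palloc'd because copy == NULL */
    return copy;
}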
+ */ +void +CopyIndexTuple(IndexTuple source, IndexTuple *target) +{ + Size size; + IndexTuple ret; + + size = IndexTupleSize(source); + if (*target == NULL) { + *target = (IndexTuple) palloc(size); + } + + ret = *target; + memmove((char*)ret, (char*)source, size); +} + diff --git a/src/backend/access/common/indexvalid.c b/src/backend/access/common/indexvalid.c new file mode 100644 index 00000000000..b437718cecc --- /dev/null +++ b/src/backend/access/common/indexvalid.c @@ -0,0 +1,84 @@ +/*------------------------------------------------------------------------- + * + * indexvalid.c-- + * index tuple qualification validity checking code + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/common/Attic/indexvalid.c,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "executor/execdebug.h" +#include "access/genam.h" +#include "access/iqual.h" /* where the declarations go */ +#include "access/itup.h" +#include "access/skey.h" + +#include "storage/buf.h" +#include "storage/bufpage.h" +#include "storage/itemid.h" +#include "utils/rel.h" + +/* ---------------------------------------------------------------- + * index scan key qualification code + * ---------------------------------------------------------------- + */ +int NIndexTupleProcessed; + +/* ---------------- + * index_keytest + * + * old comments + * May eventually combine with other tests (like timeranges)? + * Should have Buffer buffer; as an argument and pass it to amgetattr. + * ---------------- + */ +bool +index_keytest(IndexTuple tuple, + TupleDesc tupdesc, + int scanKeySize, + ScanKey key) +{ + bool isNull; + Datum datum; + int test; + + IncrIndexProcessed(); + + while (scanKeySize > 0) { + datum = index_getattr(tuple, + 1, + tupdesc, + &isNull); + + if (isNull) { + /* XXX eventually should check if SK_ISNULL */ + return (false); + } + + if (key[0].sk_flags & SK_COMMUTE) { + test = (int) (*(key[0].sk_func)) + (DatumGetPointer(key[0].sk_argument), + datum); + } else { + test = (int) (*(key[0].sk_func)) + (datum, + DatumGetPointer(key[0].sk_argument)); + } + + if (!test == !(key[0].sk_flags & SK_NEGATE)) { + return (false); + } + + scanKeySize -= 1; + key++; + } + + return (true); +} + diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c new file mode 100644 index 00000000000..556b73b9dfd --- /dev/null +++ b/src/backend/access/common/printtup.c @@ -0,0 +1,306 @@ +/*------------------------------------------------------------------------- + * + * printtup.c-- + * Routines to print out tuples to the destination (binary or non-binary + * portals, frontend/interactive backend, etc.). 
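/*
 * Illustration only: printtup() below announces which attributes are
 * present by packing one bit per attribute, most significant bit first,
 * with a set bit meaning "not null", and flushing a byte to the frontend
 * every eight attributes.  A hedged standalone sketch of that packing;
 * emit_byte() stands in for the pq_putint(byte, 1) calls.
 */
static void
pack_notnull_bits(bool isnull[], int natts, void (*emit_byte)(int))
{
    int  i;
    int  byte = 0;
    int  bit = 1 << 7;

    for (i = 0; i < natts; i++) {
        if (!isnull[i])
            byte |= bit;
        bit >>= 1;
        if (bit == 0) {                /* byte is full: flush and restart */
            emit_byte(byte);
            byte = 0;
            bit = 1 << 7;
        }
    }
    if (bit != (1 << 7))               /* partially filled final byte */
        emit_byte(byte);
}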
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include +#include + +#include "postgres.h" + +#include "access/heapam.h" +#include "access/htup.h" +#include "access/skey.h" +#include "access/printtup.h" +#include "access/tupdesc.h" +#include "storage/buf.h" +#include "utils/memutils.h" +#include "utils/palloc.h" +#include "fmgr.h" +#include "utils/elog.h" + +#include "utils/syscache.h" +#include "catalog/pg_type.h" + +#include "libpq/libpq.h" + +/* ---------------------------------------------------------------- + * printtup / debugtup support + * ---------------------------------------------------------------- + */ + +/* ---------------- + * typtoout - used by printtup and debugtup + * ---------------- + */ +Oid +typtoout(Oid type) +{ + HeapTuple typeTuple; + + typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type), + 0, 0, 0); + + if (HeapTupleIsValid(typeTuple)) + return((Oid) + ((TypeTupleForm) GETSTRUCT(typeTuple))->typoutput); + + elog(WARN, "typtoout: Cache lookup of type %d failed", type); + return(InvalidOid); +} + +Oid +gettypelem(Oid type) +{ + HeapTuple typeTuple; + + typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type), + 0,0,0); + + if (HeapTupleIsValid(typeTuple)) + return((Oid) + ((TypeTupleForm) GETSTRUCT(typeTuple))->typelem); + + elog(WARN, "typtoout: Cache lookup of type %d failed", type); + return(InvalidOid); +} + +/* ---------------- + * printtup + * ---------------- + */ +void +printtup(HeapTuple tuple, TupleDesc typeinfo) +{ + int i, j, k; + char *outputstr, *attr; + bool isnull; + Oid typoutput; + + /* ---------------- + * tell the frontend to expect new tuple data + * ---------------- + */ + pq_putnchar("D", 1); + + /* ---------------- + * send a bitmap of which attributes are null + * ---------------- + */ + j = 0; + k = 1 << 7; + for (i = 0; i < tuple->t_natts; ) { + attr = heap_getattr(tuple, InvalidBuffer, ++i, typeinfo, &isnull); + if (!isnull) + j |= k; + k >>= 1; + if (!(i & 7)) { + pq_putint(j, 1); + j = 0; + k = 1 << 7; + } + } + if (i & 7) + pq_putint(j, 1); + + /* ---------------- + * send the attributes of this tuple + * ---------------- + */ + for (i = 0; i < tuple->t_natts; ++i) { + attr = heap_getattr(tuple, InvalidBuffer, i+1, typeinfo, &isnull); + typoutput = typtoout((Oid) typeinfo->attrs[i]->atttypid); + + if (!isnull && OidIsValid(typoutput)) { + outputstr = fmgr(typoutput, attr, + gettypelem(typeinfo->attrs[i]->atttypid)); + pq_putint(strlen(outputstr)+4, 4); + pq_putnchar(outputstr, strlen(outputstr)); + pfree(outputstr); + } + } +} + +/* ---------------- + * printatt + * ---------------- + */ +static void +printatt(unsigned attributeId, + AttributeTupleForm attributeP, + char *value) +{ + printf("\t%2d: %.*s%s%s%s\t(typeid = %u, len = %d, byval = %c)\n", + attributeId, + NAMEDATALEN, /* attname is a char16 */ + attributeP->attname.data, + value != NULL ? " = \"" : "", + value != NULL ? value : "", + value != NULL ? "\"" : "", + (unsigned int) (attributeP->atttypid), + attributeP->attlen, + attributeP->attbyval ? 
't' : 'f'); +} + +/* ---------------- + * showatts + * ---------------- + */ +void +showatts(char *name, TupleDesc tupleDesc) +{ + int i; + int natts = tupleDesc->natts; + AttributeTupleForm *attinfo = tupleDesc->attrs; + + puts(name); + for (i = 0; i < natts; ++i) + printatt((unsigned) i+1, attinfo[i], (char *) NULL); + printf("\t----\n"); +} + +/* ---------------- + * debugtup + * ---------------- + */ +void +debugtup(HeapTuple tuple, TupleDesc typeinfo) +{ + register int i; + char *attr, *value; + bool isnull; + Oid typoutput; + + for (i = 0; i < tuple->t_natts; ++i) { + attr = heap_getattr(tuple, InvalidBuffer, i+1, typeinfo, &isnull); + typoutput = typtoout((Oid) typeinfo->attrs[i]->atttypid); + + if (!isnull && OidIsValid(typoutput)) { + value = fmgr(typoutput, attr, + gettypelem(typeinfo->attrs[i]->atttypid)); + printatt((unsigned) i+1, typeinfo->attrs[i], value); + pfree(value); + } + } + printf("\t----\n"); +} + +/*#define IPORTAL_DEBUG*/ + +/* ---------------- + * printtup_internal + * Protocol expects either T, D, C, E, or N. + * We use a different data prefix, e.g. 'B' instead of 'D' to + * indicate a tuple in internal (binary) form. + * + * This is same as printtup, except we don't use the typout func. + * ---------------- + */ +void +printtup_internal(HeapTuple tuple, TupleDesc typeinfo) +{ + int i, j, k; + char *attr; + bool isnull; + + /* ---------------- + * tell the frontend to expect new tuple data + * ---------------- + */ + pq_putnchar("B", 1); + + /* ---------------- + * send a bitmap of which attributes are null + * ---------------- + */ + j = 0; + k = 1 << 7; + for (i = 0; i < tuple->t_natts; ) { + attr = heap_getattr(tuple, InvalidBuffer, ++i, typeinfo, &isnull); + if (!isnull) + j |= k; + k >>= 1; + if (!(i & 7)) { + pq_putint(j, 1); + j = 0; + k = 1 << 7; + } + } + if (i & 7) + pq_putint(j, 1); + + /* ---------------- + * send the attributes of this tuple + * ---------------- + */ +#ifdef IPORTAL_DEBUG + fprintf(stderr, "sending tuple with %d atts\n", tuple->t_natts); +#endif + for (i = 0; i < tuple->t_natts; ++i) { + int32 len = typeinfo->attrs[i]->attlen; + + attr = heap_getattr(tuple, InvalidBuffer, i+1, typeinfo, &isnull); + if (!isnull) { + /* # of bytes, and opaque data */ + if (len == -1) { + /* variable length, assume a varlena structure */ + len = VARSIZE(attr) - VARHDRSZ; + + pq_putint(len, sizeof(int32)); + pq_putnchar(VARDATA(attr), len); +#ifdef IPORTAL_DEBUG + { + char *d = VARDATA(attr); + + fprintf(stderr, "length %d data %x%x%x%x\n", + len, *d, *(d+1), *(d+2), *(d+3)); + } +#endif + } else { + /* fixed size */ + if (typeinfo->attrs[i]->attbyval) { + int8 i8; + int16 i16; + int32 i32; + + pq_putint(len, sizeof(int32)); + switch (len) { + case sizeof(int8): + i8 = DatumGetChar(attr); + pq_putnchar((char *) &i8, len); + break; + case sizeof(int16): + i16 = DatumGetInt16(attr); + pq_putnchar((char *) &i16, len); + break; + case sizeof(int32): + i32 = DatumGetInt32(attr); + pq_putnchar((char *) &i32, len); + break; + } +#ifdef IPORTAL_DEBUG + fprintf(stderr, "byval length %d data %d\n", len, attr); +#endif + } else { + pq_putint(len, sizeof(int32)); + pq_putnchar(attr, len); +#ifdef IPORTAL_DEBUG + fprintf(stderr, "byref length %d data %x\n", len, attr); +#endif + } + } + } + } +} diff --git a/src/backend/access/common/scankey.c b/src/backend/access/common/scankey.c new file mode 100644 index 00000000000..7a47219a73c --- /dev/null +++ b/src/backend/access/common/scankey.c @@ -0,0 +1,68 @@ 
+/*------------------------------------------------------------------------- + * + * scan.c-- + * scan direction and key code + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/common/scankey.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "c.h" +#include "access/sdir.h" +#include "access/attnum.h" +#include "access/skey.h" + +#include "fmgr.h" + +/* + * ScanKeyEntryIsLegal -- + * True iff the scan key entry is legal. + */ +#define ScanKeyEntryIsLegal(entry) \ + ((bool) (AssertMacro(PointerIsValid(entry)) && \ + AttributeNumberIsValid(entry->sk_attno))) + +/* + * ScanKeyEntrySetIllegal -- + * Marks a scan key entry as illegal. + */ +void +ScanKeyEntrySetIllegal(ScanKey entry) +{ + + Assert(PointerIsValid(entry)); + + entry->sk_flags = 0; /* just in case... */ + entry->sk_attno = InvalidAttrNumber; + entry->sk_procedure = 0; /* should be InvalidRegProcedure */ +} + +/* + * ScanKeyEntryInitialize -- + * Initializes an scan key entry. + * + * Note: + * Assumes the scan key entry is valid. + * Assumes the intialized scan key entry will be legal. + */ +void +ScanKeyEntryInitialize(ScanKey entry, + bits16 flags, + AttrNumber attributeNumber, + RegProcedure procedure, + Datum argument) +{ + Assert(PointerIsValid(entry)); + + entry->sk_flags = flags; + entry->sk_attno = attributeNumber; + entry->sk_procedure = procedure; + entry->sk_argument = argument; + fmgr_info(procedure, &entry->sk_func, &entry->sk_nargs); + + Assert(ScanKeyEntryIsLegal(entry)); +} diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c new file mode 100644 index 00000000000..527eb5113df --- /dev/null +++ b/src/backend/access/common/tupdesc.c @@ -0,0 +1,398 @@ +/*------------------------------------------------------------------------- + * + * tupdesc.c-- + * POSTGRES tuple descriptor support code + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + * NOTES + * some of the executor utility code such as "ExecTypeFromTL" should be + * moved here. + * + *------------------------------------------------------------------------- + */ +#include /* for sprintf() */ +#include +#include + +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/parsenodes.h" + +#include "access/attnum.h" +#include "access/htup.h" +#include "access/tupdesc.h" + +#include "utils/builtins.h" +#include "utils/elog.h" /* XXX generate exceptions instead */ +#include "utils/palloc.h" + +#include "utils/syscache.h" +#include "catalog/pg_type.h" + +#include "nodes/primnodes.h" + +#include "parser/catalog_utils.h" + +/* ---------------------------------------------------------------- + * CreateTemplateTupleDesc + * + * This function allocates and zeros a tuple descriptor structure. + * ---------------------------------------------------------------- + */ +TupleDesc +CreateTemplateTupleDesc(int natts) +{ + uint32 size; + TupleDesc desc; + + /* ---------------- + * sanity checks + * ---------------- + */ + AssertArg(natts >= 1); + + /* ---------------- + * allocate enough memory for the tuple descriptor and + * zero it as TupleDescInitEntry assumes that the descriptor + * is filled with NULL pointers. 
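/*
 * Illustration only: a hedged sketch of building a two-column descriptor
 * with CreateTemplateTupleDesc() and TupleDescInitEntry() (both in this
 * file).  The column and type names are invented for the example; in real
 * callers the type names must already exist in pg_type, or be patched up
 * later as the self-reference code further down explains.
 */
static TupleDesc
make_example_desc(void)
{
    TupleDesc  desc = CreateTemplateTupleDesc(2);

    (void) TupleDescInitEntry(desc, 1, "name", "char16", 0, false);
    (void) TupleDescInitEntry(desc, 2, "salary", "int4", 0, false);
    return desc;
}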
+ * ---------------- + */ + size = natts * sizeof (AttributeTupleForm); + desc = (TupleDesc) palloc(sizeof(struct tupleDesc)); + desc->attrs = (AttributeTupleForm*) palloc(size); + memset(desc->attrs, 0, size); + + desc->natts = natts; + + return (desc); +} + +/* ---------------------------------------------------------------- + * CreateTupleDesc + * + * This function allocates a new TupleDesc from AttributeTupleForm array + * ---------------------------------------------------------------- + */ +TupleDesc +CreateTupleDesc(int natts, AttributeTupleForm* attrs) +{ + TupleDesc desc; + + /* ---------------- + * sanity checks + * ---------------- + */ + AssertArg(natts >= 1); + + desc = (TupleDesc) palloc(sizeof(struct tupleDesc)); + desc->attrs = attrs; + desc->natts = natts; + + + return (desc); +} + +/* ---------------------------------------------------------------- + * CreateTupleDescCopy + * + * This function creates a new TupleDesc by copying from an existing + * TupleDesc + * + * ---------------------------------------------------------------- + */ +TupleDesc +CreateTupleDescCopy(TupleDesc tupdesc) +{ + TupleDesc desc; + int i, size; + + desc = (TupleDesc) palloc(sizeof(struct tupleDesc)); + desc->natts = tupdesc->natts; + size = desc->natts * sizeof (AttributeTupleForm); + desc->attrs = (AttributeTupleForm*) palloc(size); + for (i=0;inatts;i++) { + desc->attrs[i] = + (AttributeTupleForm)palloc(ATTRIBUTE_TUPLE_SIZE); + memmove(desc->attrs[i], + tupdesc->attrs[i], + ATTRIBUTE_TUPLE_SIZE); + } + return desc; +} + +/* ---------------------------------------------------------------- + * TupleDescInitEntry + * + * This function initializes a single attribute structure in + * a preallocated tuple descriptor. + * ---------------------------------------------------------------- + */ +bool +TupleDescInitEntry(TupleDesc desc, + AttrNumber attributeNumber, + char *attributeName, + char *typeName, + int attdim, + bool attisset) +{ + HeapTuple tuple; + TypeTupleForm typeForm; + AttributeTupleForm att; + + /* ---------------- + * sanity checks + * ---------------- + */ + AssertArg(PointerIsValid(desc)); + AssertArg(attributeNumber >= 1); + /* attributeName's are sometimes NULL, + from resdom's. I don't know why that is, though -- Jolly */ +/* AssertArg(NameIsValid(attributeName));*/ +/* AssertArg(NameIsValid(typeName));*/ + + AssertArg(!PointerIsValid(desc->attrs[attributeNumber - 1])); + + + /* ---------------- + * allocate storage for this attribute + * ---------------- + */ + + att = (AttributeTupleForm) palloc(ATTRIBUTE_TUPLE_SIZE); + desc->attrs[attributeNumber - 1] = att; + + /* ---------------- + * initialize some of the attribute fields + * ---------------- + */ + att->attrelid = 0; /* dummy value */ + + if (attributeName != NULL) + namestrcpy(&(att->attname), attributeName); + else + memset(att->attname.data,0,NAMEDATALEN); + + + att->attdefrel = 0; /* dummy value */ + att->attnvals = 0; /* dummy value */ + att->atttyparg = 0; /* dummy value */ + att->attbound = 0; /* dummy value */ + att->attcanindex = 0; /* dummy value */ + att->attproc = 0; /* dummy value */ + att->attcacheoff = -1; + + att->attnum = attributeNumber; + att->attnelems = attdim; + att->attisset = attisset; + + /* ---------------- + * search the system cache for the type tuple of the attribute + * we are creating so that we can get the typeid and some other + * stuff. 
+ * + * Note: in the special case of + * + * create EMP (name = char16, manager = EMP) + * + * RelationNameCreateHeapRelation() calls BuildDesc() which + * calls this routine and since EMP does not exist yet, the + * system cache lookup below fails. That's fine, but rather + * then doing a elog(WARN) we just leave that information + * uninitialized, return false, then fix things up later. + * -cim 6/14/90 + * ---------------- + */ + tuple = SearchSysCacheTuple(TYPNAME, PointerGetDatum(typeName), + 0,0,0); + if (! HeapTupleIsValid(tuple)) { + /* ---------------- + * here type info does not exist yet so we just fill + * the attribute with dummy information and return false. + * ---------------- + */ + att->atttypid = InvalidOid; + att->attlen = (int16) 0; + att->attbyval = (bool) 0; + att->attalign = 'i'; + return false; + } + + /* ---------------- + * type info exists so we initialize our attribute + * information from the type tuple we found.. + * ---------------- + */ + typeForm = (TypeTupleForm) GETSTRUCT(tuple); + + att->atttypid = tuple->t_oid; + att->attalign = typeForm->typalign; + + /* ------------------------ + If this attribute is a set, what is really stored in the + attribute is the OID of a tuple in the pg_proc catalog. + The pg_proc tuple contains the query string which defines + this set - i.e., the query to run to get the set. + So the atttypid (just assigned above) refers to the type returned + by this query, but the actual length of this attribute is the + length (size) of an OID. + + Why not just make the atttypid point to the OID type, instead + of the type the query returns? Because the executor uses the atttypid + to tell the front end what type will be returned (in BeginCommand), + and in the end the type returned will be the result of the query, not + an OID. + + Why not wait until the return type of the set is known (i.e., the + recursive call to the executor to execute the set has returned) + before telling the front end what the return type will be? Because + the executor is a delicate thing, and making sure that the correct + order of front-end commands is maintained is messy, especially + considering that target lists may change as inherited attributes + are considered, etc. Ugh. + ----------------------------------------- + */ + if (attisset) { + Type t = type("oid"); + att->attlen = tlen(t); + att->attbyval = tbyval(t); + } else { + att->attlen = typeForm->typlen; + att->attbyval = typeForm->typbyval; + } + + + return true; +} + + +/* ---------------------------------------------------------------- + * TupleDescMakeSelfReference + * + * This function initializes a "self-referential" attribute like + * manager in "create EMP (name=text, manager = EMP)". + * It calls TypeShellMake() which inserts a "shell" type + * tuple into pg_type. A self-reference is one kind of set, so + * its size and byval are the same as for a set. See the comments + * above in TupleDescInitEntry. 
+ * ---------------------------------------------------------------- + */ +static void +TupleDescMakeSelfReference(TupleDesc desc, + AttrNumber attnum, + char *relname) +{ + AttributeTupleForm att; + Type t = type("oid"); + + att = desc->attrs[attnum-1]; + att->atttypid = TypeShellMake(relname); + att->attlen = tlen(t); + att->attbyval = tbyval(t); + att->attnelems = 0; +} + +/* ---------------------------------------------------------------- + * BuildDescForRelation + * + * This is a general purpose function identical to BuildDesc + * but is used by the DefineRelation() code to catch the + * special case where you + * + * create FOO ( ..., x = FOO ) + * + * here, the initial type lookup for "x = FOO" will fail + * because FOO isn't in the catalogs yet. But since we + * are creating FOO, instead of doing an elog() we add + * a shell type tuple to pg_type and fix things later + * in amcreate(). + * ---------------------------------------------------------------- + */ +TupleDesc +BuildDescForRelation(List *schema, char *relname) +{ + int natts; + AttrNumber attnum; + List *p; + TupleDesc desc; + char *attname; + char *typename; + int attdim; + bool attisset; + + /* ---------------- + * allocate a new tuple descriptor + * ---------------- + */ + natts = length(schema); + desc = CreateTemplateTupleDesc(natts); + + attnum = 0; + + typename = palloc(NAMEDATALEN+1); + + foreach(p, schema) { + ColumnDef *entry; + List *arry; + + /* ---------------- + * for each entry in the list, get the name and type + * information from the list and have TupleDescInitEntry + * fill in the attribute information we need. + * ---------------- + */ + attnum++; + + entry = lfirst(p); + attname = entry->colname; + arry = entry->typename->arrayBounds; + attisset = entry->typename->setof; + + if (arry != NIL) { + char buf[20]; + + attdim = length(arry); + + /* array of XXX is _XXX (inherited from release 3) */ + sprintf(buf, "_%.*s", NAMEDATALEN, entry->typename->name); + strcpy(typename, buf); + } else { + strcpy(typename, entry->typename->name); + attdim = 0; + } + + if (! TupleDescInitEntry(desc, attnum, attname, + typename, attdim, attisset)) { + /* ---------------- + * if TupleDescInitEntry() fails, it means there is + * no type in the system catalogs. So now we check if + * the type name equals the relation name. If so we + * have a self reference, otherwise it's an error. + * ---------------- + */ + if (!strcmp(typename, relname)) { + TupleDescMakeSelfReference(desc, attnum, relname); + } else + elog(WARN, "DefineRelation: no such type %.*s", + NAMEDATALEN, typename); + } + + /* + * this is for char() and varchar(). When an entry is of type + * char() or varchar(), typlen is set to the appropriate length, + * which we'll use here instead. (The catalog lookup only returns + * the length of bpchar and varchar which is not what we want!) 
+ * - ay 6/95 + */ + if (entry->typename->typlen > 0) { + desc->attrs[attnum - 1]->attlen = entry->typename->typlen; + } + } + return desc; +} + diff --git a/src/backend/access/funcindex.h b/src/backend/access/funcindex.h new file mode 100644 index 00000000000..4689df19c04 --- /dev/null +++ b/src/backend/access/funcindex.h @@ -0,0 +1,43 @@ +/*------------------------------------------------------------------------- + * + * funcindex.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: funcindex.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef _FUNC_INDEX_INCLUDED_ +#define _FUNC_INDEX_INCLUDED_ + +#include "postgres.h" + +typedef struct { + int nargs; + Oid arglist[8]; + Oid procOid; + NameData funcName; +} FuncIndexInfo; + +typedef FuncIndexInfo *FuncIndexInfoPtr; + +/* + * some marginally useful macro definitions + */ +/* #define FIgetname(FINFO) (&((FINFO)->funcName.data[0]))*/ +#define FIgetname(FINFO) (FINFO)->funcName.data +#define FIgetnArgs(FINFO) (FINFO)->nargs +#define FIgetProcOid(FINFO) (FINFO)->procOid +#define FIgetArg(FINFO, argnum) (FINFO)->arglist[argnum] +#define FIgetArglist(FINFO) (FINFO)->arglist + +#define FIsetnArgs(FINFO, numargs) ((FINFO)->nargs = numargs) +#define FIsetProcOid(FINFO, id) ((FINFO)->procOid = id) +#define FIsetArg(FINFO, argnum, argtype) ((FINFO)->arglist[argnum] = argtype) + +#define FIisFunctionalIndex(FINFO) (FINFO->procOid != InvalidOid) + +#endif /* FUNCINDEX_H */ diff --git a/src/backend/access/genam.h b/src/backend/access/genam.h new file mode 100644 index 00000000000..b2544650de8 --- /dev/null +++ b/src/backend/access/genam.h @@ -0,0 +1,60 @@ +/*------------------------------------------------------------------------- + * + * genam.h-- + * POSTGRES general access method definitions. 
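/*
 * Illustration only: a hedged sketch of driving the generalized index_
 * interface declared below -- open an index, scan it from the start, and
 * clean up.  ForwardScanDirection is assumed to come from access/sdir.h,
 * and process() is a stand-in for whatever the caller does with each
 * result; the results themselves are palloc'd (see FormRetrieveIndexResult
 * in indextuple.c), so they are pfree'd here once used.
 */
static void
scan_whole_index(Oid indexrelid, void (*process)(RetrieveIndexResult))
{
    Relation             irel = index_open(indexrelid);
    IndexScanDesc        scan;
    RetrieveIndexResult  res;

    scan = index_beginscan(irel, false, 0, (ScanKey) NULL);
    while ((res = index_getnext(scan, ForwardScanDirection)) != NULL) {
        process(res);
        pfree(res);
    }
    index_endscan(scan);
    index_close(irel);
}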
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: genam.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef GENAM_H +#define GENAM_H + +#include "postgres.h" + +#include "access/attnum.h" +#include "access/htup.h" +#include "access/istrat.h" +#include "access/itup.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "access/sdir.h" +#include "access/funcindex.h" + +/* ---------------- + * generalized index_ interface routines + * ---------------- + */ +extern Relation index_open(Oid relationId); +extern Relation index_openr(char *relationName); +extern void index_close(Relation relation); +extern InsertIndexResult index_insert(Relation relation, + IndexTuple indexTuple); +extern void index_delete(Relation relation, ItemPointer indexItem); +extern IndexScanDesc index_beginscan(Relation relation, bool scanFromEnd, + uint16 numberOfKeys, ScanKey key); +extern void index_rescan(IndexScanDesc scan, bool scanFromEnd, ScanKey key); +extern void index_endscan(IndexScanDesc scan); +extern void index_markpos(IndexScanDesc scan); +extern void index_restrpos(IndexScanDesc scan); +extern RetrieveIndexResult index_getnext(IndexScanDesc scan, + ScanDirection direction); +extern RegProcedure index_getprocid(Relation irel, AttrNumber attnum, + uint16 procnum); +extern Datum GetIndexValue(HeapTuple tuple, TupleDesc hTupDesc, + int attOff, AttrNumber attrNums[], FuncIndexInfo *fInfo, + bool *attNull, Buffer buffer); + +/* in genam.c */ +extern IndexScanDesc RelationGetIndexScan(Relation relation, bool scanFromEnd, + uint16 numberOfKeys, ScanKey key); +extern void IndexScanRestart(IndexScanDesc scan, bool scanFromEnd, + ScanKey key); +extern void IndexScanEnd(IndexScanDesc scan); +extern void IndexScanMarkPosition(IndexScanDesc scan); +extern void IndexScanRestorePosition(IndexScanDesc scan); + +#endif /* GENAM_H */ diff --git a/src/backend/access/hash.h b/src/backend/access/hash.h new file mode 100644 index 00000000000..21407696b44 --- /dev/null +++ b/src/backend/access/hash.h @@ -0,0 +1,336 @@ +/*------------------------------------------------------------------------- + * + * hash.h-- + * header file for postgres hash access method implementation + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: hash.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + * NOTES + * modeled after Margo Seltzer's hash implementation for unix. + * + *------------------------------------------------------------------------- + */ +#ifndef HASH_H +#define HASH_H + +#include "access/itup.h" + +/* + * An overflow page is a spare page allocated for storing data whose + * bucket doesn't have room to store it. We use overflow pages rather + * than just splitting the bucket because there is a linear order in + * the way we split buckets. In other words, if there isn't enough space + * in the bucket itself, put it in an overflow page. + * + * Overflow page addresses are stored in form: (Splitnumber, Page offset). + * + * A splitnumber is the number of the generation where the table doubles + * in size. The ovflpage's offset within the splitnumber; offsets start + * at 1. + * + * We convert the stored bitmap address into a page address with the + * macro OADDR_OF(S, O) where S is the splitnumber and O is the page + * offset. 
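/*
 * Illustration only: a worked example of the overflow address encoding
 * described above, using the SPLITSHIFT/SPLITMASK, OADDR_OF, SPLITNUM and
 * OPAGENUM macros defined just below.  For splitnumber 3, page offset 7:
 *
 *     OverflowPageAddress a = OADDR_OF(3, 7);   upper 5 bits hold 3,
 *                                               lower 11 bits hold 7
 *     SPLITNUM(a) == 3
 *     OPAGENUM(a) == 7
 *
 * Page offsets start at 1, so an address whose OPAGENUM() is 0 can never
 * be a real overflow page; that is why InvalidOvflAddress is 0.
 */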
+ */ +typedef uint32 Bucket; +typedef bits16 OverflowPageAddress; +typedef uint32 SplitNumber; +typedef uint32 PageOffset; + +/* A valid overflow address will always have a page offset >= 1 */ +#define InvalidOvflAddress 0 + +#define SPLITSHIFT 11 +#define SPLITMASK 0x7FF +#define SPLITNUM(N) ((SplitNumber)(((uint32)(N)) >> SPLITSHIFT)) +#define OPAGENUM(N) ((PageOffset)((N) & SPLITMASK)) +#define OADDR_OF(S,O) ((OverflowPageAddress)((uint32)((uint32)(S) << SPLITSHIFT) + (O))) + +#define BUCKET_TO_BLKNO(B) \ + ((Bucket) ((B) + ((B) ? metap->SPARES[_hash_log2((B)+1)-1] : 0)) + 1) +#define OADDR_TO_BLKNO(B) \ + ((BlockNumber) \ + (BUCKET_TO_BLKNO ( (1 << SPLITNUM((B))) -1 ) + OPAGENUM((B)))); + +/* + * hasho_flag tells us which type of page we're looking at. For + * example, knowing overflow pages from bucket pages is necessary + * information when you're deleting tuples from a page. If all the + * tuples are deleted from an overflow page, the overflow is made + * available to other buckets by calling _hash_freeovflpage(). If all + * the tuples are deleted from a bucket page, no additional action is + * necessary. + */ + +#define LH_UNUSED_PAGE (0) +#define LH_OVERFLOW_PAGE (1 << 0) +#define LH_BUCKET_PAGE (1 << 1) +#define LH_BITMAP_PAGE (1 << 2) +#define LH_META_PAGE (1 << 3) + +typedef struct HashPageOpaqueData { + bits16 hasho_flag; /* is this page a bucket or ovfl */ + Bucket hasho_bucket; /* bucket number this pg belongs to */ + OverflowPageAddress hasho_oaddr; /* ovfl address of this ovfl pg */ + BlockNumber hasho_nextblkno; /* next ovfl blkno */ + BlockNumber hasho_prevblkno; /* previous ovfl (or bucket) blkno */ +} HashPageOpaqueData; + +typedef HashPageOpaqueData *HashPageOpaque; + +/* + * ScanOpaqueData is used to remember which buffers we're currently + * examining in the scan. We keep these buffers locked and pinned and + * recorded in the opaque entry of the scan in order to avoid doing a + * ReadBuffer() for every tuple in the index. This avoids semop() calls, + * which are expensive. + */ + +typedef struct HashScanOpaqueData { + Buffer hashso_curbuf; + Buffer hashso_mrkbuf; +} HashScanOpaqueData; + +typedef HashScanOpaqueData *HashScanOpaque; + +/* + * Definitions for metapage. + */ + +#define HASH_METAPAGE 0 /* metapage is always block 0 */ + +#define HASH_MAGIC 0x6440640 +#define HASH_VERSION 0 + +/* + * NCACHED is used to set the array sizeof spares[] & bitmaps[]. + * + * Spares[] is used to hold the number overflow pages currently + * allocated at a certain splitpoint. For example, if spares[3] = 7 + * then there are a maximum of 7 ovflpages available at splitpoint 3. + * The value in spares[] will change as ovflpages are added within + * a splitpoint. + * + * Within a splitpoint, one can find which ovflpages are available and + * which are used by looking at a bitmaps that are stored on the ovfl + * pages themselves. There is at least one bitmap for every splitpoint's + * ovflpages. Bitmaps[] contains the ovflpage addresses of the ovflpages + * that hold the ovflpage bitmaps. + * + * The reason that the size is restricted to NCACHED (32) is because + * the bitmaps are 16 bits: upper 5 represent the splitpoint, lower 11 + * indicate the page number within the splitpoint. Since there are + * only 5 bits to store the splitpoint, there can only be 32 splitpoints. + * Both spares[] and bitmaps[] use splitpoints as there indices, so there + * can only be 32 of them. 
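/*
 * Illustration only: each splitpoint's overflow-page bitmap is a plain bit
 * array stored on its bitmap pages, manipulated BITS_PER_MAP (32) bits per
 * word by the SETBIT/CLRBIT/ISSET macros defined further below.  A hedged
 * sketch -- bitmap would point at such an array (e.g. via
 * HashPageGetBitmap()), bit numbers are zero-based, and which polarity
 * means "free" is left to the overflow-page code itself:
 */
static void
bitmap_set_bit(uint32 *bitmap, int bitno)
{
    SETBIT(bitmap, bitno);
}

static int
bitmap_test_bit(uint32 *bitmap, int bitno)
{
    return ISSET(bitmap, bitno) != 0;
}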
+ */ + +#define NCACHED 32 + + +typedef struct HashMetaPageData { + PageHeaderData hashm_phdr; /* pad for page header + (do not use) */ + uint32 hashm_magic; /* magic no. for hash tables */ + uint32 hashm_version; /* version ID */ + uint32 hashm_nkeys; /* number of keys stored in + the table */ + uint16 hashm_ffactor; /* fill factor */ + uint16 hashm_bsize; /* bucket size (bytes) - + must be a power of 2 */ + uint16 hashm_bshift; /* bucket shift */ + uint16 hashm_bmsize; /* bitmap array size (bytes) - + must be a power of 2 */ + uint32 hashm_maxbucket; /* ID of maximum bucket + in use */ + uint32 hashm_highmask; /* mask to modulo into + entire table */ + uint32 hashm_lowmask; /* mask to modulo into lower + half of table */ + uint32 hashm_ovflpoint; /* pageno. from which ovflpgs + being allocated */ + uint32 hashm_lastfreed; /* last ovflpage freed */ + uint32 hashm_nmaps; /* Initial number of bitmaps */ + uint32 hashm_spares[NCACHED]; /* spare pages available at + splitpoints */ + BlockNumber hashm_mapp[NCACHED]; /* blknumbers of ovfl page + maps */ + RegProcedure hashm_procid; /* hash procedure id from + pg_proc */ +} HashMetaPageData; + +typedef HashMetaPageData *HashMetaPage; + +/* Short hands for accessing structure */ +#define BSHIFT hashm_bshift +#define OVFL_POINT hashm_ovflpoint +#define LAST_FREED hashm_lastfreed +#define MAX_BUCKET hashm_maxbucket +#define FFACTOR hashm_ffactor +#define HIGH_MASK hashm_highmask +#define LOW_MASK hashm_lowmask +#define NKEYS hashm_nkeys +#define SPARES hashm_spares + +extern bool BuildingHash; + +typedef struct HashItemData { + IndexTupleData hash_itup; +} HashItemData; + +typedef HashItemData *HashItem; + +/* + * Constants + */ +#define DEFAULT_FFACTOR 300 +#define SPLITMAX 8 +#define BYTE_TO_BIT 3 /* 2^3 bits/byte */ +#define INT_TO_BYTE 2 /* 2^2 bytes/int */ +#define INT_TO_BIT 5 /* 2^5 bits/int */ +#define ALL_SET ((uint32) ~0) + +/* + * bitmap pages do not contain tuples. they do contain the standard + * page headers and trailers; however, everything in between is a + * giant bit array. the number of bits that fit on a page obviously + * depends on the page size and the header/trailer overhead. + */ +#define BMPGSZ_BYTE(metap) ((metap)->hashm_bmsize) +#define BMPGSZ_BIT(metap) ((metap)->hashm_bmsize << BYTE_TO_BIT) +#define HashPageGetBitmap(pg) \ + ((uint32 *) (((char *) (pg)) + DOUBLEALIGN(sizeof(PageHeaderData)))) + +/* + * The number of bits in an ovflpage bitmap which + * tells which ovflpages are empty versus in use (NOT the number of + * bits in an overflow page *address* bitmap). + */ +#define BITS_PER_MAP 32 /* Number of bits in ovflpage bitmap */ + +/* Given the address of the beginning of a big map, clear/set the nth bit */ +#define CLRBIT(A, N) ((A)[(N)/BITS_PER_MAP] &= ~(1<<((N)%BITS_PER_MAP))) +#define SETBIT(A, N) ((A)[(N)/BITS_PER_MAP] |= (1<<((N)%BITS_PER_MAP))) +#define ISSET(A, N) ((A)[(N)/BITS_PER_MAP] & (1<<((N)%BITS_PER_MAP))) + +/* + * page locking modes + */ +#define HASH_READ 0 +#define HASH_WRITE 1 + +/* + * In general, the hash code tries to localize its knowledge about page + * layout to a couple of routines. However, we need a special value to + * indicate "no page number" in those places where we expect page numbers. + */ + +#define P_NONE 0 + +/* + * Strategy number. There's only one valid strategy for hashing: equality. 
+ */ + +#define HTEqualStrategyNumber 1 +#define HTMaxStrategyNumber 1 + +/* + * When a new operator class is declared, we require that the user supply + * us with an amproc procudure for hashing a key of the new type. + * Since we only have one such proc in amproc, it's number 1. + */ + +#define HASHPROC 1 + +/* public routines */ + +extern void hashbuild(Relation heap, Relation index, int natts, + AttrNumber *attnum, IndexStrategy istrat, uint16 pcount, + Datum *params, FuncIndexInfo *finfo, PredInfo *predInfo); +extern InsertIndexResult hashinsert(Relation rel, IndexTuple itup); +extern char *hashgettuple(IndexScanDesc scan, ScanDirection dir); +extern char *hashbeginscan(Relation rel, bool fromEnd, uint16 keysz, + ScanKey scankey); +extern void hashrescan(IndexScanDesc scan, bool fromEnd, ScanKey scankey); +extern void hashendscan(IndexScanDesc scan); +extern void hashmarkpos(IndexScanDesc scan); +extern void hashrestrpos(IndexScanDesc scan); +extern void hashdelete(Relation rel, ItemPointer tid); + +/* hashfunc.c */ +extern uint32 hashint2(int16 key); +extern uint32 hashint4(uint32 key); +extern uint32 hashfloat4(float32 keyp); +extern uint32 hashfloat8(float64 keyp); +extern uint32 hashoid(Oid key); +extern uint32 hashchar(char key); +extern uint32 hashchar2(uint16 intkey); +extern uint32 hashchar4(uint32 intkey); +extern uint32 hashchar8(char *key); +extern uint32 hashchar16(char *key); +extern uint32 hashtext(struct varlena *key); + +/* private routines */ + +/* hashinsert.c */ +extern InsertIndexResult _hash_doinsert(Relation rel, HashItem hitem); + + +/* hashovfl.c */ +extern Buffer _hash_addovflpage(Relation rel, Buffer *metabufp, Buffer buf); +extern Buffer _hash_freeovflpage(Relation rel, Buffer ovflbuf); +extern int32 _hash_initbitmap(Relation rel, HashMetaPage metap, int32 pnum, + int32 nbits, int32 ndx); +extern void _hash_squeezebucket(Relation rel, HashMetaPage metap, + Bucket bucket); + + +/* hashpage.c */ +extern void _hash_metapinit(Relation rel); +extern Buffer _hash_getbuf(Relation rel, BlockNumber blkno, int access); +extern void _hash_relbuf(Relation rel, Buffer buf, int access); +extern void _hash_wrtbuf(Relation rel, Buffer buf); +extern void _hash_wrtnorelbuf(Relation rel, Buffer buf); +extern Page _hash_chgbufaccess(Relation rel, Buffer *bufp, int from_access, + int to_access); +extern void _hash_pageinit(Page page, Size size); +extern void _hash_pagedel(Relation rel, ItemPointer tid); +extern void _hash_expandtable(Relation rel, Buffer metabuf); + + +/* hashscan.c */ +extern void _hash_regscan(IndexScanDesc scan); +extern void _hash_dropscan(IndexScanDesc scan); +extern void _hash_adjscans(Relation rel, ItemPointer tid); + + +/* hashsearch.c */ +extern void _hash_search(Relation rel, int keysz, ScanKey scankey, + Buffer *bufP, HashMetaPage metap); +extern RetrieveIndexResult _hash_next(IndexScanDesc scan, ScanDirection dir); +extern RetrieveIndexResult _hash_first(IndexScanDesc scan, ScanDirection dir); +extern bool _hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, + Buffer metabuf); + + +/* hashstrat.c */ +extern StrategyNumber _hash_getstrat(Relation rel, AttrNumber attno, + RegProcedure proc); +extern bool _hash_invokestrat(Relation rel, AttrNumber attno, + StrategyNumber strat, Datum left, Datum right); + + +/* hashutil.c */ +extern ScanKey _hash_mkscankey(Relation rel, IndexTuple itup, + HashMetaPage metap); +extern void _hash_freeskey(ScanKey skey); +extern bool _hash_checkqual(IndexScanDesc scan, IndexTuple itup); +extern HashItem 
_hash_formitem(IndexTuple itup); +extern Bucket _hash_call(Relation rel, HashMetaPage metap, Datum key); +extern uint32 _hash_log2(uint32 num); +extern void _hash_checkpage(Page page, int flags); + +#endif /* HASH_H */ diff --git a/src/backend/access/hash/Makefile.inc b/src/backend/access/hash/Makefile.inc new file mode 100644 index 00000000000..8ea221bc264 --- /dev/null +++ b/src/backend/access/hash/Makefile.inc @@ -0,0 +1,18 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for access/hash (hash access method) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/access/hash/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= hash.c hashfunc.c hashinsert.c hashovfl.c hashpage.c hashscan.c \ + hashsearch.c hashstrat.c hashutil.c + + + diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c new file mode 100644 index 00000000000..a4a4e16e599 --- /dev/null +++ b/src/backend/access/hash/hash.c @@ -0,0 +1,467 @@ +/*------------------------------------------------------------------------- + * + * hash.c-- + * Implementation of Margo Seltzer's Hashing package for postgres. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + * NOTES + * This file contains only the public interface routines. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" +#include "access/heapam.h" +#include "access/genam.h" +#include "access/sdir.h" +#include "access/hash.h" +#include "access/funcindex.h" +#include "nodes/execnodes.h" +#include "nodes/plannodes.h" +#include "executor/executor.h" +#include "executor/tuptable.h" +#include "catalog/index.h" + + +bool BuildingHash = false; + +/* + * hashbuild() -- build a new hash index. + * + * We use a global variable to record the fact that we're creating + * a new index. This is used to avoid high-concurrency locking, + * since the index won't be visible until this transaction commits + * and since building is guaranteed to be single-threaded. 
+ */ +void +hashbuild(Relation heap, + Relation index, + int natts, + AttrNumber *attnum, + IndexStrategy istrat, + uint16 pcount, + Datum *params, + FuncIndexInfo *finfo, + PredInfo *predInfo) +{ + HeapScanDesc hscan; + Buffer buffer; + HeapTuple htup; + IndexTuple itup; + TupleDesc htupdesc, itupdesc; + Datum *attdata; + bool *nulls; + InsertIndexResult res; + int nhtups, nitups; + int i; + HashItem hitem; + ExprContext *econtext; + TupleTable tupleTable; + TupleTableSlot *slot; + Oid hrelid, irelid; + Node *pred, *oldPred; + + /* note that this is a new btree */ + BuildingHash = true; + + pred = predInfo->pred; + oldPred = predInfo->oldPred; + + /* initialize the hash index metadata page (if this is a new index) */ + if (oldPred == NULL) + _hash_metapinit(index); + + /* get tuple descriptors for heap and index relations */ + htupdesc = RelationGetTupleDescriptor(heap); + itupdesc = RelationGetTupleDescriptor(index); + + /* get space for data items that'll appear in the index tuple */ + attdata = (Datum *) palloc(natts * sizeof(Datum)); + nulls = (bool *) palloc(natts * sizeof(bool)); + + /* + * If this is a predicate (partial) index, we will need to evaluate the + * predicate using ExecQual, which requires the current tuple to be in a + * slot of a TupleTable. In addition, ExecQual must have an ExprContext + * referring to that slot. Here, we initialize dummy TupleTable and + * ExprContext objects for this purpose. --Nels, Feb '92 + */ +#ifndef OMIT_PARTIAL_INDEX + if (pred != NULL || oldPred != NULL) { + tupleTable = ExecCreateTupleTable(1); + slot = ExecAllocTableSlot(tupleTable); + econtext = makeNode(ExprContext); + FillDummyExprContext(econtext, slot, htupdesc, buffer); + } +#endif /* OMIT_PARTIAL_INDEX */ + + /* start a heap scan */ + hscan = heap_beginscan(heap, 0, NowTimeQual, 0, (ScanKey) NULL); + htup = heap_getnext(hscan, 0, &buffer); + + /* build the index */ + nhtups = nitups = 0; + + for (; HeapTupleIsValid(htup); htup = heap_getnext(hscan, 0, &buffer)) { + + nhtups++; + + /* + * If oldPred != NULL, this is an EXTEND INDEX command, so skip + * this tuple if it was already in the existing partial index + */ + if (oldPred != NULL) { + /*SetSlotContents(slot, htup); */ +#ifndef OMIT_PARTIAL_INDEX + slot->val = htup; + if (ExecQual((List*)oldPred, econtext) == true) { + nitups++; + continue; + } +#endif /* OMIT_PARTIAL_INDEX */ + } + + /* Skip this tuple if it doesn't satisfy the partial-index predicate */ + if (pred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + /*SetSlotContents(slot, htup); */ + slot->val = htup; + if (ExecQual((List*)pred, econtext) == false) + continue; +#endif /* OMIT_PARTIAL_INDEX */ +} + + nitups++; + + /* + * For the current heap tuple, extract all the attributes + * we use in this index, and note which are null. + */ + for (i = 1; i <= natts; i++) { + int attoff; + bool attnull; + + /* + * Offsets are from the start of the tuple, and are + * zero-based; indices are one-based. The next call + * returns i - 1. That's data hiding for you. + */ + + /* attoff = i - 1 */ + attoff = AttrNumberGetAttrOffset(i); + + /* below, attdata[attoff] set to equal some datum & + * attnull is changed to indicate whether or not the attribute + * is null for this tuple + */ + attdata[attoff] = GetIndexValue(htup, + htupdesc, + attoff, + attnum, + finfo, + &attnull, + buffer); + nulls[attoff] = (attnull ? 
'n' : ' '); + } + + /* form an index tuple and point it at the heap tuple */ + itup = index_formtuple(itupdesc, attdata, nulls); + + /* + * If the single index key is null, we don't insert it into + * the index. Hash tables support scans on '='. + * Relational algebra says that A = B + * returns null if either A or B is null. This + * means that no qualification used in an index scan could ever + * return true on a null attribute. It also means that indices + * can't be used by ISNULL or NOTNULL scans, but that's an + * artifact of the strategy map architecture chosen in 1986, not + * of the way nulls are handled here. + */ + + if (itup->t_info & INDEX_NULL_MASK) { + pfree(itup); + continue; + } + + itup->t_tid = htup->t_ctid; + hitem = _hash_formitem(itup); + res = _hash_doinsert(index, hitem); + pfree(hitem); + pfree(itup); + pfree(res); + } + + /* okay, all heap tuples are indexed */ + heap_endscan(hscan); + + if (pred != NULL || oldPred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + ExecDestroyTupleTable(tupleTable, true); + pfree(econtext); +#endif /* OMIT_PARTIAL_INDEX */ + } + + /* + * Since we just counted the tuples in the heap, we update its + * stats in pg_class to guarantee that the planner takes advantage + * of the index we just created. Finally, only update statistics + * during normal index definitions, not for indices on system catalogs + * created during bootstrap processing. We must close the relations + * before updatings statistics to guarantee that the relcache entries + * are flushed when we increment the command counter in UpdateStats(). + */ + if (IsNormalProcessingMode()) + { + hrelid = heap->rd_id; + irelid = index->rd_id; + heap_close(heap); + index_close(index); + UpdateStats(hrelid, nhtups, true); + UpdateStats(irelid, nitups, false); + if (oldPred != NULL) { + if (nitups == nhtups) pred = NULL; + UpdateIndexPredicate(irelid, oldPred, pred); + } + } + + /* be tidy */ + pfree(nulls); + pfree(attdata); + + /* all done */ + BuildingHash = false; +} + +/* + * hashinsert() -- insert an index tuple into a hash table. + * + * Hash on the index tuple's key, find the appropriate location + * for the new tuple, put it there, and return an InsertIndexResult + * to the caller. + */ +InsertIndexResult +hashinsert(Relation rel, IndexTuple itup) +{ + HashItem hitem; + InsertIndexResult res; + + if (itup->t_info & INDEX_NULL_MASK) + return ((InsertIndexResult) NULL); + + hitem = _hash_formitem(itup); + + res = _hash_doinsert(rel, hitem); + + pfree(hitem); + + return (res); +} + + +/* + * hashgettuple() -- Get the next tuple in the scan. + */ +char * +hashgettuple(IndexScanDesc scan, ScanDirection dir) +{ + RetrieveIndexResult res; + + /* + * If we've already initialized this scan, we can just advance it + * in the appropriate direction. If we haven't done so yet, we + * call a routine to get the first item in the scan. 
+ */ + + if (ItemPointerIsValid(&(scan->currentItemData))) + res = _hash_next(scan, dir); + else + res = _hash_first(scan, dir); + + return ((char *) res); +} + + +/* + * hashbeginscan() -- start a scan on a hash index + */ +char * +hashbeginscan(Relation rel, + bool fromEnd, + uint16 keysz, + ScanKey scankey) +{ + IndexScanDesc scan; + HashScanOpaque so; + + scan = RelationGetIndexScan(rel, fromEnd, keysz, scankey); + so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData)); + so->hashso_curbuf = so->hashso_mrkbuf = InvalidBuffer; + scan->opaque = so; + scan->flags = 0x0; + + /* register scan in case we change pages it's using */ + _hash_regscan(scan); + + return ((char *) scan); +} + +/* + * hashrescan() -- rescan an index relation + */ +void +hashrescan(IndexScanDesc scan, bool fromEnd, ScanKey scankey) +{ + ItemPointer iptr; + HashScanOpaque so; + + so = (HashScanOpaque) scan->opaque; + + /* we hold a read lock on the current page in the scan */ + if (ItemPointerIsValid(iptr = &(scan->currentItemData))) { + _hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ); + so->hashso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) { + _hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ); + so->hashso_mrkbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + /* reset the scan key */ + if (scan->numberOfKeys > 0) { + memmove(scan->keyData, + scankey, + scan->numberOfKeys * sizeof(ScanKeyData)); + } +} + +/* + * hashendscan() -- close down a scan + */ +void +hashendscan(IndexScanDesc scan) +{ + + ItemPointer iptr; + HashScanOpaque so; + + so = (HashScanOpaque) scan->opaque; + + /* release any locks we still hold */ + if (ItemPointerIsValid(iptr = &(scan->currentItemData))) { + _hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ); + so->hashso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) { + if (BufferIsValid(so->hashso_mrkbuf)) + _hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ); + so->hashso_mrkbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + /* don't need scan registered anymore */ + _hash_dropscan(scan); + + /* be tidy */ +#ifdef PERFECT_MMGR + pfree (scan->opaque); +#endif /* PERFECT_MMGR */ +} + +/* + * hashmarkpos() -- save current scan position + * + */ +void +hashmarkpos(IndexScanDesc scan) +{ + ItemPointer iptr; + HashScanOpaque so; + + /* see if we ever call this code. if we do, then so_mrkbuf a + * useful element in the scan->opaque structure. if this procedure + * is never called, so_mrkbuf should be removed from the scan->opaque + * structure. + */ + elog(NOTICE, "Hashmarkpos() called."); + + so = (HashScanOpaque) scan->opaque; + + /* release lock on old marked data, if any */ + if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) { + _hash_relbuf(scan->relation, so->hashso_mrkbuf, HASH_READ); + so->hashso_mrkbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + /* bump lock on currentItemData and copy to currentMarkData */ + if (ItemPointerIsValid(&(scan->currentItemData))) { + so->hashso_mrkbuf = _hash_getbuf(scan->relation, + BufferGetBlockNumber(so->hashso_curbuf), + HASH_READ); + scan->currentMarkData = scan->currentItemData; + } +} + +/* + * hashrestrpos() -- restore scan to last saved position + */ +void +hashrestrpos(IndexScanDesc scan) +{ + ItemPointer iptr; + HashScanOpaque so; + + /* see if we ever call this code. 
if we do, then so_mrkbuf a + * useful element in the scan->opaque structure. if this procedure + * is never called, so_mrkbuf should be removed from the scan->opaque + * structure. + */ + elog(NOTICE, "Hashrestrpos() called."); + + so = (HashScanOpaque) scan->opaque; + + /* release lock on current data, if any */ + if (ItemPointerIsValid(iptr = &(scan->currentItemData))) { + _hash_relbuf(scan->relation, so->hashso_curbuf, HASH_READ); + so->hashso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + /* bump lock on currentMarkData and copy to currentItemData */ + if (ItemPointerIsValid(&(scan->currentMarkData))) { + so->hashso_curbuf = + _hash_getbuf(scan->relation, + BufferGetBlockNumber(so->hashso_mrkbuf), + HASH_READ); + + scan->currentItemData = scan->currentMarkData; + } +} + +/* stubs */ +void +hashdelete(Relation rel, ItemPointer tid) +{ + /* adjust any active scans that will be affected by this deletion */ + _hash_adjscans(rel, tid); + + /* delete the data from the page */ + _hash_pagedel(rel, tid); +} + diff --git a/src/backend/access/hash/hashfunc.c b/src/backend/access/hash/hashfunc.c new file mode 100644 index 00000000000..6b37de29911 --- /dev/null +++ b/src/backend/access/hash/hashfunc.c @@ -0,0 +1,276 @@ +/*------------------------------------------------------------------------- + * + * hashfunc.c-- + * Comparison functions for hash access method. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/hash/hashfunc.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + * NOTES + * These functions are stored in pg_amproc. For each operator class + * defined on hash tables, they compute the hash value of the argument. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "utils/nabstime.h" + +uint32 hashint2(int16 key) +{ + return ((uint32) ~key); +} + +uint32 hashint4(uint32 key) +{ + return (~key); +} + +/* Hash function from Chris Torek. 
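/*
 * Sketch of the byte-at-a-time scheme used by hashfloat4/hashfloat8 below:
 * HASH4b folds each key byte in as h = h*33 + byte, and the switch/do-while
 * is only a hand-unrolled form of that loop (8 bytes per pass).  A plain
 * loop over the bytes of a 4-byte float gives the same kind of result.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t
hash_bytes33(const char *kp, int len)
{
    uint32_t h = 0;

    while (len-- > 0)
        h = (h << 5) + h + *kp++;       /* h = h*33 + next key byte */
    return h;
}

int
main(void)
{
    float key = 3.14f;
    char buf[sizeof(float)];

    memcpy(buf, &key, sizeof(float));   /* hash the raw representation */
    printf("hash = %u\n", (unsigned) hash_bytes33(buf, (int) sizeof(float)));
    return 0;
}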
*/ +uint32 hashfloat4(float32 keyp) +{ + int len; + int loop; + uint32 h; + char *kp = (char *) keyp; + + len = sizeof(float32data); + +#define HASH4a h = (h << 5) - h + *kp++; +#define HASH4b h = (h << 5) + h + *kp++; +#define HASH4 HASH4b + + + h = 0; + if (len > 0) { + loop = (len + 8 - 1) >> 3; + + switch (len & (8 - 1)) { + case 0: + do { /* All fall throughs */ + HASH4; + case 7: + HASH4; + case 6: + HASH4; + case 5: + HASH4; + case 4: + HASH4; + case 3: + HASH4; + case 2: + HASH4; + case 1: + HASH4; + } while (--loop); + } + } + return (h); +} + + +uint32 hashfloat8(float64 keyp) +{ + int len; + int loop; + uint32 h; + char *kp = (char *) keyp; + + len = sizeof(float64data); + +#define HASH4a h = (h << 5) - h + *kp++; +#define HASH4b h = (h << 5) + h + *kp++; +#define HASH4 HASH4b + + + h = 0; + if (len > 0) { + loop = (len + 8 - 1) >> 3; + + switch (len & (8 - 1)) { + case 0: + do { /* All fall throughs */ + HASH4; + case 7: + HASH4; + case 6: + HASH4; + case 5: + HASH4; + case 4: + HASH4; + case 3: + HASH4; + case 2: + HASH4; + case 1: + HASH4; + } while (--loop); + } + } + return (h); +} + + +uint32 hashoid(Oid key) +{ + return ((uint32) ~key); +} + + +uint32 hashchar(char key) +{ + int len; + uint32 h; + + len = sizeof(char); + +#define PRIME1 37 +#define PRIME2 1048583 + + h = 0; + /* Convert char to integer */ + h = h * PRIME1 ^ (key - ' '); + h %= PRIME2; + + return (h); +} + +uint32 hashchar2(uint16 intkey) +{ + uint32 h; + int len; + char *key = (char *) &intkey; + + h = 0; + len = sizeof(uint16); + /* Convert string to integer */ + while (len--) + h = h * PRIME1 ^ (*key++ - ' '); + h %= PRIME2; + + return (h); +} + +uint32 hashchar4(uint32 intkey) +{ + uint32 h; + int len; + char *key = (char *) &intkey; + + h = 0; + len = sizeof(uint32); + /* Convert string to integer */ + while (len--) + h = h * PRIME1 ^ (*key++ - ' '); + h %= PRIME2; + + return (h); +} + +uint32 hashchar8(char *key) +{ + uint32 h; + int len; + + h = 0; + len = sizeof(char8); + /* Convert string to integer */ + while (len--) + h = h * PRIME1 ^ (*key++ - ' '); + h %= PRIME2; + + return (h); +} + +uint32 hashname(NameData *n) +{ + uint32 h; + int len; + char *key; + + key = n->data; + + h = 0; + len = NAMEDATALEN; + /* Convert string to integer */ + while (len--) + h = h * PRIME1 ^ (*key++ - ' '); + h %= PRIME2; + + return (h); +} + + +uint32 hashchar16(char *key) +{ + uint32 h; + int len; + + h = 0; + len = sizeof(char16); + /* Convert string to integer */ + while (len--) + h = h * PRIME1 ^ (*key++ - ' '); + h %= PRIME2; + + return (h); +} + + +/* + * (Comment from the original db3 hashing code: ) + * + * "This is INCREDIBLY ugly, but fast. We break the string up into 8 byte + * units. On the first time through the loop we get the 'leftover bytes' + * (strlen % 8). On every other iteration, we perform 8 HASHC's so we handle + * all 8 bytes. Essentially, this saves us 7 cmp & branch instructions. If + * this routine is heavily used enough, it's worth the ugly coding. 
+ * + * "OZ's original sdbm hash" + */ +uint32 hashtext(struct varlena *key) +{ + int keylen; + char *keydata; + uint32 n; + int loop; + + keydata = VARDATA(key); + keylen = VARSIZE(key); + + /* keylen includes the four bytes in which string keylength is stored */ + keylen -= sizeof(VARSIZE(key)); + +#define HASHC n = *keydata++ + 65599 * n + + n = 0; + if (keylen > 0) { + loop = (keylen + 8 - 1) >> 3; + + switch (keylen & (8 - 1)) { + case 0: + do { /* All fall throughs */ + HASHC; + case 7: + HASHC; + case 6: + HASHC; + case 5: + HASHC; + case 4: + HASHC; + case 3: + HASHC; + case 2: + HASHC; + case 1: + HASHC; + } while (--loop); + } + } + return (n); +} diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c new file mode 100644 index 00000000000..c514cc614d8 --- /dev/null +++ b/src/backend/access/hash/hashinsert.c @@ -0,0 +1,239 @@ +/*------------------------------------------------------------------------- + * + * hashinsert.c-- + * Item insertion in hash tables for Postgres. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/hash.h" + +static InsertIndexResult _hash_insertonpg(Relation rel, Buffer buf, int keysz, ScanKey scankey, HashItem hitem, Buffer metabuf); +static OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf, int keysz, ScanKey itup_scankey, Size itemsize, HashItem hitem); + +/* + * _hash_doinsert() -- Handle insertion of a single HashItem in the table. + * + * This routine is called by the public interface routines, hashbuild + * and hashinsert. By here, hashitem is filled in, and has a unique + * (xid, seqno) pair. The datum to be used as a "key" is in the + * hashitem. + */ +InsertIndexResult +_hash_doinsert(Relation rel, HashItem hitem) +{ + Buffer buf; + Buffer metabuf; + BlockNumber blkno; + HashMetaPage metap; + IndexTuple itup; + InsertIndexResult res; + ScanKey itup_scankey; + int natts; + Page page; + + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ); + metap = (HashMetaPage) BufferGetPage(metabuf); + _hash_checkpage((Page) metap, LH_META_PAGE); + + /* we need a scan key to do our search, so build one */ + itup = &(hitem->hash_itup); + if ((natts = rel->rd_rel->relnatts) != 1) + elog(WARN, "Hash indices valid for only one index key."); + itup_scankey = _hash_mkscankey(rel, itup, metap); + + /* + * find the first page in the bucket chain containing this key and + * place it in buf. _hash_search obtains a read lock for us. + */ + _hash_search(rel, natts, itup_scankey, &buf, metap); + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE); + + /* + * trade in our read lock for a write lock so that we can do the + * insertion. + */ + blkno = BufferGetBlockNumber(buf); + _hash_relbuf(rel, buf, HASH_READ); + buf = _hash_getbuf(rel, blkno, HASH_WRITE); + + + /* + * XXX btree comment (haven't decided what to do in hash): don't + * think the bucket can be split while we're reading the metapage. 
+ * + * If the page was split between the time that we surrendered our + * read lock and acquired our write lock, then this page may no + * longer be the right place for the key we want to insert. + */ + + /* do the insertion */ + res = _hash_insertonpg(rel, buf, natts, itup_scankey, + hitem, metabuf); + + /* be tidy */ + _hash_freeskey(itup_scankey); + + return (res); +} + +/* + * _hash_insertonpg() -- Insert a tuple on a particular page in the table. + * + * This recursive procedure does the following things: + * + * + if necessary, splits the target page. + * + inserts the tuple. + * + * On entry, we must have the right buffer on which to do the + * insertion, and the buffer must be pinned and locked. On return, + * we will have dropped both the pin and the write lock on the buffer. + * + */ +static InsertIndexResult +_hash_insertonpg(Relation rel, + Buffer buf, + int keysz, + ScanKey scankey, + HashItem hitem, + Buffer metabuf) +{ + InsertIndexResult res; + Page page; + BlockNumber itup_blkno; + OffsetNumber itup_off; + int itemsz; + HashPageOpaque pageopaque; + bool do_expand = false; + Buffer ovflbuf; + HashMetaPage metap; + Bucket bucket; + + metap = (HashMetaPage) BufferGetPage(metabuf); + _hash_checkpage((Page) metap, LH_META_PAGE); + + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); + bucket = pageopaque->hasho_bucket; + + itemsz = IndexTupleDSize(hitem->hash_itup) + + (sizeof(HashItemData) - sizeof(IndexTupleData)); + itemsz = DOUBLEALIGN(itemsz); + + while (PageGetFreeSpace(page) < itemsz) { + /* + * no space on this page; check for an overflow page + */ + if (BlockNumberIsValid(pageopaque->hasho_nextblkno)) { + /* + * ovfl page exists; go get it. if it doesn't have room, + * we'll find out next pass through the loop test above. + */ + ovflbuf = _hash_getbuf(rel, pageopaque->hasho_nextblkno, + HASH_WRITE); + _hash_relbuf(rel, buf, HASH_WRITE); + buf = ovflbuf; + page = BufferGetPage(buf); + } else { + /* + * we're at the end of the bucket chain and we haven't + * found a page with enough room. allocate a new overflow + * page. + */ + do_expand = true; + ovflbuf = _hash_addovflpage(rel, &metabuf, buf); + _hash_relbuf(rel, buf, HASH_WRITE); + buf = ovflbuf; + page = BufferGetPage(buf); + + if (PageGetFreeSpace(page) < itemsz) { + /* it doesn't fit on an empty page -- give up */ + elog(WARN, "hash item too large"); + } + } + _hash_checkpage(page, LH_OVERFLOW_PAGE); + pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); + Assert(pageopaque->hasho_bucket == bucket); + } + + itup_off = _hash_pgaddtup(rel, buf, keysz, scankey, itemsz, hitem); + itup_blkno = BufferGetBlockNumber(buf); + + /* by here, the new tuple is inserted */ + res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData)); + + ItemPointerSet(&(res->pointerData), itup_blkno, itup_off); + + if (res != NULL) { + /* + * Increment the number of keys in the table. + * We switch lock access type just for a moment + * to allow greater accessibility to the metapage. 
+ */ + metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, + HASH_READ, HASH_WRITE); + metap->hashm_nkeys += 1; + metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, + HASH_WRITE, HASH_READ); + + } + + _hash_wrtbuf(rel, buf); + + if (do_expand || + (metap->hashm_nkeys / (metap->hashm_maxbucket + 1)) + > metap->hashm_ffactor) { + _hash_expandtable(rel, metabuf); + } + _hash_relbuf(rel, metabuf, HASH_READ); + return (res); +} + +/* + * _hash_pgaddtup() -- add a tuple to a particular page in the index. + * + * This routine adds the tuple to the page as requested, and keeps the + * write lock and reference associated with the page's buffer. It is + * an error to call pgaddtup() without a write lock and reference. + */ +static OffsetNumber +_hash_pgaddtup(Relation rel, + Buffer buf, + int keysz, + ScanKey itup_scankey, + Size itemsize, + HashItem hitem) +{ + OffsetNumber itup_off; + Page page; + + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + + itup_off = OffsetNumberNext(PageGetMaxOffsetNumber(page)); + (void) PageAddItem(page, (Item) hitem, itemsize, itup_off, LP_USED); + + /* write the buffer, but hold our lock */ + _hash_wrtnorelbuf(rel, buf); + + return (itup_off); +} diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c new file mode 100644 index 00000000000..55ee9e9ce79 --- /dev/null +++ b/src/backend/access/hash/hashovfl.c @@ -0,0 +1,614 @@ +/*------------------------------------------------------------------------- + * + * hashovfl.c-- + * Overflow page management code for the Postgres hash access method + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + * NOTES + * Overflow pages look like ordinary relation pages. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/genam.h" +#include "access/hash.h" + +static OverflowPageAddress _hash_getovfladdr(Relation rel, Buffer *metabufp); +static uint32 _hash_firstfreebit(uint32 map); + +/* + * _hash_addovflpage + * + * Add an overflow page to the page currently pointed to by the buffer + * argument 'buf'. + * + * *Metabufp has a read lock upon entering the function; buf has a + * write lock. 
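/*
 * Sketch of the split test in _hash_insertonpg above: the table is expanded
 * when the average number of keys per bucket exceeds hashm_ffactor (integer
 * division; DEFAULT_FFACTOR is 300), or when this insertion had to add an
 * overflow page (do_expand).
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool
needs_expansion(uint32_t nkeys, uint32_t maxbucket, uint16_t ffactor,
                bool added_overflow_page)
{
    /* maxbucket is the highest bucket number, so the bucket count is +1 */
    return added_overflow_page || (nkeys / (maxbucket + 1)) > ffactor;
}

int
main(void)
{
    /* 2 buckets (maxbucket = 1), default fill factor of 300 */
    printf("%d\n", needs_expansion(600, 1, 300, false));   /* 0: 300 <= 300 */
    printf("%d\n", needs_expansion(602, 1, 300, false));   /* 1: 301 >  300 */
    return 0;
}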
+ * + */ +Buffer +_hash_addovflpage(Relation rel, Buffer *metabufp, Buffer buf) +{ + + OverflowPageAddress oaddr; + BlockNumber ovflblkno; + Buffer ovflbuf; + HashMetaPage metap; + HashPageOpaque ovflopaque; + HashPageOpaque pageopaque; + Page page; + Page ovflpage; + + /* this had better be the last page in a bucket chain */ + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + pageopaque = (HashPageOpaque) PageGetSpecialPointer(page); + Assert(!BlockNumberIsValid(pageopaque->hasho_nextblkno)); + + metap = (HashMetaPage) BufferGetPage(*metabufp); + _hash_checkpage((Page) metap, LH_META_PAGE); + + /* allocate an empty overflow page */ + oaddr = _hash_getovfladdr(rel, metabufp); + if (oaddr == InvalidOvflAddress) { + elog(WARN, "_hash_addovflpage: problem with _hash_getovfladdr."); + } + ovflblkno = OADDR_TO_BLKNO(OADDR_OF(SPLITNUM(oaddr), OPAGENUM(oaddr))); + Assert(BlockNumberIsValid(ovflblkno)); + ovflbuf = _hash_getbuf(rel, ovflblkno, HASH_WRITE); + Assert(BufferIsValid(ovflbuf)); + ovflpage = BufferGetPage(ovflbuf); + + /* initialize the new overflow page */ + _hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf)); + ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage); + ovflopaque->hasho_prevblkno = BufferGetBlockNumber(buf); + ovflopaque->hasho_nextblkno = InvalidBlockNumber; + ovflopaque->hasho_flag = LH_OVERFLOW_PAGE; + ovflopaque->hasho_oaddr = oaddr; + ovflopaque->hasho_bucket = pageopaque->hasho_bucket; + _hash_wrtnorelbuf(rel, ovflbuf); + + /* logically chain overflow page to previous page */ + pageopaque->hasho_nextblkno = ovflblkno; + _hash_wrtnorelbuf(rel, buf); + return (ovflbuf); +} + +/* + * _hash_getovfladdr() + * + * Find an available overflow page and return its address. + * + * When we enter this function, we have a read lock on *metabufp which + * we change to a write lock immediately. Before exiting, the write lock + * is exchanged for a read lock. 
+ * + */ +static OverflowPageAddress +_hash_getovfladdr(Relation rel, Buffer *metabufp) +{ + HashMetaPage metap; + Buffer mapbuf; + BlockNumber blkno; + PageOffset offset; + OverflowPageAddress oaddr; + SplitNumber splitnum; + uint32 *freep; + uint32 max_free; + uint32 bit; + uint32 first_page; + uint32 free_bit; + uint32 free_page; + uint32 in_use_bits; + uint32 i, j; + + metap = (HashMetaPage) _hash_chgbufaccess(rel, metabufp, HASH_READ, HASH_WRITE); + + splitnum = metap->OVFL_POINT; + max_free = metap->SPARES[splitnum]; + + free_page = (max_free - 1) >> (metap->BSHIFT + BYTE_TO_BIT); + free_bit = (max_free - 1) & (BMPGSZ_BIT(metap) - 1); + + /* Look through all the free maps to find the first free block */ + first_page = metap->LAST_FREED >> (metap->BSHIFT + BYTE_TO_BIT); + for ( i = first_page; i <= free_page; i++ ) { + Page mappage; + + blkno = metap->hashm_mapp[i]; + mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE); + mappage = BufferGetPage(mapbuf); + _hash_checkpage(mappage, LH_BITMAP_PAGE); + freep = HashPageGetBitmap(mappage); + Assert(freep); + + if (i == free_page) + in_use_bits = free_bit; + else + in_use_bits = BMPGSZ_BIT(metap) - 1; + + if (i == first_page) { + bit = metap->LAST_FREED & (BMPGSZ_BIT(metap) - 1); + j = bit / BITS_PER_MAP; + bit = bit & ~(BITS_PER_MAP - 1); + } else { + bit = 0; + j = 0; + } + for (; bit <= in_use_bits; j++, bit += BITS_PER_MAP) + if (freep[j] != ALL_SET) + goto found; + } + + /* No Free Page Found - have to allocate a new page */ + metap->LAST_FREED = metap->SPARES[splitnum]; + metap->SPARES[splitnum]++; + offset = metap->SPARES[splitnum] - + (splitnum ? metap->SPARES[splitnum - 1] : 0); + +#define OVMSG "HASH: Out of overflow pages. Out of luck.\n" + + if (offset > SPLITMASK) { + if (++splitnum >= NCACHED) { + elog(WARN, OVMSG); + } + metap->OVFL_POINT = splitnum; + metap->SPARES[splitnum] = metap->SPARES[splitnum-1]; + metap->SPARES[splitnum-1]--; + offset = 0; + } + + /* Check if we need to allocate a new bitmap page */ + if (free_bit == BMPGSZ_BIT(metap) - 1) { + /* won't be needing old map page */ + + _hash_relbuf(rel, mapbuf, HASH_WRITE); + + free_page++; + if (free_page >= NCACHED) { + elog(WARN, OVMSG); + } + + /* + * This is tricky. The 1 indicates that you want the new page + * allocated with 1 clear bit. Actually, you are going to + * allocate 2 pages from this map. The first is going to be + * the map page, the second is the overflow page we were + * looking for. The init_bitmap routine automatically, sets + * the first bit of itself to indicate that the bitmap itself + * is in use. We would explicitly set the second bit, but + * don't have to if we tell init_bitmap not to leave it clear + * in the first place. + */ + if (_hash_initbitmap(rel, metap, OADDR_OF(splitnum, offset), + 1, free_page)) { + elog(WARN, "overflow_page: problem with _hash_initbitmap."); + } + metap->SPARES[splitnum]++; + offset++; + if (offset > SPLITMASK) { + if (++splitnum >= NCACHED) { + elog(WARN, OVMSG); + } + metap->OVFL_POINT = splitnum; + metap->SPARES[splitnum] = metap->SPARES[splitnum-1]; + metap->SPARES[splitnum-1]--; + offset = 0; + } + } else { + + /* + * Free_bit addresses the last used bit. Bump it to address + * the first available bit. 
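/*
 * Sketch of the "find a free overflow slot" step used above: each word of
 * the bitmap is compared against ALL_SET, and the first word with a zero
 * bit is handed to _hash_firstfreebit() (defined later in this file) to
 * locate that bit.  Standalone version, uint32_t standing in for uint32:
 */
#include <stdio.h>
#include <stdint.h>

#define BITS_PER_MAP 32

static uint32_t
first_free_bit(uint32_t map)
{
    uint32_t i, mask = 0x1;

    for (i = 0; i < BITS_PER_MAP; i++) {
        if (!(mask & map))
            return i;                   /* lowest clear bit */
        mask <<= 1;
    }
    return i;                           /* no clear bit: all 32 slots in use */
}

int
main(void)
{
    printf("%u\n", (unsigned) first_free_bit(0x00000000));  /* 0  */
    printf("%u\n", (unsigned) first_free_bit(0x0000000f));  /* 4  */
    printf("%u\n", (unsigned) first_free_bit(0xffffffff));  /* 32 */
    return 0;
}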
+ */ + free_bit++; + SETBIT(freep, free_bit); + _hash_wrtbuf(rel, mapbuf); + } + + /* Calculate address of the new overflow page */ + oaddr = OADDR_OF(splitnum, offset); + _hash_chgbufaccess(rel, metabufp, HASH_WRITE, HASH_READ); + return (oaddr); + + found: + bit = bit + _hash_firstfreebit(freep[j]); + SETBIT(freep, bit); + _hash_wrtbuf(rel, mapbuf); + + /* + * Bits are addressed starting with 0, but overflow pages are addressed + * beginning at 1. Bit is a bit addressnumber, so we need to increment + * it to convert it to a page number. + */ + + bit = 1 + bit + (i * BMPGSZ_BIT(metap)); + if (bit >= metap->LAST_FREED) { + metap->LAST_FREED = bit - 1; + } + + /* Calculate the split number for this page */ + for (i = 0; (i < splitnum) && (bit > metap->SPARES[i]); i++) + ; + offset = (i ? bit - metap->SPARES[i - 1] : bit); + if (offset >= SPLITMASK) { + elog(WARN, OVMSG); + } + + /* initialize this page */ + oaddr = OADDR_OF(i, offset); + _hash_chgbufaccess(rel, metabufp, HASH_WRITE, HASH_READ); + return (oaddr); +} + +/* + * _hash_firstfreebit() + * + * Return the first bit that is not set in the argument 'map'. This + * function is used to find an available overflow page within a + * splitnumber. + * + */ +static uint32 +_hash_firstfreebit(uint32 map) +{ + uint32 i, mask; + + mask = 0x1; + for (i = 0; i < BITS_PER_MAP; i++) { + if (!(mask & map)) + return (i); + mask = mask << 1; + } + return (i); +} + +/* + * _hash_freeovflpage() - + * + * Mark this overflow page as free and return a buffer with + * the page that follows it (which may be defined as + * InvalidBuffer). + * + */ +Buffer +_hash_freeovflpage(Relation rel, Buffer ovflbuf) +{ + HashMetaPage metap; + Buffer metabuf; + Buffer mapbuf; + BlockNumber prevblkno; + BlockNumber blkno; + BlockNumber nextblkno; + HashPageOpaque ovflopaque; + Page ovflpage; + Page mappage; + OverflowPageAddress addr; + SplitNumber splitnum; + uint32 *freep; + uint32 ovflpgno; + int32 bitmappage, bitmapbit; + Bucket bucket; + + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE); + metap = (HashMetaPage) BufferGetPage(metabuf); + _hash_checkpage((Page) metap, LH_META_PAGE); + + ovflpage = BufferGetPage(ovflbuf); + _hash_checkpage(ovflpage, LH_OVERFLOW_PAGE); + ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage); + addr = ovflopaque->hasho_oaddr; + nextblkno = ovflopaque->hasho_nextblkno; + prevblkno = ovflopaque->hasho_prevblkno; + bucket = ovflopaque->hasho_bucket; + (void) memset(ovflpage, 0, BufferGetPageSize(ovflbuf)); + _hash_wrtbuf(rel, ovflbuf); + + /* + * fix up the bucket chain. this is a doubly-linked list, so we + * must fix up the bucket chain members behind and ahead of the + * overflow page being deleted. + * + * XXX this should look like: + * - lock prev/next + * - modify/write prev/next (how to do write ordering with a + * doubly-linked list???) 
+ * - unlock prev/next + */ + if (BlockNumberIsValid(prevblkno)) { + Buffer prevbuf = _hash_getbuf(rel, prevblkno, HASH_WRITE); + Page prevpage = BufferGetPage(prevbuf); + HashPageOpaque prevopaque = + (HashPageOpaque) PageGetSpecialPointer(prevpage); + + _hash_checkpage(prevpage, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + Assert(prevopaque->hasho_bucket == bucket); + prevopaque->hasho_nextblkno = nextblkno; + _hash_wrtbuf(rel, prevbuf); + } + if (BlockNumberIsValid(nextblkno)) { + Buffer nextbuf = _hash_getbuf(rel, nextblkno, HASH_WRITE); + Page nextpage = BufferGetPage(nextbuf); + HashPageOpaque nextopaque = + (HashPageOpaque) PageGetSpecialPointer(nextpage); + + _hash_checkpage(nextpage, LH_OVERFLOW_PAGE); + Assert(nextopaque->hasho_bucket == bucket); + nextopaque->hasho_prevblkno = prevblkno; + _hash_wrtbuf(rel, nextbuf); + } + + /* + * Fix up the overflow page bitmap that tracks this particular + * overflow page. The bitmap can be found in the MetaPageData + * array element hashm_mapp[bitmappage]. + */ + splitnum = (addr >> SPLITSHIFT); + ovflpgno = + (splitnum ? metap->SPARES[splitnum - 1] : 0) + (addr & SPLITMASK) - 1; + + if (ovflpgno < metap->LAST_FREED) { + metap->LAST_FREED = ovflpgno; + } + + bitmappage = (ovflpgno >> (metap->BSHIFT + BYTE_TO_BIT)); + bitmapbit = ovflpgno & (BMPGSZ_BIT(metap) - 1); + + blkno = metap->hashm_mapp[bitmappage]; + mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE); + mappage = BufferGetPage(mapbuf); + _hash_checkpage(mappage, LH_BITMAP_PAGE); + freep = HashPageGetBitmap(mappage); + CLRBIT(freep, bitmapbit); + _hash_wrtbuf(rel, mapbuf); + + _hash_relbuf(rel, metabuf, HASH_WRITE); + + /* + * now instantiate the page that replaced this one, + * if it exists, and return that buffer with a write lock. + */ + if (BlockNumberIsValid(nextblkno)) { + return (_hash_getbuf(rel, nextblkno, HASH_WRITE)); + } else { + return (InvalidBuffer); + } +} + + +/* + * _hash_initbitmap() + * + * Initialize a new bitmap page. The metapage has a write-lock upon + * entering the function. + * + * 'pnum' is the OverflowPageAddress of the new bitmap page. + * 'nbits' is how many bits to clear (i.e., make available) in the new + * bitmap page. the remainder of the bits (as well as the first bit, + * representing the bitmap page itself) will be set. + * 'ndx' is the 0-based offset of the new bitmap page within the + * metapage's array of bitmap page OverflowPageAddresses. 
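/*
 * Sketch of the bit layout _hash_initbitmap() (below) produces for a new
 * bitmap page, with a bare uint32_t array in place of the page: the low
 * 'nbits' bits start out clear (available), everything above them is set,
 * and bit 0 is then set to account for the bitmap page itself.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define NWORDS   4                      /* stand-in for BMPGSZ_BYTE/4 */
#define ALL_SET  ((uint32_t) ~0)

static void
init_bitmap_words(uint32_t *freep, int nbits)
{
    int clearints = ((nbits - 1) >> 5) + 1;     /* words to zero out first */

    memset(freep, 0, clearints * sizeof(uint32_t));
    memset(freep + clearints, 0xFF, (NWORDS - clearints) * sizeof(uint32_t));
    freep[clearints - 1] = ALL_SET << (nbits & 31);
    freep[0] |= 1;                              /* bit 0: the bitmap page itself */
}

int
main(void)
{
    uint32_t freep[NWORDS];

    /* nbits = 2, as in the initial call from _hash_metapinit(): only bit 1,
     * i.e. a single overflow page, is left available. */
    init_bitmap_words(freep, 2);
    printf("word 0 = 0x%08x\n", (unsigned) freep[0]);       /* 0xfffffffd */
    return 0;
}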
+ */ + +#define INT_MASK ((1 << INT_TO_BIT) -1) + +int32 +_hash_initbitmap(Relation rel, + HashMetaPage metap, + int32 pnum, + int32 nbits, + int32 ndx) +{ + Buffer buf; + BlockNumber blkno; + Page pg; + HashPageOpaque op; + uint32 *freep; + int clearbytes, clearints; + + blkno = OADDR_TO_BLKNO(pnum); + buf = _hash_getbuf(rel, blkno, HASH_WRITE); + pg = BufferGetPage(buf); + _hash_pageinit(pg, BufferGetPageSize(buf)); + op = (HashPageOpaque) PageGetSpecialPointer(pg); + op->hasho_oaddr = InvalidOvflAddress; + op->hasho_prevblkno = InvalidBlockNumber; + op->hasho_nextblkno = InvalidBlockNumber; + op->hasho_flag = LH_BITMAP_PAGE; + op->hasho_bucket = -1; + + freep = HashPageGetBitmap(pg); + + /* set all of the bits above 'nbits' to 1 */ + clearints = ((nbits - 1) >> INT_TO_BIT) + 1; + clearbytes = clearints << INT_TO_BYTE; + (void) memset((char *) freep, 0, clearbytes); + (void) memset(((char *) freep) + clearbytes, 0xFF, + BMPGSZ_BYTE(metap) - clearbytes); + freep[clearints - 1] = ALL_SET << (nbits & INT_MASK); + + /* bit 0 represents the new bitmap page */ + SETBIT(freep, 0); + + /* metapage already has a write lock */ + metap->hashm_nmaps++; + metap->hashm_mapp[ndx] = blkno; + + /* write out the new bitmap page (releasing its locks) */ + _hash_wrtbuf(rel, buf); + + return (0); +} + + +/* + * _hash_squeezebucket(rel, bucket) + * + * Try to squeeze the tuples onto pages occuring earlier in the + * bucket chain in an attempt to free overflow pages. When we start + * the "squeezing", the page from which we start taking tuples (the + * "read" page) is the last bucket in the bucket chain and the page + * onto which we start squeezing tuples (the "write" page) is the + * first page in the bucket chain. The read page works backward and + * the write page works forward; the procedure terminates when the + * read page and write page are the same page. + */ +void +_hash_squeezebucket(Relation rel, + HashMetaPage metap, + Bucket bucket) +{ + Buffer wbuf; + Buffer rbuf; + BlockNumber wblkno; + BlockNumber rblkno; + Page wpage; + Page rpage; + HashPageOpaque wopaque; + HashPageOpaque ropaque; + OffsetNumber woffnum; + OffsetNumber roffnum; + HashItem hitem; + int itemsz; + +/* elog(DEBUG, "_hash_squeezebucket: squeezing bucket %d", bucket); */ + + /* + * start squeezing into the base bucket page. + */ + wblkno = BUCKET_TO_BLKNO(bucket); + wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE); + wpage = BufferGetPage(wbuf); + _hash_checkpage(wpage, LH_BUCKET_PAGE); + wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage); + + /* + * if there aren't any overflow pages, there's nothing to squeeze. + */ + if (!BlockNumberIsValid(wopaque->hasho_nextblkno)) { + _hash_relbuf(rel, wbuf, HASH_WRITE); + return; + } + + /* + * find the last page in the bucket chain by starting at the base + * bucket page and working forward. + * + * XXX if chains tend to be long, we should probably move forward + * using HASH_READ and then _hash_chgbufaccess to HASH_WRITE when + * we reach the end. if they are short we probably don't care + * very much. if the hash function is working at all, they had + * better be short.. 
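/*
 * Sketch of the squeeze procedure described above, with the bucket chain
 * reduced to an array of per-page tuple counts and one fixed capacity:
 * tuples are moved from the "read" page (scanning backward from the tail
 * of the chain) onto the "write" page (scanning forward from its head),
 * and the pass stops when the two meet.  Pages left empty at the tail are
 * the ones _hash_freeovflpage() would then reclaim.
 */
#include <stdio.h>

#define CHAINLEN 5
#define PAGECAP  4

int
main(void)
{
    int items[CHAINLEN] = {1, 2, 0, 3, 2};      /* tuples on each page */
    int w = 0;                                  /* write page: chain head */
    int r = CHAINLEN - 1;                       /* read page: chain tail */
    int i;

    while (w < r) {
        int room = PAGECAP - items[w];
        int move = items[r] < room ? items[r] : room;

        items[w] += move;
        items[r] -= move;
        if (items[w] == PAGECAP)
            w++;                                /* write page full: advance */
        else
            r--;                                /* read page drained: back up */
    }

    for (i = 0; i < CHAINLEN; i++)
        printf("page %d: %d tuples\n", i, items[i]);
    return 0;
}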
+ */ + ropaque = wopaque; + do { + rblkno = ropaque->hasho_nextblkno; + if (ropaque != wopaque) { + _hash_relbuf(rel, rbuf, HASH_WRITE); + } + rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE); + rpage = BufferGetPage(rbuf); + _hash_checkpage(rpage, LH_OVERFLOW_PAGE); + Assert(!PageIsEmpty(rpage)); + ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage); + Assert(ropaque->hasho_bucket == bucket); + } while (BlockNumberIsValid(ropaque->hasho_nextblkno)); + + /* + * squeeze the tuples. + */ + roffnum = FirstOffsetNumber; + for(;;) { + hitem = (HashItem) PageGetItem(rpage, PageGetItemId(rpage, roffnum)); + itemsz = IndexTupleDSize(hitem->hash_itup) + + (sizeof(HashItemData) - sizeof(IndexTupleData)); + itemsz = DOUBLEALIGN(itemsz); + + /* + * walk up the bucket chain, looking for a page big enough for + * this item. + */ + while (PageGetFreeSpace(wpage) < itemsz) { + wblkno = wopaque->hasho_nextblkno; + + _hash_wrtbuf(rel, wbuf); + + if (!BlockNumberIsValid(wblkno) || (rblkno == wblkno)) { + _hash_wrtbuf(rel, rbuf); + /* wbuf is already released */ + return; + } + + wbuf = _hash_getbuf(rel, wblkno, HASH_WRITE); + wpage = BufferGetPage(wbuf); + _hash_checkpage(wpage, LH_OVERFLOW_PAGE); + Assert(!PageIsEmpty(wpage)); + wopaque = (HashPageOpaque) PageGetSpecialPointer(wpage); + Assert(wopaque->hasho_bucket == bucket); + } + + /* + * if we're here, we have found room so insert on the "write" + * page. + */ + woffnum = OffsetNumberNext(PageGetMaxOffsetNumber(wpage)); + (void) PageAddItem(wpage, (Item) hitem, itemsz, woffnum, LP_USED); + + /* + * delete the tuple from the "read" page. + * PageIndexTupleDelete repacks the ItemId array, so 'roffnum' + * will be "advanced" to the "next" ItemId. + */ + PageIndexTupleDelete(rpage, roffnum); + _hash_wrtnorelbuf(rel, rbuf); + + /* + * if the "read" page is now empty because of the deletion, + * free it. + */ + if (PageIsEmpty(rpage) && (ropaque->hasho_flag & LH_OVERFLOW_PAGE)) { + rblkno = ropaque->hasho_prevblkno; + Assert(BlockNumberIsValid(rblkno)); + + /* + * free this overflow page. the extra _hash_relbuf is + * because _hash_freeovflpage gratuitously returns the + * next page (we want the previous page and will get it + * ourselves later). + */ + rbuf = _hash_freeovflpage(rel, rbuf); + if (BufferIsValid(rbuf)) { + _hash_relbuf(rel, rbuf, HASH_WRITE); + } + + if (rblkno == wblkno) { + /* rbuf is already released */ + _hash_wrtbuf(rel, wbuf); + return; + } + + rbuf = _hash_getbuf(rel, rblkno, HASH_WRITE); + rpage = BufferGetPage(rbuf); + _hash_checkpage(rpage, LH_OVERFLOW_PAGE); + Assert(!PageIsEmpty(rpage)); + ropaque = (HashPageOpaque) PageGetSpecialPointer(rpage); + Assert(ropaque->hasho_bucket == bucket); + + roffnum = FirstOffsetNumber; + } + } +} diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c new file mode 100644 index 00000000000..2c6ebed8350 --- /dev/null +++ b/src/backend/access/hash/hashpage.c @@ -0,0 +1,669 @@ +/*------------------------------------------------------------------------- + * + * hashpage.c-- + * Hash table page management code for the Postgres hash access method + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + * NOTES + * Postgres hash pages look like ordinary relation pages. 
The opaque + * data at high addresses includes information about the page including + * whether a page is an overflow page or a true bucket, the block + * numbers of the preceding and following pages, and the overflow + * address of the page if it is an overflow page. + * + * The first page in a hash relation, page zero, is special -- it stores + * information describing the hash table; it is referred to as teh + * "meta page." Pages one and higher store the actual data. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/genam.h" +#include "access/hash.h" + +static void _hash_setpagelock(Relation rel, BlockNumber blkno, int access); +static void _hash_unsetpagelock(Relation rel, BlockNumber blkno, int access); +static void _hash_splitpage(Relation rel, Buffer metabuf, Bucket obucket, Bucket nbucket); + +/* + * We use high-concurrency locking on hash indices. There are two cases in + * which we don't do locking. One is when we're building the index. + * Since the creating transaction has not committed, no one can see + * the index, and there's no reason to share locks. The second case + * is when we're just starting up the database system. We use some + * special-purpose initialization code in the relation cache manager + * (see utils/cache/relcache.c) to allow us to do indexed scans on + * the system catalogs before we'd normally be able to. This happens + * before the lock table is fully initialized, so we can't use it. + * Strictly speaking, this violates 2pl, but we don't do 2pl on the + * system catalogs anyway. + */ + + +#define USELOCKING (!BuildingHash && !IsInitProcessingMode()) + + +/* + * _hash_metapinit() -- Initialize the metadata page of a hash index, + * the two buckets that we begin with and the initial + * bitmap page. + */ +void +_hash_metapinit(Relation rel) +{ + HashMetaPage metap; + HashPageOpaque pageopaque; + Buffer metabuf; + Buffer buf; + Page pg; + int nbuckets; + uint32 nelem; /* number elements */ + uint32 lg2nelem; /* _hash_log2(nelem) */ + uint32 nblocks; + uint16 i; + + /* can't be sharing this with anyone, now... */ + if (USELOCKING) + RelationSetLockForWrite(rel); + + if ((nblocks = RelationGetNumberOfBlocks(rel)) != 0) { + elog(WARN, "Cannot initialize non-empty hash table %s", + RelationGetRelationName(rel)); + } + + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE); + pg = BufferGetPage(metabuf); + metap = (HashMetaPage) pg; + _hash_pageinit(pg, BufferGetPageSize(metabuf)); + + metap->hashm_magic = HASH_MAGIC; + metap->hashm_version = HASH_VERSION; + metap->hashm_nkeys = 0; + metap->hashm_nmaps = 0; + metap->hashm_ffactor = DEFAULT_FFACTOR; + metap->hashm_bsize = BufferGetPageSize(metabuf); + metap->hashm_bshift = _hash_log2(metap->hashm_bsize); + for (i = metap->hashm_bshift; i > 0; --i) { + if ((1 << i) < (metap->hashm_bsize - + (DOUBLEALIGN(sizeof(PageHeaderData)) + + DOUBLEALIGN(sizeof(HashPageOpaqueData))))) { + break; + } + } + Assert(i); + metap->hashm_bmsize = 1 << i; + metap->hashm_procid = index_getprocid(rel, 1, HASHPROC); + + /* + * Make nelem = 2 rather than 0 so that we end up allocating space + * for the next greater power of two number of buckets. 
+ */ + nelem = 2; + lg2nelem = 1; /*_hash_log2(MAX(nelem, 2)) */ + nbuckets = 2; /*1 << lg2nelem */ + + memset((char *) metap->hashm_spares, 0, sizeof(metap->hashm_spares)); + memset((char *) metap->hashm_mapp, 0, sizeof(metap->hashm_mapp)); + + metap->hashm_spares[lg2nelem] = 2; /* lg2nelem + 1 */ + metap->hashm_spares[lg2nelem + 1] = 2; /* lg2nelem + 1 */ + metap->hashm_ovflpoint = 1; /* lg2nelem */ + metap->hashm_lastfreed = 2; + + metap->hashm_maxbucket = metap->hashm_lowmask = 1; /* nbuckets - 1 */ + metap->hashm_highmask = 3; /* (nbuckets << 1) - 1 */ + + pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg); + pageopaque->hasho_oaddr = InvalidOvflAddress; + pageopaque->hasho_prevblkno = InvalidBlockNumber; + pageopaque->hasho_nextblkno = InvalidBlockNumber; + pageopaque->hasho_flag = LH_META_PAGE; + pageopaque->hasho_bucket = -1; + + /* + * First bitmap page is at: splitpoint lg2nelem page offset 1 which + * turns out to be page 3. Couldn't initialize page 3 until we created + * the first two buckets above. + */ + if (_hash_initbitmap(rel, metap, OADDR_OF(lg2nelem, 1), lg2nelem + 1, 0)) + elog(WARN, "Problem with _hash_initbitmap."); + + /* all done */ + _hash_wrtnorelbuf(rel, metabuf); + + /* + * initialize the first two buckets + */ + for (i = 0; i <= 1; i++) { + buf = _hash_getbuf(rel, BUCKET_TO_BLKNO(i), HASH_WRITE); + pg = BufferGetPage(buf); + _hash_pageinit(pg, BufferGetPageSize(buf)); + pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg); + pageopaque->hasho_oaddr = InvalidOvflAddress; + pageopaque->hasho_prevblkno = InvalidBlockNumber; + pageopaque->hasho_nextblkno = InvalidBlockNumber; + pageopaque->hasho_flag = LH_BUCKET_PAGE; + pageopaque->hasho_bucket = i; + _hash_wrtbuf(rel, buf); + } + + _hash_relbuf(rel, metabuf, HASH_WRITE); + + if (USELOCKING) + RelationUnsetLockForWrite(rel); +} + +/* + * _hash_getbuf() -- Get a buffer by block number for read or write. + * + * When this routine returns, the appropriate lock is set on the + * requested buffer its reference count is correct. + * + * XXX P_NEW is not used because, unlike the tree structures, we + * need the bucket blocks to be at certain block numbers. we must + * depend on the caller to call _hash_pageinit on the block if it + * knows that this is a new block. + */ +Buffer +_hash_getbuf(Relation rel, BlockNumber blkno, int access) +{ + Buffer buf; + + if (blkno == P_NEW) { + elog(WARN, "_hash_getbuf: internal error: hash AM does not use P_NEW"); + } + switch (access) { + case HASH_WRITE: + case HASH_READ: + _hash_setpagelock(rel, blkno, access); + break; + default: + elog(WARN, "_hash_getbuf: invalid access (%d) on new blk: %.*s", + access, NAMEDATALEN, RelationGetRelationName(rel)); + break; + } + buf = ReadBuffer(rel, blkno); + + /* ref count and lock type are correct */ + return (buf); +} + +/* + * _hash_relbuf() -- release a locked buffer. + */ +void +_hash_relbuf(Relation rel, Buffer buf, int access) +{ + BlockNumber blkno; + + blkno = BufferGetBlockNumber(buf); + + switch (access) { + case HASH_WRITE: + case HASH_READ: + _hash_unsetpagelock(rel, blkno, access); + break; + default: + elog(WARN, "_hash_relbuf: invalid access (%d) on blk %x: %.*s", + access, blkno, NAMEDATALEN, RelationGetRelationName(rel)); + } + + ReleaseBuffer(buf); +} + +/* + * _hash_wrtbuf() -- write a hash page to disk. + * + * This routine releases the lock held on the buffer and our reference + * to it. It is an error to call _hash_wrtbuf() without a write lock + * or a reference to the buffer. 
+ */ +void +_hash_wrtbuf(Relation rel, Buffer buf) +{ + BlockNumber blkno; + + blkno = BufferGetBlockNumber(buf); + WriteBuffer(buf); + _hash_unsetpagelock(rel, blkno, HASH_WRITE); +} + +/* + * _hash_wrtnorelbuf() -- write a hash page to disk, but do not release + * our reference or lock. + * + * It is an error to call _hash_wrtnorelbuf() without a write lock + * or a reference to the buffer. + */ +void +_hash_wrtnorelbuf(Relation rel, Buffer buf) +{ + BlockNumber blkno; + + blkno = BufferGetBlockNumber(buf); + WriteNoReleaseBuffer(buf); +} + +Page +_hash_chgbufaccess(Relation rel, + Buffer *bufp, + int from_access, + int to_access) +{ + BlockNumber blkno; + + blkno = BufferGetBlockNumber(*bufp); + + switch (from_access) { + case HASH_WRITE: + _hash_wrtbuf(rel, *bufp); + break; + case HASH_READ: + _hash_relbuf(rel, *bufp, from_access); + break; + default: + elog(WARN, "_hash_chgbufaccess: invalid access (%d) on blk %x: %.*s", + from_access, blkno, NAMEDATALEN, RelationGetRelationName(rel)); + break; + } + *bufp = _hash_getbuf(rel, blkno, to_access); + return (BufferGetPage(*bufp)); +} + +/* + * _hash_pageinit() -- Initialize a new page. + */ +void +_hash_pageinit(Page page, Size size) +{ + Assert(((PageHeader) page)->pd_lower == 0); + Assert(((PageHeader) page)->pd_upper == 0); + Assert(((PageHeader) page)->pd_special == 0); + + /* + * Cargo-cult programming -- don't really need this to be zero, but + * creating new pages is an infrequent occurrence and it makes me feel + * good when I know they're empty. + */ + memset(page, 0, size); + + PageInit(page, size, sizeof(HashPageOpaqueData)); +} + +static void +_hash_setpagelock(Relation rel, + BlockNumber blkno, + int access) +{ + ItemPointerData iptr; + + if (USELOCKING) { + ItemPointerSet(&iptr, blkno, 1); + + switch (access) { + case HASH_WRITE: + RelationSetSingleWLockPage(rel, &iptr); + break; + case HASH_READ: + RelationSetSingleRLockPage(rel, &iptr); + break; + default: + elog(WARN, "_hash_setpagelock: invalid access (%d) on blk %x: %.*s", + access, blkno, NAMEDATALEN, RelationGetRelationName(rel)); + break; + } + } +} + +static void +_hash_unsetpagelock(Relation rel, + BlockNumber blkno, + int access) +{ + ItemPointerData iptr; + + if (USELOCKING) { + ItemPointerSet(&iptr, blkno, 1); + + switch (access) { + case HASH_WRITE: + RelationUnsetSingleWLockPage(rel, &iptr); + break; + case HASH_READ: + RelationUnsetSingleRLockPage(rel, &iptr); + break; + default: + elog(WARN, "_hash_unsetpagelock: invalid access (%d) on blk %x: %.*s", + access, blkno, NAMEDATALEN, RelationGetRelationName(rel)); + break; + } + } +} + +void +_hash_pagedel(Relation rel, ItemPointer tid) +{ + Buffer buf; + Buffer metabuf; + Page page; + BlockNumber blkno; + OffsetNumber offno; + HashMetaPage metap; + HashPageOpaque opaque; + + blkno = ItemPointerGetBlockNumber(tid); + offno = ItemPointerGetOffsetNumber(tid); + + buf = _hash_getbuf(rel, blkno, HASH_WRITE); + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + opaque = (HashPageOpaque) PageGetSpecialPointer(page); + + PageIndexTupleDelete(page, offno); + _hash_wrtnorelbuf(rel, buf); + + if (PageIsEmpty(page) && (opaque->hasho_flag & LH_OVERFLOW_PAGE)) { + buf = _hash_freeovflpage(rel, buf); + if (BufferIsValid(buf)) { + _hash_relbuf(rel, buf, HASH_WRITE); + } + } else { + _hash_relbuf(rel, buf, HASH_WRITE); + } + + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE); + metap = (HashMetaPage) BufferGetPage(metabuf); + _hash_checkpage((Page) metap, LH_META_PAGE); + 
++metap->hashm_nkeys; + _hash_wrtbuf(rel, metabuf); +} + +void +_hash_expandtable(Relation rel, Buffer metabuf) +{ + HashMetaPage metap; + Bucket old_bucket; + Bucket new_bucket; + uint32 spare_ndx; + +/* elog(DEBUG, "_hash_expandtable: expanding..."); */ + + metap = (HashMetaPage) BufferGetPage(metabuf); + _hash_checkpage((Page) metap, LH_META_PAGE); + + metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_READ, HASH_WRITE); + new_bucket = ++metap->MAX_BUCKET; + metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_WRITE, HASH_READ); + old_bucket = (metap->MAX_BUCKET & metap->LOW_MASK); + + /* + * If the split point is increasing (MAX_BUCKET's log base 2 + * * increases), we need to copy the current contents of the spare + * split bucket to the next bucket. + */ + spare_ndx = _hash_log2(metap->MAX_BUCKET + 1); + if (spare_ndx > metap->OVFL_POINT) { + + metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_READ, HASH_WRITE); + metap->SPARES[spare_ndx] = metap->SPARES[metap->OVFL_POINT]; + metap->OVFL_POINT = spare_ndx; + metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_WRITE, HASH_READ); + } + + if (new_bucket > metap->HIGH_MASK) { + + /* Starting a new doubling */ + metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_READ, HASH_WRITE); + metap->LOW_MASK = metap->HIGH_MASK; + metap->HIGH_MASK = new_bucket | metap->LOW_MASK; + metap = (HashMetaPage) _hash_chgbufaccess(rel, &metabuf, HASH_WRITE, HASH_READ); + + } + /* Relocate records to the new bucket */ + _hash_splitpage(rel, metabuf, old_bucket, new_bucket); +} + + +/* + * _hash_splitpage -- split 'obucket' into 'obucket' and 'nbucket' + * + * this routine is actually misnamed -- we are splitting a bucket that + * consists of a base bucket page and zero or more overflow (bucket + * chain) pages. + */ +static void +_hash_splitpage(Relation rel, + Buffer metabuf, + Bucket obucket, + Bucket nbucket) +{ + Bucket bucket; + Buffer obuf; + Buffer nbuf; + Buffer ovflbuf; + BlockNumber oblkno; + BlockNumber nblkno; + bool null; + Datum datum; + HashItem hitem; + HashPageOpaque oopaque; + HashPageOpaque nopaque; + HashMetaPage metap; + IndexTuple itup; + int itemsz; + OffsetNumber ooffnum; + OffsetNumber noffnum; + OffsetNumber omaxoffnum; + Page opage; + Page npage; + TupleDesc itupdesc; + +/* elog(DEBUG, "_hash_splitpage: splitting %d into %d,%d", + obucket, obucket, nbucket); +*/ + metap = (HashMetaPage) BufferGetPage(metabuf); + _hash_checkpage((Page) metap, LH_META_PAGE); + + /* get the buffers & pages */ + oblkno = BUCKET_TO_BLKNO(obucket); + nblkno = BUCKET_TO_BLKNO(nbucket); + obuf = _hash_getbuf(rel, oblkno, HASH_WRITE); + nbuf = _hash_getbuf(rel, nblkno, HASH_WRITE); + opage = BufferGetPage(obuf); + npage = BufferGetPage(nbuf); + + /* initialize the new bucket */ + _hash_pageinit(npage, BufferGetPageSize(nbuf)); + nopaque = (HashPageOpaque) PageGetSpecialPointer(npage); + nopaque->hasho_prevblkno = InvalidBlockNumber; + nopaque->hasho_nextblkno = InvalidBlockNumber; + nopaque->hasho_flag = LH_BUCKET_PAGE; + nopaque->hasho_oaddr = InvalidOvflAddress; + nopaque->hasho_bucket = nbucket; + _hash_wrtnorelbuf(rel, nbuf); + + /* + * make sure the old bucket isn't empty. advance 'opage' and + * friends through the overflow bucket chain until we find a + * non-empty page. + * + * XXX we should only need this once, if we are careful to + * preserve the invariant that overflow pages are never empty. 
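/*
 * Sketch of the bucket arithmetic that _hash_expandtable() (above)
 * maintains.  The key-to-bucket step itself lives in _hash_call()
 * (hashutil.c, not shown here); the rule below is the standard
 * linear-hashing mapping that the hashm_highmask/hashm_lowmask comments
 * describe, and is an assumption about that routine rather than a copy.
 */
#include <stdio.h>
#include <stdint.h>

typedef struct {
    uint32_t maxbucket;     /* ID of maximum bucket in use */
    uint32_t highmask;      /* mask to modulo into entire table */
    uint32_t lowmask;       /* mask to modulo into lower half of table */
} MiniMeta;

static uint32_t
hash_to_bucket(const MiniMeta *m, uint32_t hashval)
{
    uint32_t bucket = hashval & m->highmask;

    if (bucket > m->maxbucket)
        bucket &= m->lowmask;   /* that bucket has not been split off yet */
    return bucket;
}

static void
expand_once(MiniMeta *m)
{
    uint32_t new_bucket = ++m->maxbucket;

    if (new_bucket > m->highmask) {     /* starting a new doubling */
        m->lowmask = m->highmask;
        m->highmask = new_bucket | m->lowmask;
    }
}

int
main(void)
{
    MiniMeta m = {1, 3, 1};             /* initial state from _hash_metapinit: 2 buckets */
    uint32_t h = 6;                     /* some hash value */

    printf("bucket before split: %u\n", (unsigned) hash_to_bucket(&m, h));
    expand_once(&m);                    /* bucket 0 splits into 0 and 2 */
    printf("bucket after split:  %u\n", (unsigned) hash_to_bucket(&m, h));
    return 0;
}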
+ */ + _hash_checkpage(opage, LH_BUCKET_PAGE); + oopaque = (HashPageOpaque) PageGetSpecialPointer(opage); + if (PageIsEmpty(opage)) { + oblkno = oopaque->hasho_nextblkno; + _hash_relbuf(rel, obuf, HASH_WRITE); + if (!BlockNumberIsValid(oblkno)) { + /* + * the old bucket is completely empty; of course, the new + * bucket will be as well, but since it's a base bucket + * page we don't care. + */ + _hash_relbuf(rel, nbuf, HASH_WRITE); + return; + } + obuf = _hash_getbuf(rel, oblkno, HASH_WRITE); + opage = BufferGetPage(obuf); + _hash_checkpage(opage, LH_OVERFLOW_PAGE); + if (PageIsEmpty(opage)) { + elog(WARN, "_hash_splitpage: empty overflow page %d", oblkno); + } + oopaque = (HashPageOpaque) PageGetSpecialPointer(opage); + } + + /* + * we are now guaranteed that 'opage' is not empty. partition the + * tuples in the old bucket between the old bucket and the new + * bucket, advancing along their respective overflow bucket chains + * and adding overflow pages as needed. + */ + ooffnum = FirstOffsetNumber; + omaxoffnum = PageGetMaxOffsetNumber(opage); + for (;;) { + /* + * at each iteration through this loop, each of these variables + * should be up-to-date: obuf opage oopaque ooffnum omaxoffnum + */ + + /* check if we're at the end of the page */ + if (ooffnum > omaxoffnum) { + /* at end of page, but check for overflow page */ + oblkno = oopaque->hasho_nextblkno; + if (BlockNumberIsValid(oblkno)) { + /* + * we ran out of tuples on this particular page, but + * we have more overflow pages; re-init values. + */ + _hash_wrtbuf(rel, obuf); + obuf = _hash_getbuf(rel, oblkno, HASH_WRITE); + opage = BufferGetPage(obuf); + _hash_checkpage(opage, LH_OVERFLOW_PAGE); + oopaque = (HashPageOpaque) PageGetSpecialPointer(opage); + + /* we're guaranteed that an ovfl page has at least 1 tuple */ + if (PageIsEmpty(opage)) { + elog(WARN, "_hash_splitpage: empty ovfl page %d!", + oblkno); + } + ooffnum = FirstOffsetNumber; + omaxoffnum = PageGetMaxOffsetNumber(opage); + } else { + /* + * we're at the end of the bucket chain, so now we're + * really done with everything. before quitting, call + * _hash_squeezebucket to ensure the tuples in the + * bucket (including the overflow pages) are packed as + * tightly as possible. + */ + _hash_wrtbuf(rel, obuf); + _hash_wrtbuf(rel, nbuf); + _hash_squeezebucket(rel, metap, obucket); + return; + } + } + + /* hash on the tuple */ + hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum)); + itup = &(hitem->hash_itup); + itupdesc = RelationGetTupleDescriptor(rel); + datum = index_getattr(itup, 1, itupdesc, &null); + bucket = _hash_call(rel, metap, datum); + + if (bucket == nbucket) { + /* + * insert the tuple into the new bucket. if it doesn't + * fit on the current page in the new bucket, we must + * allocate a new overflow page and place the tuple on + * that page instead. + */ + itemsz = IndexTupleDSize(hitem->hash_itup) + + (sizeof(HashItemData) - sizeof(IndexTupleData)); + + itemsz = DOUBLEALIGN(itemsz); + + if (PageGetFreeSpace(npage) < itemsz) { + ovflbuf = _hash_addovflpage(rel, &metabuf, nbuf); + _hash_wrtbuf(rel, nbuf); + nbuf = ovflbuf; + npage = BufferGetPage(nbuf); + _hash_checkpage(npage, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + } + + noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage)); + (void) PageAddItem(npage, (Item) hitem, itemsz, noffnum, LP_USED); + _hash_wrtnorelbuf(rel, nbuf); + + /* + * now delete the tuple from the old bucket. 
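A small sketch of the sizing test used just above when moving a tuple into the new bucket: the item is padded out with DOUBLEALIGN and compared against the page's free space, and only when it does not fit is an overflow page added. The 8-byte DOUBLEALIGN definition and all of the byte counts below are stand-ins for illustration, not the backend's actual macros or sizes.

#include <stdio.h>

#define DOUBLEALIGN(LEN)  (((LEN) + 7) & ~((unsigned) 7))   /* assumed 8-byte alignment */

int main(void)
{
    unsigned tuple_size = 21;      /* hypothetical IndexTupleDSize() result      */
    unsigned overhead = 4;         /* stand-in for sizeof(HashItemData) -
                                    * sizeof(IndexTupleData)                     */
    unsigned freespace = 24;       /* stand-in for PageGetFreeSpace(npage)       */
    unsigned itemsz = DOUBLEALIGN(tuple_size + overhead);

    if (freespace < itemsz)
        printf("item of %u bytes does not fit in %u free bytes: "
               "add an overflow page first\n", itemsz, freespace);
    else
        printf("item of %u bytes fits in %u free bytes\n", itemsz, freespace);
    return 0;
}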
after this + * section of code, 'ooffnum' will actually point to the + * ItemId to which we would point if we had advanced it + * before the deletion (PageIndexTupleDelete repacks the + * ItemId array). this also means that 'omaxoffnum' is + * exactly one less than it used to be, so we really can + * just decrement it instead of calling + * PageGetMaxOffsetNumber. + */ + PageIndexTupleDelete(opage, ooffnum); + _hash_wrtnorelbuf(rel, obuf); + omaxoffnum = OffsetNumberPrev(omaxoffnum); + + /* + * tidy up. if the old page was an overflow page and it + * is now empty, we must free it (we want to preserve the + * invariant that overflow pages cannot be empty). + */ + if (PageIsEmpty(opage) && + (oopaque->hasho_flag & LH_OVERFLOW_PAGE)) { + obuf = _hash_freeovflpage(rel, obuf); + + /* check that we're not through the bucket chain */ + if (BufferIsInvalid(obuf)) { + _hash_wrtbuf(rel, nbuf); + _hash_squeezebucket(rel, metap, obucket); + return; + } + + /* + * re-init. again, we're guaranteed that an ovfl page + * has at least one tuple. + */ + opage = BufferGetPage(obuf); + _hash_checkpage(opage, LH_OVERFLOW_PAGE); + oblkno = BufferGetBlockNumber(obuf); + oopaque = (HashPageOpaque) PageGetSpecialPointer(opage); + if (PageIsEmpty(opage)) { + elog(WARN, "_hash_splitpage: empty overflow page %d", + oblkno); + } + ooffnum = FirstOffsetNumber; + omaxoffnum = PageGetMaxOffsetNumber(opage); + } + } else { + /* + * the tuple stays on this page. we didn't move anything, + * so we didn't delete anything and therefore we don't + * have to change 'omaxoffnum'. + * + * XXX any hash value from [0, nbucket-1] will map to this + * bucket, which doesn't make sense to me. + */ + ooffnum = OffsetNumberNext(ooffnum); + } + } + /*NOTREACHED*/ +} diff --git a/src/backend/access/hash/hashscan.c b/src/backend/access/hash/hashscan.c new file mode 100644 index 00000000000..c4cce0e70d9 --- /dev/null +++ b/src/backend/access/hash/hashscan.c @@ -0,0 +1,172 @@ +/*------------------------------------------------------------------------- + * + * hashscan.c-- + * manage scans on hash tables + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/hash/hashscan.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + * NOTES + * Because we can be doing an index scan on a relation while we + * update it, we need to avoid missing data that moves around in + * the index. The routines and global variables in this file + * guarantee that all scans in the local address space stay + * correctly positioned. This is all we need to worry about, since + * write locking guarantees that no one else will be on the same + * page at the same time as we are. + * + * The scheme is to manage a list of active scans in the current + * backend. Whenever we add or remove records from an index, we + * check the list of active scans to see if any has been affected. + * A scan is affected only if it is on the same relation, and the + * same page, as the update. 
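The scan-list bookkeeping described in the NOTES above reduces to an ordinary singly linked list. The sketch below uses an integer scan id in place of an IndexScanDesc and plain malloc/free in place of palloc/pfree; regscan and dropscan are illustrative counterparts of _hash_regscan and _hash_dropscan, not the real routines.

#include <stdio.h>
#include <stdlib.h>

/* Reduced model of HashScanListData: one node per active scan. */
typedef struct ScanListData {
    int scan_id;                    /* stands in for the IndexScanDesc */
    struct ScanListData *next;
} ScanListData;

static ScanListData *ActiveScans = NULL;

static void regscan(int scan_id)    /* cf. _hash_regscan */
{
    ScanListData *n = malloc(sizeof(ScanListData));

    n->scan_id = scan_id;
    n->next = ActiveScans;
    ActiveScans = n;
}

static void dropscan(int scan_id)   /* cf. _hash_dropscan */
{
    ScanListData *chk, *last = NULL;

    for (chk = ActiveScans; chk != NULL && chk->scan_id != scan_id;
         chk = chk->next)
        last = chk;
    if (chk == NULL)
        return;                     /* the real code elog(WARN)s here */
    if (last == NULL)
        ActiveScans = chk->next;    /* dropping the head of the list */
    else
        last->next = chk->next;
    free(chk);
}

int main(void)
{
    ScanListData *l;

    regscan(1); regscan(2); regscan(3);
    dropscan(2);
    for (l = ActiveScans; l != NULL; l = l->next)
        printf("active scan %d\n", l->scan_id);   /* prints 3 then 1 */
    return 0;
}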
+ * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/sdir.h" +#include "access/hash.h" + +static void _hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno); +static bool _hash_scantouched(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno); + +typedef struct HashScanListData { + IndexScanDesc hashsl_scan; + struct HashScanListData *hashsl_next; +} HashScanListData; + +typedef HashScanListData *HashScanList; + +static HashScanList HashScans = (HashScanList) NULL; + +/* + * _Hash_regscan() -- register a new scan. + */ +void +_hash_regscan(IndexScanDesc scan) +{ + HashScanList new_el; + + new_el = (HashScanList) palloc(sizeof(HashScanListData)); + new_el->hashsl_scan = scan; + new_el->hashsl_next = HashScans; + HashScans = new_el; +} + +/* + * _hash_dropscan() -- drop a scan from the scan list + */ +void +_hash_dropscan(IndexScanDesc scan) +{ + HashScanList chk, last; + + last = (HashScanList) NULL; + for (chk = HashScans; + chk != (HashScanList) NULL && chk->hashsl_scan != scan; + chk = chk->hashsl_next) { + last = chk; + } + + if (chk == (HashScanList) NULL) + elog(WARN, "hash scan list trashed; can't find 0x%lx", scan); + + if (last == (HashScanList) NULL) + HashScans = chk->hashsl_next; + else + last->hashsl_next = chk->hashsl_next; + +#ifdef PERFECT_MEM + pfree (chk); +#endif /* PERFECT_MEM */ +} + +void +_hash_adjscans(Relation rel, ItemPointer tid) +{ + HashScanList l; + Oid relid; + + relid = rel->rd_id; + for (l = HashScans; l != (HashScanList) NULL; l = l->hashsl_next) { + if (relid == l->hashsl_scan->relation->rd_id) + _hash_scandel(l->hashsl_scan, ItemPointerGetBlockNumber(tid), + ItemPointerGetOffsetNumber(tid)); + } +} + +static void +_hash_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno) +{ + ItemPointer current; + Buffer buf; + Buffer metabuf; + HashScanOpaque so; + + if (!_hash_scantouched(scan, blkno, offno)) + return; + + metabuf = _hash_getbuf(scan->relation, HASH_METAPAGE, HASH_READ); + + so = (HashScanOpaque) scan->opaque; + buf = so->hashso_curbuf; + + current = &(scan->currentItemData); + if (ItemPointerIsValid(current) + && ItemPointerGetBlockNumber(current) == blkno + && ItemPointerGetOffsetNumber(current) >= offno) { + _hash_step(scan, &buf, BackwardScanDirection, metabuf); + so->hashso_curbuf = buf; + } + + current = &(scan->currentMarkData); + if (ItemPointerIsValid(current) + && ItemPointerGetBlockNumber(current) == blkno + && ItemPointerGetOffsetNumber(current) >= offno) { + ItemPointerData tmp; + tmp = *current; + *current = scan->currentItemData; + scan->currentItemData = tmp; + _hash_step(scan, &buf, BackwardScanDirection, metabuf); + so->hashso_mrkbuf = buf; + tmp = *current; + *current = scan->currentItemData; + scan->currentItemData = tmp; + } +} + +static bool +_hash_scantouched(IndexScanDesc scan, + BlockNumber blkno, + OffsetNumber offno) +{ + ItemPointer current; + + current = &(scan->currentItemData); + if (ItemPointerIsValid(current) + && ItemPointerGetBlockNumber(current) == blkno + && ItemPointerGetOffsetNumber(current) >= offno) + return (true); + + current = &(scan->currentMarkData); + if (ItemPointerIsValid(current) + && ItemPointerGetBlockNumber(current) == blkno + && ItemPointerGetOffsetNumber(current) >= 
offno) + return (true); + + return (false); +} diff --git a/src/backend/access/hash/hashsearch.c b/src/backend/access/hash/hashsearch.c new file mode 100644 index 00000000000..056235dec85 --- /dev/null +++ b/src/backend/access/hash/hashsearch.c @@ -0,0 +1,425 @@ +/*------------------------------------------------------------------------- + * + * hashsearch.c-- + * search code for postgres hash tables + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/hash/hashsearch.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "fmgr.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/skey.h" +#include "access/sdir.h" +#include "access/hash.h" + +/* + * _hash_search() -- Finds the page/bucket that the contains the + * scankey and loads it into *bufP. the buffer has a read lock. + */ +void +_hash_search(Relation rel, + int keysz, + ScanKey scankey, + Buffer *bufP, + HashMetaPage metap) +{ + BlockNumber blkno; + Datum keyDatum; + Bucket bucket; + + if (scankey == (ScanKey) NULL || + (keyDatum = scankey[0].sk_argument) == (Datum) NULL) { + /* + * If the scankey argument is NULL, all tuples will satisfy + * the scan so we start the scan at the first bucket (bucket + * 0). + */ + bucket = 0; + } else { + bucket = _hash_call(rel, metap, keyDatum); + } + + blkno = BUCKET_TO_BLKNO(bucket); + + *bufP = _hash_getbuf(rel, blkno, HASH_READ); +} + +/* + * _hash_next() -- Get the next item in a scan. + * + * On entry, we have a valid currentItemData in the scan, and a + * read lock on the page that contains that item. We do not have + * the page pinned. We return the next item in the scan. On + * exit, we have the page containing the next item locked but not + * pinned. + */ +RetrieveIndexResult +_hash_next(IndexScanDesc scan, ScanDirection dir) +{ + Relation rel; + Buffer buf; + Buffer metabuf; + Page page; + OffsetNumber offnum; + RetrieveIndexResult res; + ItemPointer current; + ItemPointer iptr; + HashItem hitem; + IndexTuple itup; + HashScanOpaque so; + + rel = scan->relation; + so = (HashScanOpaque) scan->opaque; + current = &(scan->currentItemData); + + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ); + + /* + * XXX 10 may 91: somewhere there's a bug in our management of the + * cached buffer for this scan. wei discovered it. the following + * is a workaround so he can work until i figure out what's going on. + */ + + if (!BufferIsValid(so->hashso_curbuf)) { + so->hashso_curbuf = _hash_getbuf(rel, + ItemPointerGetBlockNumber(current), + HASH_READ); + } + + /* we still have the buffer pinned and locked */ + buf = so->hashso_curbuf; + + /* + * step to next valid tuple. note that _hash_step releases our + * lock on 'metabuf'; if we switch to a new 'buf' while looking + * for the next tuple, we come back with a lock on that buffer. 
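_hash_search above picks the starting bucket for a scan: with no usable key the scan must visit every bucket and therefore starts at bucket 0, otherwise the key's hash value is masked down to an existing bucket. A stand-alone sketch of that decision follows; the toy string hash and the mask values are assumptions, standing in for the procedure stored in hashm_procid and for the metapage fields.

#include <stdio.h>

typedef unsigned int uint32;

static uint32 start_bucket(const char *key, uint32 maxbucket,
                           uint32 highmask, uint32 lowmask)
{
    uint32 h, bucket;

    if (key == NULL)
        return 0;                          /* unqualified scan: start at bucket 0 */

    for (h = 0; *key != '\0'; key++)       /* toy hash, not hashm_procid */
        h = h * 31 + (unsigned char) *key;

    bucket = h & highmask;                 /* same masking as _hash_call */
    if (bucket > maxbucket)
        bucket = bucket & lowmask;
    return bucket;
}

int main(void)
{
    printf("\"fred\"   -> bucket %u\n", start_bucket("fred", 5, 7, 3));
    printf("NULL key -> bucket %u\n", start_bucket(NULL, 5, 7, 3));
    return 0;
}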
+ */ + if (!_hash_step(scan, &buf, dir, metabuf)) { + return ((RetrieveIndexResult) NULL); + } + + /* if we're here, _hash_step found a valid tuple */ + current = &(scan->currentItemData); + offnum = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + hitem = (HashItem) PageGetItem(page, PageGetItemId(page, offnum)); + itup = &hitem->hash_itup; + iptr = (ItemPointer) palloc(sizeof(ItemPointerData)); + memmove((char *) iptr, (char *) &(itup->t_tid), sizeof(ItemPointerData)); + res = FormRetrieveIndexResult(current, iptr); + + return (res); +} + +static void +_hash_readnext(Relation rel, + Buffer *bufp, Page *pagep, HashPageOpaque *opaquep) +{ + BlockNumber blkno; + + blkno = (*opaquep)->hasho_nextblkno; + _hash_relbuf(rel, *bufp, HASH_READ); + *bufp = InvalidBuffer; + if (BlockNumberIsValid(blkno)) { + *bufp = _hash_getbuf(rel, blkno, HASH_READ); + *pagep = BufferGetPage(*bufp); + _hash_checkpage(*pagep, LH_OVERFLOW_PAGE); + *opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep); + Assert(!PageIsEmpty(*pagep)); + } +} + +static void +_hash_readprev(Relation rel, + Buffer *bufp, Page *pagep, HashPageOpaque *opaquep) +{ + BlockNumber blkno; + + blkno = (*opaquep)->hasho_prevblkno; + _hash_relbuf(rel, *bufp, HASH_READ); + *bufp = InvalidBuffer; + if (BlockNumberIsValid(blkno)) { + *bufp = _hash_getbuf(rel, blkno, HASH_READ); + *pagep = BufferGetPage(*bufp); + _hash_checkpage(*pagep, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + *opaquep = (HashPageOpaque) PageGetSpecialPointer(*pagep); + if (PageIsEmpty(*pagep)) { + Assert((*opaquep)->hasho_flag & LH_BUCKET_PAGE); + _hash_relbuf(rel, *bufp, HASH_READ); + *bufp = InvalidBuffer; + } + } +} + +/* + * _hash_first() -- Find the first item in a scan. + * + * Return the RetrieveIndexResult of the first item in the tree that + * satisfies the qualificatin associated with the scan descriptor. On + * exit, the page containing the current index tuple is read locked + * and pinned, and the scan's opaque data entry is updated to + * include the buffer. + */ +RetrieveIndexResult +_hash_first(IndexScanDesc scan, ScanDirection dir) +{ + Relation rel; + Buffer buf; + Buffer metabuf; + Page page; + HashPageOpaque opaque; + HashMetaPage metap; + HashItem hitem; + IndexTuple itup; + ItemPointer current; + ItemPointer iptr; + OffsetNumber offnum; + RetrieveIndexResult res; + HashScanOpaque so; + + rel = scan->relation; + so = (HashScanOpaque) scan->opaque; + current = &(scan->currentItemData); + + metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ); + metap = (HashMetaPage) BufferGetPage(metabuf); + _hash_checkpage((Page) metap, LH_META_PAGE); + + /* + * XXX -- The attribute number stored in the scan key is the attno + * in the heap relation. We need to transmogrify this into + * the index relation attno here. For the moment, we have + * hardwired attno == 1. + */ + + /* find the correct bucket page and load it into buf */ + _hash_search(rel, 1, scan->keyData, &buf, metap); + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE); + opaque = (HashPageOpaque) PageGetSpecialPointer(page); + + /* + * if we are scanning forward, we need to find the first non-empty + * page (if any) in the bucket chain. since overflow pages are + * never empty, this had better be either the bucket page or the + * first overflow page. + * + * if we are scanning backward, we always go all the way to the + * end of the bucket chain. 
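_hash_first positions the scan within a bucket chain: a forward scan needs the first non-empty page (and, because overflow pages are never empty, skipping an empty bucket page is enough), while a backward scan walks to the end of the chain. The reduced model below uses a trivial linked list of page descriptors; ChainPage and the helper names are invented for the sketch.

#include <stdio.h>

/* Reduced model of a bucket chain: a bucket page followed by overflow pages. */
typedef struct ChainPage {
    int ntuples;                    /* 0 only allowed on the bucket page */
    struct ChainPage *next;
} ChainPage;

/* Forward scan start: skip an empty bucket page, as _hash_first does. */
static ChainPage *first_nonempty(ChainPage *bucket)
{
    if (bucket->ntuples == 0)
        return bucket->next;        /* may be NULL: bucket truly empty */
    return bucket;
}

/* Backward scan start: always walk to the end of the chain. */
static ChainPage *last_page(ChainPage *bucket)
{
    ChainPage *p = bucket;

    while (p->next != NULL)
        p = p->next;
    return p;
}

int main(void)
{
    ChainPage ovfl = { 4, NULL };
    ChainPage bucket = { 0, &ovfl };

    printf("forward start has %d tuples\n", first_nonempty(&bucket)->ntuples);
    printf("backward start has %d tuples\n", last_page(&bucket)->ntuples);
    return 0;
}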
+ */ + if (PageIsEmpty(page)) { + if (BlockNumberIsValid(opaque->hasho_nextblkno)) { + _hash_readnext(rel, &buf, &page, &opaque); + } else { + ItemPointerSetInvalid(current); + so->hashso_curbuf = InvalidBuffer; + return ((RetrieveIndexResult) NULL); + } + } + if (ScanDirectionIsBackward(dir)) { + while (BlockNumberIsValid(opaque->hasho_nextblkno)) { + _hash_readnext(rel, &buf, &page, &opaque); + } + } + + if (!_hash_step(scan, &buf, dir, metabuf)) { + return ((RetrieveIndexResult) NULL); + } + + /* if we're here, _hash_step found a valid tuple */ + current = &(scan->currentItemData); + offnum = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + hitem = (HashItem) PageGetItem(page, PageGetItemId(page, offnum)); + itup = &hitem->hash_itup; + iptr = (ItemPointer) palloc(sizeof(ItemPointerData)); + memmove((char *) iptr, (char *) &(itup->t_tid), sizeof(ItemPointerData)); + res = FormRetrieveIndexResult(current, iptr); + + return (res); +} + +/* + * _hash_step() -- step to the next valid item in a scan in the bucket. + * + * If no valid record exists in the requested direction, return + * false. Else, return true and set the CurrentItemData for the + * scan to the right thing. + * + * 'bufP' points to the buffer which contains the current page + * that we'll step through. + * + * 'metabuf' is released when this returns. + */ +bool +_hash_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir, Buffer metabuf) +{ + Relation rel; + ItemPointer current; + HashScanOpaque so; + int allbuckets; + HashMetaPage metap; + Buffer buf; + Page page; + HashPageOpaque opaque; + OffsetNumber maxoff; + OffsetNumber offnum; + Bucket bucket; + BlockNumber blkno; + HashItem hitem; + IndexTuple itup; + + rel = scan->relation; + current = &(scan->currentItemData); + so = (HashScanOpaque) scan->opaque; + allbuckets = (scan->numberOfKeys < 1); + + metap = (HashMetaPage) BufferGetPage(metabuf); + _hash_checkpage((Page) metap, LH_META_PAGE); + + buf = *bufP; + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE|LH_OVERFLOW_PAGE); + opaque = (HashPageOpaque) PageGetSpecialPointer(page); + + /* + * If _hash_step is called from _hash_first, current will not be + * valid, so we can't dereference it. However, in that case, we + * presumably want to start at the beginning/end of the page... + */ + maxoff = PageGetMaxOffsetNumber(page); + if (ItemPointerIsValid(current)) { + offnum = ItemPointerGetOffsetNumber(current); + } else { + offnum = InvalidOffsetNumber; + } + + /* + * 'offnum' now points to the last tuple we have seen (if any). + * + * continue to step through tuples until: + * 1) we get to the end of the bucket chain or + * 2) we find a valid tuple. + */ + do { + bucket = opaque->hasho_bucket; + + switch (dir) { + case ForwardScanDirection: + if (offnum != InvalidOffsetNumber) { + offnum = OffsetNumberNext(offnum); /* move forward */ + } else { + offnum = FirstOffsetNumber; /* new page */ + } + while (offnum > maxoff) { + /* + * either this page is empty (maxoff == + * InvalidOffsetNumber) or we ran off the end. 
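The forward case of _hash_step above advances an offset within a page, then along the bucket's overflow pages, and, for an unqualified scan ("allbuckets"), on into the next bucket. The following sketch reduces that control flow to arrays of per-page tuple counts; the layout and the counts are made up for illustration.

#include <stdio.h>

#define NBUCKETS  2
#define NPAGES    2

static int tuples[NBUCKETS][NPAGES] = { { 2, 1 }, { 0, 0 } };

int main(void)
{
    int bucket = 0, page = 0, off = 1;     /* offsets are 1-based, like
                                            * FirstOffsetNumber */
    int allbuckets = 1;

    for (;;) {
        if (off > tuples[bucket][page]) {          /* ran off this page */
            if (page + 1 < NPAGES && tuples[bucket][page + 1] > 0) {
                page++;                            /* next overflow page */
                off = 1;
            } else if (allbuckets && bucket + 1 < NBUCKETS) {
                bucket++;                          /* fall over into next bucket */
                page = 0;
                off = 1;
                continue;                          /* its bucket page may be empty */
            } else {
                printf("end of scan\n");           /* nothing left anywhere */
                return 0;
            }
        }
        printf("visit bucket %d page %d offset %d\n", bucket, page, off);
        off++;
    }
}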
+ */ + _hash_readnext(rel, &buf, &page, &opaque); + if (BufferIsInvalid(buf)) { /* end of chain */ + if (allbuckets && bucket < metap->hashm_maxbucket) { + ++bucket; + blkno = BUCKET_TO_BLKNO(bucket); + buf = _hash_getbuf(rel, blkno, HASH_READ); + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE); + opaque = (HashPageOpaque) PageGetSpecialPointer(page); + Assert(opaque->hasho_bucket == bucket); + while (PageIsEmpty(page) && + BlockNumberIsValid(opaque->hasho_nextblkno)) { + _hash_readnext(rel, &buf, &page, &opaque); + } + maxoff = PageGetMaxOffsetNumber(page); + offnum = FirstOffsetNumber; + } else { + maxoff = offnum = InvalidOffsetNumber; + break; /* while */ + } + } else { + /* _hash_readnext never returns an empty page */ + maxoff = PageGetMaxOffsetNumber(page); + offnum = FirstOffsetNumber; + } + } + break; + case BackwardScanDirection: + if (offnum != InvalidOffsetNumber) { + offnum = OffsetNumberPrev(offnum); /* move back */ + } else { + offnum = maxoff; /* new page */ + } + while (offnum < FirstOffsetNumber) { + /* + * either this page is empty (offnum == + * InvalidOffsetNumber) or we ran off the end. + */ + _hash_readprev(rel, &buf, &page, &opaque); + if (BufferIsInvalid(buf)) { /* end of chain */ + if (allbuckets && bucket > 0) { + --bucket; + blkno = BUCKET_TO_BLKNO(bucket); + buf = _hash_getbuf(rel, blkno, HASH_READ); + page = BufferGetPage(buf); + _hash_checkpage(page, LH_BUCKET_PAGE); + opaque = (HashPageOpaque) PageGetSpecialPointer(page); + Assert(opaque->hasho_bucket == bucket); + while (BlockNumberIsValid(opaque->hasho_nextblkno)) { + _hash_readnext(rel, &buf, &page, &opaque); + } + maxoff = offnum = PageGetMaxOffsetNumber(page); + } else { + maxoff = offnum = InvalidOffsetNumber; + break; /* while */ + } + } else { + /* _hash_readprev never returns an empty page */ + maxoff = offnum = PageGetMaxOffsetNumber(page); + } + } + break; + default: + /* NoMovementScanDirection */ + /* this should not be reached */ + break; + } + + /* we ran off the end of the world without finding a match */ + if (offnum == InvalidOffsetNumber) { + _hash_relbuf(rel, metabuf, HASH_READ); + *bufP = so->hashso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(current); + return(false); + } + + /* get ready to check this tuple */ + hitem = (HashItem) PageGetItem(page, PageGetItemId(page, offnum)); + itup = &hitem->hash_itup; + } while (!_hash_checkqual(scan, itup)); + + /* if we made it to here, we've found a valid tuple */ + _hash_relbuf(rel, metabuf, HASH_READ); + blkno = BufferGetBlockNumber(buf); + *bufP = so->hashso_curbuf = buf; + ItemPointerSet(current, blkno, offnum); + return(true); +} diff --git a/src/backend/access/hash/hashstrat.c b/src/backend/access/hash/hashstrat.c new file mode 100644 index 00000000000..cac2a58690e --- /dev/null +++ b/src/backend/access/hash/hashstrat.c @@ -0,0 +1,104 @@ +/*------------------------------------------------------------------------- + * + * btstrat.c-- + * Srategy map entries for the btree indexed access method + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/hash/Attic/hashstrat.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/genam.h" +#include "access/hash.h" + +/* + * only one valid strategy for hash tables: 
equality. + */ + +static StrategyNumber HTNegate[1] = { + InvalidStrategy +}; + +static StrategyNumber HTCommute[1] = { + HTEqualStrategyNumber +}; + +static StrategyNumber HTNegateCommute[1] = { + InvalidStrategy +}; + +static StrategyEvaluationData HTEvaluationData = { + /* XXX static for simplicity */ + + HTMaxStrategyNumber, + (StrategyTransformMap)HTNegate, + (StrategyTransformMap)HTCommute, + (StrategyTransformMap)HTNegateCommute, + {NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL} +}; + +/* ---------------------------------------------------------------- + * RelationGetHashStrategy + * ---------------------------------------------------------------- + */ + +StrategyNumber +_hash_getstrat(Relation rel, + AttrNumber attno, + RegProcedure proc) +{ + StrategyNumber strat; + + strat = RelationGetStrategy(rel, attno, &HTEvaluationData, proc); + + Assert(StrategyNumberIsValid(strat)); + + return (strat); +} + +bool +_hash_invokestrat(Relation rel, + AttrNumber attno, + StrategyNumber strat, + Datum left, + Datum right) +{ + return (RelationInvokeStrategy(rel, &HTEvaluationData, attno, strat, + left, right)); +} + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c new file mode 100644 index 00000000000..f8f49fe7983 --- /dev/null +++ b/src/backend/access/hash/hashutil.c @@ -0,0 +1,147 @@ +/*------------------------------------------------------------------------- + * + * btutils.c-- + * Utility code for Postgres btree implementation. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/hash/hashutil.c,v 1.1.1.1 1996/07/09 06:21:10 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "fmgr.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/iqual.h" +#include "access/hash.h" + +ScanKey +_hash_mkscankey(Relation rel, IndexTuple itup, HashMetaPage metap) +{ + ScanKey skey; + TupleDesc itupdesc; + int natts; + AttrNumber i; + Datum arg; + RegProcedure proc; + bool null; + + natts = rel->rd_rel->relnatts; + itupdesc = RelationGetTupleDescriptor(rel); + + skey = (ScanKey) palloc(natts * sizeof(ScanKeyData)); + + for (i = 0; i < natts; i++) { + arg = index_getattr(itup, i + 1, itupdesc, &null); + proc = metap->hashm_procid; + ScanKeyEntryInitialize(&skey[i], + 0x0, (AttrNumber) (i + 1), proc, arg); + } + + return (skey); +} + +void +_hash_freeskey(ScanKey skey) +{ + pfree(skey); +} + + +bool +_hash_checkqual(IndexScanDesc scan, IndexTuple itup) +{ + if (scan->numberOfKeys > 0) + return (index_keytest(itup, + RelationGetTupleDescriptor(scan->relation), + scan->numberOfKeys, scan->keyData)); + else + return (true); +} + +HashItem +_hash_formitem(IndexTuple itup) +{ + int nbytes_hitem; + HashItem hitem; + Size tuplen; + + /* disallow nulls in hash keys */ + if (itup->t_info & INDEX_NULL_MASK) + elog(WARN, "hash indices cannot include null keys"); + + /* make a copy of the index tuple with room for the sequence number */ + tuplen = IndexTupleSize(itup); + nbytes_hitem = tuplen + + (sizeof(HashItemData) - sizeof(IndexTupleData)); + + hitem = (HashItem) palloc(nbytes_hitem); + memmove((char *) &(hitem->hash_itup), (char *) itup, tuplen); + + return (hitem); 
+} + +Bucket +_hash_call(Relation rel, HashMetaPage metap, Datum key) +{ + uint32 n; + Bucket bucket; + RegProcedure proc; + + proc = metap->hashm_procid; + n = (uint32) fmgr(proc, key); + bucket = n & metap->hashm_highmask; + if (bucket > metap->hashm_maxbucket) + bucket = bucket & metap->hashm_lowmask; + return (bucket); +} + +/* + * _hash_log2 -- returns ceil(lg2(num)) + */ +uint32 +_hash_log2(uint32 num) +{ + uint32 i, limit; + + limit = 1; + for (i = 0; limit < num; limit = limit << 1, i++) + ; + return (i); +} + +/* + * _hash_checkpage -- sanity checks on the format of all hash pages + */ +void +_hash_checkpage(Page page, int flags) +{ + PageHeader ph = (PageHeader) page; + HashPageOpaque opaque; + + Assert(page); + Assert(ph->pd_lower >= (sizeof(PageHeaderData) - sizeof(ItemIdData))); +#if 1 + Assert(ph->pd_upper <= + (BLCKSZ - DOUBLEALIGN(sizeof(HashPageOpaqueData)))); + Assert(ph->pd_special == + (BLCKSZ - DOUBLEALIGN(sizeof(HashPageOpaqueData)))); + Assert(ph->pd_opaque.od_pagesize == BLCKSZ); +#endif + if (flags) { + opaque = (HashPageOpaque) PageGetSpecialPointer(page); + Assert(opaque->hasho_flag & flags); + } +} diff --git a/src/backend/access/heap/Makefile.inc b/src/backend/access/heap/Makefile.inc new file mode 100644 index 00000000000..f4f4bbb7031 --- /dev/null +++ b/src/backend/access/heap/Makefile.inc @@ -0,0 +1,14 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for access/heap +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:11 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= heapam.c hio.c stats.c diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c new file mode 100644 index 00000000000..4bf31efd832 --- /dev/null +++ b/src/backend/access/heap/heapam.c @@ -0,0 +1,1507 @@ +/*------------------------------------------------------------------------- + * + * heapam.c-- + * heap access method code + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.1.1.1 1996/07/09 06:21:11 scrappy Exp $ + * + * + * INTERFACE ROUTINES + * heapgettup - fetch next heap tuple from a scan + * heap_open - open a heap relation by relationId + * heap_openr - open a heap relation by name + * heap_close - close a heap relation + * heap_beginscan - begin relation scan + * heap_rescan - restart a relation scan + * heap_endscan - end relation scan + * heap_getnext - retrieve next tuple in scan + * heap_fetch - retrive tuple with tid + * heap_insert - insert tuple into a relation + * heap_delete - delete a tuple from a relation + * heap_replace - replace a tuple in a relation with another tuple + * heap_markpos - mark scan position + * heap_restrpos - restore position to marked location + * + * NOTES + * This file contains the heap_ routines which implement + * the POSTGRES heap access method used for all POSTGRES + * relations. + * + * OLD COMMENTS + * struct relscan hints: (struct should be made AM independent?) + * + * rs_ctid is the tid of the last tuple returned by getnext. + * rs_ptid and rs_ntid are the tids of the previous and next tuples + * returned by getnext, respectively. NULL indicates an end of + * scan (either direction); NON indicates an unknow value. 
+ * + * possible combinations: + * rs_p rs_c rs_n interpretation + * NULL NULL NULL empty scan + * NULL NULL NON at begining of scan + * NULL NULL t1 at begining of scan (with cached tid) + * NON NULL NULL at end of scan + * t1 NULL NULL at end of scan (with cached tid) + * NULL t1 NULL just returned only tuple + * NULL t1 NON just returned first tuple + * NULL t1 t2 returned first tuple (with cached tid) + * NON t1 NULL just returned last tuple + * t2 t1 NULL returned last tuple (with cached tid) + * t1 t2 NON in the middle of a forward scan + * NON t2 t1 in the middle of a reverse scan + * ti tj tk in the middle of a scan (w cached tid) + * + * Here NULL is ...tup == NULL && ...buf == InvalidBuffer, + * and NON is ...tup == NULL && ...buf == UnknownBuffer. + * + * Currently, the NONTID values are not cached with their actual + * values by getnext. Values may be cached by markpos since it stores + * all three tids. + * + * NOTE: the calls to elog() must stop. Should decide on an interface + * between the general and specific AM calls. + * + * XXX probably do not need a free tuple routine for heaps. + * Huh? Free tuple is not necessary for tuples returned by scans, but + * is necessary for tuples which are returned by + * RelationGetTupleByItemPointer. -hirohama + * + *------------------------------------------------------------------------- + */ +#include +#include + +#include "postgres.h" + +#include "access/attnum.h" +#include "access/heapam.h" +#include "access/hio.h" +#include "access/htup.h" +#include "access/relscan.h" +#include "access/skey.h" + +#include "utils/tqual.h" +#include "access/valid.h" +#include "access/xact.h" + +#include "catalog/catalog.h" +#include "catalog/catname.h" +#include "storage/buf.h" +#include "storage/bufmgr.h" +#include "storage/bufpage.h" +#include "storage/itemid.h" +#include "storage/itemptr.h" +#include "storage/lmgr.h" + +#include "tcop/tcopdebug.h" +#include "miscadmin.h" + +#include "utils/memutils.h" +#include "utils/palloc.h" +#include "fmgr.h" +#include "utils/inval.h" +#include "utils/elog.h" +#include "utils/mcxt.h" +#include "utils/rel.h" +#include "utils/relcache.h" + +static bool ImmediateInvalidation; + +/* ---------------------------------------------------------------- + * heap support routines + * ---------------------------------------------------------------- + */ + +/* ---------------- + * initsdesc - sdesc code common to heap_beginscan and heap_rescan + * ---------------- + */ +static void +initsdesc(HeapScanDesc sdesc, + Relation relation, + int atend, + unsigned nkeys, + ScanKey key) +{ + if (!RelationGetNumberOfBlocks(relation)) { + /* ---------------- + * relation is empty + * ---------------- + */ + sdesc->rs_ntup = sdesc->rs_ctup = sdesc->rs_ptup = NULL; + sdesc->rs_nbuf = sdesc->rs_cbuf = sdesc->rs_pbuf = InvalidBuffer; + } else if (atend) { + /* ---------------- + * reverse scan + * ---------------- + */ + sdesc->rs_ntup = sdesc->rs_ctup = NULL; + sdesc->rs_nbuf = sdesc->rs_cbuf = InvalidBuffer; + sdesc->rs_ptup = NULL; + sdesc->rs_pbuf = UnknownBuffer; + } else { + /* ---------------- + * forward scan + * ---------------- + */ + sdesc->rs_ctup = sdesc->rs_ptup = NULL; + sdesc->rs_cbuf = sdesc->rs_pbuf = InvalidBuffer; + sdesc->rs_ntup = NULL; + sdesc->rs_nbuf = UnknownBuffer; + } /* invalid too */ + + /* we don't have a marked position... 
*/ + ItemPointerSetInvalid(&(sdesc->rs_mptid)); + ItemPointerSetInvalid(&(sdesc->rs_mctid)); + ItemPointerSetInvalid(&(sdesc->rs_mntid)); + ItemPointerSetInvalid(&(sdesc->rs_mcd)); + + /* ---------------- + * copy the scan key, if appropriate + * ---------------- + */ + if (key != NULL) + memmove(sdesc->rs_key, key, nkeys * sizeof(ScanKeyData)); +} + +/* ---------------- + * unpinsdesc - code common to heap_rescan and heap_endscan + * ---------------- + */ +static void +unpinsdesc(HeapScanDesc sdesc) +{ + if (BufferIsValid(sdesc->rs_pbuf)) { + ReleaseBuffer(sdesc->rs_pbuf); + } + + /* ------------------------------------ + * Scan will pin buffer one for each non-NULL tuple pointer + * (ptup, ctup, ntup), so they have to be unpinned multiple + * times. + * ------------------------------------ + */ + if (BufferIsValid(sdesc->rs_cbuf)) { + ReleaseBuffer(sdesc->rs_cbuf); + } + + if (BufferIsValid(sdesc->rs_nbuf)) { + ReleaseBuffer(sdesc->rs_nbuf); + } +} + +/* ------------------------------------------ + * nextpage + * + * figure out the next page to scan after the current page + * taking into account of possible adjustment of degrees of + * parallelism + * ------------------------------------------ + */ +static int +nextpage(int page, int dir) +{ + return((dir<0)?page-1:page+1); +} + +/* ---------------- + * heapgettup - fetch next heap tuple + * + * routine used by heap_getnext() which does most of the + * real work in scanning tuples. + * ---------------- + */ +static HeapTuple +heapgettup(Relation relation, + ItemPointer tid, + int dir, + Buffer *b, + TimeQual timeQual, + int nkeys, + ScanKey key) +{ + ItemId lpp; + Page dp; + int page; + int pages; + int lines; + HeapTuple rtup; + OffsetNumber lineoff; + int linesleft; + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_heapgettup); + IncrHeapAccessStat(global_heapgettup); + + /* ---------------- + * debugging stuff + * + * check validity of arguments, here and for other functions too + * Note: no locking manipulations needed--this is a local function + * ---------------- + */ +#ifdef HEAPDEBUGALL + if (ItemPointerIsValid(tid)) { + elog(DEBUG, "heapgettup(%.16s, tid=0x%x[%d,%d], dir=%d, ...)", + RelationGetRelationName(relation), tid, tid->ip_blkid, + tid->ip_posid, dir); + } else { + elog(DEBUG, "heapgettup(%.16s, tid=0x%x, dir=%d, ...)", + RelationGetRelationName(relation), tid, dir); + } + elog(DEBUG, "heapgettup(..., b=0x%x, timeQ=0x%x, nkeys=%d, key=0x%x", + b, timeQual, nkeys, key); + if (timeQual == SelfTimeQual) { + elog(DEBUG, "heapgettup: relation(%c)=`%.16s', SelfTimeQual", + relation->rd_rel->relkind, &relation->rd_rel->relname); + } else { + elog(DEBUG, "heapgettup: relation(%c)=`%.16s', timeQual=%d", + relation->rd_rel->relkind, &relation->rd_rel->relname, + timeQual); + } +#endif /* !defined(HEAPDEBUGALL) */ + + if (!ItemPointerIsValid(tid)) { + Assert(!PointerIsValid(tid)); + } + + /* ---------------- + * return null immediately if relation is empty + * ---------------- + */ + if (!(pages = relation->rd_nblocks)) + return (NULL); + + /* ---------------- + * calculate next starting lineoff, given scan direction + * ---------------- + */ + if (!dir) { + /* ---------------- + * ``no movement'' scan direction + * ---------------- + */ + /* assume it is a valid TID XXX */ + if (ItemPointerIsValid(tid) == false) { + *b = InvalidBuffer; + return (NULL); + } + *b = RelationGetBufferWithBuffer(relation, + ItemPointerGetBlockNumber(tid), + *b); + +#ifndef NO_BUFFERISVALID + if 
(!BufferIsValid(*b)) { + elog(WARN, "heapgettup: failed ReadBuffer"); + } +#endif + + dp = (Page) BufferGetPage(*b); + lineoff = ItemPointerGetOffsetNumber(tid); + lpp = PageGetItemId(dp, lineoff); + + rtup = (HeapTuple)PageGetItem((Page) dp, lpp); + return (rtup); + + } else if (dir < 0) { + /* ---------------- + * reverse scan direction + * ---------------- + */ + if (ItemPointerIsValid(tid) == false) { + tid = NULL; + } + if (tid == NULL) { + page = pages - 1; /* final page */ + } else { + page = ItemPointerGetBlockNumber(tid); /* current page */ + } + if (page < 0) { + *b = InvalidBuffer; + return (NULL); + } + + *b = RelationGetBufferWithBuffer(relation, page, *b); +#ifndef NO_BUFFERISVALID + if (!BufferIsValid(*b)) { + elog(WARN, "heapgettup: failed ReadBuffer"); + } +#endif + + dp = (Page) BufferGetPage(*b); + lines = PageGetMaxOffsetNumber(dp); + if (tid == NULL) { + lineoff = lines; /* final offnum */ + } else { + lineoff = /* previous offnum */ + OffsetNumberPrev(ItemPointerGetOffsetNumber(tid)); + } + /* page and lineoff now reference the physically previous tid */ + + } else { + /* ---------------- + * forward scan direction + * ---------------- + */ + if (ItemPointerIsValid(tid) == false) { + page = 0; /* first page */ + lineoff = FirstOffsetNumber; /* first offnum */ + } else { + page = ItemPointerGetBlockNumber(tid); /* current page */ + lineoff = /* next offnum */ + OffsetNumberNext(ItemPointerGetOffsetNumber(tid)); + } + + if (page >= pages) { + *b = InvalidBuffer; + return (NULL); + } + /* page and lineoff now reference the physically next tid */ + + *b = RelationGetBufferWithBuffer(relation, page, *b); +#ifndef NO_BUFFERISVALID + if (!BufferIsValid(*b)) { + elog(WARN, "heapgettup: failed ReadBuffer"); + } +#endif + + dp = (Page) BufferGetPage(*b); + lines = PageGetMaxOffsetNumber(dp); + } + + /* 'dir' is now non-zero */ + + /* ---------------- + * calculate line pointer and number of remaining items + * to check on this page. + * ---------------- + */ + lpp = PageGetItemId(dp, lineoff); + if (dir < 0) { + linesleft = lineoff - 1; + } else { + linesleft = lines - lineoff; + } + + /* ---------------- + * advance the scan until we find a qualifying tuple or + * run out of stuff to scan + * ---------------- + */ + for (;;) { + while (linesleft >= 0) { + /* ---------------- + * if current tuple qualifies, return it. + * ---------------- + */ + if ((rtup = heap_tuple_satisfies(lpp, relation, (PageHeader) dp, + timeQual, nkeys, key)) != NULL) { + ItemPointer iptr = &(rtup->t_ctid); + if (ItemPointerGetBlockNumber(iptr) != page) { + /* + * set block id to the correct page number + * --- this is a hack to support the virtual fragment + * concept + */ + ItemPointerSetBlockNumber(iptr, page); + } + return (rtup); + } + + /* ---------------- + * otherwise move to the next item on the page + * ---------------- + */ + --linesleft; + if (dir < 0) { + --lpp; /* move back in this page's ItemId array */ + } else { + ++lpp; /* move forward in this page's ItemId array */ + } + } + + /* ---------------- + * if we get here, it means we've exhausted the items on + * this page and it's time to move to the next.. + * ---------------- + */ + page = nextpage(page, dir); + + /* ---------------- + * return NULL if we've exhausted all the pages.. 
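The paging skeleton of heapgettup can be shown on its own: pages are visited one at a time in the scan direction until the page number falls outside the relation, which is the "exhausted all the pages" case noted above. The sketch reuses the nextpage() rule; the page count and direction are arbitrary.

#include <stdio.h>

static int nextpage(int page, int dir)     /* same rule as nextpage() above */
{
    return (dir < 0) ? page - 1 : page + 1;
}

int main(void)
{
    int npages = 3;
    int dir = -1;                          /* backward scan */
    int page = npages - 1;                 /* backward scans start at the last page */

    while (page >= 0 && page < npages) {
        printf("scan tuples on page %d\n", page);
        page = nextpage(page, dir);
    }
    printf("exhausted all pages, scan returns NULL\n");
    return 0;
}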
+ * ---------------- + */ + if (page < 0 || page >= pages) { + if (BufferIsValid(*b)) + ReleaseBuffer(*b); + *b = InvalidBuffer; + return (NULL); + } + + *b = ReleaseAndReadBuffer(*b, relation, page); + +#ifndef NO_BUFFERISVALID + if (!BufferIsValid(*b)) { + elog(WARN, "heapgettup: failed ReadBuffer"); + } +#endif + dp = (Page) BufferGetPage(*b); + lines = lineoff = PageGetMaxOffsetNumber((Page) dp); + linesleft = lines - 1; + if (dir < 0) { + lpp = PageGetItemId(dp, lineoff); + } else { + lpp = PageGetItemId(dp, FirstOffsetNumber); + } + } +} + +void +doinsert(Relation relation, HeapTuple tup) +{ + RelationPutHeapTupleAtEnd(relation, tup); + return; +} + +/* + * HeapScanIsValid is now a macro in relscan.h -cim 4/27/91 + */ + +/* ---------------- + * SetHeapAccessMethodImmediateInvalidation + * ---------------- + */ +void +SetHeapAccessMethodImmediateInvalidation(bool on) +{ + ImmediateInvalidation = on; +} + +/* ---------------------------------------------------------------- + * heap access method interface + * ---------------------------------------------------------------- + */ +/* ---------------- + * heap_open - open a heap relation by relationId + * + * presently the relcache routines do all the work we need + * to open/close heap relations. + * ---------------- + */ +Relation +heap_open(Oid relationId) +{ + Relation r; + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_open); + IncrHeapAccessStat(global_open); + + r = (Relation) RelationIdGetRelation(relationId); + + if (RelationIsValid(r) && r->rd_rel->relkind == RELKIND_INDEX) { + elog(WARN, "%s is an index relation", r->rd_rel->relname.data); + } + + return (r); +} + +/* ---------------- + * heap_openr - open a heap relation by name + * + * presently the relcache routines do all the work we need + * to open/close heap relations. + * ---------------- + */ +Relation +heap_openr(char *relationName) +{ + Relation r; + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_openr); + IncrHeapAccessStat(global_openr); + + r = RelationNameGetRelation(relationName); + + if (RelationIsValid(r) && r->rd_rel->relkind == RELKIND_INDEX) { + elog(WARN, "%s is an index relation", r->rd_rel->relname.data); + } + + return (r); +} + +/* ---------------- + * heap_close - close a heap relation + * + * presently the relcache routines do all the work we need + * to open/close heap relations. 
+ * ---------------- + */ +void +heap_close(Relation relation) +{ + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_close); + IncrHeapAccessStat(global_close); + + (void) RelationClose(relation); +} + + +/* ---------------- + * heap_beginscan - begin relation scan + * ---------------- + */ +HeapScanDesc +heap_beginscan(Relation relation, + int atend, + TimeQual timeQual, + unsigned nkeys, + ScanKey key) +{ + HeapScanDesc sdesc; + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_beginscan); + IncrHeapAccessStat(global_beginscan); + + /* ---------------- + * sanity checks + * ---------------- + */ + if (RelationIsValid(relation) == false) + elog(WARN, "heap_beginscan: !RelationIsValid(relation)"); + + /* ---------------- + * set relation level read lock + * ---------------- + */ + RelationSetLockForRead(relation); + + /* XXX someday assert SelfTimeQual if relkind == RELKIND_UNCATALOGED */ + if (relation->rd_rel->relkind == RELKIND_UNCATALOGED) { + timeQual = SelfTimeQual; + } + + /* ---------------- + * increment relation ref count while scanning relation + * ---------------- + */ + RelationIncrementReferenceCount(relation); + + /* ---------------- + * allocate and initialize scan descriptor + * ---------------- + */ + sdesc = (HeapScanDesc) palloc(sizeof(HeapScanDescData)); + + relation->rd_nblocks = smgrnblocks(relation->rd_rel->relsmgr, relation); + sdesc->rs_rd = relation; + + if (nkeys) { + /* + * we do this here instead of in initsdesc() because heap_rescan also + * calls initsdesc() and we don't want to allocate memory again + */ + sdesc->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys); + } else { + sdesc->rs_key = NULL; + } + + initsdesc(sdesc, relation, atend, nkeys, key); + + sdesc->rs_atend = atend; + sdesc->rs_tr = timeQual; + sdesc->rs_nkeys = (short)nkeys; + + return (sdesc); +} + +/* ---------------- + * heap_rescan - restart a relation scan + * ---------------- + */ +void +heap_rescan(HeapScanDesc sdesc, + bool scanFromEnd, + ScanKey key) +{ + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_rescan); + IncrHeapAccessStat(global_rescan); + + /* Note: set relation level read lock is still set */ + + /* ---------------- + * unpin scan buffers + * ---------------- + */ + unpinsdesc(sdesc); + + /* ---------------- + * reinitialize scan descriptor + * ---------------- + */ + initsdesc(sdesc, sdesc->rs_rd, scanFromEnd, sdesc->rs_nkeys, key); + sdesc->rs_atend = (bool) scanFromEnd; +} + +/* ---------------- + * heap_endscan - end relation scan + * + * See how to integrate with index scans. + * Check handling if reldesc caching. 
+ * ---------------- + */ +void +heap_endscan(HeapScanDesc sdesc) +{ + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_endscan); + IncrHeapAccessStat(global_endscan); + + /* Note: no locking manipulations needed */ + + /* ---------------- + * unpin scan buffers + * ---------------- + */ + unpinsdesc(sdesc); + + /* ---------------- + * decrement relation reference count and free scan descriptor storage + * ---------------- + */ + RelationDecrementReferenceCount(sdesc->rs_rd); + + /* ---------------- + * Non 2-phase read locks on catalog relations + * ---------------- + */ + if ( IsSystemRelationName(RelationGetRelationName(sdesc->rs_rd)->data) ) + + RelationUnsetLockForRead(sdesc->rs_rd); + + pfree(sdesc); /* XXX */ +} + +/* ---------------- + * heap_getnext - retrieve next tuple in scan + * + * Fix to work with index relations. + * ---------------- + */ + +#ifdef HEAPDEBUGALL +#define HEAPDEBUG_1 \ +elog(DEBUG, "heap_getnext([%s,nkeys=%d],backw=%d,0x%x) called", \ + sdesc->rs_rd->rd_rel->relname.data, sdesc->rs_nkeys, backw, b) + +#define HEAPDEBUG_2 \ + elog(DEBUG, "heap_getnext called with backw (no tracing yet)") + +#define HEAPDEBUG_3 \ + elog(DEBUG, "heap_getnext returns NULL at end") + +#define HEAPDEBUG_4 \ + elog(DEBUG, "heap_getnext valid buffer UNPIN'd") + +#define HEAPDEBUG_5 \ + elog(DEBUG, "heap_getnext next tuple was cached") + +#define HEAPDEBUG_6 \ + elog(DEBUG, "heap_getnext returning EOS") + +#define HEAPDEBUG_7 \ + elog(DEBUG, "heap_getnext returning tuple"); +#else +#define HEAPDEBUG_1 +#define HEAPDEBUG_2 +#define HEAPDEBUG_3 +#define HEAPDEBUG_4 +#define HEAPDEBUG_5 +#define HEAPDEBUG_6 +#define HEAPDEBUG_7 +#endif /* !defined(HEAPDEBUGALL) */ + + +HeapTuple +heap_getnext(HeapScanDesc scandesc, + int backw, + Buffer *b) +{ + register HeapScanDesc sdesc = scandesc; + Buffer localb; + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_getnext); + IncrHeapAccessStat(global_getnext); + + /* Note: no locking manipulations needed */ + + /* ---------------- + * argument checks + * ---------------- + */ + if (sdesc == NULL) + elog(WARN, "heap_getnext: NULL relscan"); + + /* ---------------- + * initialize return buffer to InvalidBuffer + * ---------------- + */ + if (! PointerIsValid(b)) b = &localb; + (*b) = InvalidBuffer; + + HEAPDEBUG_1; /* heap_getnext( info ) */ + + if (backw) { + /* ---------------- + * handle reverse scan + * ---------------- + */ + HEAPDEBUG_2; /* heap_getnext called with backw */ + + if (sdesc->rs_ptup == sdesc->rs_ctup && + BufferIsInvalid(sdesc->rs_pbuf)) + { + if (BufferIsValid(sdesc->rs_nbuf)) + ReleaseBuffer(sdesc->rs_nbuf); + return (NULL); + } + + /* + * Copy the "current" tuple/buffer + * to "next". Pin/unpin the buffers + * accordingly + */ + if (sdesc->rs_nbuf != sdesc->rs_cbuf) { + if (BufferIsValid(sdesc->rs_nbuf)) + ReleaseBuffer(sdesc->rs_nbuf); + if (BufferIsValid(sdesc->rs_cbuf)) + IncrBufferRefCount(sdesc->rs_cbuf); + } + sdesc->rs_ntup = sdesc->rs_ctup; + sdesc->rs_nbuf = sdesc->rs_cbuf; + + if (sdesc->rs_ptup != NULL) { + if (sdesc->rs_cbuf != sdesc->rs_pbuf) { + if (BufferIsValid(sdesc->rs_cbuf)) + ReleaseBuffer(sdesc->rs_cbuf); + if (BufferIsValid(sdesc->rs_pbuf)) + IncrBufferRefCount(sdesc->rs_pbuf); + } + sdesc->rs_ctup = sdesc->rs_ptup; + sdesc->rs_cbuf = sdesc->rs_pbuf; + } else { /* NONTUP */ + ItemPointer iptr; + + iptr = (sdesc->rs_ctup != NULL) ? 
+ &(sdesc->rs_ctup->t_ctid) : (ItemPointer) NULL; + + /* Don't release sdesc->rs_cbuf at this point, because + heapgettup doesn't increase PrivateRefCount if it + is already set. On a backward scan, both rs_ctup and rs_ntup + usually point to the same buffer page, so + PrivateRefCount[rs_cbuf] should be 2 (or more, if for instance + ctup is stored in a TupleTableSlot). - 01/09/94 */ + + sdesc->rs_ctup = (HeapTuple) + heapgettup(sdesc->rs_rd, + iptr, + -1, + &(sdesc->rs_cbuf), + sdesc->rs_tr, + sdesc->rs_nkeys, + sdesc->rs_key); + } + + if (sdesc->rs_ctup == NULL && !BufferIsValid(sdesc->rs_cbuf)) + { + if (BufferIsValid(sdesc->rs_pbuf)) + ReleaseBuffer(sdesc->rs_pbuf); + sdesc->rs_ptup = NULL; + sdesc->rs_pbuf = InvalidBuffer; + if (BufferIsValid(sdesc->rs_nbuf)) + ReleaseBuffer(sdesc->rs_nbuf); + sdesc->rs_ntup = NULL; + sdesc->rs_nbuf = InvalidBuffer; + return (NULL); + } + + if (BufferIsValid(sdesc->rs_pbuf)) + ReleaseBuffer(sdesc->rs_pbuf); + sdesc->rs_ptup = NULL; + sdesc->rs_pbuf = UnknownBuffer; + + } else { + /* ---------------- + * handle forward scan + * ---------------- + */ + if (sdesc->rs_ctup == sdesc->rs_ntup && + BufferIsInvalid(sdesc->rs_nbuf)) { + if (BufferIsValid(sdesc->rs_pbuf)) + ReleaseBuffer(sdesc->rs_pbuf); + HEAPDEBUG_3; /* heap_getnext returns NULL at end */ + return (NULL); + } + + /* + * Copy the "current" tuple/buffer + * to "previous". Pin/unpin the buffers + * accordingly + */ + if (sdesc->rs_pbuf != sdesc->rs_cbuf) { + if (BufferIsValid(sdesc->rs_pbuf)) + ReleaseBuffer(sdesc->rs_pbuf); + if (BufferIsValid(sdesc->rs_cbuf)) + IncrBufferRefCount(sdesc->rs_cbuf); + } + sdesc->rs_ptup = sdesc->rs_ctup; + sdesc->rs_pbuf = sdesc->rs_cbuf; + + if (sdesc->rs_ntup != NULL) { + if (sdesc->rs_cbuf != sdesc->rs_nbuf) { + if (BufferIsValid(sdesc->rs_cbuf)) + ReleaseBuffer(sdesc->rs_cbuf); + if (BufferIsValid(sdesc->rs_nbuf)) + IncrBufferRefCount(sdesc->rs_nbuf); + } + sdesc->rs_ctup = sdesc->rs_ntup; + sdesc->rs_cbuf = sdesc->rs_nbuf; + HEAPDEBUG_5; /* heap_getnext next tuple was cached */ + } else { /* NONTUP */ + ItemPointer iptr; + + iptr = (sdesc->rs_ctup != NULL) ? + &sdesc->rs_ctup->t_ctid : (ItemPointer) NULL; + + /* Don't release sdesc->rs_cbuf at this point, because + heapgettup doesn't increase PrivateRefCount if it + is already set. On a forward scan, both rs_ctup and rs_ptup + usually point to the same buffer page, so + PrivateRefCount[rs_cbuf] should be 2 (or more, if for instance + ctup is stored in a TupleTableSlot). - 01/09/93 */ + + sdesc->rs_ctup = (HeapTuple) + heapgettup(sdesc->rs_rd, + iptr, + 1, + &sdesc->rs_cbuf, + sdesc->rs_tr, + sdesc->rs_nkeys, + sdesc->rs_key); + } + + if (sdesc->rs_ctup == NULL && !BufferIsValid(sdesc->rs_cbuf)) { + if (BufferIsValid(sdesc->rs_nbuf)) + ReleaseBuffer(sdesc->rs_nbuf); + sdesc->rs_ntup = NULL; + sdesc->rs_nbuf = InvalidBuffer; + if (BufferIsValid(sdesc->rs_pbuf)) + ReleaseBuffer(sdesc->rs_pbuf); + sdesc->rs_ptup = NULL; + sdesc->rs_pbuf = InvalidBuffer; + HEAPDEBUG_6; /* heap_getnext returning EOS */ + return (NULL); + } + + if (BufferIsValid(sdesc->rs_nbuf)) + ReleaseBuffer(sdesc->rs_nbuf); + sdesc->rs_ntup = NULL; + sdesc->rs_nbuf = UnknownBuffer; + } + + /* ---------------- + * if we get here it means we have a new current scan tuple, so + * point to the proper return buffer and return the tuple. 
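The previous/current/next caching that heap_getnext maintains above can be modelled with three small slots: stepping forward promotes the cached next tuple to current, demotes current to previous, and marks next unknown so a later step has to call heapgettup again. In the sketch below tuples are just integers, 0 means "no tuple" and -1 means "unknown"; fetch_after is a stand-in for heapgettup, and the buffer pinning is left out entirely.

#include <stdio.h>

struct scanstate { int prev, cur, next; };

static int fetch_after(int cur)            /* stand-in for heapgettup() */
{
    return (cur < 5) ? cur + 1 : 0;        /* pretend the relation has 5 tuples */
}

static int step_forward(struct scanstate *s)
{
    s->prev = s->cur;                      /* current becomes previous  */
    if (s->next != -1)
        s->cur = s->next;                  /* next tuple was cached     */
    else
        s->cur = fetch_after(s->cur);      /* have to go and get it     */
    s->next = -1;                          /* next is unknown again     */
    return s->cur;                         /* 0 signals end of scan     */
}

int main(void)
{
    struct scanstate s = { 0, 0, -1 };     /* freshly initialized forward scan */
    int t;

    while ((t = step_forward(&s)) != 0)
        printf("got tuple %d (prev=%d)\n", t, s.prev);
    printf("end of scan\n");
    return 0;
}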
+ * ---------------- + */ + (*b) = sdesc->rs_cbuf; + + HEAPDEBUG_7; /* heap_getnext returning tuple */ + + return (sdesc->rs_ctup); +} + +/* ---------------- + * heap_fetch - retrive tuple with tid + * + * Currently ignores LP_IVALID during processing! + * ---------------- + */ +HeapTuple +heap_fetch(Relation relation, + TimeQual timeQual, + ItemPointer tid, + Buffer *b) +{ + ItemId lp; + Buffer buffer; + PageHeader dp; + HeapTuple tuple; + OffsetNumber offnum; + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_fetch); + IncrHeapAccessStat(global_fetch); + + /* + * Note: This is collosally expensive - does two system calls per + * indexscan tuple fetch. Not good, and since we should be doing + * page level locking by the scanner anyway, it is commented out. + */ + + /* RelationSetLockForTupleRead(relation, tid); */ + + /* ---------------- + * get the buffer from the relation descriptor + * Note that this does a buffer pin. + * ---------------- + */ + + buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid)); + +#ifndef NO_BUFFERISVALID + if (!BufferIsValid(buffer)) { + elog(WARN, "heap_fetch: %s relation: ReadBuffer(%lx) failed", + &relation->rd_rel->relname, (long)tid); + } +#endif + + /* ---------------- + * get the item line pointer corresponding to the requested tid + * ---------------- + */ + dp = (PageHeader) BufferGetPage(buffer); + offnum = ItemPointerGetOffsetNumber(tid); + lp = PageGetItemId(dp, offnum); + + /* ---------------- + * more sanity checks + * ---------------- + */ + + Assert(ItemIdIsUsed(lp)); + + /* ---------------- + * check time qualification of tid + * ---------------- + */ + + tuple = heap_tuple_satisfies(lp, relation, dp, + timeQual, 0,(ScanKey)NULL); + + if (tuple == NULL) + { + ReleaseBuffer(buffer); + return (NULL); + } + + /* ---------------- + * all checks passed, now either return a copy of the tuple + * or pin the buffer page and return a pointer, depending on + * whether caller gave us a valid b. + * ---------------- + */ + + if (PointerIsValid(b)) { + *b = buffer; + } else { + tuple = heap_copytuple(tuple); + ReleaseBuffer(buffer); + } + return (tuple); +} + +/* ---------------- + * heap_insert - insert tuple + * + * The assignment of t_min (and thus the others) should be + * removed eventually. + * + * Currently places the tuple onto the last page. If there is no room, + * it is placed on new pages. (Heap relations) + * Note that concurrent inserts during a scan will probably have + * unexpected results, though this will be fixed eventually. + * + * Fix to work with indexes. + * ---------------- + */ +Oid +heap_insert(Relation relation, HeapTuple tup) +{ + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_insert); + IncrHeapAccessStat(global_insert); + + /* ---------------- + * set relation level write lock. If this is a "local" relation (not + * visible to others), we don't need to set a write lock. + * ---------------- + */ + if (!relation->rd_islocal) + RelationSetLockForWrite(relation); + + /* ---------------- + * If the object id of this tuple has already been assigned, trust + * the caller. There are a couple of ways this can happen. At initial + * db creation, the backend program sets oids for tuples. When we + * define an index, we set the oid. Finally, in the future, we may + * allow users to set their own object ids in order to support a + * persistent object store (objects need to contain pointers to one + * another). 
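A reduced picture of the visibility stamps involved here: heap_insert (just below) records the inserting transaction and command in xmin/cmin and leaves xmax invalid, and heap_delete later stamps xmax/cmax on the victim tuple. The struct below only mimics those four fields; it is not the real tuple header, and treating 0 as the invalid transaction id is an assumption of the sketch.

#include <stdio.h>

typedef unsigned int XidModel;             /* illustrative, not TransactionId */

struct tuple_stamps {
    XidModel xmin, xmax;                   /* inserting / deleting transaction */
    unsigned cmin, cmax;                   /* command ids within those xacts   */
};

static void model_insert(struct tuple_stamps *t, XidModel xid, unsigned cid)
{
    t->xmin = xid;
    t->cmin = cid;
    t->xmax = 0;                           /* not deleted (yet) */
    t->cmax = 0;
}

static void model_delete(struct tuple_stamps *t, XidModel xid, unsigned cid)
{
    t->xmax = xid;                         /* tuple's life ends with this xact */
    t->cmax = cid;
}

int main(void)
{
    struct tuple_stamps t;

    model_insert(&t, 501, 1);              /* inserted by xact 501, command 1 */
    model_delete(&t, 502, 3);              /* deleted by xact 502, command 3  */
    printf("xmin=%u cmin=%u xmax=%u cmax=%u\n", t.xmin, t.cmin, t.xmax, t.cmax);
    return 0;
}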
+ * ---------------- + */ + if (!OidIsValid(tup->t_oid)) { + tup->t_oid = newoid(); + LastOidProcessed = tup->t_oid; + } + + TransactionIdStore(GetCurrentTransactionId(), &(tup->t_xmin)); + tup->t_cmin = GetCurrentCommandId(); + StoreInvalidTransactionId(&(tup->t_xmax)); + tup->t_tmin = INVALID_ABSTIME; + tup->t_tmax = CURRENT_ABSTIME; + + doinsert(relation, tup); + + if ( IsSystemRelationName(RelationGetRelationName(relation)->data)) { + RelationUnsetLockForWrite(relation); + + /* ---------------- + * invalidate caches (only works for system relations) + * ---------------- + */ + SetRefreshWhenInvalidate(ImmediateInvalidation); + RelationInvalidateHeapTuple(relation, tup); + SetRefreshWhenInvalidate((bool)!ImmediateInvalidation); + } + + return(tup->t_oid); +} + +/* ---------------- + * heap_delete - delete a tuple + * + * Must decide how to handle errors. + * ---------------- + */ +void +heap_delete(Relation relation, ItemPointer tid) +{ + ItemId lp; + HeapTuple tp; + PageHeader dp; + Buffer b; + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_delete); + IncrHeapAccessStat(global_delete); + + /* ---------------- + * sanity check + * ---------------- + */ + Assert(ItemPointerIsValid(tid)); + + /* ---------------- + * set relation level write lock + * ---------------- + */ + RelationSetLockForWrite(relation); + + b = ReadBuffer(relation, ItemPointerGetBlockNumber(tid)); + +#ifndef NO_BUFFERISVALID + if (!BufferIsValid(b)) { /* XXX L_SH better ??? */ + elog(WARN, "heap_delete: failed ReadBuffer"); + } +#endif /* NO_BUFFERISVALID */ + + dp = (PageHeader) BufferGetPage(b); + lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(tid)); + + /* ---------------- + * check that we're deleteing a valid item + * ---------------- + */ + if (!(tp = heap_tuple_satisfies(lp, relation, dp, + NowTimeQual, 0, (ScanKey) NULL))) { + + /* XXX call something else */ + ReleaseBuffer(b); + + elog(WARN, "heap_delete: (am)invalid tid"); + } + + /* ---------------- + * get the tuple and lock tell the buffer manager we want + * exclusive access to the page + * ---------------- + */ + + /* ---------------- + * store transaction information of xact deleting the tuple + * ---------------- + */ + TransactionIdStore(GetCurrentTransactionId(), &(tp->t_xmax)); + tp->t_cmax = GetCurrentCommandId(); + ItemPointerSetInvalid(&tp->t_chain); + + /* ---------------- + * invalidate caches + * ---------------- + */ + SetRefreshWhenInvalidate(ImmediateInvalidation); + RelationInvalidateHeapTuple(relation, tp); + SetRefreshWhenInvalidate((bool)!ImmediateInvalidation); + + WriteBuffer(b); + if ( IsSystemRelationName(RelationGetRelationName(relation)->data) ) + RelationUnsetLockForWrite(relation); +} + +/* ---------------- + * heap_replace - replace a tuple + * + * Must decide how to handle errors. + * + * Fix arguments, work with indexes. + * + * 12/30/93 - modified the return value to be 1 when + * a non-functional update is detected. This + * prevents the calling routine from updating + * indices unnecessarily. 
-kw + * + * ---------------- + */ +int +heap_replace(Relation relation, ItemPointer otid, HeapTuple tup) +{ + ItemId lp; + HeapTuple tp; + Page dp; + Buffer buffer; + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_replace); + IncrHeapAccessStat(global_replace); + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(ItemPointerIsValid(otid)); + + /* ---------------- + * set relation level write lock + * ---------------- + */ + if (!relation->rd_islocal) + RelationSetLockForWrite(relation); + + buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid)); +#ifndef NO_BUFFERISVALID + if (!BufferIsValid(buffer)) { + /* XXX L_SH better ??? */ + elog(WARN, "amreplace: failed ReadBuffer"); + } +#endif /* NO_BUFFERISVALID */ + + dp = (Page) BufferGetPage(buffer); + lp = PageGetItemId(dp, ItemPointerGetOffsetNumber(otid)); + + /* ---------------- + * logically delete old item + * ---------------- + */ + + tp = (HeapTuple) PageGetItem(dp, lp); + Assert(HeapTupleIsValid(tp)); + + /* ----------------- + * the following test should be able to catch all non-functional + * update attempts and shut out all ghost tuples. + * XXX In the future, Spyros may need to update the rule lock on a tuple + * more than once within the same command and same transaction. + * He will have to introduce a new flag to override the following check. + * -- Wei + * + * ----------------- + */ + + if (TupleUpdatedByCurXactAndCmd(tp)) { + elog(NOTICE, "Non-functional update, only first update is performed"); + if ( IsSystemRelationName(RelationGetRelationName(relation)->data) ) + RelationUnsetLockForWrite(relation); + ReleaseBuffer(buffer); + return(1); + } + + /* ---------------- + * check that we're replacing a valid item - + * + * NOTE that this check must follow the non-functional update test + * above as it can happen that we try to 'replace' the same tuple + * twice in a single transaction. The second time around the + * tuple will fail the NowTimeQual. We don't want to abort the + * xact, we only want to flag the 'non-functional' NOTICE. -mer + * ---------------- + */ + if (!heap_tuple_satisfies(lp, + relation, + (PageHeader)dp, + NowTimeQual, + 0, + (ScanKey)NULL)) + { + ReleaseBuffer(buffer); + elog(WARN, "heap_replace: (am)invalid otid"); + } + + /* XXX order problems if not atomic assignment ??? */ + tup->t_oid = tp->t_oid; + TransactionIdStore(GetCurrentTransactionId(), &(tup->t_xmin)); + tup->t_cmin = GetCurrentCommandId(); + StoreInvalidTransactionId(&(tup->t_xmax)); + tup->t_tmin = INVALID_ABSTIME; + tup->t_tmax = CURRENT_ABSTIME; + ItemPointerSetInvalid(&tup->t_chain); + + /* ---------------- + * insert new item + * ---------------- + */ + if ((unsigned)DOUBLEALIGN(tup->t_len) <= PageGetFreeSpace((Page) dp)) { + RelationPutHeapTuple(relation, BufferGetBlockNumber(buffer), tup); + } else { + /* ---------------- + * new item won't fit on same page as old item, have to look + * for a new place to put it. 
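Editor's note (not part of the original 1.01 sources): the free-space test above reserves DOUBLEALIGN(tup->t_len) bytes rather than t_len itself. A standalone illustration of that rounding; the macro below is a local stand-in for the usual definition in c.h, not a copy of it.

    #include <stdio.h>

    /* stand-in for DOUBLEALIGN(): round a length up to double alignment */
    #define MY_DOUBLEALIGN(LEN) \
        (((unsigned long) (LEN) + (sizeof(double) - 1)) & \
         ~((unsigned long) (sizeof(double) - 1)))

    int
    main(void)
    {
        unsigned lens[] = {1, 8, 13, 60};
        int i;

        for (i = 0; i < 4; i++)
            printf("t_len %2u -> %lu bytes reserved on the page\n",
                   lens[i], MY_DOUBLEALIGN(lens[i]));
        return 0;
    }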
+ * ---------------- + */ + doinsert(relation, tup); + } + + /* ---------------- + * new item in place, now record transaction information + * ---------------- + */ + TransactionIdStore(GetCurrentTransactionId(), &(tp->t_xmax)); + tp->t_cmax = GetCurrentCommandId(); + tp->t_chain = tup->t_ctid; + + /* ---------------- + * invalidate caches + * ---------------- + */ + SetRefreshWhenInvalidate(ImmediateInvalidation); + RelationInvalidateHeapTuple(relation, tp); + SetRefreshWhenInvalidate((bool)!ImmediateInvalidation); + + WriteBuffer(buffer); + + if ( IsSystemRelationName(RelationGetRelationName(relation)->data) ) + RelationUnsetLockForWrite(relation); + + return(0); +} + +/* ---------------- + * heap_markpos - mark scan position + * + * Note: + * Should only one mark be maintained per scan at one time. + * Check if this can be done generally--say calls to get the + * next/previous tuple and NEVER pass struct scandesc to the + * user AM's. Now, the mark is sent to the executor for safekeeping. + * Probably can store this info into a GENERAL scan structure. + * + * May be best to change this call to store the marked position + * (up to 2?) in the scan structure itself. + * Fix to use the proper caching structure. + * ---------------- + */ +void +heap_markpos(HeapScanDesc sdesc) +{ + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_markpos); + IncrHeapAccessStat(global_markpos); + + /* Note: no locking manipulations needed */ + + if (sdesc->rs_ptup == NULL && + BufferIsUnknown(sdesc->rs_pbuf)) { /* == NONTUP */ + sdesc->rs_ptup = (HeapTuple) + heapgettup(sdesc->rs_rd, + (sdesc->rs_ctup == NULL) ? + (ItemPointer)NULL : &sdesc->rs_ctup->t_ctid, + -1, + &sdesc->rs_pbuf, + sdesc->rs_tr, + sdesc->rs_nkeys, + sdesc->rs_key); + + } else if (sdesc->rs_ntup == NULL && + BufferIsUnknown(sdesc->rs_nbuf)) { /* == NONTUP */ + sdesc->rs_ntup = (HeapTuple) + heapgettup(sdesc->rs_rd, + (sdesc->rs_ctup == NULL) ? + (ItemPointer)NULL : &sdesc->rs_ctup->t_ctid, + 1, + &sdesc->rs_nbuf, + sdesc->rs_tr, + sdesc->rs_nkeys, + sdesc->rs_key); + } + + /* ---------------- + * Should not unpin the buffer pages. They may still be in use. + * ---------------- + */ + if (sdesc->rs_ptup != NULL) { + sdesc->rs_mptid = sdesc->rs_ptup->t_ctid; + } else { + ItemPointerSetInvalid(&sdesc->rs_mptid); + } + if (sdesc->rs_ctup != NULL) { + sdesc->rs_mctid = sdesc->rs_ctup->t_ctid; + } else { + ItemPointerSetInvalid(&sdesc->rs_mctid); + } + if (sdesc->rs_ntup != NULL) { + sdesc->rs_mntid = sdesc->rs_ntup->t_ctid; + } else { + ItemPointerSetInvalid(&sdesc->rs_mntid); + } +} + +/* ---------------- + * heap_restrpos - restore position to marked location + * + * Note: there are bad side effects here. If we were past the end + * of a relation when heapmarkpos is called, then if the relation is + * extended via insert, then the next call to heaprestrpos will set + * cause the added tuples to be visible when the scan continues. + * Problems also arise if the TID's are rearranged!!! + * + * Now pins buffer once for each valid tuple pointer (rs_ptup, + * rs_ctup, rs_ntup) referencing it. + * - 01/13/94 + * + * XXX might be better to do direct access instead of + * using the generality of heapgettup(). + * + * XXX It is very possible that when a scan is restored, that a tuple + * XXX which previously qualified may fail for time range purposes, unless + * XXX some form of locking exists (ie., portals currently can act funny. 
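Editor's illustration (not part of the original 1.01 sources): the mark/restore protocol seen from the caller's side, using only the scan routines declared in access/heapam.h; the relation name "example_rel" is hypothetical.

    #include "access/heapam.h"

    static void
    mark_and_restore(void)
    {
        Relation     rel = heap_openr("example_rel");
        HeapScanDesc scan;
        HeapTuple    tup;
        Buffer       buf;

        scan = heap_beginscan(rel, 0 /* from start */, NowTimeQual,
                              0, (ScanKey) NULL);

        tup = heap_getnext(scan, 0 /* forward */, &buf);
        heap_markpos(scan);                 /* records prev/current/next tids */

        while ((tup = heap_getnext(scan, 0, &buf)) != NULL)
            ;                               /* read ahead ... */

        heap_restrpos(scan);                /* back to the mark, buffers re-pinned */

        heap_endscan(scan);
        heap_close(rel);
    }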
+ * ---------------- + */ +void +heap_restrpos(HeapScanDesc sdesc) +{ + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_restrpos); + IncrHeapAccessStat(global_restrpos); + + /* XXX no amrestrpos checking that ammarkpos called */ + + /* Note: no locking manipulations needed */ + + unpinsdesc(sdesc); + + /* force heapgettup to pin buffer for each loaded tuple */ + sdesc->rs_pbuf = InvalidBuffer; + sdesc->rs_cbuf = InvalidBuffer; + sdesc->rs_nbuf = InvalidBuffer; + + if (!ItemPointerIsValid(&sdesc->rs_mptid)) { + sdesc->rs_ptup = NULL; + } else { + sdesc->rs_ptup = (HeapTuple) + heapgettup(sdesc->rs_rd, + &sdesc->rs_mptid, + 0, + &sdesc->rs_pbuf, + NowTimeQual, + 0, + (ScanKey) NULL); + } + + if (!ItemPointerIsValid(&sdesc->rs_mctid)) { + sdesc->rs_ctup = NULL; + } else { + sdesc->rs_ctup = (HeapTuple) + heapgettup(sdesc->rs_rd, + &sdesc->rs_mctid, + 0, + &sdesc->rs_cbuf, + NowTimeQual, + 0, + (ScanKey) NULL); + } + + if (!ItemPointerIsValid(&sdesc->rs_mntid)) { + sdesc->rs_ntup = NULL; + } else { + sdesc->rs_ntup = (HeapTuple) + heapgettup(sdesc->rs_rd, + &sdesc->rs_mntid, + 0, + &sdesc->rs_nbuf, + NowTimeQual, + 0, + (ScanKey) NULL); + } +} diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c new file mode 100644 index 00000000000..457e1174a30 --- /dev/null +++ b/src/backend/access/heap/hio.c @@ -0,0 +1,195 @@ +/*------------------------------------------------------------------------- + * + * hio.c-- + * POSTGRES heap access method input/output code. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Id: hio.c,v 1.1.1.1 1996/07/09 06:21:11 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include + +#include "c.h" + +#include "access/heapam.h" +#include "access/hio.h" +#include "access/htup.h" + +#include "storage/block.h" +#include "storage/buf.h" +#include "storage/bufmgr.h" +#include "storage/bufpage.h" +#include "storage/itemid.h" +#include "storage/itemptr.h" +#include "storage/off.h" + +#include "utils/memutils.h" +#include "utils/elog.h" +#include "utils/rel.h" + +/* + * amputunique - place tuple at tid + * Currently on errors, calls elog. Perhaps should return -1? + * Possible errors include the addition of a tuple to the page + * between the time the linep is chosen and the page is L_UP'd. + * + * This should be coordinated with the B-tree code. + * Probably needs to have an amdelunique to allow for + * internal index records to be deleted and reordered as needed. + * For the heap AM, this should never be needed. 
+ */ +void +RelationPutHeapTuple(Relation relation, + BlockNumber blockIndex, + HeapTuple tuple) +{ + Buffer buffer; + Page pageHeader; + BlockNumber numberOfBlocks; + OffsetNumber offnum; + unsigned int len; + ItemId itemId; + Item item; + + /* ---------------- + * increment access statistics + * ---------------- + */ + IncrHeapAccessStat(local_RelationPutHeapTuple); + IncrHeapAccessStat(global_RelationPutHeapTuple); + + Assert(RelationIsValid(relation)); + Assert(HeapTupleIsValid(tuple)); + + numberOfBlocks = RelationGetNumberOfBlocks(relation); + Assert(blockIndex < numberOfBlocks); + + buffer = ReadBuffer(relation, blockIndex); +#ifndef NO_BUFFERISVALID + if (!BufferIsValid(buffer)) { + elog(WARN, "RelationPutHeapTuple: no buffer for %ld in %s", + blockIndex, &relation->rd_rel->relname); + } +#endif + + pageHeader = (Page)BufferGetPage(buffer); + len = (unsigned)DOUBLEALIGN(tuple->t_len); /* be conservative */ + Assert((int)len <= PageGetFreeSpace(pageHeader)); + + offnum = PageAddItem((Page)pageHeader, (Item)tuple, + tuple->t_len, InvalidOffsetNumber, LP_USED); + + itemId = PageGetItemId((Page)pageHeader, offnum); + item = PageGetItem((Page)pageHeader, itemId); + + ItemPointerSet(&((HeapTuple)item)->t_ctid, blockIndex, offnum); + + WriteBuffer(buffer); + /* return an accurate tuple */ + ItemPointerSet(&tuple->t_ctid, blockIndex, offnum); +} + +/* + * The heap_insert routines "know" that a buffer page is initialized to + * zero when a BlockExtend operation is performed. + */ + +#define PageIsNew(page) ((page)->pd_upper == 0) + +/* + * This routine is another in the series of attempts to reduce the number + * of I/O's and system calls executed in the various benchmarks. In + * particular, this routine is used to append data to the end of a relation + * file without excessive lseeks. This code should do no more than 2 semops + * in the ideal case. + * + * Eventually, we should cache the number of blocks in a relation somewhere. + * Until that time, this code will have to do an lseek to determine the number + * of blocks in a relation. + * + * This code should ideally do at most 4 semops, 1 lseek, and possibly 1 write + * to do an append; it's possible to eliminate 2 of the semops if we do direct + * buffer stuff (!); the lseek and the write can go if we get + * RelationGetNumberOfBlocks to be useful. + * + * NOTE: This code presumes that we have a write lock on the relation. + * + * Also note that this routine probably shouldn't have to exist, and does + * screw up the call graph rather badly, but we are wasting so much time and + * system resources being massively general that we are losing badly in our + * performance benchmarks. + */ +void +RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple) +{ + Buffer buffer; + Page pageHeader; + BlockNumber lastblock; + OffsetNumber offnum; + unsigned int len; + ItemId itemId; + Item item; + + Assert(RelationIsValid(relation)); + Assert(HeapTupleIsValid(tuple)); + + /* + * XXX This does an lseek - VERY expensive - but at the moment it + * is the only way to accurately determine how many blocks are in + * a relation. A good optimization would be to get this to actually + * work properly. 
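Editor's sketch (not part of the original 1.01 sources) of the division of labour in this file: RelationPutHeapTuple() requires an existing block that already has room (it only Asserts this, so a real caller, like heap_replace() above, first checks PageGetFreeSpace()), while RelationPutHeapTupleAtEnd(), whose body continues below, extends the relation as needed.

    #include "access/heapam.h"
    #include "access/hio.h"
    #include "storage/bufmgr.h"

    static void
    put_tuple_somewhere(Relation rel, HeapTuple tup)
    {
        BlockNumber nblocks = RelationGetNumberOfBlocks(rel);

        if (nblocks > 0)
            RelationPutHeapTuple(rel, nblocks - 1, tup);  /* caller must already
                                                           * know the block has room */
        else
            RelationPutHeapTupleAtEnd(rel, tup);          /* extends the file */
    }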
+ */ + + lastblock = RelationGetNumberOfBlocks(relation); + + if (lastblock == 0) + { + buffer = ReadBuffer(relation, lastblock); + pageHeader = (Page)BufferGetPage(buffer); + if (PageIsNew((PageHeader) pageHeader)) + { + buffer = ReleaseAndReadBuffer(buffer, relation, P_NEW); + pageHeader = (Page)BufferGetPage(buffer); + PageInit(pageHeader, BufferGetPageSize(buffer), 0); + } + } + else + buffer = ReadBuffer(relation, lastblock - 1); + + pageHeader = (Page)BufferGetPage(buffer); + len = (unsigned)DOUBLEALIGN(tuple->t_len); /* be conservative */ + + /* + * Note that this is true if the above returned a bogus page, which + * it will do for a completely empty relation. + */ + + if (len > PageGetFreeSpace(pageHeader)) + { + buffer = ReleaseAndReadBuffer(buffer, relation, P_NEW); + pageHeader = (Page)BufferGetPage(buffer); + PageInit(pageHeader, BufferGetPageSize(buffer), 0); + + if (len > PageGetFreeSpace(pageHeader)) + elog(WARN, "Tuple is too big: size %d", len); + } + + offnum = PageAddItem((Page)pageHeader, (Item)tuple, + tuple->t_len, InvalidOffsetNumber, LP_USED); + + itemId = PageGetItemId((Page)pageHeader, offnum); + item = PageGetItem((Page)pageHeader, itemId); + + lastblock = BufferGetBlockNumber(buffer); + + ItemPointerSet(&((HeapTuple)item)->t_ctid, lastblock, offnum); + + /* return an accurate tuple */ + ItemPointerSet(&tuple->t_ctid, lastblock, offnum); + + WriteBuffer(buffer); +} diff --git a/src/backend/access/heap/stats.c b/src/backend/access/heap/stats.c new file mode 100644 index 00000000000..d41d01ac1ba --- /dev/null +++ b/src/backend/access/heap/stats.c @@ -0,0 +1,329 @@ +/*------------------------------------------------------------------------- + * + * stats.c-- + * heap access method debugging statistic collection routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/heap/Attic/stats.c,v 1.1.1.1 1996/07/09 06:21:11 scrappy Exp $ + * + * NOTES + * initam should be moved someplace else. 
+ * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/heapam.h" + +#include "utils/memutils.h" +#include "utils/palloc.h" +#include "utils/elog.h" +#include "utils/mcxt.h" + +/* ---------------- + * InitHeapAccessStatistics + * ---------------- + */ +HeapAccessStatistics heap_access_stats = (HeapAccessStatistics) NULL; + +void +InitHeapAccessStatistics() +{ + MemoryContext oldContext; + HeapAccessStatistics stats; + + /* ---------------- + * make sure we don't initialize things twice + * ---------------- + */ + if (heap_access_stats != NULL) + return; + + /* ---------------- + * allocate statistics structure from the top memory context + * ---------------- + */ + oldContext = MemoryContextSwitchTo(TopMemoryContext); + + stats = (HeapAccessStatistics) + palloc(sizeof(HeapAccessStatisticsData)); + + /* ---------------- + * initialize fields to default values + * ---------------- + */ + stats->global_open = 0; + stats->global_openr = 0; + stats->global_close = 0; + stats->global_beginscan = 0; + stats->global_rescan = 0; + stats->global_endscan = 0; + stats->global_getnext = 0; + stats->global_fetch = 0; + stats->global_insert = 0; + stats->global_delete = 0; + stats->global_replace = 0; + stats->global_markpos = 0; + stats->global_restrpos = 0; + stats->global_BufferGetRelation = 0; + stats->global_RelationIdGetRelation = 0; + stats->global_RelationIdGetRelation_Buf = 0; + stats->global_getreldesc = 0; + stats->global_heapgettup = 0; + stats->global_RelationPutHeapTuple = 0; + stats->global_RelationPutLongHeapTuple = 0; + + stats->local_open = 0; + stats->local_openr = 0; + stats->local_close = 0; + stats->local_beginscan = 0; + stats->local_rescan = 0; + stats->local_endscan = 0; + stats->local_getnext = 0; + stats->local_fetch = 0; + stats->local_insert = 0; + stats->local_delete = 0; + stats->local_replace = 0; + stats->local_markpos = 0; + stats->local_restrpos = 0; + stats->local_BufferGetRelation = 0; + stats->local_RelationIdGetRelation = 0; + stats->local_RelationIdGetRelation_Buf = 0; + stats->local_getreldesc = 0; + stats->local_heapgettup = 0; + stats->local_RelationPutHeapTuple = 0; + stats->local_RelationPutLongHeapTuple = 0; + stats->local_RelationNameGetRelation = 0; + stats->global_RelationNameGetRelation = 0; + + /* ---------------- + * record init times + * ---------------- + */ + time(&stats->init_global_timestamp); + time(&stats->local_reset_timestamp); + time(&stats->last_request_timestamp); + + /* ---------------- + * return to old memory context + * ---------------- + */ + (void) MemoryContextSwitchTo(oldContext); + + heap_access_stats = stats; +} + +/* ---------------- + * ResetHeapAccessStatistics + * ---------------- + */ +void +ResetHeapAccessStatistics() +{ + HeapAccessStatistics stats; + + /* ---------------- + * do nothing if stats aren't initialized + * ---------------- + */ + if (heap_access_stats == NULL) + return; + + stats = heap_access_stats; + + /* ---------------- + * reset local counts + * ---------------- + */ + stats->local_open = 0; + stats->local_openr = 0; + stats->local_close = 0; + stats->local_beginscan = 0; + stats->local_rescan = 0; + stats->local_endscan = 0; + stats->local_getnext = 0; + stats->local_fetch = 0; + stats->local_insert = 0; + stats->local_delete = 0; + stats->local_replace = 0; + stats->local_markpos = 0; + stats->local_restrpos = 0; + stats->local_BufferGetRelation = 0; + stats->local_RelationIdGetRelation = 0; + stats->local_RelationIdGetRelation_Buf = 
0; + stats->local_getreldesc = 0; + stats->local_heapgettup = 0; + stats->local_RelationPutHeapTuple = 0; + stats->local_RelationPutLongHeapTuple = 0; + + /* ---------------- + * reset local timestamps + * ---------------- + */ + time(&stats->local_reset_timestamp); + time(&stats->last_request_timestamp); +} + +/* ---------------- + * GetHeapAccessStatistics + * ---------------- + */ +HeapAccessStatistics GetHeapAccessStatistics() +{ + HeapAccessStatistics stats; + + /* ---------------- + * return nothing if stats aren't initialized + * ---------------- + */ + if (heap_access_stats == NULL) + return NULL; + + /* ---------------- + * record the current request time + * ---------------- + */ + time(&heap_access_stats->last_request_timestamp); + + /* ---------------- + * allocate a copy of the stats and return it to the caller. + * ---------------- + */ + stats = (HeapAccessStatistics) + palloc(sizeof(HeapAccessStatisticsData)); + + memmove(stats, + heap_access_stats, + sizeof(HeapAccessStatisticsData)); + + return stats; +} + +/* ---------------- + * PrintHeapAccessStatistics + * ---------------- + */ +void +PrintHeapAccessStatistics(HeapAccessStatistics stats) +{ + /* ---------------- + * return nothing if stats aren't valid + * ---------------- + */ + if (stats == NULL) + return; + + printf("======== heap am statistics ========\n"); + printf("init_global_timestamp: %s", + ctime(&(stats->init_global_timestamp))); + + printf("local_reset_timestamp: %s", + ctime(&(stats->local_reset_timestamp))); + + printf("last_request_timestamp: %s", + ctime(&(stats->last_request_timestamp))); + + printf("local/global_open: %6d/%6d\n", + stats->local_open, stats->global_open); + + printf("local/global_openr: %6d/%6d\n", + stats->local_openr, stats->global_openr); + + printf("local/global_close: %6d/%6d\n", + stats->local_close, stats->global_close); + + printf("local/global_beginscan: %6d/%6d\n", + stats->local_beginscan, stats->global_beginscan); + + printf("local/global_rescan: %6d/%6d\n", + stats->local_rescan, stats->global_rescan); + + printf("local/global_endscan: %6d/%6d\n", + stats->local_endscan, stats->global_endscan); + + printf("local/global_getnext: %6d/%6d\n", + stats->local_getnext, stats->global_getnext); + + printf("local/global_fetch: %6d/%6d\n", + stats->local_fetch, stats->global_fetch); + + printf("local/global_insert: %6d/%6d\n", + stats->local_insert, stats->global_insert); + + printf("local/global_delete: %6d/%6d\n", + stats->local_delete, stats->global_delete); + + printf("local/global_replace: %6d/%6d\n", + stats->local_replace, stats->global_replace); + + printf("local/global_markpos: %6d/%6d\n", + stats->local_markpos, stats->global_markpos); + + printf("local/global_restrpos: %6d/%6d\n", + stats->local_restrpos, stats->global_restrpos); + + printf("================\n"); + + printf("local/global_BufferGetRelation: %6d/%6d\n", + stats->local_BufferGetRelation, + stats->global_BufferGetRelation); + + printf("local/global_RelationIdGetRelation: %6d/%6d\n", + stats->local_RelationIdGetRelation, + stats->global_RelationIdGetRelation); + + printf("local/global_RelationIdGetRelation_Buf: %6d/%6d\n", + stats->local_RelationIdGetRelation_Buf, + stats->global_RelationIdGetRelation_Buf); + + printf("local/global_getreldesc: %6d/%6d\n", + stats->local_getreldesc, stats->global_getreldesc); + + printf("local/global_heapgettup: %6d/%6d\n", + stats->local_heapgettup, stats->global_heapgettup); + + printf("local/global_RelationPutHeapTuple: %6d/%6d\n", + stats->local_RelationPutHeapTuple, + 
stats->global_RelationPutHeapTuple); + + printf("local/global_RelationPutLongHeapTuple: %6d/%6d\n", + stats->local_RelationPutLongHeapTuple, + stats->global_RelationPutLongHeapTuple); + + printf("===================================\n"); + + printf("\n"); +} + +/* ---------------- + * PrintAndFreeHeapAccessStatistics + * ---------------- + */ +void +PrintAndFreeHeapAccessStatistics(HeapAccessStatistics stats) +{ + PrintHeapAccessStatistics(stats); + if (stats != NULL) + pfree(stats); +} + +/* ---------------------------------------------------------------- + * access method initialization + * ---------------------------------------------------------------- + */ +/* ---------------- + * initam should someday be moved someplace else. + * ---------------- + */ +void +initam() +{ + /* ---------------- + * initialize heap statistics. + * ---------------- + */ + InitHeapAccessStatistics(); +} diff --git a/src/backend/access/heapam.h b/src/backend/access/heapam.h new file mode 100644 index 00000000000..9938dbeea77 --- /dev/null +++ b/src/backend/access/heapam.h @@ -0,0 +1,149 @@ +/*------------------------------------------------------------------------- + * + * heapam.h-- + * POSTGRES heap access method definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: heapam.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef HEAPAM_H +#define HEAPAM_H + +#include + +#include "postgres.h" + +#include "access/attnum.h" +#include "access/htup.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "utils/tqual.h" +#include "access/tupdesc.h" +#include "storage/smgr.h" +#include "utils/rel.h" + +/* ---------------------------------------------------------------- + * heap access method statistics + * ---------------------------------------------------------------- + */ + +typedef struct HeapAccessStatisticsData { + time_t init_global_timestamp; /* time global statistics started */ + time_t local_reset_timestamp; /* last time local reset was done */ + time_t last_request_timestamp; /* last time stats were requested */ + + int global_open; + int global_openr; + int global_close; + int global_beginscan; + int global_rescan; + int global_endscan; + int global_getnext; + int global_fetch; + int global_insert; + int global_delete; + int global_replace; + int global_markpos; + int global_restrpos; + int global_BufferGetRelation; + int global_RelationIdGetRelation; + int global_RelationIdGetRelation_Buf; + int global_RelationNameGetRelation; + int global_getreldesc; + int global_heapgettup; + int global_RelationPutHeapTuple; + int global_RelationPutLongHeapTuple; + + int local_open; + int local_openr; + int local_close; + int local_beginscan; + int local_rescan; + int local_endscan; + int local_getnext; + int local_fetch; + int local_insert; + int local_delete; + int local_replace; + int local_markpos; + int local_restrpos; + int local_BufferGetRelation; + int local_RelationIdGetRelation; + int local_RelationIdGetRelation_Buf; + int local_RelationNameGetRelation; + int local_getreldesc; + int local_heapgettup; + int local_RelationPutHeapTuple; + int local_RelationPutLongHeapTuple; +} HeapAccessStatisticsData; + +typedef HeapAccessStatisticsData *HeapAccessStatistics; + +#define IncrHeapAccessStat(x) \ + (heap_access_stats == NULL ? 
0 : (heap_access_stats->x)++) + +extern HeapAccessStatistics heap_access_stats; /* in stats.c */ + +/* ---------------- + * function prototypes for heap access method + * ---------------- + */ +/* heap_create, heap_creatr, and heap_destroy are declared in catalog/heap.h */ +#include "catalog/heap.h" + +/* heapam.c */ +extern void doinsert(Relation relation, HeapTuple tup); +extern void SetHeapAccessMethodImmediateInvalidation(bool on); + +extern Relation heap_open(Oid relationId); +extern Relation heap_openr(char *relationName); +extern void heap_close(Relation relation); +extern HeapScanDesc heap_beginscan(Relation relation, int atend, + TimeQual timeQual, unsigned nkeys, ScanKey key); +extern void heap_rescan(HeapScanDesc sdesc, bool scanFromEnd, ScanKey key); +extern void heap_endscan(HeapScanDesc sdesc); +extern HeapTuple heap_getnext(HeapScanDesc scandesc, int backw, Buffer *b); +extern HeapTuple heap_fetch(Relation relation, TimeQual timeQual, + ItemPointer tid, Buffer *b); +extern Oid heap_insert(Relation relation, HeapTuple tup); +extern void heap_delete(Relation relation, ItemPointer tid); +extern int heap_replace(Relation relation, ItemPointer otid, + HeapTuple tup); +extern void heap_markpos(HeapScanDesc sdesc); +extern void heap_restrpos(HeapScanDesc sdesc); + +/* in common/heaptuple.c */ +extern Size ComputeDataSize(TupleDesc tupleDesc, Datum value[], char nulls[]); +extern void DataFill(char *data, TupleDesc tupleDesc, + Datum value[], char nulls[], char *infomask, + bits8 bit[]); +extern int heap_attisnull(HeapTuple tup, int attnum); +extern int heap_sysattrlen(AttrNumber attno); +extern bool heap_sysattrbyval(AttrNumber attno); +extern char *heap_getsysattr(HeapTuple tup, Buffer b, int attnum); +extern char *fastgetattr(HeapTuple tup, unsigned attnum, + TupleDesc att, bool *isnull); +extern char *heap_getattr(HeapTuple tup, Buffer b, int attnum, + TupleDesc att, bool *isnull); +extern HeapTuple heap_copytuple(HeapTuple tuple); +extern void heap_deformtuple(HeapTuple tuple, TupleDesc tdesc, + Datum values[], char nulls[]); +extern HeapTuple heap_formtuple(TupleDesc tupleDescriptor, + Datum value[], char nulls[]); +extern HeapTuple heap_modifytuple(HeapTuple tuple, Buffer buffer, + Relation relation, Datum replValue[], char replNull[], char repl[]); +HeapTuple heap_addheader(uint32 natts, int structlen, char *structure); + +/* in common/heap/stats.c */ +extern void InitHeapAccessStatistics(void); +extern void ResetHeapAccessStatistics(void); +extern HeapAccessStatistics GetHeapAccessStatistics(void); +extern void PrintHeapAccessStatistics(HeapAccessStatistics stats); +extern void PrintAndFreeHeapAccessStatistics(HeapAccessStatistics stats); +extern void initam(void); + +#endif /* HEAPAM_H */ diff --git a/src/backend/access/hio.h b/src/backend/access/hio.h new file mode 100644 index 00000000000..4a699ffcd98 --- /dev/null +++ b/src/backend/access/hio.h @@ -0,0 +1,26 @@ +/*------------------------------------------------------------------------- + * + * hio.h-- + * POSTGRES heap access method input/output definitions. 
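Editor's illustration (not part of the original 1.01 sources) tying the statistics pieces together: IncrHeapAccessStat() is a no-op until InitHeapAccessStatistics() has run, GetHeapAccessStatistics() returns a palloc'd snapshot, and PrintAndFreeHeapAccessStatistics() prints and releases it.

    #include "access/heapam.h"

    static void
    show_heap_stats(void)
    {
        InitHeapAccessStatistics();          /* normally done once by initam() */

        IncrHeapAccessStat(local_getnext);   /* bump a local counter ... */
        IncrHeapAccessStat(global_getnext);  /* ... and its global twin */

        PrintAndFreeHeapAccessStatistics(GetHeapAccessStatistics());

        ResetHeapAccessStatistics();         /* clears only the local_* fields */
    }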
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: hio.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef HIO_H +#define HIO_H + +#include "c.h" + +#include "storage/block.h" +#include "access/htup.h" +#include "utils/rel.h" + +extern void RelationPutHeapTuple(Relation relation, BlockNumber blockIndex, + HeapTuple tuple); +extern void RelationPutHeapTupleAtEnd(Relation relation, HeapTuple tuple); + +#endif /* HIO_H */ diff --git a/src/backend/access/htup.h b/src/backend/access/htup.h new file mode 100644 index 00000000000..7cf1ecf1762 --- /dev/null +++ b/src/backend/access/htup.h @@ -0,0 +1,115 @@ +/*------------------------------------------------------------------------- + * + * htup.h-- + * POSTGRES heap tuple definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: htup.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef HTUP_H +#define HTUP_H + +#include "access/attnum.h" +#include "storage/bufpage.h" /* just to reduce levels of #include */ +#include "storage/itemptr.h" +#include "utils/nabstime.h" + +#define MinHeapTupleBitmapSize 32 /* 8 * 4 */ + +/* check these, they are likely to be more severely limited by t_hoff */ + +#define MaxHeapAttributeNumber 1600 /* 8 * 200 */ + +/* + * to avoid wasting space, the attributes should be layed out in such a + * way to reduce structure padding. + */ +typedef struct HeapTupleData { + + unsigned int t_len; /* length of entire tuple */ + + ItemPointerData t_ctid; /* current TID of this tuple */ + + ItemPointerData t_chain; /* replaced tuple TID */ + + Oid t_oid; /* OID of this tuple -- 4 bytes */ + + CommandId t_cmin; /* insert CID stamp -- 2 bytes each */ + CommandId t_cmax; /* delete CommandId stamp */ + + TransactionId t_xmin; /* insert XID stamp -- 4 bytes each */ + TransactionId t_xmax; /* delete XID stamp */ + + AbsoluteTime t_tmin; /* time stamps -- 4 bytes each */ + AbsoluteTime t_tmax; + + int16 t_natts; /* number of attributes */ + char t_vtype; /* not used - padding */ + + char t_infomask; /* whether tuple as null or variable + * length attributes + */ + + uint8 t_hoff; /* sizeof tuple header */ + + bits8 t_bits[MinHeapTupleBitmapSize / 8]; + /* bit map of domains */ + + /* MORE DATA FOLLOWS AT END OF STRUCT */ +} HeapTupleData; + +typedef HeapTupleData *HeapTuple; + + +#define SelfItemPointerAttributeNumber (-1) +#define ObjectIdAttributeNumber (-2) +#define MinTransactionIdAttributeNumber (-3) +#define MinCommandIdAttributeNumber (-4) +#define MaxTransactionIdAttributeNumber (-5) +#define MaxCommandIdAttributeNumber (-6) +#define ChainItemPointerAttributeNumber (-7) +#define AnchorItemPointerAttributeNumber (-8) +#define MinAbsoluteTimeAttributeNumber (-9) +#define MaxAbsoluteTimeAttributeNumber (-10) +#define VersionTypeAttributeNumber (-11) +#define FirstLowInvalidHeapAttributeNumber (-12) + + +/* ---------------- + * support macros + * ---------------- + */ +#define GETSTRUCT(TUP) (((char *)(TUP)) + ((HeapTuple)(TUP))->t_hoff) + + +/* + * BITMAPLEN(NATTS) - + * Computes minimum size of bitmap given number of domains. + */ +#define BITMAPLEN(NATTS) \ + ((((((int)(NATTS) - 1) >> 3) + 4 - (MinHeapTupleBitmapSize >> 3)) \ + & ~03) + (MinHeapTupleBitmapSize >> 3)) + +/* + * HeapTupleIsValid + * True iff the heap tuple is valid. 
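Editor's note (not part of the original 1.01 sources): a standalone check of the BITMAPLEN() arithmetic above. The macro is copied from htup.h and evaluated for a few attribute counts; it yields the 4-byte minimum bitmap (MinHeapTupleBitmapSize bits) for up to 32 attributes and then grows in 4-byte steps.

    #include <stdio.h>

    #define MinHeapTupleBitmapSize 32

    #define BITMAPLEN(NATTS) \
        ((((((int)(NATTS) - 1) >> 3) + 4 - (MinHeapTupleBitmapSize >> 3)) \
          & ~03) + (MinHeapTupleBitmapSize >> 3))

    int
    main(void)
    {
        int natts[] = {1, 32, 33, 64, 65};
        int i;

        for (i = 0; i < 5; i++)
            printf("natts = %2d -> bitmap = %d bytes\n",
                   natts[i], BITMAPLEN(natts[i]));
        return 0;
    }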
+ */ +#define HeapTupleIsValid(tuple) PointerIsValid(tuple) + +/* + * information stored in t_infomask: + */ +#define HEAP_HASNULL 0x01 /* has null attribute(s) */ +#define HEAP_HASVARLENA 0x02 /* has variable length attribute(s) */ + +#define HeapTupleNoNulls(tuple) \ + (!(((HeapTuple) (tuple))->t_infomask & HEAP_HASNULL)) + +#define HeapTupleAllFixed(tuple) \ + (!(((HeapTuple) (tuple))->t_infomask & HEAP_HASVARLENA)) + +#endif /* HTUP_H */ diff --git a/src/backend/access/ibit.h b/src/backend/access/ibit.h new file mode 100644 index 00000000000..990c23ab4dd --- /dev/null +++ b/src/backend/access/ibit.h @@ -0,0 +1,34 @@ +/*------------------------------------------------------------------------- + * + * ibit.h-- + * POSTGRES index valid attribute bit map definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: ibit.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef IBIT_H +#define IBIT_H + +#include "c.h" +#include "utils/memutils.h" + +typedef struct IndexAttributeBitMapData { + char bits[(MaxIndexAttributeNumber + MaxBitsPerByte - 1) + / MaxBitsPerByte]; +} IndexAttributeBitMapData; + +typedef IndexAttributeBitMapData *IndexAttributeBitMap; + +#define IndexAttributeBitMapSize sizeof(IndexAttributeBitMapData) + +/* + * IndexAttributeBitMapIsValid -- + * True iff attribute bit map is valid. + */ +#define IndexAttributeBitMapIsValid(bits) PointerIsValid(bits) + +#endif /* IBIT_H */ diff --git a/src/backend/access/index/Makefile.inc b/src/backend/access/index/Makefile.inc new file mode 100644 index 00000000000..0bc58830c8f --- /dev/null +++ b/src/backend/access/index/Makefile.inc @@ -0,0 +1,14 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for access/index +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/access/index/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:11 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= genam.c indexam.c istrat.c diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c new file mode 100644 index 00000000000..3d02ba57009 --- /dev/null +++ b/src/backend/access/index/genam.c @@ -0,0 +1,275 @@ +/*------------------------------------------------------------------------- + * + * genam.c-- + * general index access method routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.1.1.1 1996/07/09 06:21:11 scrappy Exp $ + * + * NOTES + * many of the old access method routines have been turned into + * macros and moved to genam.h -cim 4/30/91 + * + *------------------------------------------------------------------------- + */ +/* + * OLD COMMENTS + * Scans are implemented as follows: + * + * `0' represents an invalid item pointer. + * `-' represents an unknown item pointer. + * `X' represents a known item pointers. + * `+' represents known or invalid item pointers. + * `*' represents any item pointers. + * + * State is represented by a triple of these symbols in the order of + * previous, current, next. Note that the case of reverse scans works + * identically. 
+ * + * State Result + * (1) + + - + 0 0 (if the next item pointer is invalid) + * (2) + X - (otherwise) + * (3) * 0 0 * 0 0 (no change) + * (4) + X 0 X 0 0 (shift) + * (5) * + X + X - (shift, add unknown) + * + * All other states cannot occur. + * + * Note: + *It would be possible to cache the status of the previous and + * next item pointer using the flags. + * ---------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/attnum.h" +#include "access/genam.h" +#include "access/heapam.h" +#include "access/itup.h" +#include "access/relscan.h" +#include "access/sdir.h" +#include "access/skey.h" + +#include "storage/bufmgr.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" + +#include "catalog/catname.h" +#include "catalog/pg_attribute.h" +#include "catalog/pg_index.h" +#include "catalog/pg_proc.h" + +#include "catalog/index.h" + +/* ---------------------------------------------------------------- + * general access method routines + * + * All indexed access methods use an identical scan structure. + * We don't know how the various AMs do locking, however, so we don't + * do anything about that here. + * + * The intent is that an AM implementor will define a front-end routine + * that calls this one, to fill in the scan, and then does whatever kind + * of locking he wants. + * ---------------------------------------------------------------- + */ + +/* ---------------- + * RelationGetIndexScan -- Create and fill an IndexScanDesc. + * + * This routine creates an index scan structure and sets its contents + * up correctly. This routine calls AMrescan to set up the scan with + * the passed key. + * + * Parameters: + * relation -- index relation for scan. + * scanFromEnd -- if true, begin scan at one of the index's + * endpoints. + * numberOfKeys -- count of scan keys (more than one won't + * necessarily do anything useful, yet). + * key -- the ScanKey for the starting position of the scan. + * + * Returns: + * An initialized IndexScanDesc. + * + * Side Effects: + * Bumps the ref count on the relation to keep it in the cache. + * + * ---------------- + */ +IndexScanDesc +RelationGetIndexScan(Relation relation, + bool scanFromEnd, + uint16 numberOfKeys, + ScanKey key) +{ + IndexScanDesc scan; + + if (! RelationIsValid(relation)) + elog(WARN, "RelationGetIndexScan: relation invalid"); + + scan = (IndexScanDesc) palloc(sizeof(IndexScanDescData)); + + scan->relation = relation; + scan->opaque = NULL; + scan->numberOfKeys = numberOfKeys; + + ItemPointerSetInvalid(&scan->previousItemData); + ItemPointerSetInvalid(&scan->currentItemData); + ItemPointerSetInvalid(&scan->nextItemData); + ItemPointerSetInvalid(&scan->previousMarkData); + ItemPointerSetInvalid(&scan->currentMarkData); + ItemPointerSetInvalid(&scan->nextMarkData); + + if (numberOfKeys > 0) { + scan->keyData = (ScanKey) palloc(sizeof(ScanKeyData) * numberOfKeys); + } else { + scan->keyData = NULL; + } + + index_rescan(scan, scanFromEnd, key); + + return (scan); +} + +/* ---------------- + * IndexScanRestart -- Restart an index scan. + * + * This routine isn't used by any existing access method. It's + * appropriate if relation level locks are what you want. + * + * Returns: + * None. + * + * Side Effects: + * None. + * ---------------- + */ +void +IndexScanRestart(IndexScanDesc scan, + bool scanFromEnd, + ScanKey key) +{ + if (! 
IndexScanIsValid(scan)) + elog(WARN, "IndexScanRestart: invalid scan"); + + ItemPointerSetInvalid(&scan->previousItemData); + ItemPointerSetInvalid(&scan->currentItemData); + ItemPointerSetInvalid(&scan->nextItemData); + + if (RelationGetNumberOfBlocks(scan->relation) == 0) + scan->flags = ScanUnmarked; + else if (scanFromEnd) + scan->flags = ScanUnmarked | ScanUncheckedPrevious; + else + scan->flags = ScanUnmarked | ScanUncheckedNext; + + scan->scanFromEnd = (bool) scanFromEnd; + + if (scan->numberOfKeys > 0) + memmove(scan->keyData, + key, + scan->numberOfKeys * sizeof(ScanKeyData)); +} + +/* ---------------- + * IndexScanEnd -- End and index scan. + * + * This routine is not used by any existing access method, but is + * suitable for use if you don't want to do sophisticated locking. + * + * Returns: + * None. + * + * Side Effects: + * None. + * ---------------- + */ +void +IndexScanEnd(IndexScanDesc scan) +{ + if (! IndexScanIsValid(scan)) + elog(WARN, "IndexScanEnd: invalid scan"); + + pfree(scan); +} + +/* ---------------- + * IndexScanMarkPosition -- Mark current position in a scan. + * + * This routine isn't used by any existing access method, but is the + * one that AM implementors should use, if they don't want to do any + * special locking. If relation-level locking is sufficient, this is + * the routine for you. + * + * Returns: + * None. + * + * Side Effects: + * None. + * ---------------- + */ +void +IndexScanMarkPosition(IndexScanDesc scan) +{ + RetrieveIndexResult result; + + if (scan->flags & ScanUncheckedPrevious) { + result = + index_getnext(scan, BackwardScanDirection); + + if (result != NULL) { + scan->previousItemData = result->index_iptr; + } else { + ItemPointerSetInvalid(&scan->previousItemData); + } + + } else if (scan->flags & ScanUncheckedNext) { + result = (RetrieveIndexResult) + index_getnext(scan, ForwardScanDirection); + + if (result != NULL) { + scan->nextItemData = result->index_iptr; + } else { + ItemPointerSetInvalid(&scan->nextItemData); + } + } + + scan->previousMarkData = scan->previousItemData; + scan->currentMarkData = scan->currentItemData; + scan->nextMarkData = scan->nextItemData; + + scan->flags = 0x0; /* XXX should have a symbolic name */ +} + +/* ---------------- + * IndexScanRestorePosition -- Restore position on a marked scan. + * + * This routine isn't used by any existing access method, but is the + * one that AM implementors should use if they don't want to do any + * special locking. If relation-level locking is sufficient, then + * this is the one you want. + * + * Returns: + * None. + * + * Side Effects: + * None. 
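Editor's sketch (not part of the original 1.01 sources) of the pattern the genam.c comments describe: an access method supplies a thin front-end that lets RelationGetIndexScan(), defined earlier in this file, fill in the IndexScanDesc, adding whatever locking the AM wants. "myam" and the empty locking step are hypothetical; note that RelationGetIndexScan() ends by calling index_rescan(), so the AM must also register an amrescan procedure.

    #include "postgres.h"
    #include "access/genam.h"
    #include "access/relscan.h"
    #include "access/skey.h"

    IndexScanDesc
    myambeginscan(Relation rel, bool scanFromEnd, uint16 nkeys, ScanKey key)
    {
        /* AM-specific locking, if any, would go here */
        return RelationGetIndexScan(rel, scanFromEnd, nkeys, key);
    }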
+ * ---------------- + */ +void +IndexScanRestorePosition(IndexScanDesc scan) +{ + if (scan->flags & ScanUnmarked) + elog(WARN, "IndexScanRestorePosition: no mark to restore"); + + scan->previousItemData = scan->previousMarkData; + scan->currentItemData = scan->currentMarkData; + scan->nextItemData = scan->nextMarkData; + + scan->flags = 0x0; /* XXX should have a symbolic name */ +} diff --git a/src/backend/access/index/indexam.c b/src/backend/access/index/indexam.c new file mode 100644 index 00000000000..bffe3a41f3a --- /dev/null +++ b/src/backend/access/index/indexam.c @@ -0,0 +1,411 @@ +/*------------------------------------------------------------------------- + * + * indexam.c-- + * general index access method routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.1.1.1 1996/07/09 06:21:11 scrappy Exp $ + * + * INTERFACE ROUTINES + * index_open - open an index relation by relationId + * index_openr - open a index relation by name + * index_close - close a index relation + * index_beginscan - start a scan of an index + * index_rescan - restart a scan of an index + * index_endscan - end a scan + * index_insert - insert an index tuple into a relation + * index_delete - delete an item from an index relation + * index_markpos - mark a scan position + * index_restrpos - restore a scan position + * index_getnext - get the next tuple from a scan + * ** index_fetch - retrieve tuple with tid + * ** index_replace - replace a tuple + * ** index_getattr - get an attribute from an index tuple + * index_getprocid - get a support procedure id from the rel tuple + * + * IndexScanIsValid - check index scan + * + * NOTES + * This file contains the index_ routines which used + * to be a scattered collection of stuff in access/genam. + * + * The ** routines: index_fetch, index_replace, and index_getattr + * have not yet been implemented. They may not be needed. + * + * old comments + * Scans are implemented as follows: + * + * `0' represents an invalid item pointer. + * `-' represents an unknown item pointer. + * `X' represents a known item pointers. + * `+' represents known or invalid item pointers. + * `*' represents any item pointers. + * + * State is represented by a triple of these symbols in the order of + * previous, current, next. Note that the case of reverse scans works + * identically. + * + * State Result + * (1) + + - + 0 0 (if the next item pointer is invalid) + * (2) + X - (otherwise) + * (3) * 0 0 * 0 0 (no change) + * (4) + X 0 X 0 0 (shift) + * (5) * + X + X - (shift, add unknown) + * + * All other states cannot occur. + * + * Note: It would be possible to cache the status of the previous and + * next item pointer using the flags. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/attnum.h" +#include "access/genam.h" +#include "access/heapam.h" +#include "access/itup.h" +#include "access/relscan.h" +#include "access/sdir.h" +#include "access/skey.h" +#include "access/funcindex.h" + +#include "storage/lmgr.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/relcache.h" + +#include "catalog/catname.h" +#include "catalog/pg_attribute.h" +#include "catalog/pg_index.h" +#include "catalog/pg_proc.h" + +#include "catalog/index.h" + +#include "fmgr.h" + +/* ---------------- + * undefine macros we aren't going to use that would otherwise + * get in our way.. 
delete is defined in c.h and the am's are + * defined in heapam.h + * ---------------- + */ +#undef delete +#undef aminsert +#undef amdelete +#undef ambeginscan +#undef amrescan +#undef amendscan +#undef ammarkpos +#undef amrestrpos +#undef amgettuple + +/* ---------------------------------------------------------------- + * macros used in index_ routines + * ---------------------------------------------------------------- + */ +#define RELATION_CHECKS \ +Assert(RelationIsValid(relation)); \ + Assert(PointerIsValid(relation->rd_am)) + +#define SCAN_CHECKS \ + Assert(IndexScanIsValid(scan)); \ + Assert(RelationIsValid(scan->relation)); \ + Assert(PointerIsValid(scan->relation->rd_am)) + +#define GET_REL_PROCEDURE(x,y) \ + CppConcat(procedure = relation->rd_am->,y); \ + if (! RegProcedureIsValid(procedure)) \ + elog(WARN, "index_%s: invalid %s regproc", \ + CppAsString(x), CppAsString(y)) + +#define GET_SCAN_PROCEDURE(x,y) \ + CppConcat(procedure = scan->relation->rd_am->,y); \ + if (! RegProcedureIsValid(procedure)) \ + elog(WARN, "index_%s: invalid %s regproc", \ + CppAsString(x), CppAsString(y)) + + +/* ---------------------------------------------------------------- + * index_ interface functions + * ---------------------------------------------------------------- + */ +/* ---------------- + * index_open - open an index relation by relationId + * + * presently the relcache routines do all the work we need + * to open/close index relations. + * ---------------- + */ +Relation +index_open(Oid relationId) +{ + return RelationIdGetRelation(relationId); +} + +/* ---------------- + * index_openr - open a index relation by name + * + * presently the relcache routines do all the work we need + * to open/close index relations. + * ---------------- + */ +Relation +index_openr(char *relationName) +{ + return RelationNameGetRelation(relationName); +} + +/* ---------------- + * index_close - close a index relation + * + * presently the relcache routines do all the work we need + * to open/close index relations. + * ---------------- + */ +void +index_close(Relation relation) +{ + (void) RelationClose(relation); +} + +/* ---------------- + * index_insert - insert an index tuple into a relation + * ---------------- + */ +InsertIndexResult +index_insert(Relation relation, + IndexTuple indexTuple) +{ + RegProcedure procedure; + InsertIndexResult specificResult; + + RELATION_CHECKS; + GET_REL_PROCEDURE(insert,aminsert); + + /* ---------------- + * have the am's insert proc do all the work. + * ---------------- + */ + specificResult = (InsertIndexResult) + fmgr(procedure, relation, indexTuple, NULL); + + /* ---------------- + * the insert proc is supposed to return a "specific result" and + * this routine has to return a "general result" so after we get + * something back from the insert proc, we allocate a + * "general result" and copy some crap between the two. + * + * As far as I'm concerned all this result shit is needlessly c + * omplicated and should be eliminated. -cim 1/19/91 + * + * mao concurs. regardless of how we feel here, however, it is + * important to free memory we don't intend to return to anyone. + * 2/28/91 + * + * this "general result" crap is now gone. 
-ay 3/6/95 + * ---------------- + */ + + return (specificResult); +} + +/* ---------------- + * index_delete - delete an item from an index relation + * ---------------- + */ +void +index_delete(Relation relation, ItemPointer indexItem) +{ + RegProcedure procedure; + + RELATION_CHECKS; + GET_REL_PROCEDURE(delete,amdelete); + + (void) fmgr(procedure, relation, indexItem); +} + +/* ---------------- + * index_beginscan - start a scan of an index + * ---------------- + */ +IndexScanDesc +index_beginscan(Relation relation, + bool scanFromEnd, + uint16 numberOfKeys, + ScanKey key) +{ + IndexScanDesc scandesc; + RegProcedure procedure; + + RELATION_CHECKS; + GET_REL_PROCEDURE(beginscan,ambeginscan); + + RelationSetRIntentLock(relation); + + scandesc = (IndexScanDesc) + fmgr(procedure, relation, scanFromEnd, numberOfKeys, key); + + return scandesc; +} + +/* ---------------- + * index_rescan - restart a scan of an index + * ---------------- + */ +void +index_rescan(IndexScanDesc scan, bool scanFromEnd, ScanKey key) +{ + RegProcedure procedure; + + SCAN_CHECKS; + GET_SCAN_PROCEDURE(rescan,amrescan); + + (void) fmgr(procedure, scan, scanFromEnd, key); +} + +/* ---------------- + * index_endscan - end a scan + * ---------------- + */ +void +index_endscan(IndexScanDesc scan) +{ + RegProcedure procedure; + + SCAN_CHECKS; + GET_SCAN_PROCEDURE(endscan,amendscan); + + (void) fmgr(procedure, scan); + + RelationUnsetRIntentLock(scan->relation); +} + +/* ---------------- + * index_markpos - mark a scan position + * ---------------- + */ +void +index_markpos(IndexScanDesc scan) +{ + RegProcedure procedure; + + SCAN_CHECKS; + GET_SCAN_PROCEDURE(markpos,ammarkpos); + + (void) fmgr(procedure, scan); +} + +/* ---------------- + * index_restrpos - restore a scan position + * ---------------- + */ +void +index_restrpos(IndexScanDesc scan) +{ + RegProcedure procedure; + + SCAN_CHECKS; + GET_SCAN_PROCEDURE(restrpos,amrestrpos); + + (void) fmgr(procedure, scan); +} + +/* ---------------- + * index_getnext - get the next tuple from a scan + * + * A RetrieveIndexResult is a index tuple/heap tuple pair + * ---------------- + */ +RetrieveIndexResult +index_getnext(IndexScanDesc scan, + ScanDirection direction) +{ + RegProcedure procedure; + RetrieveIndexResult result; + + SCAN_CHECKS; + GET_SCAN_PROCEDURE(getnext,amgettuple); + + /* ---------------- + * have the am's gettuple proc do all the work. + * ---------------- + */ + result = (RetrieveIndexResult) + fmgr(procedure, scan, direction); + + return result; +} + +/* ---------------- + * index_getprocid + * + * Some indexed access methods may require support routines that are + * not in the operator class/operator model imposed by pg_am. These + * access methods may store the OIDs of registered procedures they + * need in pg_amproc. These registered procedure OIDs are ordered in + * a way that makes sense to the access method, and used only by the + * access method. The general index code doesn't know anything about + * the routines involved; it just builds an ordered list of them for + * each attribute on which an index is defined. + * + * This routine returns the requested procedure OID for a particular + * indexed attribute. 
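Editor's illustration (not part of the original 1.01 sources): driving the dispatch layer above end to end, assuming the declarations pulled in by access/genam.h. "example_index" is hypothetical; in practice each RetrieveIndexResult would then be resolved against the heap with heap_fetch().

    #include "postgres.h"
    #include "access/genam.h"
    #include "access/sdir.h"

    static void
    walk_whole_index(void)
    {
        Relation            irel = index_openr("example_index");
        IndexScanDesc       scan;
        RetrieveIndexResult res;

        scan = index_beginscan(irel, false, 0, (ScanKey) NULL);

        while ((res = index_getnext(scan, ForwardScanDirection)) != NULL) {
            /* res->index_iptr identifies the index tuple just returned */
        }

        index_endscan(scan);
        index_close(irel);
    }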
+ * ---------------- + */ +RegProcedure +index_getprocid(Relation irel, + AttrNumber attnum, + uint16 procnum) +{ + RegProcedure *loc; + int natts; + + natts = irel->rd_rel->relnatts; + + loc = irel->rd_support; + + Assert(loc != NULL); + + return (loc[(natts * (procnum - 1)) + (attnum - 1)]); +} + +Datum +GetIndexValue(HeapTuple tuple, + TupleDesc hTupDesc, + int attOff, + AttrNumber attrNums[], + FuncIndexInfo *fInfo, + bool *attNull, + Buffer buffer) +{ + Datum returnVal; + bool isNull; + + if (PointerIsValid(fInfo) && FIgetProcOid(fInfo) != InvalidOid) { + int i; + Datum *attData = (Datum *)palloc(FIgetnArgs(fInfo)*sizeof(Datum)); + + for (i = 0; i < FIgetnArgs(fInfo); i++) { + attData[i] = (Datum) heap_getattr(tuple, + buffer, + attrNums[i], + hTupDesc, + attNull); + } + returnVal = (Datum)fmgr_array_args(FIgetProcOid(fInfo), + FIgetnArgs(fInfo), + (char **) attData, + &isNull); + pfree(attData); + *attNull = FALSE; + }else { + returnVal = (Datum) heap_getattr(tuple, buffer, attrNums[attOff], + hTupDesc, attNull); + } + return returnVal; +} diff --git a/src/backend/access/index/istrat.c b/src/backend/access/index/istrat.c new file mode 100644 index 00000000000..602d2bd9e94 --- /dev/null +++ b/src/backend/access/index/istrat.c @@ -0,0 +1,679 @@ +/*------------------------------------------------------------------------- + * + * istrat.c-- + * index scan strategy manipulation code and index strategy manipulation + * operator code. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.1.1.1 1996/07/09 06:21:11 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/attnum.h" +#include "access/heapam.h" +#include "access/istrat.h" +#include "access/itup.h" /* for MaxIndexAttributeNumber */ +#include "access/skey.h" +#include "utils/tqual.h" /* for NowTimeQual */ + +#include "fmgr.h" +#include "utils/elog.h" +#include "utils/rel.h" + +#include "catalog/catname.h" +#include "catalog/pg_amop.h" +#include "catalog/pg_amproc.h" +#include "catalog/pg_index.h" +#include "catalog/pg_proc.h" + +/* ---------------------------------------------------------------- + * misc strategy support routines + * ---------------------------------------------------------------- + */ + +/* + * StrategyNumberIsValid + * StrategyNumberIsInBounds + * StrategyMapIsValid + * StrategyTransformMapIsValid + * IndexStrategyIsValid + * + * ... are now macros in istrat.h -cim 4/27/91 + */ + +/* + * StrategyMapGetScanKeyEntry -- + * Returns a scan key entry of a index strategy mapping member. + * + * Note: + * Assumes that the index strategy mapping is valid. + * Assumes that the index strategy number is valid. + * Bounds checking should be done outside this routine. + */ +ScanKey +StrategyMapGetScanKeyEntry(StrategyMap map, + StrategyNumber strategyNumber) +{ + Assert(StrategyMapIsValid(map)); + Assert(StrategyNumberIsValid(strategyNumber)); + return (&map->entry[strategyNumber - 1]); +} + +/* + * IndexStrategyGetStrategyMap -- + * Returns an index strategy mapping of an index strategy. + * + * Note: + * Assumes that the index strategy is valid. + * Assumes that the number of index strategies is valid. + * Bounds checking should be done outside this routine. 
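Editor's note (not part of the original 1.01 sources): a standalone check of the two flat-array layouts used here. rd_support is indexed procedure-major, as in index_getprocid() above, while the strategy map entries are laid out attribute-major, as the routines that follow (IndexStrategyGetStrategyMap and AttributeNumberGetIndexStrategySize) arrange them.

    #include <stdio.h>

    int
    main(void)
    {
        int natts = 2, nprocs = 3, maxstrat = 5;
        int attnum, procnum, strategy;

        for (procnum = 1; procnum <= nprocs; procnum++)
            for (attnum = 1; attnum <= natts; attnum++)
                printf("support proc %d, att %d -> rd_support[%d]\n",
                       procnum, attnum, natts * (procnum - 1) + (attnum - 1));

        for (attnum = 1; attnum <= natts; attnum++)
            for (strategy = 1; strategy <= maxstrat; strategy++)
                printf("att %d, strategy %d -> scan key entry %d\n",
                       attnum, strategy,
                       maxstrat * (attnum - 1) + (strategy - 1));
        return 0;
    }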
+ */ +StrategyMap +IndexStrategyGetStrategyMap(IndexStrategy indexStrategy, + StrategyNumber maxStrategyNum, + AttrNumber attrNum) +{ + Assert(IndexStrategyIsValid(indexStrategy)); + Assert(StrategyNumberIsValid(maxStrategyNum)); + Assert(AttributeNumberIsValid(attrNum)); + + maxStrategyNum = AMStrategies(maxStrategyNum); /* XXX */ + return + &indexStrategy->strategyMapData[maxStrategyNum * (attrNum - 1)]; +} + +/* + * AttributeNumberGetIndexStrategySize -- + * Computes the size of an index strategy. + */ +Size +AttributeNumberGetIndexStrategySize(AttrNumber maxAttributeNumber, + StrategyNumber maxStrategyNumber) +{ + maxStrategyNumber = AMStrategies(maxStrategyNumber); /* XXX */ + return + maxAttributeNumber * maxStrategyNumber * sizeof (ScanKeyData); +} + +/* + * StrategyTransformMapIsValid is now a macro in istrat.h -cim 4/27/91 + */ + +/* ---------------- + * StrategyOperatorIsValid + * ---------------- + */ +bool +StrategyOperatorIsValid(StrategyOperator operator, + StrategyNumber maxStrategy) +{ + return (bool) + (PointerIsValid(operator) && + StrategyNumberIsInBounds(operator->strategy, maxStrategy) && + !(operator->flags & ~(SK_NEGATE | SK_COMMUTE))); +} + +/* ---------------- + * StrategyTermIsValid + * ---------------- + */ +bool +StrategyTermIsValid(StrategyTerm term, + StrategyNumber maxStrategy) +{ + Index index; + + if (! PointerIsValid(term) || term->degree == 0) + return false; + + for (index = 0; index < term->degree; index += 1) { + if (! StrategyOperatorIsValid(&term->operatorData[index], + maxStrategy)) { + + return false; + } + } + + return true; +} + +/* ---------------- + * StrategyExpressionIsValid + * ---------------- + */ +bool +StrategyExpressionIsValid(StrategyExpression expression, + StrategyNumber maxStrategy) +{ + StrategyTerm *termP; + + if (!PointerIsValid(expression)) + return true; + + if (!StrategyTermIsValid(expression->term[0], maxStrategy)) + return false; + + termP = &expression->term[1]; + while (StrategyTermIsValid(*termP, maxStrategy)) + termP += 1; + + return (bool) + (! PointerIsValid(*termP)); +} + +/* ---------------- + * StrategyEvaluationIsValid + * ---------------- + */ +bool +StrategyEvaluationIsValid(StrategyEvaluation evaluation) +{ + Index index; + + if (! PointerIsValid(evaluation) || + ! StrategyNumberIsValid(evaluation->maxStrategy) || + ! StrategyTransformMapIsValid(evaluation->negateTransform) || + ! StrategyTransformMapIsValid(evaluation->commuteTransform) || + ! StrategyTransformMapIsValid(evaluation->negateCommuteTransform)) { + + return false; + } + + for (index = 0; index < evaluation->maxStrategy; index += 1) { + if (! 
StrategyExpressionIsValid(evaluation->expression[index], + evaluation->maxStrategy)) { + + return false; + } + } + return true; +} + +/* ---------------- + * StrategyTermEvaluate + * ---------------- + */ +static bool +StrategyTermEvaluate(StrategyTerm term, + StrategyMap map, + Datum left, + Datum right) +{ + Index index; + long tmpres; + bool result; + StrategyOperator operator; + ScanKey entry; + + for (index = 0, operator = &term->operatorData[0]; + index < term->degree; index += 1, operator += 1) { + + entry = &map->entry[operator->strategy - 1]; + + Assert(RegProcedureIsValid(entry->sk_procedure)); + + switch (operator->flags ^ entry->sk_flags) { + case 0x0: + tmpres = (long) FMGR_PTR2(entry->sk_func, entry->sk_procedure, + left, right); + break; + + case SK_NEGATE: + tmpres = (long) !FMGR_PTR2(entry->sk_func, entry->sk_procedure, + left, right); + break; + + case SK_COMMUTE: + tmpres = (long) FMGR_PTR2(entry->sk_func, entry->sk_procedure, + right, left); + break; + + case SK_NEGATE | SK_COMMUTE: + tmpres = (long) !FMGR_PTR2(entry->sk_func, entry->sk_procedure, + right, left); + break; + + default: + elog(FATAL, "StrategyTermEvaluate: impossible case %d", + operator->flags ^ entry->sk_flags); + } + + result = (bool) tmpres; + if (!result) + return result; + } + + return result; +} + + +/* ---------------- + * RelationGetStrategy + * ---------------- + */ +StrategyNumber +RelationGetStrategy(Relation relation, + AttrNumber attributeNumber, + StrategyEvaluation evaluation, + RegProcedure procedure) +{ + StrategyNumber strategy; + StrategyMap strategyMap; + ScanKey entry; + Index index; + int numattrs; + + Assert(RelationIsValid(relation)); + numattrs = RelationGetNumberOfAttributes(relation); + + Assert(relation->rd_rel->relkind == RELKIND_INDEX); /* XXX use accessor */ + Assert(AttributeNumberIsValid(attributeNumber)); + Assert( (attributeNumber >= 1) && (attributeNumber < 1 + numattrs)); + + Assert(StrategyEvaluationIsValid(evaluation)); + Assert(RegProcedureIsValid(procedure)); + + strategyMap = + IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation), + evaluation->maxStrategy, + attributeNumber); + + /* get a strategy number for the procedure ignoring flags for now */ + for (index = 0; index < evaluation->maxStrategy; index += 1) { + if (strategyMap->entry[index].sk_procedure == procedure) { + break; + } + } + + if (index == evaluation->maxStrategy) + return InvalidStrategy; + + strategy = 1 + index; + entry = StrategyMapGetScanKeyEntry(strategyMap, strategy); + + Assert(!(entry->sk_flags & ~(SK_NEGATE | SK_COMMUTE))); + + switch (entry->sk_flags & (SK_NEGATE | SK_COMMUTE)) { + case 0x0: + return strategy; + + case SK_NEGATE: + strategy = evaluation->negateTransform->strategy[strategy - 1]; + break; + + case SK_COMMUTE: + strategy = evaluation->commuteTransform->strategy[strategy - 1]; + break; + + case SK_NEGATE | SK_COMMUTE: + strategy = evaluation->negateCommuteTransform->strategy[strategy - 1]; + break; + + default: + elog(FATAL, "RelationGetStrategy: impossible case %d", entry->sk_flags); + } + + + if (! StrategyNumberIsInBounds(strategy, evaluation->maxStrategy)) { + if (! 
StrategyNumberIsValid(strategy)) { + elog(WARN, "RelationGetStrategy: corrupted evaluation"); + } + } + + return strategy; +} + +/* ---------------- + * RelationInvokeStrategy + * ---------------- + */ +bool /* XXX someday, this may return Datum */ +RelationInvokeStrategy(Relation relation, + StrategyEvaluation evaluation, + AttrNumber attributeNumber, + StrategyNumber strategy, + Datum left, + Datum right) +{ + StrategyNumber newStrategy; + StrategyMap strategyMap; + ScanKey entry; + StrategyTermData termData; + int numattrs; + + Assert(RelationIsValid(relation)); + Assert(relation->rd_rel->relkind == RELKIND_INDEX); /* XXX use accessor */ + numattrs = RelationGetNumberOfAttributes(relation); + + Assert(StrategyEvaluationIsValid(evaluation)); + Assert(AttributeNumberIsValid(attributeNumber)); + Assert( (attributeNumber >= 1) && (attributeNumber < 1 + numattrs)); + + Assert(StrategyNumberIsInBounds(strategy, evaluation->maxStrategy)); + + termData.degree = 1; + + strategyMap = + IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation), + evaluation->maxStrategy, + attributeNumber); + + entry = StrategyMapGetScanKeyEntry(strategyMap, strategy); + + if (RegProcedureIsValid(entry->sk_procedure)) { + termData.operatorData[0].strategy = strategy; + termData.operatorData[0].flags = 0x0; + + return + StrategyTermEvaluate(&termData, strategyMap, left, right); + } + + + newStrategy = evaluation->negateTransform->strategy[strategy - 1]; + if (newStrategy != strategy && StrategyNumberIsValid(newStrategy)) { + + entry = StrategyMapGetScanKeyEntry(strategyMap, newStrategy); + + if (RegProcedureIsValid(entry->sk_procedure)) { + termData.operatorData[0].strategy = newStrategy; + termData.operatorData[0].flags = SK_NEGATE; + + return + StrategyTermEvaluate(&termData, strategyMap, left, right); + } + } + + newStrategy = evaluation->commuteTransform->strategy[strategy - 1]; + if (newStrategy != strategy && StrategyNumberIsValid(newStrategy)) { + + entry = StrategyMapGetScanKeyEntry(strategyMap, newStrategy); + + if (RegProcedureIsValid(entry->sk_procedure)) { + termData.operatorData[0].strategy = newStrategy; + termData.operatorData[0].flags = SK_COMMUTE; + + return + StrategyTermEvaluate(&termData, strategyMap, left, right); + } + } + + newStrategy = evaluation->negateCommuteTransform->strategy[strategy - 1]; + if (newStrategy != strategy && StrategyNumberIsValid(newStrategy)) { + + entry = StrategyMapGetScanKeyEntry(strategyMap, newStrategy); + + if (RegProcedureIsValid(entry->sk_procedure)) { + termData.operatorData[0].strategy = newStrategy; + termData.operatorData[0].flags = SK_NEGATE | SK_COMMUTE; + + return + StrategyTermEvaluate(&termData, strategyMap, left, right); + } + } + + if (PointerIsValid(evaluation->expression[strategy - 1])) { + StrategyTerm *termP; + + termP = &evaluation->expression[strategy - 1]->term[0]; + while (PointerIsValid(*termP)) { + Index index; + + for (index = 0; index < (*termP)->degree; index += 1) { + entry = StrategyMapGetScanKeyEntry(strategyMap, + (*termP)->operatorData[index].strategy); + + if (! 
RegProcedureIsValid(entry->sk_procedure)) { + break; + } + } + + if (index == (*termP)->degree) { + return + StrategyTermEvaluate(*termP, strategyMap, left, right); + } + + termP += 1; + } + } + + elog(WARN, "RelationInvokeStrategy: cannot evaluate strategy %d", + strategy); + + /* not reached, just to make compiler happy */ + return FALSE; + + +} + +/* ---------------- + * OperatorRelationFillScanKeyEntry + * ---------------- + */ +static void +OperatorRelationFillScanKeyEntry(Relation operatorRelation, + Oid operatorObjectId, + ScanKey entry) +{ + HeapScanDesc scan; + ScanKeyData scanKeyData; + HeapTuple tuple; + + ScanKeyEntryInitialize(&scanKeyData, 0, + ObjectIdAttributeNumber, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(operatorObjectId)); + + scan = heap_beginscan(operatorRelation, false, NowTimeQual, + 1, &scanKeyData); + + tuple = heap_getnext(scan, false, (Buffer *)NULL); + if (! HeapTupleIsValid(tuple)) { + elog(WARN, "OperatorObjectIdFillScanKeyEntry: unknown operator %lu", + (uint32) operatorObjectId); + } + + entry->sk_flags = 0; + entry->sk_procedure = + ((OperatorTupleForm) GETSTRUCT(tuple))->oprcode; + fmgr_info(entry->sk_procedure, &entry->sk_func, &entry->sk_nargs); + + if (! RegProcedureIsValid(entry->sk_procedure)) { + elog(WARN, + "OperatorObjectIdFillScanKeyEntry: no procedure for operator %lu", + (uint32) operatorObjectId); + } + + heap_endscan(scan); +} + + +/* + * IndexSupportInitialize -- + * Initializes an index strategy and associated support procedures. + */ +void +IndexSupportInitialize(IndexStrategy indexStrategy, + RegProcedure *indexSupport, + Oid indexObjectId, + Oid accessMethodObjectId, + StrategyNumber maxStrategyNumber, + StrategyNumber maxSupportNumber, + AttrNumber maxAttributeNumber) +{ + Relation relation; + Relation operatorRelation; + HeapScanDesc scan; + HeapTuple tuple; + ScanKeyData entry[2]; + StrategyMap map; + AttrNumber attributeNumber; + int attributeIndex; + Oid operatorClassObjectId[ MaxIndexAttributeNumber ]; + + maxStrategyNumber = AMStrategies(maxStrategyNumber); + + ScanKeyEntryInitialize(&entry[0], 0, Anum_pg_index_indexrelid, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(indexObjectId)); + + relation = heap_openr(IndexRelationName); + scan = heap_beginscan(relation, false, NowTimeQual, 1, entry); + tuple = heap_getnext(scan, 0, (Buffer *)NULL); + if (! HeapTupleIsValid(tuple)) + elog(WARN, "IndexSupportInitialize: corrupted catalogs"); + + /* + * XXX note that the following assumes the INDEX tuple is well formed and + * that the key[] and class[] are 0 terminated. 
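+ *
+ * Editor's note (not part of the original comment): concretely, the
+ * loop below walks indkey[] and indclass[] in parallel and stops at the
+ * first indkey[] slot holding InvalidOid (zero); only the preceding
+ * attributes get an operator class recorded in operatorClassObjectId[].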
+ */ + for (attributeIndex=0; attributeIndex < maxAttributeNumber; attributeIndex++) { + IndexTupleForm iform; + + iform = (IndexTupleForm) GETSTRUCT(tuple); + + if (!OidIsValid(iform->indkey[attributeIndex])) { + if (attributeIndex == 0) { + elog(WARN, "IndexSupportInitialize: no pg_index tuple"); + } + break; + } + + operatorClassObjectId[attributeIndex] + = iform->indclass[attributeIndex]; + } + + heap_endscan(scan); + heap_close(relation); + + /* if support routines exist for this access method, load them */ + if (maxSupportNumber > 0) { + + ScanKeyEntryInitialize(&entry[0], 0, Anum_pg_amproc_amid, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(accessMethodObjectId)); + + ScanKeyEntryInitialize(&entry[1], 0, Anum_pg_amproc_amopclaid, + ObjectIdEqualRegProcedure, 0); + +/* relation = heap_openr(Name_pg_amproc); */ + relation = heap_openr(AccessMethodProcedureRelationName); + + + for (attributeNumber = maxAttributeNumber; attributeNumber > 0; + attributeNumber--) { + + int16 support; + Form_pg_amproc form; + RegProcedure *loc; + + loc = &indexSupport[((attributeNumber - 1) * maxSupportNumber)]; + + for (support = maxSupportNumber; --support >= 0; ) { + loc[support] = InvalidOid; + } + + entry[1].sk_argument = + ObjectIdGetDatum(operatorClassObjectId[attributeNumber - 1]); + + scan = heap_beginscan(relation, false, NowTimeQual, 2, entry); + + while (tuple = heap_getnext(scan, 0, (Buffer *)NULL), + HeapTupleIsValid(tuple)) { + + form = (Form_pg_amproc) GETSTRUCT(tuple); + loc[(form->amprocnum - 1)] = form->amproc; + } + + heap_endscan(scan); + } + heap_close(relation); + } + + ScanKeyEntryInitialize(&entry[0], 0, + Anum_pg_amop_amopid, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(accessMethodObjectId)); + + ScanKeyEntryInitialize(&entry[1], 0, + Anum_pg_amop_amopclaid, + ObjectIdEqualRegProcedure, 0); + + relation = heap_openr(AccessMethodOperatorRelationName); + operatorRelation = heap_openr(OperatorRelationName); + + for (attributeNumber = maxAttributeNumber; attributeNumber > 0; + attributeNumber--) { + + StrategyNumber strategy; + + entry[1].sk_argument = + ObjectIdGetDatum(operatorClassObjectId[attributeNumber - 1]); + + map = IndexStrategyGetStrategyMap(indexStrategy, + maxStrategyNumber, + attributeNumber); + + for (strategy = 1; strategy <= maxStrategyNumber; strategy++) + ScanKeyEntrySetIllegal(StrategyMapGetScanKeyEntry(map, strategy)); + + scan = heap_beginscan(relation, false, NowTimeQual, 2, entry); + + while (tuple = heap_getnext(scan, 0, (Buffer *)NULL), + HeapTupleIsValid(tuple)) { + Form_pg_amop form; + + form = (Form_pg_amop) GETSTRUCT(tuple); + + OperatorRelationFillScanKeyEntry(operatorRelation, + form->amopopr, + StrategyMapGetScanKeyEntry(map, form->amopstrategy)); + } + + heap_endscan(scan); + } + + heap_close(operatorRelation); + heap_close(relation); +} + +/* ---------------- + * IndexStrategyDisplay + * ---------------- + */ +#ifdef ISTRATDEBUG +int +IndexStrategyDisplay(IndexStrategy indexStrategy, + StrategyNumber numberOfStrategies, + int numberOfAttributes) +{ + StrategyMap strategyMap; + AttrNumber attributeNumber; + StrategyNumber strategyNumber; + + for (attributeNumber = 1; attributeNumber <= numberOfAttributes; + attributeNumber += 1) { + + strategyMap = IndexStrategyGetStrategyMap(indexStrategy, + numberOfStrategies, + attributeNumber); + + for (strategyNumber = 1; + strategyNumber <= AMStrategies(numberOfStrategies); + strategyNumber += 1) { + + printf(":att %d\t:str %d\t:opr 0x%x(%d)\n", + attributeNumber, strategyNumber, + strategyMap->entry[strategyNumber - 1].sk_procedure, + strategyMap->entry[strategyNumber - 1].sk_procedure); + } + } +} +#endif /* defined(ISTRATDEBUG) */ + + diff
--git a/src/backend/access/iqual.h b/src/backend/access/iqual.h new file mode 100644 index 00000000000..5fab98a15bd --- /dev/null +++ b/src/backend/access/iqual.h @@ -0,0 +1,32 @@ +/*------------------------------------------------------------------------- + * + * iqual.h-- + * Index scan key qualification definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: iqual.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef IQUAL_H +#define IQUAL_H + +#include "c.h" + +#include "storage/itemid.h" +#include "utils/rel.h" +#include "access/skey.h" + +/* ---------------- + * index tuple qualification support + * ---------------- + */ + +extern int NIndexTupleProcessed; + +extern bool index_keytest(IndexTuple tuple, TupleDesc tupdesc, + int scanKeySize, ScanKey key); + +#endif /* IQUAL_H */ diff --git a/src/backend/access/istrat.h b/src/backend/access/istrat.h new file mode 100644 index 00000000000..201e70e6602 --- /dev/null +++ b/src/backend/access/istrat.h @@ -0,0 +1,80 @@ +/*------------------------------------------------------------------------- + * + * istrat.h-- + * POSTGRES index strategy definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: istrat.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef ISTRAT_H +#define ISTRAT_H + +#include "postgres.h" +#include "access/attnum.h" +#include "access/skey.h" +#include "access/strat.h" +#include "utils/rel.h" /* for Relation */ + +/* + * StrategyNumberIsValid -- + * True iff the strategy number is valid. + */ +#define StrategyNumberIsValid(strategyNumber) \ + ((bool) ((strategyNumber) != InvalidStrategy)) + +/* + * StrategyNumberIsInBounds -- + * True iff strategy number is within given bounds. + * + * Note: + * Assumes StrategyNumber is an unsigned type. + * Assumes the bounded interval to be (0,max]. + */ +#define StrategyNumberIsInBounds(strategyNumber, maxStrategyNumber) \ + ((bool)(InvalidStrategy < (strategyNumber) && \ + (strategyNumber) <= (maxStrategyNumber))) + +/* + * StrategyMapIsValid -- + * True iff the index strategy mapping is valid. + */ +#define StrategyMapIsValid(map) PointerIsValid(map) + +/* + * IndexStrategyIsValid -- + * True iff the index strategy is valid. 
+ */ +#define IndexStrategyIsValid(s) PointerIsValid(s) + +extern ScanKey StrategyMapGetScanKeyEntry(StrategyMap map, + StrategyNumber strategyNumber); +extern StrategyMap IndexStrategyGetStrategyMap(IndexStrategy indexStrategy, + StrategyNumber maxStrategyNum, AttrNumber attrNum); + +extern Size +AttributeNumberGetIndexStrategySize(AttrNumber maxAttributeNumber, + StrategyNumber maxStrategyNumber); +extern bool StrategyOperatorIsValid(StrategyOperator operator, + StrategyNumber maxStrategy); +extern bool StrategyTermIsValid(StrategyTerm term, + StrategyNumber maxStrategy); +extern bool StrategyExpressionIsValid(StrategyExpression expression, + StrategyNumber maxStrategy); +extern bool StrategyEvaluationIsValid(StrategyEvaluation evaluation); +extern StrategyNumber RelationGetStrategy(Relation relation, + AttrNumber attributeNumber, StrategyEvaluation evaluation, + RegProcedure procedure); +extern bool RelationInvokeStrategy(Relation relation, + StrategyEvaluation evaluation, AttrNumber attributeNumber, + StrategyNumber strategy, Datum left, Datum right); +extern void IndexSupportInitialize(IndexStrategy indexStrategy, + RegProcedure *indexSupport, Oid indexObjectId, + Oid accessMethodObjectId, StrategyNumber maxStrategyNumber, + StrategyNumber maxSupportNumber, AttrNumber maxAttributeNumber); + + +#endif /* ISTRAT_H */ diff --git a/src/backend/access/itup.h b/src/backend/access/itup.h new file mode 100644 index 00000000000..028bf430b0d --- /dev/null +++ b/src/backend/access/itup.h @@ -0,0 +1,104 @@ +/*------------------------------------------------------------------------- + * + * itup.h-- + * POSTGRES index tuple definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: itup.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef ITUP_H +#define ITUP_H + +#include "c.h" +#include "access/ibit.h" +#include "access/tupdesc.h" /* for TupleDesc */ +#include "storage/itemptr.h" + +#define MaxIndexAttributeNumber 7 + +typedef struct IndexTupleData { + ItemPointerData t_tid; /* reference TID to base tuple */ + + /* + * t_info is layed out in the following fashion: + * + * 15th (leftmost) bit: "has nulls" bit + * 14th bit: "has varlenas" bit + * 13th bit: "has rules" bit - (removed ay 11/94) + * bits 12-0 bit: size of tuple. + */ + + unsigned short t_info; /* various info about tuple */ + + /* + * please make sure sizeof(IndexTupleData) is MAXALIGN'ed. + * See IndexInfoFindDataOffset() for the reason. 
+ */ + +} IndexTupleData; /* MORE DATA FOLLOWS AT END OF STRUCT */ + +typedef IndexTupleData *IndexTuple; + + +typedef struct InsertIndexResultData { + ItemPointerData pointerData; +} InsertIndexResultData; + +typedef InsertIndexResultData *InsertIndexResult; + + +typedef struct RetrieveIndexResultData { + ItemPointerData index_iptr; + ItemPointerData heap_iptr; +} RetrieveIndexResultData; + +typedef RetrieveIndexResultData *RetrieveIndexResult; + + +/*----------------- + * PredInfo - + * used for partial indices + *----------------- + */ +typedef struct PredInfo { + Node *pred; + Node *oldPred; +} PredInfo; + + +/* ---------------- + * externs + * ---------------- + */ + +#define INDEX_SIZE_MASK 0x1FFF +#define INDEX_NULL_MASK 0x8000 +#define INDEX_VAR_MASK 0x4000 + +#define IndexTupleSize(itup) (((IndexTuple) (itup))->t_info & 0x1FFF) +#define IndexTupleDSize(itup) ((itup).t_info & 0x1FFF) +#define IndexTupleNoNulls(itup) (!(((IndexTuple) (itup))->t_info & 0x8000)) +#define IndexTupleAllFixed(itup) (!(((IndexTuple) (itup))->t_info & 0x4000)) + +#define IndexTupleHasMinHeader(itup) (IndexTupleNoNulls(itup)) + + +/* indextuple.h */ +extern IndexTuple index_formtuple(TupleDesc tupleDescriptor, + Datum value[], char null[]); +extern char *fastgetiattr(IndexTuple tup, int attnum, + TupleDesc att, bool *isnull); +extern Datum index_getattr(IndexTuple tuple, AttrNumber attNum, + TupleDesc tupDesc, bool *isNullOutP); +extern RetrieveIndexResult +FormRetrieveIndexResult(ItemPointer indexItemPointer, + ItemPointer heapItemPointer); +extern void CopyIndexTuple(IndexTuple source, IndexTuple *target); + + +#endif /* ITUP_H */ + diff --git a/src/backend/access/nbtree.h b/src/backend/access/nbtree.h new file mode 100644 index 00000000000..d5c37a23950 --- /dev/null +++ b/src/backend/access/nbtree.h @@ -0,0 +1,264 @@ +/*------------------------------------------------------------------------- + * + * nbtree.h-- + * header file for postgres btree access method implementation. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nbtree.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NBTREE_H +#define NBTREE_H + +#include "access/attnum.h" +#include "access/itup.h" +#include "access/htup.h" +#include "access/tupdesc.h" + +#include "access/istrat.h" +#include "access/funcindex.h" +#include "access/relscan.h" +#include "access/sdir.h" +#include "nodes/pg_list.h" + +/* + * BTPageOpaqueData -- At the end of every page, we store a pointer + * to both siblings in the tree. See Lehman and Yao's paper for more + * info. In addition, we need to know what sort of page this is + * (leaf or internal), and whether the page is available for reuse. + * + * Lehman and Yao's algorithm requires a ``high key'' on every page. + * The high key on a page is guaranteed to be greater than or equal + * to any key that appears on this page. Our insertion algorithm + * guarantees that we can use the initial least key on our right + * sibling as the high key. We allocate space for the line pointer + * to the high key in the opaque data at the end of the page. + * + * Rightmost pages in the tree have no high key. 
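+ *
+ * Editor's illustrative sketch (not part of the original comment): a
+ * descent that may have been overtaken by a concurrent split recovers
+ * by checking its key against the high key and chasing the right
+ * sibling link, roughly:
+ *
+ *     opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+ *     while (!P_RIGHTMOST(opaque) && the key is beyond this page's high key)
+ *         follow opaque->btpo_next to the right sibling;
+ *
+ * which is essentially what _bt_moveright() is for.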
+ */ + +typedef struct BTPageOpaqueData { + BlockNumber btpo_prev; + BlockNumber btpo_next; + uint16 btpo_flags; + +#define BTP_LEAF (1 << 0) +#define BTP_ROOT (1 << 1) +#define BTP_FREE (1 << 2) +#define BTP_META (1 << 3) + +} BTPageOpaqueData; + +typedef BTPageOpaqueData *BTPageOpaque; + +/* + * ScanOpaqueData is used to remember which buffers we're currently + * examining in the scan. We keep these buffers locked and pinned + * and recorded in the opaque entry of the scan in order to avoid + * doing a ReadBuffer() for every tuple in the index. This avoids + * semop() calls, which are expensive. + */ + +typedef struct BTScanOpaqueData { + Buffer btso_curbuf; + Buffer btso_mrkbuf; +} BTScanOpaqueData; + +typedef BTScanOpaqueData *BTScanOpaque; + +/* + * BTItems are what we store in the btree. Each item has an index + * tuple, including key and pointer values. In addition, we must + * guarantee that all tuples in the index are unique, in order to + * satisfy some assumptions in Lehman and Yao. The way that we do + * this is by generating a new OID for every insertion that we do in + * the tree. This adds eight bytes to the size of btree index + * tuples. Note that we do not use the OID as part of a composite + * key; the OID only serves as a unique identifier for a given index + * tuple (logical position within a page). + */ + +typedef struct BTItemData { + Oid bti_oid; + int32 bti_dummy; /* padding to make bti_itup + * align at 8-byte boundary + */ + IndexTupleData bti_itup; +} BTItemData; + +typedef BTItemData *BTItem; + +/* + * BTStackData -- As we descend a tree, we push the (key, pointer) + * pairs from internal nodes onto a private stack. If we split a + * leaf, we use this stack to walk back up the tree and insert data + * into parent nodes (and possibly to split them, too). Lehman and + * Yao's update algorithm guarantees that under no circumstances can + * our private stack give us an irredeemably bad picture up the tree. + * Again, see the paper for details. + */ + +typedef struct BTStackData { + BlockNumber bts_blkno; + OffsetNumber bts_offset; + BTItem bts_btitem; + struct BTStackData *bts_parent; +} BTStackData; + +typedef BTStackData *BTStack; + +/* + * We need to be able to tell the difference between read and write + * requests for pages, in order to do locking correctly. + */ + +#define BT_READ 0 +#define BT_WRITE 1 + +/* + * Similarly, the difference between insertion and non-insertion binary + * searches on a given page makes a difference when we're descending the + * tree. + */ + +#define BT_INSERTION 0 +#define BT_DESCENT 1 + +/* + * In general, the btree code tries to localize its knowledge about + * page layout to a couple of routines. However, we need a special + * value to indicate "no page number" in those places where we expect + * page numbers. + */ + +#define P_NONE 0 +#define P_LEFTMOST(opaque) ((opaque)->btpo_prev == P_NONE) +#define P_RIGHTMOST(opaque) ((opaque)->btpo_next == P_NONE) + +#define P_HIKEY ((OffsetNumber) 1) +#define P_FIRSTKEY ((OffsetNumber) 2) + +/* + * Strategy numbers -- ordering of these is <, <=, =, >=, > + */ + +#define BTLessStrategyNumber 1 +#define BTLessEqualStrategyNumber 2 +#define BTEqualStrategyNumber 3 +#define BTGreaterEqualStrategyNumber 4 +#define BTGreaterStrategyNumber 5 +#define BTMaxStrategyNumber 5 + +/* + * When a new operator class is declared, we require that the user + * supply us with an amproc procedure for determining whether, for + * two keys a and b, a < b, a = b, or a > b. 
This routine must + * return < 0, 0, > 0, respectively, in these three cases. Since we + * only have one such proc in amproc, it's number 1. + */ + +#define BTORDER_PROC 1 + + +/* + * prototypes for functions in nbtinsert.c + */ +extern InsertIndexResult _bt_doinsert(Relation rel, BTItem btitem); +extern bool _bt_itemcmp(Relation rel, Size keysz, BTItem item1, BTItem item2, + StrategyNumber strat); + +/* + * prototypes for functions in nbtpage.c + */ +extern void _bt_metapinit(Relation rel); +extern void _bt_checkmeta(Relation rel); +extern Buffer _bt_getroot(Relation rel, int access); +extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access); +extern void _bt_relbuf(Relation rel, Buffer buf, int access); +extern void _bt_wrtbuf(Relation rel, Buffer buf); +extern void _bt_wrtnorelbuf(Relation rel, Buffer buf); +extern void _bt_pageinit(Page page, Size size); +extern void _bt_metaproot(Relation rel, BlockNumber rootbknum); +extern Buffer _bt_getstackbuf(Relation rel, BTStack stack, int access); +extern void _bt_setpagelock(Relation rel, BlockNumber blkno, int access); +extern void _bt_unsetpagelock(Relation rel, BlockNumber blkno, int access); +extern void _bt_pagedel(Relation rel, ItemPointer tid); + +/* + * prototypes for functions in nbtree.c + */ +extern bool BuildingBtree; /* in nbtree.c */ + +extern void btbuild(Relation heap, Relation index, int natts, + AttrNumber *attnum, IndexStrategy istrat, uint16 pcount, + Datum *params, FuncIndexInfo *finfo, PredInfo *predInfo); +extern InsertIndexResult btinsert(Relation rel, IndexTuple itup); +extern char *btgettuple(IndexScanDesc scan, ScanDirection dir); +extern char *btbeginscan(Relation rel, bool fromEnd, uint16 keysz, + ScanKey scankey); + +extern void btrescan(IndexScanDesc scan, bool fromEnd, ScanKey scankey); +extern void btmovescan(IndexScanDesc scan, Datum v); +extern void btendscan(IndexScanDesc scan); +extern void btmarkpos(IndexScanDesc scan); +extern void btrestrpos(IndexScanDesc scan); +extern void btdelete(Relation rel, ItemPointer tid); + +/* + * prototypes for functions in nbtscan.c + */ +extern void _bt_regscan(IndexScanDesc scan); +extern void _bt_dropscan(IndexScanDesc scan); +extern void _bt_adjscans(Relation rel, ItemPointer tid); +extern void _bt_scandel(IndexScanDesc scan, BlockNumber blkno, + OffsetNumber offno); +extern bool _bt_scantouched(IndexScanDesc scan, BlockNumber blkno, + OffsetNumber offno); + +/* + * prototypes for functions in nbtsearch.c + */ +extern BTStack _bt_search(Relation rel, int keysz, ScanKey scankey, + Buffer *bufP); +extern Buffer _bt_moveright(Relation rel, Buffer buf, int keysz, + ScanKey scankey, int access); +extern bool _bt_skeycmp(Relation rel, Size keysz, ScanKey scankey, + Page page, ItemId itemid, StrategyNumber strat); +extern OffsetNumber _bt_binsrch(Relation rel, Buffer buf, int keysz, + ScanKey scankey, int srchtype); +extern RetrieveIndexResult _bt_next(IndexScanDesc scan, ScanDirection dir); +extern RetrieveIndexResult _bt_first(IndexScanDesc scan, ScanDirection dir); +extern bool _bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir); + +/* + * prototypes for functions in nbtstrat.c + */ +extern StrategyNumber _bt_getstrat(Relation rel, AttrNumber attno, + RegProcedure proc); +extern bool _bt_invokestrat(Relation rel, AttrNumber attno, + StrategyNumber strat, Datum left, Datum right); + +/* + * prototypes for functions in nbtutils.c + */ +extern ScanKey _bt_mkscankey(Relation rel, IndexTuple itup); +extern void _bt_freeskey(ScanKey skey); +extern void 
_bt_freestack(BTStack stack); +extern void _bt_orderkeys(Relation relation, uint16 *numberOfKeys, + ScanKey key); +extern bool _bt_checkqual(IndexScanDesc scan, IndexTuple itup); +extern BTItem _bt_formitem(IndexTuple itup); + +/* + * prototypes for functions in nbtsort.c + */ +extern void *_bt_spoolinit(Relation index, int ntapes); +extern void _bt_spooldestroy(void *spool); +extern void _bt_spool(Relation index, BTItem btitem, void *spool); +extern void _bt_upperbuild(Relation index, BlockNumber blk, int level); +extern void _bt_leafbuild(Relation index, void *spool); + +#endif /* NBTREE_H */ diff --git a/src/backend/access/nbtree/Makefile.inc b/src/backend/access/nbtree/Makefile.inc new file mode 100644 index 00000000000..50854008c01 --- /dev/null +++ b/src/backend/access/nbtree/Makefile.inc @@ -0,0 +1,15 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for access/nbtree (btree acess methods) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:11 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= nbtcompare.c nbtinsert.c nbtpage.c nbtree.c nbtscan.c nbtsearch.c \ + nbtstrat.c nbtutils.c nbtsort.c diff --git a/src/backend/access/nbtree/README b/src/backend/access/nbtree/README new file mode 100644 index 00000000000..a204ad4af08 --- /dev/null +++ b/src/backend/access/nbtree/README @@ -0,0 +1,68 @@ +$Header: /cvsroot/pgsql/src/backend/access/nbtree/README,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + +This directory contains a correct implementation of Lehman and Yao's +btree management algorithm that supports concurrent access for Postgres. +We have made the following changes in order to incorporate their algorithm +into Postgres: + + + The requirement that all btree keys be unique is too onerous, + but the algorithm won't work correctly without it. As a result, + this implementation adds an OID (guaranteed to be unique) to + every key in the index. This guarantees uniqueness within a set + of duplicates. Space overhead is four bytes. + + For this reason, when we're passed an index tuple to store by the + common access method code, we allocate a larger one and copy the + supplied tuple into it. No Postgres code outside of the btree + access method knows about this xid or sequence number. + + + Lehman and Yao don't require read locks, but assume that in- + memory copies of tree nodes are unshared. Postgres shares + in-memory buffers among backends. As a result, we do page- + level read locking on btree nodes in order to guarantee that + no record is modified while we are examining it. This reduces + concurrency but guaranteees correct behavior. + + + Read locks on a page are held for as long as a scan has a pointer + to the page. However, locks are always surrendered before the + sibling page lock is acquired (for readers), so we remain deadlock- + free. I will do a formal proof if I get bored anytime soon. + +In addition, the following things are handy to know: + + + Page zero of every btree is a meta-data page. This page stores + the location of the root page, a pointer to a list of free + pages, and other stuff that's handy to know. + + + This algorithm doesn't really work, since it requires ordered + writes, and UNIX doesn't support ordered writes. + + + There's one other case where we may screw up in this + implementation. 
When we start a scan, we descend the tree + to the key nearest the one in the qual, and once we get there, + position ourselves correctly for the qual type (eg, <, >=, etc). + If we happen to step off a page, decide we want to get back to + it, and fetch the page again, and if some bad person has split + the page and moved the last tuple we saw off of it, then the + code complains about botched concurrency in an elog(WARN, ...) + and gives up the ghost. This is the ONLY violation of Lehman + and Yao's guarantee of correct behavior that I am aware of in + this code. + +Notes to operator class implementors: + + With this implementation, we require the user to supply us with + a procedure for pg_amproc. This procedure should take two keys + A and B and return < 0, 0, or > 0 if A < B, A = B, or A > B, + respectively. See the contents of that relation for the btree + access method for some samples. + +Notes to mao for implementation document: + + On deletions, we need to adjust the position of active scans on + the index. The code in nbtscan.c handles this. We don't need to + do this for splits because of the way splits are handled; if they + happen behind us, we'll automatically go to the next page, and if + they happen in front of us, we're not affected by them. For + insertions, if we inserted a tuple behind the current scan location + on the current scan page, we move one space ahead. diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c new file mode 100644 index 00000000000..e567b3c44cb --- /dev/null +++ b/src/backend/access/nbtree/nbtcompare.c @@ -0,0 +1,173 @@ +/*------------------------------------------------------------------------- + * + * btcompare.c-- + * Comparison functions for btree access method. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtcompare.c,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + * + * NOTES + * These functions are stored in pg_amproc. For each operator class + * defined on btrees, they compute + * + * compare(a, b): + * < 0 if a < b, + * = 0 if a == b, + * > 0 if a > b. 
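+ *
+ * Editor's illustrative note (not part of the original file): for the
+ * int4 support function below this contract means, for example,
+ *
+ *     btint4cmp(2, 7)   returns a negative value (2 < 7)
+ *     btint4cmp(7, 7)   returns 0
+ *     btint4cmp(7, 2)   returns a positive value (7 > 2)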
+ *------------------------------------------------------------------------- + */ +#include <string.h> +#include "postgres.h" +#include "utils/nabstime.h" + +int32 +btint2cmp(int16 a, int16 b) +{ + return ((int32) (a - b)); +} + +int32 +btint4cmp(int32 a, int32 b) +{ + return (a - b); +} + +int32 +btint24cmp(int16 a, int32 b) +{ + return (((int32) a) - b); +} + +int32 +btint42cmp(int32 a, int16 b) +{ + return (a - ((int32) b)); +} + +int32 +btfloat4cmp(float32 a, float32 b) +{ + if (*a > *b) + return (1); + else if (*a == *b) + return (0); + else + return (-1); +} + +int32 +btfloat8cmp(float64 a, float64 b) +{ + if (*a > *b) + return (1); + else if (*a == *b) + return (0); + else + return (-1); +} + +int32 +btoidcmp(Oid a, Oid b) +{ + if (a > b) + return (1); + else if (a == b) + return (0); + else + return (-1); +} + +int32 +btabstimecmp(AbsoluteTime a, AbsoluteTime b) +{ + if (AbsoluteTimeIsBefore(a, b)) + return (1); + else if (AbsoluteTimeIsBefore(b, a)) + return (-1); + else + return (0); +} + +int32 +btcharcmp(char a, char b) +{ + return ((int32) (a - b)); +} + +int32 +btchar2cmp(uint16 a, uint16 b) +{ + return (strncmp((char *) &a, (char *) &b, 2)); +} + +int32 +btchar4cmp(uint32 a, uint32 b) +{ + return (strncmp((char *) &a, (char *) &b, 4)); +} + +int32 +btchar8cmp(char *a, char *b) +{ + return (strncmp(a, b, 8)); +} + +int32 +btchar16cmp(char *a, char *b) +{ + return (strncmp(a, b, 16)); +} + +int32 +btnamecmp(NameData *a, NameData *b) +{ + return (strncmp(a->data, b->data, NAMEDATALEN)); +} + +int32 +bttextcmp(struct varlena *a, struct varlena *b) +{ + char *ap, *bp; + int len; + int res; + + ap = VARDATA(a); + bp = VARDATA(b); + + /* len is the length of the shorter of the two strings */ + if ((len = VARSIZE(a)) > VARSIZE(b)) + len = VARSIZE(b); + + /* len includes the four bytes in which string length is stored */ + len -= sizeof(VARSIZE(a)); + + /* + * If the two strings differ in the first len bytes, or if they're + * the same in the first len bytes and they're both len bytes long, + * we're done. + */ + + res = 0; + if (len > 0) { + do { + res = (int) (*ap++ - *bp++); + len--; + } while (res == 0 && len != 0); + } + + if (res != 0 || VARSIZE(a) == VARSIZE(b)) + return (res); + + /* + * The two strings are the same in the first len bytes, and they + * are of different lengths. + */ + + if (VARSIZE(a) < VARSIZE(b)) + return (-1); + else + return (1); +} diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c new file mode 100644 index 00000000000..536c0aa385d --- /dev/null +++ b/src/backend/access/nbtree/nbtinsert.c @@ -0,0 +1,831 @@ +/*------------------------------------------------------------------------- + * + * btinsert.c-- + * Item insertion in Lehman and Yao btrees for Postgres.
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/nbtree.h" + +static InsertIndexResult _bt_insertonpg(Relation rel, Buffer buf, BTStack stack, int keysz, ScanKey scankey, BTItem btitem, BTItem afteritem); +static Buffer _bt_split(Relation rel, Buffer buf); +static OffsetNumber _bt_findsplitloc(Relation rel, Page page, OffsetNumber start, OffsetNumber maxoff, Size llimit); +static void _bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf); +static OffsetNumber _bt_pgaddtup(Relation rel, Buffer buf, int keysz, ScanKey itup_scankey, Size itemsize, BTItem btitem, BTItem afteritem); +static bool _bt_goesonpg(Relation rel, Buffer buf, Size keysz, ScanKey scankey, BTItem afteritem); +static void _bt_updateitem(Relation rel, Size keysz, Buffer buf, Oid bti_oid, BTItem newItem); + +/* + * _bt_doinsert() -- Handle insertion of a single btitem in the tree. + * + * This routine is called by the public interface routines, btbuild + * and btinsert. By here, btitem is filled in, and has a unique + * (xid, seqno) pair. + */ +InsertIndexResult +_bt_doinsert(Relation rel, BTItem btitem) +{ + ScanKey itup_scankey; + IndexTuple itup; + BTStack stack; + Buffer buf; + BlockNumber blkno; + int natts; + InsertIndexResult res; + + itup = &(btitem->bti_itup); + + /* we need a scan key to do our search, so build one */ + itup_scankey = _bt_mkscankey(rel, itup); + natts = rel->rd_rel->relnatts; + + /* find the page containing this key */ + stack = _bt_search(rel, natts, itup_scankey, &buf); + blkno = BufferGetBlockNumber(buf); + + /* trade in our read lock for a write lock */ + _bt_relbuf(rel, buf, BT_READ); + buf = _bt_getbuf(rel, blkno, BT_WRITE); + + /* + * If the page was split between the time that we surrendered our + * read lock and acquired our write lock, then this page may no + * longer be the right place for the key we want to insert. In this + * case, we need to move right in the tree. See Lehman and Yao for + * an excruciatingly precise description. + */ + + buf = _bt_moveright(rel, buf, natts, itup_scankey, BT_WRITE); + + /* do the insertion */ + res = _bt_insertonpg(rel, buf, stack, natts, itup_scankey, + btitem, (BTItem) NULL); + + /* be tidy */ + _bt_freestack(stack); + _bt_freeskey(itup_scankey); + + return (res); +} + +/* + * _bt_insertonpg() -- Insert a tuple on a particular page in the index. + * + * This recursive procedure does the following things: + * + * + if necessary, splits the target page. + * + finds the right place to insert the tuple (taking into + * account any changes induced by a split). + * + inserts the tuple. + * + if the page was split, pops the parent stack, and finds the + * right place to insert the new child pointer (by walking + * right using information stored in the parent stack). + * + invoking itself with the appropriate tuple for the right + * child page on the parent. + * + * On entry, we must have the right buffer on which to do the + * insertion, and the buffer must be pinned and locked. On return, + * we will have dropped both the pin and the write lock on the buffer. 
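+ *
+ * Editor's illustrative example (not part of the original comment): if
+ * a leaf fills up, _bt_split() below carves off a right sibling, the
+ * new tuple goes onto whichever half it belongs to, and this routine
+ * recurses on the parent (located through the BTStack) with an item
+ * pointing at the new right page; if the old root itself was the page
+ * that split, _bt_newroot() builds a fresh root holding the two
+ * downlinks.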
+ * + * The locking interactions in this code are critical. You should + * grok Lehman and Yao's paper before making any changes. In addition, + * you need to understand how we disambiguate duplicate keys in this + * implementation, in order to be able to find our location using + * L&Y "move right" operations. Since we may insert duplicate user + * keys, and since these dups may propogate up the tree, we use the + * 'afteritem' parameter to position ourselves correctly for the + * insertion on internal pages. + */ +static InsertIndexResult +_bt_insertonpg(Relation rel, + Buffer buf, + BTStack stack, + int keysz, + ScanKey scankey, + BTItem btitem, + BTItem afteritem) +{ + InsertIndexResult res; + Page page; + Buffer rbuf; + Buffer pbuf; + Page rpage; + ScanKey newskey; + BTItem ritem; + BTPageOpaque rpageop; + BlockNumber rbknum, itup_blkno; + OffsetNumber itup_off; + int itemsz; + InsertIndexResult newres; + BTItem new_item = (BTItem) NULL; + BTItem lowLeftItem; + + page = BufferGetPage(buf); + itemsz = IndexTupleDSize(btitem->bti_itup) + + (sizeof(BTItemData) - sizeof(IndexTupleData)); + + itemsz = DOUBLEALIGN(itemsz); /* be safe, PageAddItem will do this + but we need to be consistent */ + + if (PageGetFreeSpace(page) < itemsz) { + + /* split the buffer into left and right halves */ + rbuf = _bt_split(rel, buf); + + /* which new page (left half or right half) gets the tuple? */ + if (_bt_goesonpg(rel, buf, keysz, scankey, afteritem)) { + /* left page */ + itup_off = _bt_pgaddtup(rel, buf, keysz, scankey, + itemsz, btitem, afteritem); + itup_blkno = BufferGetBlockNumber(buf); + } else { + /* right page */ + itup_off = _bt_pgaddtup(rel, rbuf, keysz, scankey, + itemsz, btitem, afteritem); + itup_blkno = BufferGetBlockNumber(rbuf); + } + + /* + * By here, + * + * + our target page has been split; + * + the original tuple has been inserted; + * + we have write locks on both the old (left half) and new + * (right half) buffers, after the split; and + * + we have the key we want to insert into the parent. + * + * Do the parent insertion. We need to hold onto the locks for + * the child pages until we locate the parent, but we can release + * them before doing the actual insertion (see Lehman and Yao for + * the reasoning). + */ + + if (stack == (BTStack) NULL) { + + /* create a new root node and release the split buffers */ + _bt_newroot(rel, buf, rbuf); + _bt_relbuf(rel, buf, BT_WRITE); + _bt_relbuf(rel, rbuf, BT_WRITE); + + } else { + + /* form a index tuple that points at the new right page */ + rbknum = BufferGetBlockNumber(rbuf); + rpage = BufferGetPage(rbuf); + rpageop = (BTPageOpaque) PageGetSpecialPointer(rpage); + + /* + * By convention, the first entry (0) on every + * non-rightmost page is the high key for that page. In + * order to get the lowest key on the new right page, we + * actually look at its second (1) entry. + */ + + if (! 
P_RIGHTMOST(rpageop)) { + ritem = (BTItem) PageGetItem(rpage, + PageGetItemId(rpage, P_FIRSTKEY)); + } else { + ritem = (BTItem) PageGetItem(rpage, + PageGetItemId(rpage, P_HIKEY)); + } + + /* get a unique btitem for this key */ + new_item = _bt_formitem(&(ritem->bti_itup)); + + ItemPointerSet(&(new_item->bti_itup.t_tid), rbknum, P_HIKEY); + + /* find the parent buffer */ + pbuf = _bt_getstackbuf(rel, stack, BT_WRITE); + + /* + * If the key of new_item is < than the key of the item + * in the parent page pointing to the left page + * (stack->bts_btitem), we have to update the latter key; + * otherwise the keys on the parent page wouldn't be + * monotonically increasing after we inserted the new + * pointer to the right page (new_item). This only + * happens if our left page is the leftmost page and a + * new minimum key had been inserted before, which is not + * reflected in the parent page but didn't matter so + * far. If there are duplicate keys and this new minimum + * key spills over to our new right page, we get an + * inconsistency if we don't update the left key in the + * parent page. + */ + + if (_bt_itemcmp(rel, keysz, stack->bts_btitem, new_item, + BTGreaterStrategyNumber)) { + lowLeftItem = + (BTItem) PageGetItem(page, + PageGetItemId(page, P_FIRSTKEY)); + /* page must have right pointer after split */ + _bt_updateitem(rel, keysz, pbuf, stack->bts_btitem->bti_oid, + lowLeftItem); + } + + /* don't need the children anymore */ + _bt_relbuf(rel, buf, BT_WRITE); + _bt_relbuf(rel, rbuf, BT_WRITE); + + newskey = _bt_mkscankey(rel, &(new_item->bti_itup)); + newres = _bt_insertonpg(rel, pbuf, stack->bts_parent, + keysz, newskey, new_item, + stack->bts_btitem); + + /* be tidy */ + pfree(newres); + pfree(newskey); + pfree(new_item); + } + } else { + itup_off = _bt_pgaddtup(rel, buf, keysz, scankey, + itemsz, btitem, afteritem); + itup_blkno = BufferGetBlockNumber(buf); + + _bt_relbuf(rel, buf, BT_WRITE); + } + + /* by here, the new tuple is inserted */ + res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData)); + ItemPointerSet(&(res->pointerData), itup_blkno, itup_off); + + return (res); +} + +/* + * _bt_split() -- split a page in the btree. + * + * On entry, buf is the page to split, and is write-locked and pinned. + * Returns the new right sibling of buf, pinned and write-locked. The + * pin and lock on buf are maintained. 
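+ *
+ * Editor's illustrative sketch (not part of the original comment): the
+ * sibling links end up rewired roughly as
+ *
+ *     left:  btpo_next = new right page,  btpo_prev = unchanged
+ *     right: btpo_prev = left page,       btpo_next = old right sibling
+ *     old right sibling (if any): btpo_prev = new right page
+ *
+ * mirroring the assignments in the function body below.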
+ */ +static Buffer +_bt_split(Relation rel, Buffer buf) +{ + Buffer rbuf; + Page origpage; + Page leftpage, rightpage; + BTPageOpaque ropaque, lopaque, oopaque; + Buffer sbuf; + Page spage; + BTPageOpaque sopaque; + Size itemsz; + ItemId itemid; + BTItem item; + OffsetNumber leftoff, rightoff; + OffsetNumber start; + OffsetNumber maxoff; + OffsetNumber firstright; + OffsetNumber i; + Size llimit; + + rbuf = _bt_getbuf(rel, P_NEW, BT_WRITE); + origpage = BufferGetPage(buf); + leftpage = PageGetTempPage(origpage, sizeof(BTPageOpaqueData)); + rightpage = BufferGetPage(rbuf); + + _bt_pageinit(rightpage, BufferGetPageSize(rbuf)); + _bt_pageinit(leftpage, BufferGetPageSize(buf)); + + /* init btree private data */ + oopaque = (BTPageOpaque) PageGetSpecialPointer(origpage); + lopaque = (BTPageOpaque) PageGetSpecialPointer(leftpage); + ropaque = (BTPageOpaque) PageGetSpecialPointer(rightpage); + + /* if we're splitting this page, it won't be the root when we're done */ + oopaque->btpo_flags &= ~BTP_ROOT; + lopaque->btpo_flags = ropaque->btpo_flags = oopaque->btpo_flags; + lopaque->btpo_prev = oopaque->btpo_prev; + ropaque->btpo_prev = BufferGetBlockNumber(buf); + lopaque->btpo_next = BufferGetBlockNumber(rbuf); + ropaque->btpo_next = oopaque->btpo_next; + + /* + * If the page we're splitting is not the rightmost page at its + * level in the tree, then the first (0) entry on the page is the + * high key for the page. We need to copy that to the right + * half. Otherwise (meaning the rightmost page case), we should + * treat the line pointers beginning at zero as user data. + * + * We leave a blank space at the start of the line table for the + * left page. We'll come back later and fill it in with the high + * key item we get from the right key. + */ + + leftoff = P_FIRSTKEY; + ropaque->btpo_next = oopaque->btpo_next; + if (! P_RIGHTMOST(oopaque)) { + /* splitting a non-rightmost page, start at the first data item */ + start = P_FIRSTKEY; + + /* copy the original high key to the new page */ + itemid = PageGetItemId(origpage, P_HIKEY); + itemsz = ItemIdGetLength(itemid); + item = (BTItem) PageGetItem(origpage, itemid); + (void) PageAddItem(rightpage, (Item) item, itemsz, P_HIKEY, LP_USED); + rightoff = P_FIRSTKEY; + } else { + /* splitting a rightmost page, "high key" is the first data item */ + start = P_HIKEY; + + /* the new rightmost page will not have a high key */ + rightoff = P_HIKEY; + } + maxoff = PageGetMaxOffsetNumber(origpage); + llimit = PageGetFreeSpace(leftpage) / 2; + firstright = _bt_findsplitloc(rel, origpage, start, maxoff, llimit); + + for (i = start; i <= maxoff; i = OffsetNumberNext(i)) { + itemid = PageGetItemId(origpage, i); + itemsz = ItemIdGetLength(itemid); + item = (BTItem) PageGetItem(origpage, itemid); + + /* decide which page to put it on */ + if (i < firstright) { + (void) PageAddItem(leftpage, (Item) item, itemsz, leftoff, + LP_USED); + leftoff = OffsetNumberNext(leftoff); + } else { + (void) PageAddItem(rightpage, (Item) item, itemsz, rightoff, + LP_USED); + rightoff = OffsetNumberNext(rightoff); + } + } + + /* + * Okay, page has been split, high key on right page is correct. Now + * set the high key on the left page to be the min key on the right + * page. + */ + + if (P_RIGHTMOST(ropaque)) { + itemid = PageGetItemId(rightpage, P_HIKEY); + } else { + itemid = PageGetItemId(rightpage, P_FIRSTKEY); + } + itemsz = ItemIdGetLength(itemid); + item = (BTItem) PageGetItem(rightpage, itemid); + + /* + * We left a hole for the high key on the left page; fill it. 
The + * modal crap is to tell the page manager to put the new item on the + * page and not screw around with anything else. Whoever designed + * this interface has presumably crawled back into the dung heap they + * came from. No one here will admit to it. + */ + + PageManagerModeSet(OverwritePageManagerMode); + (void) PageAddItem(leftpage, (Item) item, itemsz, P_HIKEY, LP_USED); + PageManagerModeSet(ShufflePageManagerMode); + + /* + * By here, the original data page has been split into two new halves, + * and these are correct. The algorithm requires that the left page + * never move during a split, so we copy the new left page back on top + * of the original. Note that this is not a waste of time, since we + * also require (in the page management code) that the center of a + * page always be clean, and the most efficient way to guarantee this + * is just to compact the data by reinserting it into a new left page. + */ + + PageRestoreTempPage(leftpage, origpage); + + /* write these guys out */ + _bt_wrtnorelbuf(rel, rbuf); + _bt_wrtnorelbuf(rel, buf); + + /* + * Finally, we need to grab the right sibling (if any) and fix the + * prev pointer there. We are guaranteed that this is deadlock-free + * since no other writer will be moving holding a lock on that page + * and trying to move left, and all readers release locks on a page + * before trying to fetch its neighbors. + */ + + if (! P_RIGHTMOST(ropaque)) { + sbuf = _bt_getbuf(rel, ropaque->btpo_next, BT_WRITE); + spage = BufferGetPage(sbuf); + sopaque = (BTPageOpaque) PageGetSpecialPointer(spage); + sopaque->btpo_prev = BufferGetBlockNumber(rbuf); + + /* write and release the old right sibling */ + _bt_wrtbuf(rel, sbuf); + } + + /* split's done */ + return (rbuf); +} + +/* + * _bt_findsplitloc() -- find a safe place to split a page. + * + * In order to guarantee the proper handling of searches for duplicate + * keys, the first duplicate in the chain must either be the first + * item on the page after the split, or the entire chain must be on + * one of the two pages. That is, + * [1 2 2 2 3 4 5] + * must become + * [1] [2 2 2 3 4 5] + * or + * [1 2 2 2] [3 4 5] + * but not + * [1 2 2] [2 3 4 5]. + * However, + * [2 2 2 2 2 3 4] + * may be split as + * [2 2 2 2] [2 3 4]. + */ +static OffsetNumber +_bt_findsplitloc(Relation rel, + Page page, + OffsetNumber start, + OffsetNumber maxoff, + Size llimit) +{ + OffsetNumber i; + OffsetNumber saferight; + ItemId nxtitemid, safeitemid; + BTItem safeitem, nxtitem; + IndexTuple safetup, nxttup; + Size nbytes; + TupleDesc itupdesc; + int natts; + int attno; + Datum attsafe; + Datum attnext; + bool null; + + itupdesc = RelationGetTupleDescriptor(rel); + natts = rel->rd_rel->relnatts; + + saferight = start; + safeitemid = PageGetItemId(page, saferight); + nbytes = ItemIdGetLength(safeitemid) + sizeof(ItemIdData); + safeitem = (BTItem) PageGetItem(page, safeitemid); + safetup = &(safeitem->bti_itup); + + i = OffsetNumberNext(start); + + while (nbytes < llimit) { + + /* check the next item on the page */ + nxtitemid = PageGetItemId(page, i); + nbytes += (ItemIdGetLength(nxtitemid) + sizeof(ItemIdData)); + nxtitem = (BTItem) PageGetItem(page, nxtitemid); + nxttup = &(nxtitem->bti_itup); + + /* test against last known safe item */ + for (attno = 1; attno <= natts; attno++) { + attsafe = index_getattr(safetup, attno, itupdesc, &null); + attnext = index_getattr(nxttup, attno, itupdesc, &null); + + /* + * If the tuple we're looking at isn't equal to the last safe one + * we saw, then it's our new safe tuple. 
+ */ + + if (!_bt_invokestrat(rel, attno, BTEqualStrategyNumber, + attsafe, attnext)) { + safetup = nxttup; + saferight = i; + + /* break is for the attno for loop */ + break; + } + } + i = OffsetNumberNext(i); + } + + /* + * If the chain of dups starts at the beginning of the page and extends + * past the halfway mark, we can split it in the middle. + */ + + if (saferight == start) + saferight = i; + + return (saferight); +} + +/* + * _bt_newroot() -- Create a new root page for the index. + * + * We've just split the old root page and need to create a new one. + * In order to do this, we add a new root page to the file, then lock + * the metadata page and update it. This is guaranteed to be deadlock- + * free, because all readers release their locks on the metadata page + * before trying to lock the root, and all writers lock the root before + * trying to lock the metadata page. We have a write lock on the old + * root page, so we have not introduced any cycles into the waits-for + * graph. + * + * On entry, lbuf (the old root) and rbuf (its new peer) are write- + * locked. We don't drop the locks in this routine; that's done by + * the caller. On exit, a new root page exists with entries for the + * two new children. The new root page is neither pinned nor locked. + */ +static void +_bt_newroot(Relation rel, Buffer lbuf, Buffer rbuf) +{ + Buffer rootbuf; + Page lpage, rpage, rootpage; + BlockNumber lbkno, rbkno; + BlockNumber rootbknum; + BTPageOpaque rootopaque; + ItemId itemid; + BTItem item; + Size itemsz; + BTItem new_item; + + /* get a new root page */ + rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE); + rootpage = BufferGetPage(rootbuf); + _bt_pageinit(rootpage, BufferGetPageSize(rootbuf)); + + /* set btree special data */ + rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage); + rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE; + rootopaque->btpo_flags |= BTP_ROOT; + + /* + * Insert the internal tuple pointers. + */ + + lbkno = BufferGetBlockNumber(lbuf); + rbkno = BufferGetBlockNumber(rbuf); + lpage = BufferGetPage(lbuf); + rpage = BufferGetPage(rbuf); + + /* + * step over the high key on the left page while building the + * left page pointer. + */ + itemid = PageGetItemId(lpage, P_FIRSTKEY); + itemsz = ItemIdGetLength(itemid); + item = (BTItem) PageGetItem(lpage, itemid); + new_item = _bt_formitem(&(item->bti_itup)); + ItemPointerSet(&(new_item->bti_itup.t_tid), lbkno, P_FIRSTKEY); + + /* + * insert the left page pointer into the new root page. the root + * page is the rightmost page on its level so the "high key" item + * is the first data item. + */ + (void) PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, LP_USED); + pfree(new_item); + + /* + * the right page is the rightmost page on the second level, so + * the "high key" item is the first data item on that page as well. + */ + itemid = PageGetItemId(rpage, P_HIKEY); + itemsz = ItemIdGetLength(itemid); + item = (BTItem) PageGetItem(rpage, itemid); + new_item = _bt_formitem(&(item->bti_itup)); + ItemPointerSet(&(new_item->bti_itup.t_tid), rbkno, P_HIKEY); + + /* + * insert the right page pointer into the new root page. + */ + (void) PageAddItem(rootpage, (Item) new_item, itemsz, P_FIRSTKEY, LP_USED); + pfree(new_item); + + /* write and let go of the root buffer */ + rootbknum = BufferGetBlockNumber(rootbuf); + _bt_wrtbuf(rel, rootbuf); + + /* update metadata page with new root block number */ + _bt_metaproot(rel, rootbknum); +} + +/* + * _bt_pgaddtup() -- add a tuple to a particular page in the index. 
+ * + * This routine adds the tuple to the page as requested, and keeps the + * write lock and reference associated with the page's buffer. It is + * an error to call pgaddtup() without a write lock and reference. If + * afteritem is non-null, it's the item that we expect our new item + * to follow. Otherwise, we do a binary search for the correct place + * and insert the new item there. + */ +static OffsetNumber +_bt_pgaddtup(Relation rel, + Buffer buf, + int keysz, + ScanKey itup_scankey, + Size itemsize, + BTItem btitem, + BTItem afteritem) +{ + OffsetNumber itup_off; + OffsetNumber first; + Page page; + BTPageOpaque opaque; + BTItem chkitem; + Oid afteroid; + + page = BufferGetPage(buf); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + first = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY; + + if (afteritem == (BTItem) NULL) { + itup_off = _bt_binsrch(rel, buf, keysz, itup_scankey, BT_INSERTION); + } else { + afteroid = afteritem->bti_oid; + itup_off = first; + + do { + chkitem = + (BTItem) PageGetItem(page, PageGetItemId(page, itup_off)); + itup_off = OffsetNumberNext(itup_off); + } while (chkitem->bti_oid != afteroid); + } + + (void) PageAddItem(page, (Item) btitem, itemsize, itup_off, LP_USED); + + /* write the buffer, but hold our lock */ + _bt_wrtnorelbuf(rel, buf); + + return (itup_off); +} + +/* + * _bt_goesonpg() -- Does a new tuple belong on this page? + * + * This is part of the complexity introduced by allowing duplicate + * keys into the index. The tuple belongs on this page if: + * + * + there is no page to the right of this one; or + * + it is less than the high key on the page; or + * + the item it is to follow ("afteritem") appears on this + * page. + */ +static bool +_bt_goesonpg(Relation rel, + Buffer buf, + Size keysz, + ScanKey scankey, + BTItem afteritem) +{ + Page page; + ItemId hikey; + BTPageOpaque opaque; + BTItem chkitem; + OffsetNumber offnum, maxoff; + Oid afteroid; + bool found; + + page = BufferGetPage(buf); + + /* no right neighbor? */ + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + if (P_RIGHTMOST(opaque)) + return (true); + + /* + * this is a non-rightmost page, so it must have a high key item. + * + * If the scan key is < the high key (the min key on the next page), + * then it for sure belongs here. + */ + hikey = PageGetItemId(page, P_HIKEY); + if (_bt_skeycmp(rel, keysz, scankey, page, hikey, BTLessStrategyNumber)) + return (true); + + /* + * If the scan key is > the high key, then it for sure doesn't belong + * here. + */ + + if (_bt_skeycmp(rel, keysz, scankey, page, hikey, BTGreaterStrategyNumber)) + return (false); + + /* + * If we have no adjacency information, and the item is equal to the + * high key on the page (by here it is), then the item does not belong + * on this page. + */ + + if (afteritem == (BTItem) NULL) + return (false); + + /* damn, have to work for it. i hate that. */ + afteroid = afteritem->bti_oid; + maxoff = PageGetMaxOffsetNumber(page); + + /* + * Search the entire page for the afteroid. We need to do this, rather + * than doing a binary search and starting from there, because if the + * key we're searching for is the leftmost key in the tree at this + * level, then a binary search will do the wrong thing. Splits are + * pretty infrequent, so the cost isn't as bad as it could be. 
+ */ + + found = false; + for (offnum = P_FIRSTKEY; + offnum <= maxoff; + offnum = OffsetNumberNext(offnum)) { + chkitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum)); + if (chkitem->bti_oid == afteroid) { + found = true; + break; + } + } + + return (found); +} + +/* + * _bt_itemcmp() -- compare item1 to item2 using a requested + * strategy (<, <=, =, >=, >) + * + */ +bool +_bt_itemcmp(Relation rel, + Size keysz, + BTItem item1, + BTItem item2, + StrategyNumber strat) +{ + TupleDesc tupDes; + IndexTuple indexTuple1, indexTuple2; + Datum attrDatum1, attrDatum2; + int i; + bool isNull; + bool compare; + + tupDes = RelationGetTupleDescriptor(rel); + indexTuple1 = &(item1->bti_itup); + indexTuple2 = &(item2->bti_itup); + + for (i = 1; i <= keysz; i++) { + attrDatum1 = index_getattr(indexTuple1, i, tupDes, &isNull); + attrDatum2 = index_getattr(indexTuple2, i, tupDes, &isNull); + compare = _bt_invokestrat(rel, i, strat, attrDatum1, attrDatum2); + if (!compare) { + return (false); + } + } + return (true); +} + +/* + * _bt_updateitem() -- updates the key of the item identified by the + * oid with the key of newItem (done in place) + * + */ +static void +_bt_updateitem(Relation rel, + Size keysz, + Buffer buf, + Oid bti_oid, + BTItem newItem) +{ + Page page; + OffsetNumber maxoff; + OffsetNumber i; + ItemPointerData itemPtrData; + BTItem item; + IndexTuple oldIndexTuple, newIndexTuple; + + page = BufferGetPage(buf); + maxoff = PageGetMaxOffsetNumber(page); + + /* locate item on the page */ + i = P_HIKEY; + do { + item = (BTItem) PageGetItem(page, PageGetItemId(page, i)); + i = OffsetNumberNext(i); + } while (i <= maxoff && item->bti_oid != bti_oid); + + /* this should never happen (in theory) */ + if (item->bti_oid != bti_oid) { + elog(FATAL, "_bt_getstackbuf was lying!!"); + } + + oldIndexTuple = &(item->bti_itup); + newIndexTuple = &(newItem->bti_itup); + + /* keep the original item pointer */ + ItemPointerCopy(&(oldIndexTuple->t_tid), &itemPtrData); + CopyIndexTuple(newIndexTuple, &oldIndexTuple); + ItemPointerCopy(&itemPtrData, &(oldIndexTuple->t_tid)); +} diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c new file mode 100644 index 00000000000..ce411a80d11 --- /dev/null +++ b/src/backend/access/nbtree/nbtpage.c @@ -0,0 +1,523 @@ +/*------------------------------------------------------------------------- + * + * btpage.c-- + * BTree-specific page management code for the Postgres btree access + * method. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + * + * NOTES + * Postgres btree pages look like ordinary relation pages. The opaque + * data at high addresses includes pointers to left and right siblings + * and flag data describing page state. The first page in a btree, page + * zero, is special -- it stores meta-information describing the tree. + * Pages one and higher store the actual tree data. 
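+ *
+ *	The meta-information lives in a BTMetaPageData struct (a magic
+ *	number, a version number, and the block number of the current
+ *	root) that the BTPageGetMeta() macro below overlays directly on
+ *	the metapage's line-pointer area.  A rough picture of the file,
+ *	offered only as an illustration of the declarations below:
+ *
+ *	    block 0:     PageHeader | btm_magic btm_version btm_root | .... | BTPageOpaqueData (BTP_META)
+ *	    block 1 ...: PageHeader | index items .................. | .... | BTPageOpaqueData (prev, next, flags)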
+ * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/genam.h" +#include "access/nbtree.h" + +#define BTREE_METAPAGE 0 +#define BTREE_MAGIC 0x053162 +#define BTREE_VERSION 0 + +typedef struct BTMetaPageData { + uint32 btm_magic; + uint32 btm_version; + BlockNumber btm_root; +} BTMetaPageData; + +#define BTPageGetMeta(p) \ + ((BTMetaPageData *) &((PageHeader) p)->pd_linp[0]) + +extern bool BuildingBtree; + +/* + * We use high-concurrency locking on btrees. There are two cases in + * which we don't do locking. One is when we're building the btree. + * Since the creating transaction has not committed, no one can see + * the index, and there's no reason to share locks. The second case + * is when we're just starting up the database system. We use some + * special-purpose initialization code in the relation cache manager + * (see utils/cache/relcache.c) to allow us to do indexed scans on + * the system catalogs before we'd normally be able to. This happens + * before the lock table is fully initialized, so we can't use it. + * Strictly speaking, this violates 2pl, but we don't do 2pl on the + * system catalogs anyway, so I declare this to be okay. + */ + +#define USELOCKING (!BuildingBtree && !IsInitProcessingMode()) + +/* + * _bt_metapinit() -- Initialize the metadata page of a btree. + */ +void +_bt_metapinit(Relation rel) +{ + Buffer buf; + Page pg; + int nblocks; + BTMetaPageData metad; + BTPageOpaque op; + + /* can't be sharing this with anyone, now... */ + if (USELOCKING) + RelationSetLockForWrite(rel); + + if ((nblocks = RelationGetNumberOfBlocks(rel)) != 0) { + elog(WARN, "Cannot initialize non-empty btree %s", + RelationGetRelationName(rel)); + } + + buf = ReadBuffer(rel, P_NEW); + pg = BufferGetPage(buf); + _bt_pageinit(pg, BufferGetPageSize(buf)); + + metad.btm_magic = BTREE_MAGIC; + metad.btm_version = BTREE_VERSION; + metad.btm_root = P_NONE; + memmove((char *) BTPageGetMeta(pg), (char *) &metad, sizeof(metad)); + + op = (BTPageOpaque) PageGetSpecialPointer(pg); + op->btpo_flags = BTP_META; + + WriteBuffer(buf); + + /* all done */ + if (USELOCKING) + RelationUnsetLockForWrite(rel); +} + +/* + * _bt_checkmeta() -- Verify that the metadata stored in a btree are + * reasonable. + */ +void +_bt_checkmeta(Relation rel) +{ + Buffer metabuf; + Page metap; + BTMetaPageData *metad; + BTPageOpaque op; + int nblocks; + + /* if the relation is empty, this is init time; don't complain */ + if ((nblocks = RelationGetNumberOfBlocks(rel)) == 0) + return; + + metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ); + metap = BufferGetPage(metabuf); + op = (BTPageOpaque) PageGetSpecialPointer(metap); + if (!(op->btpo_flags & BTP_META)) { + elog(WARN, "Invalid metapage for index %s", + RelationGetRelationName(rel)); + } + metad = BTPageGetMeta(metap); + + if (metad->btm_magic != BTREE_MAGIC) { + elog(WARN, "Index %s is not a btree", + RelationGetRelationName(rel)); + } + + if (metad->btm_version != BTREE_VERSION) { + elog(WARN, "Version mismatch on %s: version %d file, version %d code", + RelationGetRelationName(rel), + metad->btm_version, BTREE_VERSION); + } + + _bt_relbuf(rel, metabuf, BT_READ); +} + +/* + * _bt_getroot() -- Get the root page of the btree. 
+ * + * Since the root page can move around the btree file, we have to read + * its location from the metadata page, and then read the root page + * itself. If no root page exists yet, we have to create one. The + * standard class of race conditions exists here; I think I covered + * them all in the Hopi Indian rain dance of lock requests below. + * + * We pass in the access type (BT_READ or BT_WRITE), and return the + * root page's buffer with the appropriate lock type set. Reference + * count on the root page gets bumped by ReadBuffer. The metadata + * page is unlocked and unreferenced by this process when this routine + * returns. + */ +Buffer +_bt_getroot(Relation rel, int access) +{ + Buffer metabuf; + Page metapg; + BTPageOpaque metaopaque; + Buffer rootbuf; + Page rootpg; + BTPageOpaque rootopaque; + BlockNumber rootblkno; + BTMetaPageData *metad; + + metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ); + metapg = BufferGetPage(metabuf); + metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg); + Assert(metaopaque->btpo_flags & BTP_META); + metad = BTPageGetMeta(metapg); + + /* if no root page initialized yet, do it */ + if (metad->btm_root == P_NONE) { + + /* turn our read lock in for a write lock */ + _bt_relbuf(rel, metabuf, BT_READ); + metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE); + metapg = BufferGetPage(metabuf); + metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg); + Assert(metaopaque->btpo_flags & BTP_META); + metad = BTPageGetMeta(metapg); + + /* + * Race condition: if someone else initialized the metadata between + * the time we released the read lock and acquired the write lock, + * above, we want to avoid doing it again. + */ + + if (metad->btm_root == P_NONE) { + + /* + * Get, initialize, write, and leave a lock of the appropriate + * type on the new root page. Since this is the first page in + * the tree, it's a leaf. + */ + + rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE); + rootblkno = BufferGetBlockNumber(rootbuf); + rootpg = BufferGetPage(rootbuf); + metad->btm_root = rootblkno; + _bt_pageinit(rootpg, BufferGetPageSize(rootbuf)); + rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpg); + rootopaque->btpo_flags |= (BTP_LEAF | BTP_ROOT); + _bt_wrtnorelbuf(rel, rootbuf); + + /* swap write lock for read lock, if appropriate */ + if (access != BT_WRITE) { + _bt_setpagelock(rel, rootblkno, BT_READ); + _bt_unsetpagelock(rel, rootblkno, BT_WRITE); + } + + /* okay, metadata is correct */ + _bt_wrtbuf(rel, metabuf); + } else { + + /* + * Metadata initialized by someone else. In order to guarantee + * no deadlocks, we have to release the metadata page and start + * all over again. + */ + + _bt_relbuf(rel, metabuf, BT_WRITE); + return (_bt_getroot(rel, access)); + } + } else { + rootbuf = _bt_getbuf(rel, metad->btm_root, access); + + /* done with the meta page */ + _bt_relbuf(rel, metabuf, BT_READ); + } + + /* + * Race condition: If the root page split between the time we looked + * at the metadata page and got the root buffer, then we got the wrong + * buffer. + */ + + rootpg = BufferGetPage(rootbuf); + rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpg); + if (!(rootopaque->btpo_flags & BTP_ROOT)) { + + /* it happened, try again */ + _bt_relbuf(rel, rootbuf, access); + return (_bt_getroot(rel, access)); + } + + /* + * By here, we have a correct lock on the root block, its reference + * count is correct, and we have no lock set on the metadata page. + * Return the root block. 
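+ *
+ * For illustration only, the callers elsewhere in this access method
+ * (see _bt_search() and _bt_endpoint() in nbtsearch.c) all follow the
+ * same condensed pattern:
+ *
+ *	buf = _bt_getroot(rel, BT_READ);
+ *	page = BufferGetPage(buf);
+ *	... descend or scan from the root page ...
+ *	_bt_relbuf(rel, buf, BT_READ);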
+ */ + + return (rootbuf); +} + +/* + * _bt_getbuf() -- Get a buffer by block number for read or write. + * + * When this routine returns, the appropriate lock is set on the + * requested buffer its reference count is correct. + */ +Buffer +_bt_getbuf(Relation rel, BlockNumber blkno, int access) +{ + Buffer buf; + Page page; + + /* + * If we want a new block, we can't set a lock of the appropriate type + * until we've instantiated the buffer. + */ + + if (blkno != P_NEW) { + if (access == BT_WRITE) + _bt_setpagelock(rel, blkno, BT_WRITE); + else + _bt_setpagelock(rel, blkno, BT_READ); + + buf = ReadBuffer(rel, blkno); + } else { + buf = ReadBuffer(rel, blkno); + blkno = BufferGetBlockNumber(buf); + page = BufferGetPage(buf); + _bt_pageinit(page, BufferGetPageSize(buf)); + + if (access == BT_WRITE) + _bt_setpagelock(rel, blkno, BT_WRITE); + else + _bt_setpagelock(rel, blkno, BT_READ); + } + + /* ref count and lock type are correct */ + return (buf); +} + +/* + * _bt_relbuf() -- release a locked buffer. + */ +void +_bt_relbuf(Relation rel, Buffer buf, int access) +{ + BlockNumber blkno; + + blkno = BufferGetBlockNumber(buf); + + /* access had better be one of read or write */ + if (access == BT_WRITE) + _bt_unsetpagelock(rel, blkno, BT_WRITE); + else + _bt_unsetpagelock(rel, blkno, BT_READ); + + ReleaseBuffer(buf); +} + +/* + * _bt_wrtbuf() -- write a btree page to disk. + * + * This routine releases the lock held on the buffer and our reference + * to it. It is an error to call _bt_wrtbuf() without a write lock + * or a reference to the buffer. + */ +void +_bt_wrtbuf(Relation rel, Buffer buf) +{ + BlockNumber blkno; + + blkno = BufferGetBlockNumber(buf); + WriteBuffer(buf); + _bt_unsetpagelock(rel, blkno, BT_WRITE); +} + +/* + * _bt_wrtnorelbuf() -- write a btree page to disk, but do not release + * our reference or lock. + * + * It is an error to call _bt_wrtnorelbuf() without a write lock + * or a reference to the buffer. + */ +void +_bt_wrtnorelbuf(Relation rel, Buffer buf) +{ + BlockNumber blkno; + + blkno = BufferGetBlockNumber(buf); + WriteNoReleaseBuffer(buf); +} + +/* + * _bt_pageinit() -- Initialize a new page. + */ +void +_bt_pageinit(Page page, Size size) +{ + /* + * Cargo-cult programming -- don't really need this to be zero, but + * creating new pages is an infrequent occurrence and it makes me feel + * good when I know they're empty. + */ + + memset(page, 0, size); + + PageInit(page, size, sizeof(BTPageOpaqueData)); +} + +/* + * _bt_metaproot() -- Change the root page of the btree. + * + * Lehman and Yao require that the root page move around in order to + * guarantee deadlock-free short-term, fine-granularity locking. When + * we split the root page, we record the new parent in the metadata page + * for the relation. This routine does the work. + * + * No direct preconditions, but if you don't have the a write lock on + * at least the old root page when you call this, you're making a big + * mistake. On exit, metapage data is correct and we no longer have + * a reference to or lock on the metapage. 
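+ *
+ * A condensed sketch of the expected call sequence, taken from
+ * _bt_newroot():
+ *
+ *	rootbknum = BufferGetBlockNumber(rootbuf);
+ *	_bt_wrtbuf(rel, rootbuf);		-- write and unlock the new root
+ *	_bt_metaproot(rel, rootbknum);		-- then repoint the metapage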
+ */ +void +_bt_metaproot(Relation rel, BlockNumber rootbknum) +{ + Buffer metabuf; + Page metap; + BTPageOpaque metaopaque; + BTMetaPageData *metad; + + metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE); + metap = BufferGetPage(metabuf); + metaopaque = (BTPageOpaque) PageGetSpecialPointer(metap); + Assert(metaopaque->btpo_flags & BTP_META); + metad = BTPageGetMeta(metap); + metad->btm_root = rootbknum; + _bt_wrtbuf(rel, metabuf); +} + +/* + * _bt_getstackbuf() -- Walk back up the tree one step, and find the item + * we last looked at in the parent. + * + * This is possible because we save a bit image of the last item + * we looked at in the parent, and the update algorithm guarantees + * that if items above us in the tree move, they only move right. + */ +Buffer +_bt_getstackbuf(Relation rel, BTStack stack, int access) +{ + Buffer buf; + BlockNumber blkno; + OffsetNumber start, offnum, maxoff; + OffsetNumber i; + Page page; + ItemId itemid; + BTItem item; + BTPageOpaque opaque; + + blkno = stack->bts_blkno; + buf = _bt_getbuf(rel, blkno, access); + page = BufferGetPage(buf); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + maxoff = PageGetMaxOffsetNumber(page); + + if (maxoff >= stack->bts_offset) { + itemid = PageGetItemId(page, stack->bts_offset); + item = (BTItem) PageGetItem(page, itemid); + + /* if the item is where we left it, we're done */ + if (item->bti_oid == stack->bts_btitem->bti_oid) + return (buf); + + /* if the item has just moved right on this page, we're done */ + for (i = OffsetNumberNext(stack->bts_offset); + i <= maxoff; + i = OffsetNumberNext(i)) { + itemid = PageGetItemId(page, i); + item = (BTItem) PageGetItem(page, itemid); + + /* if the item is where we left it, we're done */ + if (item->bti_oid == stack->bts_btitem->bti_oid) + return (buf); + } + } + + /* by here, the item we're looking for moved right at least one page */ + for (;;) { + blkno = opaque->btpo_next; + if (P_RIGHTMOST(opaque)) + elog(FATAL, "my bits moved right off the end of the world!"); + + _bt_relbuf(rel, buf, access); + buf = _bt_getbuf(rel, blkno, access); + page = BufferGetPage(buf); + maxoff = PageGetMaxOffsetNumber(page); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + + /* if we have a right sibling, step over the high key */ + start = P_RIGHTMOST(opaque) ? 
P_HIKEY : P_FIRSTKEY; + + /* see if it's on this page */ + for (offnum = start; + offnum <= maxoff; + offnum = OffsetNumberNext(offnum)) { + itemid = PageGetItemId(page, offnum); + item = (BTItem) PageGetItem(page, itemid); + if (item->bti_oid == stack->bts_btitem->bti_oid) + return (buf); + } + } +} + +void +_bt_setpagelock(Relation rel, BlockNumber blkno, int access) +{ + ItemPointerData iptr; + + if (USELOCKING) { + ItemPointerSet(&iptr, blkno, P_HIKEY); + + if (access == BT_WRITE) + RelationSetSingleWLockPage(rel, &iptr); + else + RelationSetSingleRLockPage(rel, &iptr); + } +} + +void +_bt_unsetpagelock(Relation rel, BlockNumber blkno, int access) +{ + ItemPointerData iptr; + + if (USELOCKING) { + ItemPointerSet(&iptr, blkno, P_HIKEY); + + if (access == BT_WRITE) + RelationUnsetSingleWLockPage(rel, &iptr); + else + RelationUnsetSingleRLockPage(rel, &iptr); + } +} + +void +_bt_pagedel(Relation rel, ItemPointer tid) +{ + Buffer buf; + Page page; + BlockNumber blkno; + OffsetNumber offno; + + blkno = ItemPointerGetBlockNumber(tid); + offno = ItemPointerGetOffsetNumber(tid); + + buf = _bt_getbuf(rel, blkno, BT_WRITE); + page = BufferGetPage(buf); + + PageIndexTupleDelete(page, offno); + + /* write the buffer and release the lock */ + _bt_wrtbuf(rel, buf); +} diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c new file mode 100644 index 00000000000..06016119964 --- /dev/null +++ b/src/backend/access/nbtree/nbtree.c @@ -0,0 +1,516 @@ +/*------------------------------------------------------------------------- + * + * btree.c-- + * Implementation of Lehman and Yao's btree management algorithm for + * Postgres. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + * + * NOTES + * This file contains only the public interface routines. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/sdir.h" +#include "access/nbtree.h" +#include "access/funcindex.h" + +#include "nodes/execnodes.h" +#include "nodes/plannodes.h" + +#include "executor/executor.h" +#include "executor/tuptable.h" + +#include "catalog/index.h" + +bool BuildingBtree = false; +bool FastBuild = false; /* turn this on to make bulk builds work*/ + +/* + * btbuild() -- build a new btree index. + * + * We use a global variable to record the fact that we're creating + * a new index. This is used to avoid high-concurrency locking, + * since the index won't be visible until this transaction commits + * and since building is guaranteed to be single-threaded. 
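+ *
+ * (The page-lock primitives in nbtpage.c consult this flag through
+ * their USELOCKING test, roughly "!BuildingBtree &&
+ * !IsInitProcessingMode()", so none of the per-page lock traffic
+ * below is incurred while the build is in progress.)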
+ */ +void +btbuild(Relation heap, + Relation index, + int natts, + AttrNumber *attnum, + IndexStrategy istrat, + uint16 pcount, + Datum *params, + FuncIndexInfo *finfo, + PredInfo *predInfo) +{ + HeapScanDesc hscan; + Buffer buffer; + HeapTuple htup; + IndexTuple itup; + TupleDesc htupdesc, itupdesc; + Datum *attdata; + bool *nulls; + InsertIndexResult res; + int nhtups, nitups; + int i; + BTItem btitem; + ExprContext *econtext; + TupleTable tupleTable; + TupleTableSlot *slot; + Oid hrelid, irelid; + Node *pred, *oldPred; + void *spool; + + /* note that this is a new btree */ + BuildingBtree = true; + + pred = predInfo->pred; + oldPred = predInfo->oldPred; + + /* initialize the btree index metadata page (if this is a new index) */ + if (oldPred == NULL) + _bt_metapinit(index); + + /* get tuple descriptors for heap and index relations */ + htupdesc = RelationGetTupleDescriptor(heap); + itupdesc = RelationGetTupleDescriptor(index); + + /* get space for data items that'll appear in the index tuple */ + attdata = (Datum *) palloc(natts * sizeof(Datum)); + nulls = (bool *) palloc(natts * sizeof(bool)); + + /* + * If this is a predicate (partial) index, we will need to evaluate the + * predicate using ExecQual, which requires the current tuple to be in a + * slot of a TupleTable. In addition, ExecQual must have an ExprContext + * referring to that slot. Here, we initialize dummy TupleTable and + * ExprContext objects for this purpose. --Nels, Feb '92 + */ +#ifndef OMIT_PARTIAL_INDEX + if (pred != NULL || oldPred != NULL) { + tupleTable = ExecCreateTupleTable(1); + slot = ExecAllocTableSlot(tupleTable); + econtext = makeNode(ExprContext); + FillDummyExprContext(econtext, slot, htupdesc, InvalidBuffer); + } +#endif /* OMIT_PARTIAL_INDEX */ + + /* start a heap scan */ + hscan = heap_beginscan(heap, 0, NowTimeQual, 0, (ScanKey) NULL); + htup = heap_getnext(hscan, 0, &buffer); + + /* build the index */ + nhtups = nitups = 0; + + if (FastBuild) { + spool = _bt_spoolinit(index, 7); + res = (InsertIndexResult) NULL; + } + + for (; HeapTupleIsValid(htup); htup = heap_getnext(hscan, 0, &buffer)) { + + nhtups++; + + /* + * If oldPred != NULL, this is an EXTEND INDEX command, so skip + * this tuple if it was already in the existing partial index + */ + if (oldPred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + + /*SetSlotContents(slot, htup);*/ + slot->val = htup; + if (ExecQual((List*)oldPred, econtext) == true) { + nitups++; + continue; + } +#endif /* OMIT_PARTIAL_INDEX */ + } + + /* Skip this tuple if it doesn't satisfy the partial-index predicate */ + if (pred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + /* SetSlotContents(slot, htup); */ + slot->val = htup; + if (ExecQual((List*)pred, econtext) == false) + continue; +#endif /* OMIT_PARTIAL_INDEX */ + } + + nitups++; + + /* + * For the current heap tuple, extract all the attributes + * we use in this index, and note which are null. + */ + + for (i = 1; i <= natts; i++) { + int attoff; + bool attnull; + + /* + * Offsets are from the start of the tuple, and are + * zero-based; indices are one-based. The next call + * returns i - 1. That's data hiding for you. + */ + + attoff = AttrNumberGetAttrOffset(i); + attdata[attoff] = GetIndexValue(htup, + htupdesc, + attoff, + attnum, + finfo, + &attnull, + buffer); + nulls[attoff] = (attnull ? 'n' : ' '); + } + + /* form an index tuple and point it at the heap tuple */ + itup = index_formtuple(itupdesc, attdata, nulls); + + /* + * If the single index key is null, we don't insert it into + * the index. 
Btrees support scans on <, <=, =, >=, and >. + * Relational algebra says that A op B (where op is one of the + * operators above) returns null if either A or B is null. This + * means that no qualification used in an index scan could ever + * return true on a null attribute. It also means that indices + * can't be used by ISNULL or NOTNULL scans, but that's an + * artifact of the strategy map architecture chosen in 1986, not + * of the way nulls are handled here. + */ + + if (itup->t_info & INDEX_NULL_MASK) { + pfree(itup); + continue; + } + + itup->t_tid = htup->t_ctid; + btitem = _bt_formitem(itup); + + /* + * if we are doing bottom-up btree build, we insert the index + * into a spool page for subsequent processing. otherwise, we + * insert into the btree. + */ + if (FastBuild) { + _bt_spool(index, btitem, spool); + } else { + res = _bt_doinsert(index, btitem); + } + + pfree(btitem); + pfree(itup); + if (res) { + pfree(res); + } + } + + /* okay, all heap tuples are indexed */ + heap_endscan(hscan); + + if (pred != NULL || oldPred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + ExecDestroyTupleTable(tupleTable, true); + pfree(econtext); +#endif /* OMIT_PARTIAL_INDEX */ + } + + /* + * if we are doing bottom-up btree build, we now have a bunch of + * sorted runs in the spool pages. finish the build by (1) + * merging the runs, (2) inserting the sorted tuples into btree + * pages and (3) building the upper levels. + */ + if (FastBuild) { + _bt_spool(index, (BTItem) NULL, spool); /* flush spool */ + _bt_leafbuild(index, spool); + _bt_spooldestroy(spool); + } + + /* + * Since we just counted the tuples in the heap, we update its + * stats in pg_class to guarantee that the planner takes advantage + * of the index we just created. Finally, only update statistics + * during normal index definitions, not for indices on system catalogs + * created during bootstrap processing. We must close the relations + * before updatings statistics to guarantee that the relcache entries + * are flushed when we increment the command counter in UpdateStats(). + */ + if (IsNormalProcessingMode()) + { + hrelid = heap->rd_id; + irelid = index->rd_id; + heap_close(heap); + index_close(index); + UpdateStats(hrelid, nhtups, true); + UpdateStats(irelid, nitups, false); + if (oldPred != NULL) { + if (nitups == nhtups) pred = NULL; + UpdateIndexPredicate(irelid, oldPred, pred); + } + } + + /* be tidy */ + pfree(nulls); + pfree(attdata); + + /* all done */ + BuildingBtree = false; +} + +/* + * btinsert() -- insert an index tuple into a btree. + * + * Descend the tree recursively, find the appropriate location for our + * new tuple, put it there, set its unique OID as appropriate, and + * return an InsertIndexResult to the caller. + */ +InsertIndexResult +btinsert(Relation rel, IndexTuple itup) +{ + BTItem btitem; + InsertIndexResult res; + + if (itup->t_info & INDEX_NULL_MASK) + return ((InsertIndexResult) NULL); + + btitem = _bt_formitem(itup); + + res = _bt_doinsert(rel, btitem); + pfree(btitem); + + return (res); +} + +/* + * btgettuple() -- Get the next tuple in the scan. + */ +char * +btgettuple(IndexScanDesc scan, ScanDirection dir) +{ + RetrieveIndexResult res; + + /* + * If we've already initialized this scan, we can just advance it + * in the appropriate direction. If we haven't done so yet, we + * call a routine to get the first item in the scan. 
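+ *
+ * A rough sketch of how these public entry points are meant to be
+ * driven (the caller below is illustrative, not code from this file):
+ *
+ *	scan = (IndexScanDesc) btbeginscan(rel, false, 1, key);
+ *	while ((res = (RetrieveIndexResult) btgettuple(scan, ForwardScanDirection)) != NULL)
+ *		... consume res ...
+ *	btendscan(scan);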
+ */ + + if (ItemPointerIsValid(&(scan->currentItemData))) + res = _bt_next(scan, dir); + else + res = _bt_first(scan, dir); + + return ((char *) res); +} + +/* + * btbeginscan() -- start a scan on a btree index + */ +char * +btbeginscan(Relation rel, bool fromEnd, uint16 keysz, ScanKey scankey) +{ + IndexScanDesc scan; + StrategyNumber strat; + BTScanOpaque so; + + /* first order the keys in the qualification */ + if (keysz > 1) + _bt_orderkeys(rel, &keysz, scankey); + + /* now get the scan */ + scan = RelationGetIndexScan(rel, fromEnd, keysz, scankey); + so = (BTScanOpaque) palloc(sizeof(BTScanOpaqueData)); + so->btso_curbuf = so->btso_mrkbuf = InvalidBuffer; + scan->opaque = so; + + /* finally, be sure that the scan exploits the tree order */ + scan->scanFromEnd = false; + scan->flags = 0x0; + if (keysz > 0) { + strat = _bt_getstrat(scan->relation, 1 /* XXX */, + scankey[0].sk_procedure); + + if (strat == BTLessStrategyNumber + || strat == BTLessEqualStrategyNumber) + scan->scanFromEnd = true; + } else { + scan->scanFromEnd = true; + } + + /* register scan in case we change pages it's using */ + _bt_regscan(scan); + + return ((char *) scan); +} + +/* + * btrescan() -- rescan an index relation + */ +void +btrescan(IndexScanDesc scan, bool fromEnd, ScanKey scankey) +{ + ItemPointer iptr; + BTScanOpaque so; + + so = (BTScanOpaque) scan->opaque; + + /* we hold a read lock on the current page in the scan */ + if (ItemPointerIsValid(iptr = &(scan->currentItemData))) { + _bt_relbuf(scan->relation, so->btso_curbuf, BT_READ); + so->btso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + /* and we hold a read lock on the last marked item in the scan */ + if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) { + _bt_relbuf(scan->relation, so->btso_mrkbuf, BT_READ); + so->btso_mrkbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + /* reset the scan key */ + if (scan->numberOfKeys > 0) { + memmove(scan->keyData, + scankey, + scan->numberOfKeys * sizeof(ScanKeyData)); + } +} + +void +btmovescan(IndexScanDesc scan, Datum v) +{ + ItemPointer iptr; + BTScanOpaque so; + + so = (BTScanOpaque) scan->opaque; + + /* release any locks we still hold */ + if (ItemPointerIsValid(iptr = &(scan->currentItemData))) { + _bt_relbuf(scan->relation, so->btso_curbuf, BT_READ); + so->btso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + scan->keyData[0].sk_argument = v; +} + +/* + * btendscan() -- close down a scan + */ +void +btendscan(IndexScanDesc scan) +{ + ItemPointer iptr; + BTScanOpaque so; + + so = (BTScanOpaque) scan->opaque; + + /* release any locks we still hold */ + if (ItemPointerIsValid(iptr = &(scan->currentItemData))) { + if (BufferIsValid(so->btso_curbuf)) + _bt_relbuf(scan->relation, so->btso_curbuf, BT_READ); + so->btso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) { + if (BufferIsValid(so->btso_mrkbuf)) + _bt_relbuf(scan->relation, so->btso_mrkbuf, BT_READ); + so->btso_mrkbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + /* don't need scan registered anymore */ + _bt_dropscan(scan); + + /* be tidy */ +#ifdef PERFECT_MMGR + pfree (scan->opaque); +#endif /* PERFECT_MMGR */ +} + +/* + * btmarkpos() -- save current scan position + */ +void +btmarkpos(IndexScanDesc scan) +{ + ItemPointer iptr; + BTScanOpaque so; + + so = (BTScanOpaque) scan->opaque; + + /* release lock on old marked data, if any */ + if (ItemPointerIsValid(iptr = &(scan->currentMarkData))) { + _bt_relbuf(scan->relation, 
so->btso_mrkbuf, BT_READ); + so->btso_mrkbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + /* bump lock on currentItemData and copy to currentMarkData */ + if (ItemPointerIsValid(&(scan->currentItemData))) { + so->btso_mrkbuf = _bt_getbuf(scan->relation, + BufferGetBlockNumber(so->btso_curbuf), + BT_READ); + scan->currentMarkData = scan->currentItemData; + } +} + +/* + * btrestrpos() -- restore scan to last saved position + */ +void +btrestrpos(IndexScanDesc scan) +{ + ItemPointer iptr; + BTScanOpaque so; + + so = (BTScanOpaque) scan->opaque; + + /* release lock on current data, if any */ + if (ItemPointerIsValid(iptr = &(scan->currentItemData))) { + _bt_relbuf(scan->relation, so->btso_curbuf, BT_READ); + so->btso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(iptr); + } + + /* bump lock on currentMarkData and copy to currentItemData */ + if (ItemPointerIsValid(&(scan->currentMarkData))) { + so->btso_curbuf = _bt_getbuf(scan->relation, + BufferGetBlockNumber(so->btso_mrkbuf), + BT_READ); + + scan->currentItemData = scan->currentMarkData; + } +} + +/* stubs */ +void +btdelete(Relation rel, ItemPointer tid) +{ + /* adjust any active scans that will be affected by this deletion */ + _bt_adjscans(rel, tid); + + /* delete the data from the page */ + _bt_pagedel(rel, tid); +} diff --git a/src/backend/access/nbtree/nbtscan.c b/src/backend/access/nbtree/nbtscan.c new file mode 100644 index 00000000000..62a029bc06f --- /dev/null +++ b/src/backend/access/nbtree/nbtscan.c @@ -0,0 +1,164 @@ +/*------------------------------------------------------------------------- + * + * btscan.c-- + * manage scans on btrees. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtscan.c,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + * + * + * NOTES + * Because we can be doing an index scan on a relation while we update + * it, we need to avoid missing data that moves around in the index. + * The routines and global variables in this file guarantee that all + * scans in the local address space stay correctly positioned. This + * is all we need to worry about, since write locking guarantees that + * no one else will be on the same page at the same time as we are. + * + * The scheme is to manage a list of active scans in the current backend. + * Whenever we add or remove records from an index, or whenever we + * split a leaf page, we check the list of active scans to see if any + * has been affected. A scan is affected only if it is on the same + * relation, and the same page, as the update. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/sdir.h" +#include "access/nbtree.h" + +typedef struct BTScanListData { + IndexScanDesc btsl_scan; + struct BTScanListData *btsl_next; +} BTScanListData; + +typedef BTScanListData *BTScanList; + +static BTScanList BTScans = (BTScanList) NULL; + +/* + * _bt_regscan() -- register a new scan. 
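+ *
+ *	Registration is paired with deregistration in nbtree.c:
+ *	btbeginscan() ends with _bt_regscan(scan), and btendscan()
+ *	calls _bt_dropscan(scan) once the scan is finished.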
+ */ +void +_bt_regscan(IndexScanDesc scan) +{ + BTScanList new_el; + + new_el = (BTScanList) palloc(sizeof(BTScanListData)); + new_el->btsl_scan = scan; + new_el->btsl_next = BTScans; + BTScans = new_el; +} + +/* + * _bt_dropscan() -- drop a scan from the scan list + */ +void +_bt_dropscan(IndexScanDesc scan) +{ + BTScanList chk, last; + + last = (BTScanList) NULL; + for (chk = BTScans; + chk != (BTScanList) NULL && chk->btsl_scan != scan; + chk = chk->btsl_next) { + last = chk; + } + + if (chk == (BTScanList) NULL) + elog(WARN, "btree scan list trashed; can't find 0x%lx", scan); + + if (last == (BTScanList) NULL) + BTScans = chk->btsl_next; + else + last->btsl_next = chk->btsl_next; + +#ifdef PERFECT_MEM + pfree (chk); +#endif /* PERFECT_MEM */ +} + +void +_bt_adjscans(Relation rel, ItemPointer tid) +{ + BTScanList l; + Oid relid; + + relid = rel->rd_id; + for (l = BTScans; l != (BTScanList) NULL; l = l->btsl_next) { + if (relid == l->btsl_scan->relation->rd_id) + _bt_scandel(l->btsl_scan, ItemPointerGetBlockNumber(tid), + ItemPointerGetOffsetNumber(tid)); + } +} + +void +_bt_scandel(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno) +{ + ItemPointer current; + Buffer buf; + BTScanOpaque so; + + if (!_bt_scantouched(scan, blkno, offno)) + return; + + so = (BTScanOpaque) scan->opaque; + buf = so->btso_curbuf; + + current = &(scan->currentItemData); + if (ItemPointerIsValid(current) + && ItemPointerGetBlockNumber(current) == blkno + && ItemPointerGetOffsetNumber(current) >= offno) { + _bt_step(scan, &buf, BackwardScanDirection); + so->btso_curbuf = buf; + } + + current = &(scan->currentMarkData); + if (ItemPointerIsValid(current) + && ItemPointerGetBlockNumber(current) == blkno + && ItemPointerGetOffsetNumber(current) >= offno) { + ItemPointerData tmp; + tmp = *current; + *current = scan->currentItemData; + scan->currentItemData = tmp; + _bt_step(scan, &buf, BackwardScanDirection); + so->btso_mrkbuf = buf; + tmp = *current; + *current = scan->currentItemData; + scan->currentItemData = tmp; + } +} + +bool +_bt_scantouched(IndexScanDesc scan, BlockNumber blkno, OffsetNumber offno) +{ + ItemPointer current; + + current = &(scan->currentItemData); + if (ItemPointerIsValid(current) + && ItemPointerGetBlockNumber(current) == blkno + && ItemPointerGetOffsetNumber(current) >= offno) + return (true); + + current = &(scan->currentMarkData); + if (ItemPointerIsValid(current) + && ItemPointerGetBlockNumber(current) == blkno + && ItemPointerGetOffsetNumber(current) >= offno) + return (true); + + return (false); +} diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c new file mode 100644 index 00000000000..d7a7fc7d62e --- /dev/null +++ b/src/backend/access/nbtree/nbtsearch.c @@ -0,0 +1,1133 @@ +/*------------------------------------------------------------------------- + * + * btsearch.c-- + * search code for postgres btrees. 
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "fmgr.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/skey.h" +#include "access/sdir.h" +#include "access/nbtree.h" + +static BTStack _bt_searchr(Relation rel, int keysz, ScanKey scankey, Buffer *bufP, BTStack stack_in); +static OffsetNumber _bt_firsteq(Relation rel, TupleDesc itupdesc, Page page, Size keysz, ScanKey scankey, OffsetNumber offnum); +static int _bt_compare(Relation rel, TupleDesc itupdesc, Page page, int keysz, ScanKey scankey, OffsetNumber offnum); +static bool _bt_twostep(IndexScanDesc scan, Buffer *bufP, ScanDirection dir); +static RetrieveIndexResult _bt_endpoint(IndexScanDesc scan, ScanDirection dir); + +/* + * _bt_search() -- Search for a scan key in the index. + * + * This routine is actually just a helper that sets things up and + * calls a recursive-descent search routine on the tree. + */ +BTStack +_bt_search(Relation rel, int keysz, ScanKey scankey, Buffer *bufP) +{ + *bufP = _bt_getroot(rel, BT_READ); + return (_bt_searchr(rel, keysz, scankey, bufP, (BTStack) NULL)); +} + +/* + * _bt_searchr() -- Search the tree recursively for a particular scankey. + */ +static BTStack +_bt_searchr(Relation rel, + int keysz, + ScanKey scankey, + Buffer *bufP, + BTStack stack_in) +{ + BTStack stack; + OffsetNumber offnum; + Page page; + BTPageOpaque opaque; + BlockNumber par_blkno; + BlockNumber blkno; + ItemId itemid; + BTItem btitem; + BTItem item_save; + int item_nbytes; + IndexTuple itup; + + /* if this is a leaf page, we're done */ + page = BufferGetPage(*bufP); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + if (opaque->btpo_flags & BTP_LEAF) + return (stack_in); + + /* + * Find the appropriate item on the internal page, and get the child + * page that it points to. + */ + + par_blkno = BufferGetBlockNumber(*bufP); + offnum = _bt_binsrch(rel, *bufP, keysz, scankey, BT_DESCENT); + itemid = PageGetItemId(page, offnum); + btitem = (BTItem) PageGetItem(page, itemid); + itup = &(btitem->bti_itup); + blkno = ItemPointerGetBlockNumber(&(itup->t_tid)); + + /* + * We need to save the bit image of the index entry we chose in the + * parent page on a stack. In case we split the tree, we'll use this + * bit image to figure out what our real parent page is, in case the + * parent splits while we're working lower in the tree. See the paper + * by Lehman and Yao for how this is detected and handled. (We use + * unique OIDs to disambiguate duplicate keys in the index -- Lehman + * and Yao disallow duplicate keys). 
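+ *
+ * (A sketch of how the saved entry is used later: when an insertion
+ * has to split a page, it hands the stack back to _bt_getstackbuf(),
+ * e.g.
+ *
+ *	buf = _bt_getstackbuf(rel, stack, BT_WRITE);
+ *
+ * which re-locates the remembered item by its OID even if it has
+ * moved right in the meantime -- see nbtpage.c.)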
+ */ + + item_nbytes = ItemIdGetLength(itemid); + item_save = (BTItem) palloc(item_nbytes); + memmove((char *) item_save, (char *) btitem, item_nbytes); + stack = (BTStack) palloc(sizeof(BTStackData)); + stack->bts_blkno = par_blkno; + stack->bts_offset = offnum; + stack->bts_btitem = item_save; + stack->bts_parent = stack_in; + + /* drop the read lock on the parent page and acquire one on the child */ + _bt_relbuf(rel, *bufP, BT_READ); + *bufP = _bt_getbuf(rel, blkno, BT_READ); + + /* + * Race -- the page we just grabbed may have split since we read its + * pointer in the parent. If it has, we may need to move right to its + * new sibling. Do that. + */ + + *bufP = _bt_moveright(rel, *bufP, keysz, scankey, BT_READ); + + /* okay, all set to move down a level */ + return (_bt_searchr(rel, keysz, scankey, bufP, stack)); +} + +/* + * _bt_moveright() -- move right in the btree if necessary. + * + * When we drop and reacquire a pointer to a page, it is possible that + * the page has changed in the meanwhile. If this happens, we're + * guaranteed that the page has "split right" -- that is, that any + * data that appeared on the page originally is either on the page + * or strictly to the right of it. + * + * This routine decides whether or not we need to move right in the + * tree by examining the high key entry on the page. If that entry + * is strictly less than one we expect to be on the page, then our + * picture of the page is incorrect and we need to move right. + * + * On entry, we have the buffer pinned and a lock of the proper type. + * If we move right, we release the buffer and lock and acquire the + * same on the right sibling. + */ +Buffer +_bt_moveright(Relation rel, + Buffer buf, + int keysz, + ScanKey scankey, + int access) +{ + Page page; + BTPageOpaque opaque; + ItemId hikey; + ItemId itemid; + BlockNumber rblkno; + + page = BufferGetPage(buf); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + + /* if we're on a rightmost page, we don't need to move right */ + if (P_RIGHTMOST(opaque)) + return (buf); + + /* by convention, item 0 on non-rightmost pages is the high key */ + hikey = PageGetItemId(page, P_HIKEY); + + /* + * If the scan key that brought us to this page is >= the high key + * stored on the page, then the page has split and we need to move + * right. + */ + + if (_bt_skeycmp(rel, keysz, scankey, page, hikey, + BTGreaterEqualStrategyNumber)) { + + /* move right as long as we need to */ + do { + /* + * If this page consists of all duplicate keys (hikey and first + * key on the page have the same value), then we don't need to + * step right. + */ + if (PageGetMaxOffsetNumber(page) > P_HIKEY) { + itemid = PageGetItemId(page, P_FIRSTKEY); + if (_bt_skeycmp(rel, keysz, scankey, page, itemid, + BTEqualStrategyNumber)) { + /* break is for the "move right" while loop */ + break; + } + } + + /* step right one page */ + rblkno = opaque->btpo_next; + _bt_relbuf(rel, buf, access); + buf = _bt_getbuf(rel, rblkno, access); + page = BufferGetPage(buf); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + hikey = PageGetItemId(page, P_HIKEY); + + } while (! P_RIGHTMOST(opaque) + && _bt_skeycmp(rel, keysz, scankey, page, hikey, + BTGreaterEqualStrategyNumber)); + } + return (buf); +} + +/* + * _bt_skeycmp() -- compare a scan key to a particular item on a page using + * a requested strategy (<, <=, =, >=, >). + * + * We ignore the unique OIDs stored in the btree item here. Those + * numbers are intended for use internally only, in repositioning a + * scan after a page split. 
They do not impose any meaningful ordering. + * + * The comparison is A B, where A is the scan key and B is the + * tuple pointed at by itemid on page. + */ +bool +_bt_skeycmp(Relation rel, + Size keysz, + ScanKey scankey, + Page page, + ItemId itemid, + StrategyNumber strat) +{ + BTItem item; + IndexTuple indexTuple; + TupleDesc tupDes; + ScanKey entry; + int i; + Datum attrDatum; + Datum keyDatum; + bool compare; + bool isNull; + + item = (BTItem) PageGetItem(page, itemid); + indexTuple = &(item->bti_itup); + + tupDes = RelationGetTupleDescriptor(rel); + + /* see if the comparison is true for all of the key attributes */ + for (i=1; i <= keysz; i++) { + + entry = &scankey[i-1]; + attrDatum = index_getattr(indexTuple, + entry->sk_attno, + tupDes, + &isNull); + keyDatum = entry->sk_argument; + + compare = _bt_invokestrat(rel, i, strat, keyDatum, attrDatum); + if (!compare) + return (false); + } + + return (true); +} + +/* + * _bt_binsrch() -- Do a binary search for a key on a particular page. + * + * The scankey we get has the compare function stored in the procedure + * entry of each data struct. We invoke this regproc to do the + * comparison for every key in the scankey. _bt_binsrch() returns + * the OffsetNumber of the first matching key on the page, or the + * OffsetNumber at which the matching key would appear if it were + * on this page. + * + * By the time this procedure is called, we're sure we're looking + * at the right page -- don't need to walk right. _bt_binsrch() has + * no lock or refcount side effects on the buffer. + */ +OffsetNumber +_bt_binsrch(Relation rel, + Buffer buf, + int keysz, + ScanKey scankey, + int srchtype) +{ + TupleDesc itupdesc; + Page page; + BTPageOpaque opaque; + OffsetNumber low, mid, high; + bool match; + int result; + + page = BufferGetPage(buf); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + + /* by convention, item 0 on any non-rightmost page is the high key */ + low = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY; + + high = PageGetMaxOffsetNumber(page); + + /* + * Since for non-rightmost pages, the zeroeth item on the page is the + * high key, there are two notions of emptiness. One is if nothing + * appears on the page. The other is if nothing but the high key does. + * The reason we test high <= low, rather than high == low, is that + * after vacuuming there may be nothing *but* the high key on a page. + * In that case, given the scheme above, low = 1 and high = 0. + */ + + if (PageIsEmpty(page) || (! P_RIGHTMOST(opaque) && high <= low)) + return (low); + + itupdesc = RelationGetTupleDescriptor(rel); + match = false; + + while ((high - low) > 1) { + mid = low + ((high - low) / 2); + result = _bt_compare(rel, itupdesc, page, keysz, scankey, mid); + + if (result > 0) + low = mid; + else if (result < 0) + high = mid - 1; + else { + match = true; + break; + } + } + + /* if we found a match, we want to find the first one on the page */ + if (match) { + return (_bt_firsteq(rel, itupdesc, page, keysz, scankey, mid)); + } else { + + /* + * We terminated because the endpoints got too close together. There + * are two cases to take care of. + * + * For non-insertion searches on internal pages, we want to point at + * the last key <, or first key =, the scankey on the page. This + * guarantees that we'll descend the tree correctly. + * + * For all other cases, we want to point at the first key >= + * the scankey on the page. This guarantees that scans and + * insertions will happen correctly. 
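+ *
+ * A worked example with illustrative keys: if the data items on the
+ * page are 10, 20, 20, 30 at successive offsets, then in the
+ * "first key >= scankey" case a scankey of 20 yields the offset of
+ * the first 20 and a scankey of 25 yields the offset of the 30,
+ * while in the BT_DESCENT internal-page case a scankey of 25 yields
+ * the offset of the last key < 25, namely the second 20.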
+ */ + + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + if (!(opaque->btpo_flags & BTP_LEAF) && srchtype == BT_DESCENT) { + + /* + * We want the last key <, or first key ==, the scan key. + */ + + result = _bt_compare(rel, itupdesc, page, keysz, scankey, high); + + if (result == 0) { + return (_bt_firsteq(rel, itupdesc, page, keysz, scankey, high)); + } else if (result > 0) { + return (high); + } else { + return (low); + } + } else { + + /* we want the first key >= the scan key */ + result = _bt_compare(rel, itupdesc, page, keysz, scankey, low); + if (result <= 0) { + return (low); + } else { + if (low == high) + return (OffsetNumberNext(low)); + + result = _bt_compare(rel, itupdesc, page, keysz, scankey, high); + if (result <= 0) + return (high); + else + return (OffsetNumberNext(high)); + } + } + } +} + +static OffsetNumber +_bt_firsteq(Relation rel, + TupleDesc itupdesc, + Page page, + Size keysz, + ScanKey scankey, + OffsetNumber offnum) +{ + BTPageOpaque opaque; + OffsetNumber limit; + + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + + /* skip the high key, if any */ + limit = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY; + + /* walk backwards looking for the first key in the chain of duplicates */ + while (offnum > limit + && _bt_compare(rel, itupdesc, page, + keysz, scankey, OffsetNumberPrev(offnum)) == 0) { + offnum = OffsetNumberPrev(offnum); + } + + return (offnum); +} + +/* + * _bt_compare() -- Compare scankey to a particular tuple on the page. + * + * This routine returns: + * -1 if scankey < tuple at offnum; + * 0 if scankey == tuple at offnum; + * +1 if scankey > tuple at offnum. + * + * In order to avoid having to propagate changes up the tree any time + * a new minimal key is inserted, the leftmost entry on the leftmost + * page is less than all possible keys, by definition. + */ +static int +_bt_compare(Relation rel, + TupleDesc itupdesc, + Page page, + int keysz, + ScanKey scankey, + OffsetNumber offnum) +{ + Datum datum; + BTItem btitem; + ItemId itemid; + IndexTuple itup; + BTPageOpaque opaque; + ScanKey entry; + AttrNumber attno; + int result; + int i; + bool null; + + /* + * If this is a leftmost internal page, and if our comparison is + * with the first key on the page, then the item at that position is + * by definition less than the scan key. + */ + + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + if (!(opaque->btpo_flags & BTP_LEAF) + && P_LEFTMOST(opaque) + && offnum == P_HIKEY) { + itemid = PageGetItemId(page, offnum); + + /* + * we just have to believe that this will only be called with + * offnum == P_HIKEY when P_HIKEY is the OffsetNumber of the + * first actual data key (i.e., this is also a rightmost + * page). there doesn't seem to be any code that implies + * that the leftmost page is normally missing a high key as + * well as the rightmost page. but that implies that this + * code path only applies to the root -- which seems + * unlikely.. + */ + if (! P_RIGHTMOST(opaque)) { + elog(WARN, "_bt_compare: invalid comparison to high key"); + } + + /* + * If the item on the page is equal to the scankey, that's + * okay to admit. We just can't claim that the first key on + * the page is greater than anything. + */ + + if (_bt_skeycmp(rel, keysz, scankey, page, itemid, + BTEqualStrategyNumber)) { + return (0); + } + return (1); + } + + btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum)); + itup = &(btitem->bti_itup); + + /* + * The scan key is set up with the attribute number associated with each + * term in the key. 
It is important that, if the index is multi-key, + * the scan contain the first k key attributes, and that they be in + * order. If you think about how multi-key ordering works, you'll + * understand why this is. + * + * We don't test for violation of this condition here. + */ + + for (i = 1; i <= keysz; i++) { + long tmpres; + + entry = &scankey[i - 1]; + attno = entry->sk_attno; + datum = index_getattr(itup, attno, itupdesc, &null); + tmpres = (long) FMGR_PTR2(entry->sk_func, entry->sk_procedure, + entry->sk_argument, datum); + result = tmpres; + + /* if the keys are unequal, return the difference */ + if (result != 0) + return (result); + } + + /* by here, the keys are equal */ + return (0); +} + +/* + * _bt_next() -- Get the next item in a scan. + * + * On entry, we have a valid currentItemData in the scan, and a + * read lock on the page that contains that item. We do not have + * the page pinned. We return the next item in the scan. On + * exit, we have the page containing the next item locked but not + * pinned. + */ +RetrieveIndexResult +_bt_next(IndexScanDesc scan, ScanDirection dir) +{ + Relation rel; + Buffer buf; + Page page; + OffsetNumber offnum; + RetrieveIndexResult res; + BlockNumber blkno; + ItemPointer current; + ItemPointer iptr; + BTItem btitem; + IndexTuple itup; + BTScanOpaque so; + + rel = scan->relation; + so = (BTScanOpaque) scan->opaque; + current = &(scan->currentItemData); + + /* + * XXX 10 may 91: somewhere there's a bug in our management of the + * cached buffer for this scan. wei discovered it. the following + * is a workaround so he can work until i figure out what's going on. + */ + + if (!BufferIsValid(so->btso_curbuf)) + so->btso_curbuf = _bt_getbuf(rel, ItemPointerGetBlockNumber(current), + BT_READ); + + /* we still have the buffer pinned and locked */ + buf = so->btso_curbuf; + blkno = BufferGetBlockNumber(buf); + + /* step one tuple in the appropriate direction */ + if (!_bt_step(scan, &buf, dir)) + return ((RetrieveIndexResult) NULL); + + /* by here, current is the tuple we want to return */ + offnum = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(buf); + btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum)); + itup = &btitem->bti_itup; + + if (_bt_checkqual(scan, itup)) { + iptr = (ItemPointer) palloc(sizeof(ItemPointerData)); + memmove((char *) iptr, (char *) &(itup->t_tid), + sizeof(ItemPointerData)); + res = FormRetrieveIndexResult(current, iptr); + + /* remember which buffer we have pinned and locked */ + so->btso_curbuf = buf; + } else { + ItemPointerSetInvalid(current); + so->btso_curbuf = InvalidBuffer; + _bt_relbuf(rel, buf, BT_READ); + res = (RetrieveIndexResult) NULL; + } + + return (res); +} + +/* + * _bt_first() -- Find the first item in a scan. + * + * We need to be clever about the type of scan, the operation it's + * performing, and the tree ordering. We return the RetrieveIndexResult + * of the first item in the tree that satisfies the qualification + * associated with the scan descriptor. On exit, the page containing + * the current index tuple is read locked and pinned, and the scan's + * opaque data entry is updated to include the buffer. 
+ */ +RetrieveIndexResult +_bt_first(IndexScanDesc scan, ScanDirection dir) +{ + Relation rel; + TupleDesc itupdesc; + Buffer buf; + Page page; + BTStack stack; + OffsetNumber offnum, maxoff; + BTItem btitem; + IndexTuple itup; + ItemPointer current; + ItemPointer iptr; + BlockNumber blkno; + StrategyNumber strat; + RetrieveIndexResult res; + RegProcedure proc; + int result; + BTScanOpaque so; + ScanKeyData skdata; + + /* if we just need to walk down one edge of the tree, do that */ + if (scan->scanFromEnd) + return (_bt_endpoint(scan, dir)); + + rel = scan->relation; + itupdesc = RelationGetTupleDescriptor(scan->relation); + current = &(scan->currentItemData); + so = (BTScanOpaque) scan->opaque; + + /* + * Okay, we want something more complicated. What we'll do is use + * the first item in the scan key passed in (which has been correctly + * ordered to take advantage of index ordering) to position ourselves + * at the right place in the scan. + */ + + /* + * XXX -- The attribute number stored in the scan key is the attno + * in the heap relation. We need to transmogrify this into + * the index relation attno here. For the moment, we have + * hardwired attno == 1. + */ + proc = index_getprocid(rel, 1, BTORDER_PROC); + ScanKeyEntryInitialize(&skdata, 0x0, 1, proc, + scan->keyData[0].sk_argument); + + stack = _bt_search(rel, 1, &skdata, &buf); + _bt_freestack(stack); + + /* find the nearest match to the manufactured scan key on the page */ + offnum = _bt_binsrch(rel, buf, 1, &skdata, BT_DESCENT); + page = BufferGetPage(buf); + + /* + * This will happen if the tree we're searching is entirely empty, + * or if we're doing a search for a key that would appear on an + * entirely empty internal page. In either case, there are no + * matching tuples in the index. + */ + + if (PageIsEmpty(page)) { + ItemPointerSetInvalid(current); + so->btso_curbuf = InvalidBuffer; + _bt_relbuf(rel, buf, BT_READ); + return ((RetrieveIndexResult) NULL); + } + + maxoff = PageGetMaxOffsetNumber(page); + + if (offnum > maxoff) + offnum = maxoff; + + blkno = BufferGetBlockNumber(buf); + ItemPointerSet(current, blkno, offnum); + + /* + * Now find the right place to start the scan. Result is the + * value we're looking for minus the value we're looking at + * in the index. 
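+ * (Per _bt_compare()'s convention, result > 0 means the scan key
+ * sorts after the item at offnum, result < 0 means it sorts before
+ * it, and 0 means they compare equal.  So, for example, result >= 0
+ * in the BTGreaterStrategyNumber case below means the item is still
+ * <= the scan key and we keep stepping forward.)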
+ */ + + result = _bt_compare(rel, itupdesc, page, 1, &skdata, offnum); + strat = _bt_getstrat(rel, 1, scan->keyData[0].sk_procedure); + + switch (strat) { + case BTLessStrategyNumber: + if (result <= 0) { + do { + if (!_bt_twostep(scan, &buf, BackwardScanDirection)) + break; + + offnum = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(buf); + result = _bt_compare(rel, itupdesc, page, 1, &skdata, offnum); + } while (result <= 0); + + /* if this is true, the key we just looked at is gone */ + if (result > 0) + (void) _bt_twostep(scan, &buf, ForwardScanDirection); + } + break; + + case BTLessEqualStrategyNumber: + if (result >= 0) { + do { + if (!_bt_twostep(scan, &buf, ForwardScanDirection)) + break; + + offnum = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(buf); + result = _bt_compare(rel, itupdesc, page, 1, &skdata, offnum); + } while (result >= 0); + + if (result < 0) + (void) _bt_twostep(scan, &buf, BackwardScanDirection); + } + break; + + case BTEqualStrategyNumber: + if (result != 0) { + _bt_relbuf(scan->relation, buf, BT_READ); + so->btso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(&(scan->currentItemData)); + return ((RetrieveIndexResult) NULL); + } + break; + + case BTGreaterEqualStrategyNumber: + if (result < 0) { + do { + if (!_bt_twostep(scan, &buf, BackwardScanDirection)) + break; + + page = BufferGetPage(buf); + offnum = ItemPointerGetOffsetNumber(current); + result = _bt_compare(rel, itupdesc, page, 1, &skdata, offnum); + } while (result < 0); + + if (result > 0) + (void) _bt_twostep(scan, &buf, ForwardScanDirection); + } + break; + + case BTGreaterStrategyNumber: + if (result >= 0) { + do { + if (!_bt_twostep(scan, &buf, ForwardScanDirection)) + break; + + offnum = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(buf); + result = _bt_compare(rel, itupdesc, page, 1, &skdata, offnum); + } while (result >= 0); + } + break; + } + + /* okay, current item pointer for the scan is right */ + offnum = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(buf); + btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum)); + itup = &btitem->bti_itup; + + if (_bt_checkqual(scan, itup)) { + iptr = (ItemPointer) palloc(sizeof(ItemPointerData)); + memmove((char *) iptr, (char *) &(itup->t_tid), + sizeof(ItemPointerData)); + res = FormRetrieveIndexResult(current, iptr); + pfree(iptr); + + /* remember which buffer we have pinned */ + so->btso_curbuf = buf; + } else { + ItemPointerSetInvalid(current); + so->btso_curbuf = InvalidBuffer; + _bt_relbuf(rel, buf, BT_READ); + res = (RetrieveIndexResult) NULL; + } + + return (res); +} + +/* + * _bt_step() -- Step one item in the requested direction in a scan on + * the tree. + * + * If no adjacent record exists in the requested direction, return + * false. Else, return true and set the currentItemData for the + * scan to the right thing. 
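+ *
+ * Typical use, as in _bt_next() above (condensed):
+ *
+ *	if (!_bt_step(scan, &buf, dir))
+ *		return ((RetrieveIndexResult) NULL);
+ *	offnum = ItemPointerGetOffsetNumber(current);
+ *	... fetch the item at offnum on BufferGetPage(buf) ...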
+ */ +bool +_bt_step(IndexScanDesc scan, Buffer *bufP, ScanDirection dir) +{ + Page page; + BTPageOpaque opaque; + OffsetNumber offnum, maxoff; + OffsetNumber start; + BlockNumber blkno; + BlockNumber obknum; + BTScanOpaque so; + ItemPointer current; + Relation rel; + + rel = scan->relation; + current = &(scan->currentItemData); + offnum = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(*bufP); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + so = (BTScanOpaque) scan->opaque; + maxoff = PageGetMaxOffsetNumber(page); + + /* get the next tuple */ + if (ScanDirectionIsForward(dir)) { + if (!PageIsEmpty(page) && offnum < maxoff) { + offnum = OffsetNumberNext(offnum); + } else { + + /* if we're at end of scan, release the buffer and return */ + blkno = opaque->btpo_next; + if (P_RIGHTMOST(opaque)) { + _bt_relbuf(rel, *bufP, BT_READ); + ItemPointerSetInvalid(current); + *bufP = so->btso_curbuf = InvalidBuffer; + return (false); + } else { + + /* walk right to the next page with data */ + _bt_relbuf(rel, *bufP, BT_READ); + for (;;) { + *bufP = _bt_getbuf(rel, blkno, BT_READ); + page = BufferGetPage(*bufP); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + maxoff = PageGetMaxOffsetNumber(page); + start = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY; + + if (!PageIsEmpty(page) && start <= maxoff) { + break; + } else { + blkno = opaque->btpo_next; + _bt_relbuf(rel, *bufP, BT_READ); + if (blkno == P_NONE) { + *bufP = so->btso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(current); + return (false); + } + } + } + offnum = start; + } + } + } else if (ScanDirectionIsBackward(dir)) { + + /* remember that high key is item zero on non-rightmost pages */ + start = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY; + + if (offnum > start) { + offnum = OffsetNumberPrev(offnum); + } else { + + /* if we're at end of scan, release the buffer and return */ + blkno = opaque->btpo_prev; + if (P_LEFTMOST(opaque)) { + _bt_relbuf(rel, *bufP, BT_READ); + *bufP = so->btso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(current); + return (false); + } else { + + obknum = BufferGetBlockNumber(*bufP); + + /* walk right to the next page with data */ + _bt_relbuf(rel, *bufP, BT_READ); + for (;;) { + *bufP = _bt_getbuf(rel, blkno, BT_READ); + page = BufferGetPage(*bufP); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + maxoff = PageGetMaxOffsetNumber(page); + + /* + * If the adjacent page just split, then we may have the + * wrong block. Handle this case. Because pages only + * split right, we don't have to worry about this failing + * to terminate. + */ + + while (opaque->btpo_next != obknum) { + blkno = opaque->btpo_next; + _bt_relbuf(rel, *bufP, BT_READ); + *bufP = _bt_getbuf(rel, blkno, BT_READ); + page = BufferGetPage(*bufP); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + maxoff = PageGetMaxOffsetNumber(page); + } + + /* don't consider the high key */ + start = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY; + + /* anything to look at here? */ + if (!PageIsEmpty(page) && maxoff >= start) { + break; + } else { + blkno = opaque->btpo_prev; + obknum = BufferGetBlockNumber(*bufP); + _bt_relbuf(rel, *bufP, BT_READ); + if (blkno == P_NONE) { + *bufP = so->btso_curbuf = InvalidBuffer; + ItemPointerSetInvalid(current); + return (false); + } + } + } + offnum = maxoff; /* XXX PageIsEmpty? 
 */
+	    }
+	}
+    }
+    blkno = BufferGetBlockNumber(*bufP);
+    so->btso_curbuf = *bufP;
+    ItemPointerSet(current, blkno, offnum);
+
+    return (true);
+}
+
+/*
+ *	_bt_twostep() -- Move to an adjacent record in a scan on the tree,
+ *			 if an adjacent record exists.
+ *
+ *	This is like _bt_step, except that if no adjacent record exists
+ *	it restores us to where we were before trying the step.  This is
+ *	only hairy when you cross page boundaries, since the page you cross
+ *	from could have records inserted or deleted, or could even split.
+ *	This is unlikely, but we try to handle it correctly here anyway.
+ *
+ *	This routine contains the only case in which we deviate from Lehman
+ *	and Yao's algorithm.
+ *
+ *	Like step, this routine leaves the scan's currentItemData in the
+ *	proper state and acquires a lock and pin on *bufP.  If the twostep
+ *	succeeded, we return true; otherwise, we return false.
+ */
+static bool
+_bt_twostep(IndexScanDesc scan, Buffer *bufP, ScanDirection dir)
+{
+    Page page;
+    BTPageOpaque opaque;
+    OffsetNumber offnum, maxoff;
+    OffsetNumber start;
+    ItemPointer current;
+    ItemId itemid;
+    int itemsz;
+    BTItem btitem;
+    BTItem svitem;
+    BlockNumber blkno;
+
+    blkno = BufferGetBlockNumber(*bufP);
+    page = BufferGetPage(*bufP);
+    opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+    maxoff = PageGetMaxOffsetNumber(page);
+    current = &(scan->currentItemData);
+    offnum = ItemPointerGetOffsetNumber(current);
+
+    start = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY;
+
+    /* if we're safe, just do it */
+    if (ScanDirectionIsForward(dir) && offnum < maxoff) { /* XXX PageIsEmpty? */
+	ItemPointerSet(current, blkno, OffsetNumberNext(offnum));
+	return (true);
+    } else if (ScanDirectionIsBackward(dir) && offnum > start) {
+	ItemPointerSet(current, blkno, OffsetNumberPrev(offnum));
+	return (true);
+    }
+
+    /* if we've hit end of scan we don't have to do any work */
+    if (ScanDirectionIsForward(dir) && P_RIGHTMOST(opaque)) {
+	return (false);
+    } else if (ScanDirectionIsBackward(dir) && P_LEFTMOST(opaque)) {
+	return (false);
+    }
+
+    /*
+     * Okay, it's off the page; let _bt_step() do the hard work, and we'll
+     * try to remember where we were.  This is not guaranteed to work; this
+     * is the only place in the code where concurrency can screw us up,
+     * and it's because we want to be able to move in two directions in
+     * the scan.
+     */
+
+    itemid = PageGetItemId(page, offnum);
+    itemsz = ItemIdGetLength(itemid);
+    btitem = (BTItem) PageGetItem(page, itemid);
+    svitem = (BTItem) palloc(itemsz);
+    memmove((char *) svitem, (char *) btitem, itemsz);
+
+    if (_bt_step(scan, bufP, dir)) {
+	pfree(svitem);
+	return (true);
+    }
+
+    /* try to find our place again */
+    *bufP = _bt_getbuf(scan->relation, blkno, BT_READ);
+    page = BufferGetPage(*bufP);
+    maxoff = PageGetMaxOffsetNumber(page);
+
+    while (offnum <= maxoff) {
+	itemid = PageGetItemId(page, offnum);
+	btitem = (BTItem) PageGetItem(page, itemid);
+	if (btitem->bti_oid == svitem->bti_oid) {
+	    pfree(svitem);
+	    ItemPointerSet(current, blkno, offnum);
+	    return (false);
+	}
+	offnum = OffsetNumberNext(offnum);	/* advance, else we never terminate */
+    }
+
+    /*
+     * XXX crash and burn -- can't find our place.  We can be a little
+     * smarter -- walk to the next page to the right, for example, since
+     * that's the only direction that splits happen in.  Deletions screw
+     * us up less often since they're only done by the vacuum daemon.
+     */
+
+    elog(WARN, "btree synchronization error: concurrent update botched scan");
+
+    return (false);
+}
+
+/*
+ *	_bt_endpoint() -- Find the first or last key in the index.
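+ *
+ *	Descends from the root to the leftmost (forward scan) or rightmost
+ *	(backward scan) leaf page and returns a RetrieveIndexResult for the
+ *	key found there, or NULL if the index is empty or the endpoint item
+ *	does not satisfy the scan quals.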
+ */ +static RetrieveIndexResult +_bt_endpoint(IndexScanDesc scan, ScanDirection dir) +{ + Relation rel; + Buffer buf; + Page page; + BTPageOpaque opaque; + ItemPointer current; + ItemPointer iptr; + OffsetNumber offnum, maxoff; + OffsetNumber start; + BlockNumber blkno; + BTItem btitem; + IndexTuple itup; + BTScanOpaque so; + RetrieveIndexResult res; + + rel = scan->relation; + current = &(scan->currentItemData); + + buf = _bt_getroot(rel, BT_READ); + blkno = BufferGetBlockNumber(buf); + page = BufferGetPage(buf); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + + for (;;) { + if (opaque->btpo_flags & BTP_LEAF) + break; + + if (ScanDirectionIsForward(dir)) { + offnum = P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY; + } else { + offnum = PageGetMaxOffsetNumber(page); + } + + btitem = (BTItem) PageGetItem(page, PageGetItemId(page, offnum)); + itup = &(btitem->bti_itup); + + blkno = ItemPointerGetBlockNumber(&(itup->t_tid)); + + _bt_relbuf(rel, buf, BT_READ); + buf = _bt_getbuf(rel, blkno, BT_READ); + page = BufferGetPage(buf); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + + /* + * Race condition: If the child page we just stepped onto is + * in the process of being split, we need to make sure we're + * all the way at the right edge of the tree. See the paper + * by Lehman and Yao. + */ + + if (ScanDirectionIsBackward(dir) && ! P_RIGHTMOST(opaque)) { + do { + blkno = opaque->btpo_next; + _bt_relbuf(rel, buf, BT_READ); + buf = _bt_getbuf(rel, blkno, BT_READ); + page = BufferGetPage(buf); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + } while (! P_RIGHTMOST(opaque)); + } + } + + /* okay, we've got the {left,right}-most page in the tree */ + maxoff = PageGetMaxOffsetNumber(page); + + if (ScanDirectionIsForward(dir)) { + if (PageIsEmpty(page)) { + maxoff = FirstOffsetNumber; + } else { + maxoff = PageGetMaxOffsetNumber(page); + } + start = P_RIGHTMOST(opaque) ? 
P_HIKEY : P_FIRSTKEY; + + if (PageIsEmpty(page) || start > maxoff) { + ItemPointerSet(current, blkno, maxoff); + if (!_bt_step(scan, &buf, BackwardScanDirection)) + return ((RetrieveIndexResult) NULL); + + start = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(buf); + } else { + ItemPointerSet(current, blkno, start); + } + } else if (ScanDirectionIsBackward(dir)) { + if (PageIsEmpty(page)) { + ItemPointerSet(current, blkno, FirstOffsetNumber); + if (!_bt_step(scan, &buf, ForwardScanDirection)) + return ((RetrieveIndexResult) NULL); + + start = ItemPointerGetOffsetNumber(current); + page = BufferGetPage(buf); + } else { + start = PageGetMaxOffsetNumber(page); + ItemPointerSet(current, blkno, start); + } + } else { + elog(WARN, "Illegal scan direction %d", dir); + } + + btitem = (BTItem) PageGetItem(page, PageGetItemId(page, start)); + itup = &(btitem->bti_itup); + + /* see if we picked a winner */ + if (_bt_checkqual(scan, itup)) { + iptr = (ItemPointer) palloc(sizeof(ItemPointerData)); + memmove((char *) iptr, (char *) &(itup->t_tid), + sizeof(ItemPointerData)); + res = FormRetrieveIndexResult(current, iptr); + + /* remember which buffer we have pinned */ + so = (BTScanOpaque) scan->opaque; + so->btso_curbuf = buf; + } else { + _bt_relbuf(rel, buf, BT_READ); + res = (RetrieveIndexResult) NULL; + } + + return (res); +} diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c new file mode 100644 index 00000000000..3d2676324a0 --- /dev/null +++ b/src/backend/access/nbtree/nbtsort.c @@ -0,0 +1,1196 @@ +/*------------------------------------------------------------------------- + * btsort.c-- + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Id: nbtsort.c,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + * + * NOTES + * + * what we do is: + * - generate a set of initial one-block runs, distributed round-robin + * between the output tapes. + * - for each pass, + * - swap input and output tape sets, rewinding both and truncating + * the output tapes. + * - merge the current run in each input tape to the current output + * tape. + * - when each input run has been exhausted, switch to another output + * tape and start processing another run. + * - when we have fewer runs than tapes, we know we are ready to start + * merging into the btree leaf pages. + * - every time we complete a level of the btree, we can construct the + * next level up. when we have only one page on a level, it can be + * attached to the btree metapage and we are done. + * + * conventions: + * - external interface routines take in and return "void *" for their + * opaque handles. this is for modularity reasons (i prefer not to + * export these structures without good reason). + * + * this code is moderately slow (~10% slower) compared to the regular + * btree (insertion) build code on sorted or well-clustered data. on + * random data, however, the insertion build code is unusable -- the + * difference on a 60MB heap is a factor of 15 because the random + * probes into the btree thrash the buffer pool. + * + * this code currently packs the pages to 100% of capacity. this is + * not wise, since *any* insertion will cause splitting. filling to + * something like the standard 70% steady-state load factor for btrees + * would probably be better. + * + * somebody desperately needs to figure out how to do a better job of + * balancing the merge passes -- the fan-in on the final merges can be + * pretty poor, which is bad for performance. 
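+ *
+ * as a rough illustration of the pass structure (the numbers here are
+ * invented for the example): with 7 tapes and 70 initial one-block
+ * runs, the first pass leaves about 70/7 = 10 runs and the second
+ * about 10/7 = 2; since 2 is no more than the number of tapes, the
+ * following pass merges straight into the btree leaf pages.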
+ *------------------------------------------------------------------------- + */ + +#include + +#include "c.h" + +#include "access/nbtree.h" + +#include "storage/bufmgr.h" +#include "storage/fd.h" +#include "utils/rel.h" +#include "utils/palloc.h" +#include "utils/elog.h" + +/*#define FASTBUILD_DEBUG*/ /* turn on debugging output */ + +#define FASTBUILD + +#ifdef FASTBUILD + +#define MAXTAPES (7) +#define TAPEBLCKSZ (BLCKSZ << 2) +#define TAPETEMP "pg_btsortXXXXXX" + + +/*------------------------------------------------------------------------- + * sorting comparison routine - returns {-1,0,1} depending on whether + * the key in the left BTItem is {<,=,>} the key in the right BTItem. + * + * we want to use _bt_isortcmp as a comparison function for qsort(3), + * but it needs extra arguments, so we "pass them in" as global + * variables. ick. fortunately, they are the same throughout the + * build, so we need do this only once. this is why you must call + * _bt_isortcmpinit before the call to qsort(3). + * + * a NULL BTItem is always assumed to be greater than any actual + * value; our heap routines (see below) assume that the smallest + * element in the heap is returned. that way, NULL values from the + * exhausted tapes can sift down to the bottom of the heap. in point + * of fact we just don't replace the elements of exhausted tapes, but + * what the heck. + * *------------------------------------------------------------------------- + */ +static Relation _bt_sortrel; + +static void +_bt_isortcmpinit(Relation index) +{ + _bt_sortrel = index; +} + +static int +_bt_isortcmp(BTItem *bti1p, BTItem *bti2p) +{ + BTItem bti1 = *bti1p; + BTItem bti2 = *bti2p; + + if (bti1 == (BTItem) NULL) { + if (bti2 == (BTItem) NULL) { + return(0); /* 1 = 2 */ + } + return(1); /* 1 > 2 */ + } else if (bti2 == (BTItem) NULL) { + return(-1); /* 1 < 2 */ + } else if (_bt_itemcmp(_bt_sortrel, 1, bti1, bti2, + BTGreaterStrategyNumber)) { + return(1); /* 1 > 2 */ + } else if (_bt_itemcmp(_bt_sortrel, 1, bti2, bti1, + BTGreaterStrategyNumber)) { + return(-1); /* 1 < 2 */ + } + return(0); /* 1 = 2 */ +} + +/*------------------------------------------------------------------------- + * priority queue methods + * + * these were more-or-less lifted from the heap section of the 1984 + * edition of gonnet's book on algorithms and data structures. they + * are coded so that the smallest element in the heap is returned (we + * use them for merging sorted runs). + * + * XXX these probably ought to be generic library functions. 
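+ *
+ * a sketch of how the merge loop below drives the queue (names as in
+ * _bt_merge; illustration only): the queue is zeroed, loaded with one
+ * element per input tape via _bt_pqadd, and drained in sorted order
+ * with _bt_pqnext:
+ *
+ *	(void) memset((char *) &q, 0, sizeof(BTPriQueue));
+ *	e.btpqe_tape = t;
+ *	e.btpqe_item = _bt_tapenext(itape, &tapepos[t]);
+ *	_bt_pqadd(&q, &e);
+ *	while (_bt_pqnext(&q, &e) >= 0)
+ *		...
+ *
+ * note that _bt_isortcmpinit must already have been called, since
+ * GREATER compares through _bt_isortcmp.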
+ *------------------------------------------------------------------------- + */ + +typedef struct { + int btpqe_tape; /* tape identifier */ + BTItem btpqe_item; /* pointer to BTItem in tape buffer */ +} BTPriQueueElem; + +#define MAXELEM MAXTAPES +typedef struct { + int btpq_nelem; + BTPriQueueElem btpq_queue[MAXELEM]; + Relation btpq_rel; +} BTPriQueue; + +/* be sure to call _bt_isortcmpinit first */ +#define GREATER(a, b) \ + (_bt_isortcmp(&((a)->btpqe_item), &((b)->btpqe_item)) > 0) + +static void +_bt_pqsift(BTPriQueue *q, int parent) +{ + int child; + BTPriQueueElem e; + + for (child = parent * 2 + 1; + child < q->btpq_nelem; + child = parent * 2 + 1) { + if (child < q->btpq_nelem - 1) { + if (GREATER(&(q->btpq_queue[child]), &(q->btpq_queue[child+1]))) { + ++child; + } + } + if (GREATER(&(q->btpq_queue[parent]), &(q->btpq_queue[child]))) { + e = q->btpq_queue[child]; /* struct = */ + q->btpq_queue[child] = q->btpq_queue[parent]; /* struct = */ + q->btpq_queue[parent] = e; /* struct = */ + parent = child; + } else { + parent = child + 1; + } + } +} + +static int +_bt_pqnext(BTPriQueue *q, BTPriQueueElem *e) +{ + if (q->btpq_nelem < 1) { /* already empty */ + return(-1); + } + *e = q->btpq_queue[0]; /* struct = */ + + if (--q->btpq_nelem < 1) { /* now empty, don't sift */ + return(0); + } + q->btpq_queue[0] = q->btpq_queue[q->btpq_nelem]; /* struct = */ + _bt_pqsift(q, 0); + return(0); +} + +static void +_bt_pqadd(BTPriQueue *q, BTPriQueueElem *e) +{ + int child, parent; + + if (q->btpq_nelem >= MAXELEM) { + elog(WARN, "_bt_pqadd: queue overflow"); + } + + child = q->btpq_nelem++; + while (child > 0) { + parent = child / 2; + if (GREATER(e, &(q->btpq_queue[parent]))) { + break; + } else { + q->btpq_queue[child] = q->btpq_queue[parent]; /* struct = */ + child = parent; + } + } + + q->btpq_queue[child] = *e; /* struct = */ +} + +/*------------------------------------------------------------------------- + * tape methods + *------------------------------------------------------------------------- + */ + +#define BTITEMSZ(btitem) \ + ((btitem) ? \ + (IndexTupleDSize((btitem)->bti_itup) + \ + (sizeof(BTItemData) - sizeof(IndexTupleData))) : \ + 0) +#define SPCLEFT(tape) \ + (sizeof((tape)->bttb_data) - (tape)->bttb_top) +#define EMPTYTAPE(tape) \ + ((tape)->bttb_ntup <= 0) +#define BTTAPEMAGIC 0x19660226 + +/* + * this is what we use to shovel BTItems in and out of memory. it's + * bigger than a standard block because we are doing a lot of strictly + * sequential i/o. this is obviously something of a tradeoff since we + * are potentially reading a bunch of zeroes off of disk in many + * cases. + * + * BTItems are packed in and DOUBLEALIGN'd. + * + * the fd should not be going out to disk, strictly speaking, but it's + * the only thing like that so i'm not going to worry about wasting a + * few bytes. + */ +typedef struct { + int bttb_magic; /* magic number */ + int bttb_fd; /* file descriptor */ + int bttb_top; /* top of free space within bttb_data */ + short bttb_ntup; /* number of tuples in this block */ + short bttb_eor; /* End-Of-Run marker */ + char bttb_data[TAPEBLCKSZ - 2 * sizeof(double)]; +} BTTapeBlock; + + +/* + * reset the tape header for its next use without doing anything to + * the physical tape file. (setting bttb_top to 0 makes the block + * empty.) + */ +static void +_bt_tapereset(BTTapeBlock *tape) +{ + tape->bttb_eor = 0; + tape->bttb_top = 0; + tape->bttb_ntup = 0; +} + +/* + * rewind the physical tape file. 
+ */ +static void +_bt_taperewind(BTTapeBlock *tape) +{ + (void) FileSeek(tape->bttb_fd, 0, SEEK_SET); +} + +/* + * destroy the contents of the physical tape file without destroying + * the tape data structure or removing the physical tape file. + * + * we use the VFD version of ftruncate(2) to do this rather than + * unlinking and recreating the file. you still have to wait while + * the OS frees up all of the file system blocks and stuff, but at + * least you don't have to delete and reinsert the directory entries. + */ +static void +_bt_tapeclear(BTTapeBlock *tape) +{ + /* blow away the contents of the old file */ + _bt_taperewind(tape); +#if 0 + FileSync(tape->bttb_fd); +#endif + FileTruncate(tape->bttb_fd, 0); + + /* reset the buffer */ + _bt_tapereset(tape); +} + +/* + * create a new BTTapeBlock, allocating memory for the data structure + * as well as opening a physical tape file. + */ +static BTTapeBlock * +_bt_tapecreate(char *fname) +{ + BTTapeBlock *tape = (BTTapeBlock *) palloc(sizeof(BTTapeBlock)); + + if (tape == (BTTapeBlock *) NULL) { + elog(WARN, "_bt_tapecreate: out of memory"); + } + + tape->bttb_magic = BTTAPEMAGIC; + + tape->bttb_fd = FileNameOpenFile(fname, O_RDWR|O_CREAT|O_TRUNC, 0600); + Assert(tape->bttb_fd >= 0); + + /* initialize the buffer */ + _bt_tapereset(tape); + + return(tape); +} + +/* + * destroy the BTTapeBlock structure and its physical tape file. + */ +static void +_bt_tapedestroy(BTTapeBlock *tape) +{ + FileUnlink(tape->bttb_fd); + pfree((void *) tape); +} + +/* + * flush the tape block to the file, marking End-Of-Run if requested. + */ +static void +_bt_tapewrite(BTTapeBlock *tape, int eor) +{ + tape->bttb_eor = eor; + FileWrite(tape->bttb_fd, (char*)tape, TAPEBLCKSZ); + _bt_tapereset(tape); +} + +/* + * read a tape block from the file, overwriting the current contents + * of the buffer. + * + * returns: + * - 0 if there are no more blocks in the tape or in this run (call + * _bt_tapereset to clear the End-Of-Run marker) + * - 1 if a valid block was read + */ +static int +_bt_taperead(BTTapeBlock *tape) +{ + int fd; + int nread; + + if (tape->bttb_eor) { + return(0); /* we are at End-Of-Run */ + } + + /* + * we're clobbering the old tape block, but we do need to save the + * VFD (the one in the block we're reading is bogus). + */ + fd = tape->bttb_fd; + nread = FileRead(fd, (char*) tape, TAPEBLCKSZ); + tape->bttb_fd = fd; + + if (nread != TAPEBLCKSZ) { + Assert(nread == 0); /* we are at EOF */ + return(0); + } + Assert(tape->bttb_magic == BTTAPEMAGIC); + return(1); +} + +/* + * get the next BTItem from a tape block. + * + * returns: + * - NULL if we have run out of BTItems + * - a pointer to the BTItemData in the block otherwise + * + * side effects: + * - sets 'pos' to the current position within the block. + */ +static BTItem +_bt_tapenext(BTTapeBlock *tape, char **pos) +{ + Size itemsz; + BTItem bti; + + if (*pos >= tape->bttb_data + tape->bttb_top) { + return((BTItem) NULL); + } + bti = (BTItem) *pos; + itemsz = BTITEMSZ(bti); + *pos += DOUBLEALIGN(itemsz); + return(bti); +} + +/* + * copy a BTItem into a tape block. + * + * assumes that we have already checked to see if the block has enough + * space for the item. + * + * side effects: + * + * - advances the 'top' pointer in the tape block header to point to + * the beginning of free space. 
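+ *
+ * the caller-side pattern (cf. _bt_merge below; shown here only for
+ * illustration) is:
+ *
+ *	btisz = DOUBLEALIGN(BTITEMSZ(bti));
+ *	if (SPCLEFT(otape) < btisz)
+ *		_bt_tapewrite(otape, 0);
+ *	_bt_tapeadd(otape, bti, btisz);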
+ */ +static void +_bt_tapeadd(BTTapeBlock *tape, BTItem item, int itemsz) +{ + (void) memcpy(tape->bttb_data + tape->bttb_top, item, itemsz); + ++tape->bttb_ntup; + tape->bttb_top += DOUBLEALIGN(itemsz); +} + +/*------------------------------------------------------------------------- + * spool methods + *------------------------------------------------------------------------- + */ + +/* + * this structure holds the bookkeeping for a simple balanced multiway + * merge. (polyphase merging is hairier than i want to get into right + * now, and i don't see why i have to care how many "tapes" i use + * right now. though if psort was in a condition that i could hack it + * to do this, you bet i would.) + */ +typedef struct { + int bts_ntapes; + int bts_tape; + BTTapeBlock **bts_itape; /* input tape blocks */ + BTTapeBlock **bts_otape; /* output tape blocks */ +} BTSpool; + +/* + * create and initialize a spool structure, including the underlying + * files. + */ +void * +_bt_spoolinit(Relation index, int ntapes) +{ + char *mktemp(); + + BTSpool *btspool = (BTSpool *) palloc(sizeof(BTSpool)); + int i; + char *fname = (char *) palloc(sizeof(TAPETEMP) + 1); + + if (btspool == (BTSpool *) NULL || fname == (char *) NULL) { + elog(WARN, "_bt_spoolinit: out of memory"); + } + (void) memset((char *) btspool, 0, sizeof(BTSpool)); + btspool->bts_ntapes = ntapes; + btspool->bts_tape = 0; + + btspool->bts_itape = + (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes); + btspool->bts_otape = + (BTTapeBlock **) palloc(sizeof(BTTapeBlock *) * ntapes); + if (btspool->bts_itape == (BTTapeBlock **) NULL || + btspool->bts_otape == (BTTapeBlock **) NULL) { + elog(WARN, "_bt_spoolinit: out of memory"); + } + + for (i = 0; i < ntapes; ++i) { + btspool->bts_itape[i] = + _bt_tapecreate(mktemp(strcpy(fname, TAPETEMP))); + btspool->bts_otape[i] = + _bt_tapecreate(mktemp(strcpy(fname, TAPETEMP))); + } + pfree((void *) fname); + + _bt_isortcmpinit(index); + + return((void *) btspool); +} + +/* + * clean up a spool structure and its substructures. + */ +void +_bt_spooldestroy(void *spool) +{ + BTSpool *btspool = (BTSpool *) spool; + int i; + + for (i = 0; i < btspool->bts_ntapes; ++i) { + _bt_tapedestroy(btspool->bts_otape[i]); + _bt_tapedestroy(btspool->bts_itape[i]); + } + pfree((void *) btspool); +} + +/* + * flush out any dirty output tape blocks + */ +static void +_bt_spoolflush(BTSpool *btspool) +{ + int i; + + for (i = 0; i < btspool->bts_ntapes; ++i) { + if (!EMPTYTAPE(btspool->bts_otape[i])) { + _bt_tapewrite(btspool->bts_otape[i], 1); + } + } +} + +/* + * swap input tapes and output tapes by swapping their file + * descriptors. additional preparation for the next merge pass + * includes rewinding the new input tapes and clearing out the new + * output tapes. + */ +static void +_bt_spoolswap(BTSpool *btspool) +{ + File tmpfd; + BTTapeBlock *itape; + BTTapeBlock *otape; + int i; + + for (i = 0; i < btspool->bts_ntapes; ++i) { + itape = btspool->bts_itape[i]; + otape = btspool->bts_otape[i]; + + /* + * swap the input and output VFDs. + */ + tmpfd = itape->bttb_fd; + itape->bttb_fd = otape->bttb_fd; + otape->bttb_fd = tmpfd; + + /* + * rewind the new input tape. + */ + _bt_taperewind(itape); + _bt_tapereset(itape); + + /* + * clear the new output tape -- it's ok to throw away the old + * inputs. 
+ */ + _bt_tapeclear(otape); + } +} + +/*------------------------------------------------------------------------- + * sorting routines + *------------------------------------------------------------------------- + */ + +/* + * spool 'btitem' into an initial run. as tape blocks are filled, the + * block BTItems are qsorted and written into some output tape (it + * doesn't matter which; we go round-robin for simplicity). the + * initial runs are therefore always just one block. + */ +void +_bt_spool(Relation index, BTItem btitem, void *spool) +{ + BTSpool *btspool = (BTSpool *) spool; + BTTapeBlock *itape; + Size itemsz; + + itape = btspool->bts_itape[btspool->bts_tape]; + itemsz = BTITEMSZ(btitem); + itemsz = DOUBLEALIGN(itemsz); + + /* + * if this buffer is too full for this BTItemData, or if we have + * run out of BTItems, we need to sort the buffer and write it + * out. in this case, the BTItemData will go into the next tape's + * buffer. + */ + if (btitem == (BTItem) NULL || SPCLEFT(itape) < itemsz) { + BTItem *parray; + BTTapeBlock *otape; + BTItem bti; + char *pos; + int btisz; + int i; + + /* + * build an array of pointers to the BTItemDatas on the input + * block. + */ + parray = (BTItem *) palloc(itape->bttb_ntup * sizeof(BTItem)); + if (parray == (BTItem *) NULL) { + elog(WARN, "_bt_spool: out of memory"); + } + pos = itape->bttb_data; + for (i = 0; i < itape->bttb_ntup; ++i) { + parray[i] = _bt_tapenext(itape, &pos); + } + + /* + * qsort the pointer array. + */ + _bt_isortcmpinit(index); + qsort((void *) parray, itape->bttb_ntup, sizeof(BTItem), _bt_isortcmp); + + /* + * write the spooled run into the output tape. we copy the + * BTItemDatas in the order dictated by the sorted array of + * BTItems, not the original order. + * + * (since everything was DOUBLEALIGN'd and is all on a single + * page, everything had *better* still fit on one page..) + */ + otape = btspool->bts_otape[btspool->bts_tape]; + for (i = 0; i < itape->bttb_ntup; ++i) { + bti = parray[i]; + btisz = BTITEMSZ(bti); + btisz = DOUBLEALIGN(btisz); + _bt_tapeadd(otape, bti, btisz); +#ifdef FASTBUILD_DEBUG + { + bool isnull; + Datum d = index_getattr(&(bti->bti_itup), 1, + RelationGetTupleDescriptor(index), + &isnull); + printf("_bt_spool: inserted <%x> into output tape %d\n", + d, btspool->bts_tape); + } +#endif /* FASTBUILD_DEBUG */ + } + + /* + * the initial runs are always single tape blocks. flush the + * output block, marking End-Of-Run. + */ + _bt_tapewrite(otape, 1); + + /* + * reset the input buffer for the next run. we don't have to + * write it out or anything -- we only use it to hold the + * unsorted BTItemDatas, the output tape contains all the + * sorted stuff. + * + * changing bts_tape changes the output tape and input tape; + * we change itape for the code below. + */ + _bt_tapereset(itape); + btspool->bts_tape = (btspool->bts_tape + 1) % btspool->bts_ntapes; + itape = btspool->bts_itape[btspool->bts_tape]; + + /* + * destroy the pointer array. + */ + pfree((void *) parray); + } + + /* insert this item into the current buffer */ + if (btitem != (BTItem) NULL) { + _bt_tapeadd(itape, btitem, itemsz); + } +} + +/* + * allocate a new, clean btree page, not linked to any siblings. 
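+ *
+ * as used below, callers pass BTP_LEAF in 'flags' when building the
+ * leaf level and 0 for upper levels, e.g.:
+ *
+ *	_bt_blnewpage(index, &(state.btps_buf), &(state.btps_page), BTP_LEAF);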
+ */ +static void +_bt_blnewpage(Relation index, Buffer *buf, Page *page, int flags) +{ + BTPageOpaque opaque; + + *buf = _bt_getbuf(index, P_NEW, BT_WRITE); + *page = BufferGetPage(*buf); + _bt_pageinit(*page, BufferGetPageSize(*buf)); + opaque = (BTPageOpaque) PageGetSpecialPointer(*page); + opaque->btpo_prev = opaque->btpo_next = P_NONE; + opaque->btpo_flags = flags; +} + +/* + * slide an array of ItemIds back one slot (from P_FIRSTKEY to + * P_HIKEY). we need to do this when we discover that we have built + * an ItemId array in what has turned out to be a P_RIGHTMOST page. + */ +static void +_bt_slideleft(Relation index, Buffer buf, Page page) +{ + OffsetNumber off; + OffsetNumber maxoff; + ItemId previi; + ItemId thisii; + + maxoff = PageGetMaxOffsetNumber(page); + previi = PageGetItemId(page, P_HIKEY); + for (off = P_FIRSTKEY; off <= maxoff; off = OffsetNumberNext(off)) { + thisii = PageGetItemId(page, off); + *previi = *thisii; + previi = thisii; + } + ((PageHeader) page)->pd_lower -= sizeof(ItemIdData); +} + +typedef struct { + Buffer btps_buf; + Page btps_page; + BTItem btps_lastbti; + OffsetNumber btps_lastoff; + OffsetNumber btps_firstoff; +} BTPageState; + +/* + * add an item to a disk page from a merge tape block. + * + * we must be careful to observe the following restrictions, placed + * upon us by the conventions in nbtsearch.c: + * - rightmost pages start data items at P_HIKEY instead of at + * P_FIRSTKEY. + * - duplicates cannot be split among pages unless the chain of + * duplicates starts at the first data item. + * + * a leaf page being built looks like: + * + * +----------------+---------------------------------+ + * | PageHeaderData | linp0 linp1 linp2 ... | + * +-----------+----+---------------------------------+ + * | ... linpN | ^ first | + * +-----------+--------------------------------------+ + * | ^ last | + * | | + * | v last | + * +-------------+------------------------------------+ + * | | itemN ... | + * +-------------+------------------+-----------------+ + * | ... item3 item2 item1 | "special space" | + * +--------------------------------+-----------------+ + * ^ first + * + * contrast this with the diagram in bufpage.h; note the mismatch + * between linps and items. this is because we reserve linp0 as a + * placeholder for the pointer to the "high key" item; when we have + * filled up the page, we will set linp0 to point to itemN and clear + * linpN. + * + * 'last' pointers indicate the last offset/item added to the page. + * 'first' pointers indicate the first offset/item that is part of a + * chain of duplicates extending from 'first' to 'last'. + * + * if all keys are unique, 'first' will always be the same as 'last'. 
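+ *
+ * as a concrete illustration (key values invented): if a full page
+ * ends with the items ... 5 7 7 7, where the 7s form the trailing
+ * chain of duplicates, then 'first' points at the first 7 and 'last'
+ * at the final one; on overflow the whole chain of 7s is copied to
+ * the new right-hand page, the first 7 becomes the old page's high
+ * key, and the remaining 7s are deleted from the old page.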
+ */ +static void +_bt_buildadd(Relation index, BTPageState *state, BTItem bti, int flags) +{ + Buffer nbuf; + Page npage; + BTItem last_bti; + OffsetNumber first_off; + OffsetNumber last_off; + OffsetNumber off; + Size pgspc; + Size btisz; + + nbuf = state->btps_buf; + npage = state->btps_page; + first_off = state->btps_firstoff; + last_off = state->btps_lastoff; + last_bti = state->btps_lastbti; + + pgspc = PageGetFreeSpace(npage); + btisz = BTITEMSZ(bti); + btisz = DOUBLEALIGN(btisz); + if (pgspc < btisz) { + Buffer obuf = nbuf; + Page opage = npage; + OffsetNumber o, n; + ItemId ii; + ItemId hii; + + _bt_blnewpage(index, &nbuf, &npage, flags); + + /* + * if 'last' is part of a chain of duplicates that does not + * start at the beginning of the old page, the entire chain is + * copied to the new page; we delete all of the duplicates + * from the old page except the first, which becomes the high + * key item of the old page. + * + * if the chain starts at the beginning of the page or there + * is no chain ('first' == 'last'), we need only copy 'last' + * to the new page. again, 'first' (== 'last') becomes the + * high key of the old page. + * + * note that in either case, we copy at least one item to the + * new page, so 'last_bti' will always be valid. 'bti' will + * never be the first data item on the new page. + */ + if (first_off == P_FIRSTKEY) { + Assert(last_off != P_FIRSTKEY); + first_off = last_off; + } + for (o = first_off, n = P_FIRSTKEY; + o <= last_off; + o = OffsetNumberNext(o), n = OffsetNumberNext(n)) { + ii = PageGetItemId(opage, o); + (void) PageAddItem(npage, PageGetItem(opage, ii), + ii->lp_len, n, LP_USED); +#ifdef FASTBUILD_DEBUG + { + bool isnull; + BTItem tmpbti = + (BTItem) PageGetItem(npage, PageGetItemId(npage, n)); + Datum d = index_getattr(&(tmpbti->bti_itup), 1, + RelationGetTupleDescriptor(index), + &isnull); + printf("_bt_buildadd: moved <%x> to offset %d\n", + d, n); + } +#endif /* FASTBUILD_DEBUG */ + } + for (o = last_off; o > first_off; o = OffsetNumberPrev(o)) { + PageIndexTupleDelete(opage, o); + } + hii = PageGetItemId(opage, P_HIKEY); + ii = PageGetItemId(opage, first_off); + *hii = *ii; + ii->lp_flags &= ~LP_USED; + ((PageHeader) opage)->pd_lower -= sizeof(ItemIdData); + + first_off = P_FIRSTKEY; + last_off = PageGetMaxOffsetNumber(npage); + last_bti = (BTItem) PageGetItem(npage, PageGetItemId(npage, last_off)); + + /* + * set the page (side link) pointers. + */ + { + BTPageOpaque oopaque = (BTPageOpaque) PageGetSpecialPointer(opage); + BTPageOpaque nopaque = (BTPageOpaque) PageGetSpecialPointer(npage); + + oopaque->btpo_next = BufferGetBlockNumber(nbuf); + nopaque->btpo_prev = BufferGetBlockNumber(obuf); + nopaque->btpo_next = P_NONE; + } + + /* + * write out the old stuff. we never want to see it again, so + * we can give up our lock (if we had one; BuildingBtree is + * set, so we aren't locking). + */ + _bt_wrtbuf(index, obuf); + } + + /* + * if this item is different from the last item added, we start a + * new chain of duplicates. 
+ */ + off = OffsetNumberNext(last_off); + (void) PageAddItem(npage, (Item) bti, btisz, off, LP_USED); +#ifdef FASTBUILD_DEBUG + { + bool isnull; + Datum d = index_getattr(&(bti->bti_itup), 1, + RelationGetTupleDescriptor(index), + &isnull); + printf("_bt_buildadd: inserted <%x> at offset %d\n", + d, off); + } +#endif /* FASTBUILD_DEBUG */ + if (last_bti == (BTItem) NULL) { + first_off = P_FIRSTKEY; + } else if (!_bt_itemcmp(index, 1, bti, last_bti, BTEqualStrategyNumber)) { + first_off = off; + } + last_off = off; + last_bti = (BTItem) PageGetItem(npage, PageGetItemId(npage, off)); + + state->btps_buf = nbuf; + state->btps_page = npage; + state->btps_lastbti = last_bti; + state->btps_lastoff = last_off; + state->btps_firstoff = first_off; +} + +/* + * take the input tapes stored by 'btspool' and perform successive + * merging passes until at most one run is left in each tape. at that + * point, merge the final tape runs into a set of btree leaves. + * + * XXX three nested loops? gross. cut me up into smaller routines. + */ +static BlockNumber +_bt_merge(Relation index, BTSpool *btspool) +{ + BTPageState state; + BlockNumber firstblk; + BTPriQueue q; + BTPriQueueElem e; + BTItem bti; + BTTapeBlock *itape; + BTTapeBlock *otape; + char *tapepos[MAXTAPES]; + int tapedone[MAXTAPES]; + int t; + int goodtapes; + int nruns; + Size btisz; + bool doleaf = false; + + /* + * initialize state needed for the merge into the btree leaf pages. + */ + (void) memset((char *) &state, 0, sizeof(BTPageState)); + _bt_blnewpage(index, &(state.btps_buf), &(state.btps_page), BTP_LEAF); + state.btps_lastoff = P_HIKEY; + state.btps_lastbti = (BTItem) NULL; + firstblk = BufferGetBlockNumber(state.btps_buf); + + do { /* pass */ + /* + * each pass starts by flushing the previous outputs and + * swapping inputs and outputs. this process also clears the + * new output tapes and rewinds the new input tapes. + */ + btspool->bts_tape = btspool->bts_ntapes - 1; + _bt_spoolflush(btspool); + _bt_spoolswap(btspool); + + nruns = 0; + + for (;;) { /* run */ + /* + * each run starts by selecting a new output tape. the + * merged results of a given run are always sent to this + * one tape. + */ + btspool->bts_tape = (btspool->bts_tape + 1) % btspool->bts_ntapes; + otape = btspool->bts_otape[btspool->bts_tape]; + + /* + * initialize the priority queue by loading it with the + * first element of the given run in each tape. since we + * are starting a new run, we reset the tape (clearing the + * End-Of-Run marker) before reading it. this means that + * _bt_taperead will return 0 only if the tape is actually + * at EOF. + */ + (void) memset((char *) &q, 0, sizeof(BTPriQueue)); + goodtapes = 0; + for (t = 0; t < btspool->bts_ntapes; ++t) { + itape = btspool->bts_itape[t]; + tapepos[t] = itape->bttb_data; + _bt_tapereset(itape); + if (_bt_taperead(itape) == 0) { + tapedone[t] = 1; + } else { + ++goodtapes; + tapedone[t] = 0; + e.btpqe_tape = t; + e.btpqe_item = _bt_tapenext(itape, &tapepos[t]); + if (e.btpqe_item != (BTItem) NULL) { + _bt_pqadd(&q, &e); + } + } + } + /* + * if we don't have any tapes with any input (i.e., they + * are all at EOF), we must be done with this pass. + */ + if (goodtapes == 0) { + break; /* for */ + } + ++nruns; + + /* + * output the smallest element from the queue until there are no + * more. + */ + while (_bt_pqnext(&q, &e) >= 0) { /* item */ + /* + * replace the element taken from priority queue, + * fetching a new block if needed. a tape can run out + * if it hits either End-Of-Run or EOF. 
+ */ + t = e.btpqe_tape; + bti = e.btpqe_item; + if (bti != (BTItem) NULL) { + btisz = BTITEMSZ(bti); + btisz = DOUBLEALIGN(btisz); + if (doleaf) { + _bt_buildadd(index, &state, bti, BTP_LEAF); +#ifdef FASTBUILD_DEBUG + { + bool isnull; + Datum d = index_getattr(&(bti->bti_itup), 1, + RelationGetTupleDescriptor(index), + &isnull); + printf("_bt_merge: inserted <%x> into block %d\n", + d, BufferGetBlockNumber(state.btps_buf)); + } +#endif /* FASTBUILD_DEBUG */ + } else { + if (SPCLEFT(otape) < btisz) { + /* + * if it's full, write it out and add the + * item to the next block. (since we know + * there will be at least one more block, + * we know we do *not* want to set + * End-Of-Run here!) + */ + _bt_tapewrite(otape, 0); + } + _bt_tapeadd(otape, bti, btisz); +#ifdef FASTBUILD_DEBUG + { + bool isnull; + Datum d = index_getattr(&(bti->bti_itup), 1, + RelationGetTupleDescriptor(index), &isnull); + printf("_bt_merge: inserted <%x> into tape %d\n", + d, btspool->bts_tape); + } +#endif /* FASTBUILD_DEBUG */ + } + } +#ifdef FASTBUILD_DEBUG + { + bool isnull; + Datum d = index_getattr(&(bti->bti_itup), 1, + RelationGetTupleDescriptor(index), + &isnull); + printf("_bt_merge: got <%x> from tape %d\n", d, t); + } +#endif /* FASTBUILD_DEBUG */ + + itape = btspool->bts_itape[t]; + if (!tapedone[t]) { + BTItem newbti = _bt_tapenext(itape, &tapepos[t]); + + if (newbti == (BTItem) NULL) { + if (_bt_taperead(itape) == 0) { + tapedone[t] = 1; + } else { + tapepos[t] = itape->bttb_data; + newbti = _bt_tapenext(itape, &tapepos[t]); + } + } + if (newbti != (BTItem) NULL) { + BTPriQueueElem nexte; + + nexte.btpqe_tape = t; + nexte.btpqe_item = newbti; + _bt_pqadd(&q, &nexte); + } + } + } /* item */ + } /* run */ + + /* + * we are here because we ran out of input on all of the input + * tapes. + * + * if this pass did not generate more actual output runs than + * we have tapes, we know we have at most one run in each + * tape. this means that we are ready to merge into the final + * btree leaf pages instead of merging into a tape file. + */ + if (nruns <= btspool->bts_ntapes) { + doleaf = true; + } + } while (nruns > 0); /* pass */ + + /* + * this is the rightmost page, so the ItemId array needs to be + * slid back one slot. + */ + _bt_slideleft(index, state.btps_buf, state.btps_page); + _bt_wrtbuf(index, state.btps_buf); + + return(firstblk); +} + + +/* + * given the block number 'blk' of the first page of a set of linked + * siblings (i.e., the start of an entire level of the btree), + * construct the corresponding next level of the btree. we do this by + * placing minimum keys from each page into this page. the format of + * the internal pages is otherwise the same as for leaf pages. + */ +void +_bt_upperbuild(Relation index, BlockNumber blk, int level) +{ + Buffer rbuf; + Page rpage; + BTPageOpaque ropaque; + BTPageState state; + BlockNumber firstblk; + BTItem bti; + BTItem nbti; + OffsetNumber off; + + rbuf = _bt_getbuf(index, blk, BT_WRITE); + rpage = BufferGetPage(rbuf); + ropaque = (BTPageOpaque) PageGetSpecialPointer(rpage); + + /* + * if we only have one page on a level, we can just make it the + * root. 
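+	 * (for example, an index whose leaf level spans three pages gets a
+	 * single parent page built by the loop below; on the recursive call
+	 * that parent page is rightmost, so it is marked BTP_ROOT here and
+	 * the recursion stops.)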
+ */ + if (P_RIGHTMOST(ropaque)) { + ropaque->btpo_flags |= BTP_ROOT; + _bt_wrtbuf(index, rbuf); + _bt_metaproot(index, blk); + return; + } + _bt_relbuf(index, rbuf, BT_WRITE); + + (void) memset((char *) &state, 0, sizeof(BTPageState)); + _bt_blnewpage(index, &(state.btps_buf), &(state.btps_page), 0); + state.btps_lastoff = P_HIKEY; + state.btps_lastbti = (BTItem) NULL; + firstblk = BufferGetBlockNumber(state.btps_buf); + + /* for each page... */ + do { + rbuf = _bt_getbuf(index, blk, BT_READ); + rpage = BufferGetPage(rbuf); + ropaque = (BTPageOpaque) PageGetSpecialPointer(rpage); + + /* for each item... */ + if (!PageIsEmpty(rpage)) { + /* + * form a new index tuple corresponding to the minimum key + * of the lower page and insert it into a page at this + * level. + */ + off = P_RIGHTMOST(ropaque) ? P_HIKEY : P_FIRSTKEY; + bti = (BTItem) PageGetItem(rpage, PageGetItemId(rpage, off)); + nbti = _bt_formitem(&(bti->bti_itup)); + ItemPointerSet(&(nbti->bti_itup.t_tid), blk, P_HIKEY); +#ifdef FASTBUILD_DEBUG + { + bool isnull; + Datum d = index_getattr(&(nbti->bti_itup), 1, + RelationGetTupleDescriptor(index), + &isnull); + printf("_bt_upperbuild: inserting <%x> at %d\n", + d, level); + } +#endif /* FASTBUILD_DEBUG */ + _bt_buildadd(index, &state, nbti, 0); + pfree((void *) nbti); + } + blk = ropaque->btpo_next; + _bt_relbuf(index, rbuf, BT_READ); + } while (blk != P_NONE); + + /* + * this is the rightmost page, so the ItemId array needs to be + * slid back one slot. + */ + _bt_slideleft(index, state.btps_buf, state.btps_page); + _bt_wrtbuf(index, state.btps_buf); + + _bt_upperbuild(index, firstblk, level + 1); +} + +/* + * given a spool loading by successive calls to _bt_spool, create an + * entire btree. + */ +void +_bt_leafbuild(Relation index, void *spool) +{ + BTSpool *btspool = (BTSpool *) spool; + BlockNumber firstblk; + + /* + * merge the runs into btree leaf pages. + */ + firstblk = _bt_merge(index, btspool); + + /* + * build the upper levels of the btree. + */ + _bt_upperbuild(index, firstblk, 0); +} + +#else /* !FASTBUILD */ + +void *_bt_spoolinit(Relation index, int ntapes) { return((void *) NULL); } +void _bt_spooldestroy(void *spool) { } +void _bt_spool(Relation index, BTItem btitem, void *spool) { } +void _bt_upperbuild(Relation index, BlockNumber blk, int level) { } +void _bt_leafbuild(Relation index, void *spool) { } + +#endif /* !FASTBUILD */ diff --git a/src/backend/access/nbtree/nbtstrat.c b/src/backend/access/nbtree/nbtstrat.c new file mode 100644 index 00000000000..2214c60950d --- /dev/null +++ b/src/backend/access/nbtree/nbtstrat.c @@ -0,0 +1,134 @@ +/*------------------------------------------------------------------------- + * + * btstrat.c-- + * Srategy map entries for the btree indexed access method + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/nbtree/Attic/nbtstrat.c,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/genam.h" +#include "access/nbtree.h" + +/* + * Note: + * StrategyNegate, StrategyCommute, and StrategyNegateCommute + * assume <, <=, ==, >=, > ordering. 
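+ *
+ *	For example, reading the "<" (first) slot of each table below:
+ *	negate("<") is ">=", commute("<") is ">", and negate-commute("<")
+ *	is "<=".  The middle slot of BTNegate is InvalidStrategy because
+ *	"not equal" is not a btree strategy.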
+ */ +static StrategyNumber BTNegate[5] = { + BTGreaterEqualStrategyNumber, + BTGreaterStrategyNumber, + InvalidStrategy, + BTLessStrategyNumber, + BTLessEqualStrategyNumber +}; + +static StrategyNumber BTCommute[5] = { + BTGreaterStrategyNumber, + BTGreaterEqualStrategyNumber, + InvalidStrategy, + BTLessEqualStrategyNumber, + BTLessStrategyNumber +}; + +static StrategyNumber BTNegateCommute[5] = { + BTLessEqualStrategyNumber, + BTLessStrategyNumber, + InvalidStrategy, + BTGreaterStrategyNumber, + BTGreaterEqualStrategyNumber +}; + +static uint16 BTLessTermData[] = { /* XXX type clash */ + 2, + BTLessStrategyNumber, + SK_NEGATE, + BTLessStrategyNumber, + SK_NEGATE | SK_COMMUTE +}; + +static uint16 BTLessEqualTermData[] = { /* XXX type clash */ + 2, + BTLessEqualStrategyNumber, + 0x0, + BTLessEqualStrategyNumber, + SK_COMMUTE +}; + +static uint16 BTGreaterEqualTermData[] = { /* XXX type clash */ + 2, + BTGreaterEqualStrategyNumber, + 0x0, + BTGreaterEqualStrategyNumber, + SK_COMMUTE + }; + +static uint16 BTGreaterTermData[] = { /* XXX type clash */ + 2, + BTGreaterStrategyNumber, + SK_NEGATE, + BTGreaterStrategyNumber, + SK_NEGATE | SK_COMMUTE +}; + +static StrategyTerm BTEqualExpressionData[] = { + (StrategyTerm)BTLessTermData, /* XXX */ + (StrategyTerm)BTLessEqualTermData, /* XXX */ + (StrategyTerm)BTGreaterEqualTermData, /* XXX */ + (StrategyTerm)BTGreaterTermData, /* XXX */ + NULL +}; + +static StrategyEvaluationData BTEvaluationData = { + /* XXX static for simplicity */ + + BTMaxStrategyNumber, + (StrategyTransformMap)BTNegate, /* XXX */ + (StrategyTransformMap)BTCommute, /* XXX */ + (StrategyTransformMap)BTNegateCommute, /* XXX */ + + { NULL, NULL, (StrategyExpression)BTEqualExpressionData, NULL, NULL, + NULL,NULL,NULL,NULL,NULL,NULL,NULL} +}; + +/* ---------------------------------------------------------------- + * RelationGetBTStrategy + * ---------------------------------------------------------------- + */ + +StrategyNumber +_bt_getstrat(Relation rel, + AttrNumber attno, + RegProcedure proc) +{ + StrategyNumber strat; + + strat = RelationGetStrategy(rel, attno, &BTEvaluationData, proc); + + Assert(StrategyNumberIsValid(strat)); + + return (strat); +} + +bool +_bt_invokestrat(Relation rel, + AttrNumber attno, + StrategyNumber strat, + Datum left, + Datum right) +{ + return (RelationInvokeStrategy(rel, &BTEvaluationData, attno, strat, + left, right)); +} diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c new file mode 100644 index 00000000000..695a2b637c8 --- /dev/null +++ b/src/backend/access/nbtree/nbtutils.c @@ -0,0 +1,239 @@ +/*------------------------------------------------------------------------- + * + * btutils.c-- + * Utility code for Postgres btree implementation. 
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "fmgr.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" +#include "utils/datum.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/iqual.h" +#include "access/nbtree.h" + +ScanKey +_bt_mkscankey(Relation rel, IndexTuple itup) +{ + ScanKey skey; + TupleDesc itupdesc; + int natts; + int i; + Datum arg; + RegProcedure proc; + bool null; + + natts = rel->rd_rel->relnatts; + itupdesc = RelationGetTupleDescriptor(rel); + + skey = (ScanKey) palloc(natts * sizeof(ScanKeyData)); + + for (i = 0; i < natts; i++) { + arg = index_getattr(itup, i + 1, itupdesc, &null); + proc = index_getprocid(rel, i + 1, BTORDER_PROC); + ScanKeyEntryInitialize(&skey[i], + 0x0, (AttrNumber) (i + 1), proc, arg); + } + + return (skey); +} + +void +_bt_freeskey(ScanKey skey) +{ + pfree(skey); +} + +void +_bt_freestack(BTStack stack) +{ + BTStack ostack; + + while (stack != (BTStack) NULL) { + ostack = stack; + stack = stack->bts_parent; + pfree(ostack->bts_btitem); + pfree(ostack); + } +} + +/* + * _bt_orderkeys() -- Put keys in a sensible order for conjunctive quals. + * + * The order of the keys in the qual match the ordering imposed by + * the index. This routine only needs to be called if there are + * more than one qual clauses using this index. + */ +void +_bt_orderkeys(Relation relation, uint16 *numberOfKeys, ScanKey key) +{ + ScanKey xform; + ScanKeyData *cur; + StrategyMap map; + int nbytes; + long test; + int i, j; + int init[BTMaxStrategyNumber+1]; + + /* haven't looked at any strategies yet */ + for (i = 0; i <= BTMaxStrategyNumber; i++) + init[i] = 0; + + /* get space for the modified array of keys */ + nbytes = BTMaxStrategyNumber * sizeof(ScanKeyData); + xform = (ScanKey) palloc(nbytes); + memset(xform, 0, nbytes); + + + /* get the strategy map for this index/attribute pair */ + /* + * XXX + * When we support multiple keys in a single index, this is what + * we'll want to do. At present, the planner is hosed, so we + * hard-wire the attribute number below. Postgres only does single- + * key indices... + * map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation), + * BTMaxStrategyNumber, + * key->data[0].attributeNumber); + */ + map = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(relation), + BTMaxStrategyNumber, + 1 /* XXX */ ); + + /* check each key passed in */ + for (i = *numberOfKeys; --i >= 0; ) { + cur = &key[i]; + for (j = BTMaxStrategyNumber; --j >= 0; ) { + if (cur->sk_procedure == map->entry[j].sk_procedure) + break; + } + + /* have we seen one of these before? 
*/ + if (init[j]) { + /* yup, use the appropriate value */ + test = + (long) FMGR_PTR2(cur->sk_func, cur->sk_procedure, + cur->sk_argument, xform[j].sk_argument); + if (test) + xform[j].sk_argument = cur->sk_argument; + } else { + /* nope, use this value */ + memmove(&xform[j], cur, sizeof(*cur)); + + init[j] = 1; + } + } + + /* if = has been specified, no other key will be used */ + if (init[BTEqualStrategyNumber - 1]) { + init[BTLessStrategyNumber - 1] = 0; + init[BTLessEqualStrategyNumber - 1] = 0; + init[BTGreaterEqualStrategyNumber - 1] = 0; + init[BTGreaterStrategyNumber - 1] = 0; + } + + /* only one of <, <= */ + if (init[BTLessStrategyNumber - 1] + && init[BTLessEqualStrategyNumber - 1]) { + + ScanKeyData *lt, *le; + + lt = &xform[BTLessStrategyNumber - 1]; + le = &xform[BTLessEqualStrategyNumber - 1]; + + /* + * DO NOT use the cached function stuff here -- this is key + * ordering, happens only when the user expresses a hokey + * qualification, and gets executed only once, anyway. The + * transform maps are hard-coded, and can't be initialized + * in the correct way. + */ + + test = (long) fmgr(le->sk_procedure, le->sk_argument, lt->sk_argument); + + if (test) + init[BTLessEqualStrategyNumber - 1] = 0; + else + init[BTLessStrategyNumber - 1] = 0; + } + + /* only one of >, >= */ + if (init[BTGreaterStrategyNumber - 1] + && init[BTGreaterEqualStrategyNumber - 1]) { + + ScanKeyData *gt, *ge; + + gt = &xform[BTGreaterStrategyNumber - 1]; + ge = &xform[BTGreaterEqualStrategyNumber - 1]; + + /* see note above on function cache */ + test = (long) fmgr(ge->sk_procedure, gt->sk_argument, gt->sk_argument); + + if (test) + init[BTGreaterStrategyNumber - 1] = 0; + else + init[BTGreaterEqualStrategyNumber - 1] = 0; + } + + /* okay, reorder and count */ + j = 0; + + for (i = BTMaxStrategyNumber; --i >= 0; ) + if (init[i]) + key[j++] = xform[i]; + + *numberOfKeys = j; + + pfree(xform); +} + +bool +_bt_checkqual(IndexScanDesc scan, IndexTuple itup) +{ + if (scan->numberOfKeys > 0) + return (index_keytest(itup, RelationGetTupleDescriptor(scan->relation), + scan->numberOfKeys, scan->keyData)); + else + return (true); +} + +BTItem +_bt_formitem(IndexTuple itup) +{ + int nbytes_btitem; + BTItem btitem; + Size tuplen; + extern Oid newoid(); + + /* disallow nulls in btree keys */ + if (itup->t_info & INDEX_NULL_MASK) + elog(WARN, "btree indices cannot include null keys"); + + /* make a copy of the index tuple with room for the sequence number */ + tuplen = IndexTupleSize(itup); + nbytes_btitem = tuplen + + (sizeof(BTItemData) - sizeof(IndexTupleData)); + + btitem = (BTItem) palloc(nbytes_btitem); + memmove((char *) &(btitem->bti_itup), (char *) itup, tuplen); + + btitem->bti_oid = newoid(); + return (btitem); +} diff --git a/src/backend/access/printtup.h b/src/backend/access/printtup.h new file mode 100644 index 00000000000..b5843daf7e0 --- /dev/null +++ b/src/backend/access/printtup.h @@ -0,0 +1,26 @@ +/*------------------------------------------------------------------------- + * + * printtup.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: printtup.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PRINTTUP_H +#define PRINTTUP_H + +#include "access/htup.h" +#include "access/tupdesc.h" + +extern Oid typtoout(Oid type); +extern void printtup(HeapTuple tuple, TupleDesc typeinfo); +extern void showatts(char *name, TupleDesc attinfo); +extern void debugtup(HeapTuple tuple, 
TupleDesc typeinfo); +extern void printtup_internal(HeapTuple tuple, TupleDesc typeinfo); +extern Oid gettypelem(Oid type); + +#endif /* PRINTTUP_H */ diff --git a/src/backend/access/relscan.h b/src/backend/access/relscan.h new file mode 100644 index 00000000000..7899e9d945f --- /dev/null +++ b/src/backend/access/relscan.h @@ -0,0 +1,87 @@ +/*------------------------------------------------------------------------- + * + * relscan.h-- + * POSTGRES internal relation scan descriptor definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: relscan.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef RELSCAN_H +#define RELSCAN_H + +#include "c.h" + +#include "access/skey.h" +#include "storage/buf.h" +#include "access/htup.h" +#include "storage/itemptr.h" + +#include "utils/tqual.h" +#include "utils/rel.h" + + +typedef ItemPointerData MarkData; + +typedef struct HeapScanDescData { + Relation rs_rd; /* pointer to relation descriptor */ + HeapTuple rs_ptup; /* previous tuple in scan */ + HeapTuple rs_ctup; /* current tuple in scan */ + HeapTuple rs_ntup; /* next tuple in scan */ + Buffer rs_pbuf; /* previous buffer in scan */ + Buffer rs_cbuf; /* current buffer in scan */ + Buffer rs_nbuf; /* next buffer in scan */ + ItemPointerData rs_mptid; /* marked previous tid */ + ItemPointerData rs_mctid; /* marked current tid */ + ItemPointerData rs_mntid; /* marked next tid */ + ItemPointerData rs_mcd; /* marked current delta XXX ??? */ + bool rs_atend; /* restart scan at end? */ + TimeQual rs_tr; /* time qualification */ + uint16 rs_cdelta; /* current delta in chain */ + uint16 rs_nkeys; /* number of attributes in keys */ + ScanKey rs_key; /* key descriptors */ +} HeapScanDescData; + +typedef HeapScanDescData *HeapScanDesc; + +typedef struct IndexScanDescData { + Relation relation; /* relation descriptor */ + void *opaque; /* am-specific slot */ + ItemPointerData previousItemData; /* previous index pointer */ + ItemPointerData currentItemData; /* current index pointer */ + ItemPointerData nextItemData; /* next index pointer */ + MarkData previousMarkData; /* marked previous pointer */ + MarkData currentMarkData; /* marked current pointer */ + MarkData nextMarkData; /* marked next pointer */ + uint8 flags; /* scan position flags */ + bool scanFromEnd; /* restart scan at end? */ + uint16 numberOfKeys; /* number of key attributes */ + ScanKey keyData; /* key descriptor */ +} IndexScanDescData; + +typedef IndexScanDescData *IndexScanDesc; + +/* ---------------- + * IndexScanDescPtr is used in the executor where we have to + * keep track of several index scans when using several indices + * - cim 9/10/89 + * ---------------- + */ +typedef IndexScanDesc *IndexScanDescPtr; + +/* + * HeapScanIsValid -- + * True iff the heap scan is valid. + */ +#define HeapScanIsValid(scan) PointerIsValid(scan) + +/* + * IndexScanIsValid -- + * True iff the index scan is valid. + */ +#define IndexScanIsValid(scan) PointerIsValid(scan) + +#endif /* RELSCAN_H */ diff --git a/src/backend/access/rtree.h b/src/backend/access/rtree.h new file mode 100644 index 00000000000..79f1622e48b --- /dev/null +++ b/src/backend/access/rtree.h @@ -0,0 +1,98 @@ +/*------------------------------------------------------------------------- + * + * rtree.h-- + * common declarations for the rtree access method code. 
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: rtree.h,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef RTREE_H +#define RTREE_H + +/* see rtstrat.c for what all this is about */ +#define RTNStrategies 8 +#define RTLeftStrategyNumber 1 +#define RTOverLeftStrategyNumber 2 +#define RTOverlapStrategyNumber 3 +#define RTOverRightStrategyNumber 4 +#define RTRightStrategyNumber 5 +#define RTSameStrategyNumber 6 +#define RTContainsStrategyNumber 7 +#define RTContainedByStrategyNumber 8 + +#define RTNProcs 3 +#define RT_UNION_PROC 1 +#define RT_INTER_PROC 2 +#define RT_SIZE_PROC 3 + +#define F_LEAF (1 << 0) + +typedef struct RTreePageOpaqueData { + uint32 flags; +} RTreePageOpaqueData; + +typedef RTreePageOpaqueData *RTreePageOpaque; + +/* + * When we descend a tree, we keep a stack of parent pointers. + */ + +typedef struct RTSTACK { + struct RTSTACK *rts_parent; + OffsetNumber rts_child; + BlockNumber rts_blk; +} RTSTACK; + +/* + * When we're doing a scan, we need to keep track of the parent stack + * for the marked and current items. Also, rtrees have the following + * property: if you're looking for the box (1,1,2,2), on the internal + * nodes you have to search for all boxes that *contain* (1,1,2,2), and + * not the ones that match it. We have a private scan key for internal + * nodes in the opaque structure for rtrees for this reason. See + * access/index-rtree/rtscan.c and rtstrat.c for how it gets initialized. + */ + +typedef struct RTreeScanOpaqueData { + struct RTSTACK *s_stack; + struct RTSTACK *s_markstk; + uint16 s_flags; + uint16 s_internalNKey; + ScanKey s_internalKey; +} RTreeScanOpaqueData; + +typedef RTreeScanOpaqueData *RTreeScanOpaque; + +/* + * When we're doing a scan and updating a tree at the same time, the + * updates may affect the scan. We use the flags entry of the scan's + * opaque space to record our actual position in response to updates + * that we can't handle simply by adjusting pointers. + */ + +#define RTS_CURBEFORE ((uint16) (1 << 0)) +#define RTS_MRKBEFORE ((uint16) (1 << 1)) + +/* root page of an rtree */ +#define P_ROOT 0 + +/* + * When we update a relation on which we're doing a scan, we need to + * check the scan and fix it if the update affected any of the pages it + * touches. Otherwise, we can miss records that we should see. The only + * times we need to do this are for deletions and splits. See the code in + * rtscan.c for how the scan is fixed. These two contants tell us what sort + * of operation changed the index. 
+ */ + +#define RTOP_DEL 0 +#define RTOP_SPLIT 1 + +/* defined in rtree.c */ +extern void freestack(RTSTACK *s); + +#endif /* RTREE_H */ diff --git a/src/backend/access/rtree/Makefile.inc b/src/backend/access/rtree/Makefile.inc new file mode 100644 index 00000000000..a93a5e53290 --- /dev/null +++ b/src/backend/access/rtree/Makefile.inc @@ -0,0 +1,14 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for access/rtree (R-Tree access method) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:12 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= rtget.c rtproc.c rtree.c rtscan.c rtstrat.c diff --git a/src/backend/access/rtree/rtget.c b/src/backend/access/rtree/rtget.c new file mode 100644 index 00000000000..fb2e169297d --- /dev/null +++ b/src/backend/access/rtree/rtget.c @@ -0,0 +1,320 @@ +/*------------------------------------------------------------------------- + * + * rtget.c-- + * fetch tuples from an rtree scan. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtget.c,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/iqual.h" +#include "access/rtree.h" +#include "access/sdir.h" + +static OffsetNumber findnext(IndexScanDesc s, Page p, OffsetNumber n, + ScanDirection dir); +static RetrieveIndexResult rtscancache(IndexScanDesc s, ScanDirection dir); +static RetrieveIndexResult rtfirst(IndexScanDesc s, ScanDirection dir); +static RetrieveIndexResult rtnext(IndexScanDesc s, ScanDirection dir); +static ItemPointer rtheapptr(Relation r, ItemPointer itemp); + + +RetrieveIndexResult +rtgettuple(IndexScanDesc s, ScanDirection dir) +{ + RetrieveIndexResult res; + + /* if we have it cached in the scan desc, just return the value */ + if ((res = rtscancache(s, dir)) != (RetrieveIndexResult) NULL) + return (res); + + /* not cached, so we'll have to do some work */ + if (ItemPointerIsValid(&(s->currentItemData))) { + res = rtnext(s, dir); + } else { + res = rtfirst(s, dir); + } + return (res); +} + +static RetrieveIndexResult +rtfirst(IndexScanDesc s, ScanDirection dir) +{ + Buffer b; + Page p; + OffsetNumber n; + OffsetNumber maxoff; + RetrieveIndexResult res; + RTreePageOpaque po; + RTreeScanOpaque so; + RTSTACK *stk; + BlockNumber blk; + IndexTuple it; + ItemPointer ip; + + b = ReadBuffer(s->relation, P_ROOT); + p = BufferGetPage(b); + po = (RTreePageOpaque) PageGetSpecialPointer(p); + so = (RTreeScanOpaque) s->opaque; + + for (;;) { + maxoff = PageGetMaxOffsetNumber(p); + if (ScanDirectionIsBackward(dir)) + n = findnext(s, p, maxoff, dir); + else + n = findnext(s, p, FirstOffsetNumber, dir); + + while (n < FirstOffsetNumber || n > maxoff) { + + ReleaseBuffer(b); + if (so->s_stack == (RTSTACK *) NULL) + return ((RetrieveIndexResult) NULL); + + stk = so->s_stack; + b = ReadBuffer(s->relation, stk->rts_blk); + p = BufferGetPage(b); + po = (RTreePageOpaque) PageGetSpecialPointer(p); + maxoff = PageGetMaxOffsetNumber(p); + + if 
(ScanDirectionIsBackward(dir)) { + n = OffsetNumberPrev(stk->rts_child); + } else { + n = OffsetNumberNext(stk->rts_child); + } + so->s_stack = stk->rts_parent; + pfree(stk); + + n = findnext(s, p, n, dir); + } + if (po->flags & F_LEAF) { + ItemPointerSet(&(s->currentItemData), BufferGetBlockNumber(b), n); + + it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n)); + ip = (ItemPointer) palloc(sizeof(ItemPointerData)); + memmove((char *) ip, (char *) &(it->t_tid), + sizeof(ItemPointerData)); + ReleaseBuffer(b); + + res = FormRetrieveIndexResult(&(s->currentItemData), ip); + + return (res); + } else { + stk = (RTSTACK *) palloc(sizeof(RTSTACK)); + stk->rts_child = n; + stk->rts_blk = BufferGetBlockNumber(b); + stk->rts_parent = so->s_stack; + so->s_stack = stk; + + it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n)); + blk = ItemPointerGetBlockNumber(&(it->t_tid)); + + ReleaseBuffer(b); + b = ReadBuffer(s->relation, blk); + p = BufferGetPage(b); + po = (RTreePageOpaque) PageGetSpecialPointer(p); + } + } +} + +static RetrieveIndexResult +rtnext(IndexScanDesc s, ScanDirection dir) +{ + Buffer b; + Page p; + OffsetNumber n; + OffsetNumber maxoff; + RetrieveIndexResult res; + RTreePageOpaque po; + RTreeScanOpaque so; + RTSTACK *stk; + BlockNumber blk; + IndexTuple it; + ItemPointer ip; + + blk = ItemPointerGetBlockNumber(&(s->currentItemData)); + n = ItemPointerGetOffsetNumber(&(s->currentItemData)); + + if (ScanDirectionIsForward(dir)) { + n = OffsetNumberNext(n); + } else { + n = OffsetNumberPrev(n); + } + + b = ReadBuffer(s->relation, blk); + p = BufferGetPage(b); + po = (RTreePageOpaque) PageGetSpecialPointer(p); + so = (RTreeScanOpaque) s->opaque; + + for (;;) { + maxoff = PageGetMaxOffsetNumber(p); + n = findnext(s, p, n, dir); + + while (n < FirstOffsetNumber || n > maxoff) { + + ReleaseBuffer(b); + if (so->s_stack == (RTSTACK *) NULL) + return ((RetrieveIndexResult) NULL); + + stk = so->s_stack; + b = ReadBuffer(s->relation, stk->rts_blk); + p = BufferGetPage(b); + maxoff = PageGetMaxOffsetNumber(p); + po = (RTreePageOpaque) PageGetSpecialPointer(p); + + if (ScanDirectionIsBackward(dir)) { + n = OffsetNumberPrev(stk->rts_child); + } else { + n = OffsetNumberNext(stk->rts_child); + } + so->s_stack = stk->rts_parent; + pfree(stk); + + n = findnext(s, p, n, dir); + } + if (po->flags & F_LEAF) { + ItemPointerSet(&(s->currentItemData), BufferGetBlockNumber(b), n); + + it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n)); + ip = (ItemPointer) palloc(sizeof(ItemPointerData)); + memmove((char *) ip, (char *) &(it->t_tid), + sizeof(ItemPointerData)); + ReleaseBuffer(b); + + res = FormRetrieveIndexResult(&(s->currentItemData), ip); + + return (res); + } else { + stk = (RTSTACK *) palloc(sizeof(RTSTACK)); + stk->rts_child = n; + stk->rts_blk = BufferGetBlockNumber(b); + stk->rts_parent = so->s_stack; + so->s_stack = stk; + + it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n)); + blk = ItemPointerGetBlockNumber(&(it->t_tid)); + + ReleaseBuffer(b); + b = ReadBuffer(s->relation, blk); + p = BufferGetPage(b); + po = (RTreePageOpaque) PageGetSpecialPointer(p); + + if (ScanDirectionIsBackward(dir)) { + n = PageGetMaxOffsetNumber(p); + } else { + n = FirstOffsetNumber; + } + } + } +} + +static OffsetNumber +findnext(IndexScanDesc s, Page p, OffsetNumber n, ScanDirection dir) +{ + OffsetNumber maxoff; + IndexTuple it; + RTreePageOpaque po; + RTreeScanOpaque so; + + maxoff = PageGetMaxOffsetNumber(p); + po = (RTreePageOpaque) PageGetSpecialPointer(p); + so = (RTreeScanOpaque) s->opaque; + + /* + * If we 
modified the index during the scan, we may have a pointer to + * a ghost tuple, before the scan. If this is the case, back up one. + */ + + if (so->s_flags & RTS_CURBEFORE) { + so->s_flags &= ~RTS_CURBEFORE; + n = OffsetNumberPrev(n); + } + + while (n >= FirstOffsetNumber && n <= maxoff) { + it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n)); + if (po->flags & F_LEAF) { + if (index_keytest(it, + RelationGetTupleDescriptor(s->relation), + s->numberOfKeys, s->keyData)) + break; + } else { + if (index_keytest(it, + RelationGetTupleDescriptor(s->relation), + so->s_internalNKey, so->s_internalKey)) + break; + } + + if (ScanDirectionIsBackward(dir)) { + n = OffsetNumberPrev(n); + } else { + n = OffsetNumberNext(n); + } + } + + return (n); +} + +static RetrieveIndexResult +rtscancache(IndexScanDesc s, ScanDirection dir) +{ + RetrieveIndexResult res; + ItemPointer ip; + + if (!(ScanDirectionIsNoMovement(dir) + && ItemPointerIsValid(&(s->currentItemData)))) { + + return ((RetrieveIndexResult) NULL); + } + + ip = rtheapptr(s->relation, &(s->currentItemData)); + + if (ItemPointerIsValid(ip)) + res = FormRetrieveIndexResult(&(s->currentItemData), ip); + else + res = (RetrieveIndexResult) NULL; + + return (res); +} + +/* + * rtheapptr returns the item pointer to the tuple in the heap relation + * for which itemp is the index relation item pointer. + */ +static ItemPointer +rtheapptr(Relation r, ItemPointer itemp) +{ + Buffer b; + Page p; + IndexTuple it; + ItemPointer ip; + OffsetNumber n; + + ip = (ItemPointer) palloc(sizeof(ItemPointerData)); + if (ItemPointerIsValid(itemp)) { + b = ReadBuffer(r, ItemPointerGetBlockNumber(itemp)); + p = BufferGetPage(b); + n = ItemPointerGetOffsetNumber(itemp); + it = (IndexTuple) PageGetItem(p, PageGetItemId(p, n)); + memmove((char *) ip, (char *) &(it->t_tid), + sizeof(ItemPointerData)); + ReleaseBuffer(b); + } else { + ItemPointerSetInvalid(ip); + } + + return (ip); +} diff --git a/src/backend/access/rtree/rtproc.c b/src/backend/access/rtree/rtproc.c new file mode 100644 index 00000000000..a2f7bef46b4 --- /dev/null +++ b/src/backend/access/rtree/rtproc.c @@ -0,0 +1,150 @@ +/*------------------------------------------------------------------------- + * + * rtproc.c-- + * pg_amproc entries for rtrees. 
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtproc.c,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include + +#include "postgres.h" + +#include "utils/elog.h" +#include "utils/geo-decls.h" +#include "utils/palloc.h" + +BOX +*rt_box_union(BOX *a, BOX *b) +{ + BOX *n; + + if ((n = (BOX *) palloc(sizeof (*n))) == (BOX *) NULL) + elog(WARN, "Cannot allocate box for union"); + + n->xh = Max(a->xh, b->xh); + n->yh = Max(a->yh, b->yh); + n->xl = Min(a->xl, b->xl); + n->yl = Min(a->yl, b->yl); + + return (n); +} + +BOX * +rt_box_inter(BOX *a, BOX *b) +{ + BOX *n; + + if ((n = (BOX *) palloc(sizeof (*n))) == (BOX *) NULL) + elog(WARN, "Cannot allocate box for union"); + + n->xh = Min(a->xh, b->xh); + n->yh = Min(a->yh, b->yh); + n->xl = Max(a->xl, b->xl); + n->yl = Max(a->yl, b->yl); + + if (n->xh < n->xl || n->yh < n->yl) { + pfree(n); + return ((BOX *) NULL); + } + + return (n); +} + +void +rt_box_size(BOX *a, float *size) +{ + if (a == (BOX *) NULL || a->xh <= a->xl || a->yh <= a->yl) + *size = 0.0; + else + *size = (float) ((a->xh - a->xl) * (a->yh - a->yl)); + + return; +} + +/* + * rt_bigbox_size() -- Compute a size for big boxes. + * + * In an earlier release of the system, this routine did something + * different from rt_box_size. We now use floats, rather than ints, + * as the return type for the size routine, so we no longer need to + * have a special return type for big boxes. + */ +void +rt_bigbox_size(BOX *a, float *size) +{ + rt_box_size(a, size); +} + +POLYGON * +rt_poly_union(POLYGON *a, POLYGON *b) +{ + POLYGON *p; + + p = (POLYGON *)PALLOCTYPE(POLYGON); + + if (!PointerIsValid(p)) + elog(WARN, "Cannot allocate polygon for union"); + + memset((char *) p, 0, sizeof(POLYGON)); /* zero any holes */ + p->size = sizeof(POLYGON); + p->npts = 0; + p->boundbox.xh = Max(a->boundbox.xh, b->boundbox.xh); + p->boundbox.yh = Max(a->boundbox.yh, b->boundbox.yh); + p->boundbox.xl = Min(a->boundbox.xl, b->boundbox.xl); + p->boundbox.yl = Min(a->boundbox.yl, b->boundbox.yl); + return p; +} + +void +rt_poly_size(POLYGON *a, float *size) +{ + double xdim, ydim; + + size = (float *) palloc(sizeof(float)); + if (a == (POLYGON *) NULL || + a->boundbox.xh <= a->boundbox.xl || + a->boundbox.yh <= a->boundbox.yl) + *size = 0.0; + else { + xdim = (a->boundbox.xh - a->boundbox.xl); + ydim = (a->boundbox.yh - a->boundbox.yl); + + *size = (float) (xdim * ydim); + } + + return; +} + +POLYGON * +rt_poly_inter(POLYGON *a, POLYGON *b) +{ + POLYGON *p; + + p = (POLYGON *) PALLOCTYPE(POLYGON); + + if (!PointerIsValid(p)) + elog(WARN, "Cannot allocate polygon for intersection"); + + memset((char *) p, 0, sizeof(POLYGON)); /* zero any holes */ + p->size = sizeof(POLYGON); + p->npts = 0; + p->boundbox.xh = Min(a->boundbox.xh, b->boundbox.xh); + p->boundbox.yh = Min(a->boundbox.yh, b->boundbox.yh); + p->boundbox.xl = Max(a->boundbox.xl, b->boundbox.xl); + p->boundbox.yl = Max(a->boundbox.yl, b->boundbox.yl); + + if (p->boundbox.xh < p->boundbox.xl || p->boundbox.yh < p->boundbox.yl) + { + pfree(p); + return ((POLYGON *) NULL); + } + + return (p); +} diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c new file mode 100644 index 00000000000..96efc3bc90b --- /dev/null +++ b/src/backend/access/rtree/rtree.c @@ -0,0 +1,955 @@ 
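/*
 * An illustrative, self-contained sketch (not taken from the sources above)
 * of the geometric bookkeeping that the rtproc.c routines supply and that
 * picksplit() in rtree.c relies on: the "waste" of grouping two index
 * entries is the area of their bounding-box union minus the area of their
 * intersection.  picksplit() computes this quantity for every pair of
 * entries on an overfull page and uses the pair with the greatest waste as
 * the seeds of the two new pages.  A simplified box type stands in for the
 * BOX of utils/geo-decls.h so the example compiles on its own.
 */
#include <stdio.h>

typedef struct {
	double xl, yl;			/* lower-left corner */
	double xh, yh;			/* upper-right corner */
} SketchBox;

/* area of a box; inverted or degenerate boxes count as empty (cf. rt_box_size) */
static double
sketch_size(const SketchBox *b)
{
	if (b->xh <= b->xl || b->yh <= b->yl)
		return 0.0;
	return (b->xh - b->xl) * (b->yh - b->yl);
}

/* smallest box enclosing both arguments (cf. rt_box_union) */
static SketchBox
sketch_union(const SketchBox *a, const SketchBox *b)
{
	SketchBox u;

	u.xl = (a->xl < b->xl) ? a->xl : b->xl;
	u.yl = (a->yl < b->yl) ? a->yl : b->yl;
	u.xh = (a->xh > b->xh) ? a->xh : b->xh;
	u.yh = (a->yh > b->yh) ? a->yh : b->yh;
	return u;
}

/* overlap of the arguments; may come out inverted, i.e. empty (cf. rt_box_inter) */
static SketchBox
sketch_inter(const SketchBox *a, const SketchBox *b)
{
	SketchBox i;

	i.xl = (a->xl > b->xl) ? a->xl : b->xl;
	i.yl = (a->yl > b->yl) ? a->yl : b->yl;
	i.xh = (a->xh < b->xh) ? a->xh : b->xh;
	i.yh = (a->yh < b->yh) ? a->yh : b->yh;
	return i;
}

int
main(void)
{
	SketchBox a = {0.0, 0.0, 2.0, 2.0};
	SketchBox b = {5.0, 5.0, 6.0, 6.0};
	SketchBox u = sketch_union(&a, &b);
	SketchBox i = sketch_inter(&a, &b);

	/* the boxes are far apart, so grouping them wastes 36 - 0 = 36 square units */
	printf("waste(a,b) = %g\n", sketch_size(&u) - sketch_size(&i));
	return 0;
}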
+/*------------------------------------------------------------------------- + * + * rtree.c-- + * interface routines for the postgres rtree indexed access method. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/excid.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/rtree.h" +#include "access/rtscan.h" +#include "access/funcindex.h" +#include "access/tupdesc.h" + +#include "nodes/execnodes.h" +#include "nodes/plannodes.h" + +#include "executor/executor.h" +#include "executor/tuptable.h" + +#include "catalog/index.h" + +typedef struct SPLITVEC { + OffsetNumber *spl_left; + int spl_nleft; + char *spl_ldatum; + OffsetNumber *spl_right; + int spl_nright; + char *spl_rdatum; +} SPLITVEC; + +typedef struct RTSTATE { + func_ptr unionFn; /* union function */ + func_ptr sizeFn; /* size function */ + func_ptr interFn; /* intersection function */ +} RTSTATE; + +/* non-export function prototypes */ +static InsertIndexResult rtdoinsert(Relation r, IndexTuple itup, + RTSTATE *rtstate); +static void rttighten(Relation r, RTSTACK *stk, char *datum, int att_size, + RTSTATE *rtstate); +static InsertIndexResult dosplit(Relation r, Buffer buffer, RTSTACK *stack, + IndexTuple itup, RTSTATE *rtstate); +static void rtintinsert(Relation r, RTSTACK *stk, IndexTuple ltup, + IndexTuple rtup, RTSTATE *rtstate); +static void rtnewroot(Relation r, IndexTuple lt, IndexTuple rt); +static void picksplit(Relation r, Page page, SPLITVEC *v, IndexTuple itup, + RTSTATE *rtstate); +static void RTInitBuffer(Buffer b, uint32 f); +static OffsetNumber choose(Relation r, Page p, IndexTuple it, + RTSTATE *rtstate); +static int nospace(Page p, IndexTuple it); +static void initRtstate(RTSTATE *rtstate, Relation index); + + +void +rtbuild(Relation heap, + Relation index, + int natts, + AttrNumber *attnum, + IndexStrategy istrat, + uint16 pcount, + Datum *params, + FuncIndexInfo *finfo, + PredInfo *predInfo) +{ + HeapScanDesc scan; + Buffer buffer; + AttrNumber i; + HeapTuple htup; + IndexTuple itup; + TupleDesc hd, id; + InsertIndexResult res; + Datum *d; + bool *nulls; + int nb, nh, ni; + ExprContext *econtext; + TupleTable tupleTable; + TupleTableSlot *slot; + Oid hrelid, irelid; + Node *pred, *oldPred; + RTSTATE rtState; + + initRtstate(&rtState, index); + + /* rtrees only know how to do stupid locking now */ + RelationSetLockForWrite(index); + + pred = predInfo->pred; + oldPred = predInfo->oldPred; + + /* + * We expect to be called exactly once for any index relation. + * If that's not the case, big trouble's what we have. 
+ */ + + if (oldPred == NULL && (nb = RelationGetNumberOfBlocks(index)) != 0) + elog(WARN, "%s already contains data", index->rd_rel->relname.data); + + /* initialize the root page (if this is a new index) */ + if (oldPred == NULL) { + buffer = ReadBuffer(index, P_NEW); + RTInitBuffer(buffer, F_LEAF); + WriteBuffer(buffer); + } + + /* init the tuple descriptors and get set for a heap scan */ + hd = RelationGetTupleDescriptor(heap); + id = RelationGetTupleDescriptor(index); + d = (Datum *)palloc(natts * sizeof (*d)); + nulls = (bool *)palloc(natts * sizeof (*nulls)); + + /* + * If this is a predicate (partial) index, we will need to evaluate the + * predicate using ExecQual, which requires the current tuple to be in a + * slot of a TupleTable. In addition, ExecQual must have an ExprContext + * referring to that slot. Here, we initialize dummy TupleTable and + * ExprContext objects for this purpose. --Nels, Feb '92 + */ +#ifndef OMIT_PARTIAL_INDEX + if (pred != NULL || oldPred != NULL) { + tupleTable = ExecCreateTupleTable(1); + slot = ExecAllocTableSlot(tupleTable); + econtext = makeNode(ExprContext); + FillDummyExprContext(econtext, slot, hd, buffer); + } +#endif /* OMIT_PARTIAL_INDEX */ + scan = heap_beginscan(heap, 0, NowTimeQual, 0, (ScanKey) NULL); + htup = heap_getnext(scan, 0, &buffer); + + /* count the tuples as we insert them */ + nh = ni = 0; + + for (; HeapTupleIsValid(htup); htup = heap_getnext(scan, 0, &buffer)) { + + nh++; + + /* + * If oldPred != NULL, this is an EXTEND INDEX command, so skip + * this tuple if it was already in the existing partial index + */ + if (oldPred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + /*SetSlotContents(slot, htup); */ + slot->val = htup; + if (ExecQual((List*)oldPred, econtext) == true) { + ni++; + continue; + } +#endif /* OMIT_PARTIAL_INDEX */ + } + + /* Skip this tuple if it doesn't satisfy the partial-index predicate */ + if (pred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + /*SetSlotContents(slot, htup); */ + slot->val = htup; + if (ExecQual((List*)pred, econtext) == false) + continue; +#endif /* OMIT_PARTIAL_INDEX */ + } + + ni++; + + /* + * For the current heap tuple, extract all the attributes + * we use in this index, and note which are null. + */ + + for (i = 1; i <= natts; i++) { + int attoff; + bool attnull; + + /* + * Offsets are from the start of the tuple, and are + * zero-based; indices are one-based. The next call + * returns i - 1. That's data hiding for you. + */ + + attoff = AttrNumberGetAttrOffset(i); + /* + d[attoff] = HeapTupleGetAttributeValue(htup, buffer, + */ + d[attoff] = GetIndexValue(htup, + hd, + attoff, + attnum, + finfo, + &attnull, + buffer); + nulls[attoff] = (attnull ? 'n' : ' '); + } + + /* form an index tuple and point it at the heap tuple */ + itup = index_formtuple(id, &d[0], nulls); + itup->t_tid = htup->t_ctid; + + /* + * Since we already have the index relation locked, we + * call rtdoinsert directly. Normal access method calls + * dispatch through rtinsert, which locks the relation + * for write. This is the right thing to do if you're + * inserting single tups, but not when you're initializing + * the whole index at once. 
+ */ + + res = rtdoinsert(index, itup, &rtState); + pfree(itup); + pfree(res); + } + + /* okay, all heap tuples are indexed */ + heap_endscan(scan); + RelationUnsetLockForWrite(index); + + if (pred != NULL || oldPred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + ExecDestroyTupleTable(tupleTable, true); + pfree(econtext); +#endif /* OMIT_PARTIAL_INDEX */ + } + + /* + * Since we just counted the tuples in the heap, we update its + * stats in pg_relation to guarantee that the planner takes + * advantage of the index we just created. UpdateStats() does a + * CommandCounterIncrement(), which flushes changed entries from + * the system relcache. The act of constructing an index changes + * these heap and index tuples in the system catalogs, so they + * need to be flushed. We close them to guarantee that they + * will be. + */ + + hrelid = heap->rd_id; + irelid = index->rd_id; + heap_close(heap); + index_close(index); + + UpdateStats(hrelid, nh, true); + UpdateStats(irelid, ni, false); + + if (oldPred != NULL) { + if (ni == nh) pred = NULL; + UpdateIndexPredicate(irelid, oldPred, pred); + } + + /* be tidy */ + pfree(nulls); + pfree(d); +} + +/* + * rtinsert -- wrapper for rtree tuple insertion. + * + * This is the public interface routine for tuple insertion in rtrees. + * It doesn't do any work; just locks the relation and passes the buck. + */ +InsertIndexResult +rtinsert(Relation r, IndexTuple itup) +{ + InsertIndexResult res; + RTSTATE rtState; + + initRtstate(&rtState, r); + + RelationSetLockForWrite(r); + res = rtdoinsert(r, itup, &rtState); + + /* XXX two-phase locking -- don't unlock the relation until EOT */ + return (res); +} + +static InsertIndexResult +rtdoinsert(Relation r, IndexTuple itup, RTSTATE *rtstate) +{ + Page page; + Buffer buffer; + BlockNumber blk; + IndexTuple which; + OffsetNumber l; + RTSTACK *stack; + InsertIndexResult res; + RTreePageOpaque opaque; + char *datum; + + blk = P_ROOT; + buffer = InvalidBuffer; + stack = (RTSTACK *) NULL; + + do { + /* let go of current buffer before getting next */ + if (buffer != InvalidBuffer) + ReleaseBuffer(buffer); + + /* get next buffer */ + buffer = ReadBuffer(r, blk); + page = (Page) BufferGetPage(buffer); + + opaque = (RTreePageOpaque) PageGetSpecialPointer(page); + if (!(opaque->flags & F_LEAF)) { + RTSTACK *n; + ItemId iid; + + n = (RTSTACK *) palloc(sizeof(RTSTACK)); + n->rts_parent = stack; + n->rts_blk = blk; + n->rts_child = choose(r, page, itup, rtstate); + stack = n; + + iid = PageGetItemId(page, n->rts_child); + which = (IndexTuple) PageGetItem(page, iid); + blk = ItemPointerGetBlockNumber(&(which->t_tid)); + } + } while (!(opaque->flags & F_LEAF)); + + if (nospace(page, itup)) { + /* need to do a split */ + res = dosplit(r, buffer, stack, itup, rtstate); + freestack(stack); + WriteBuffer(buffer); /* don't forget to release buffer! 
*/ + return (res); + } + + /* add the item and write the buffer */ + if (PageIsEmpty(page)) { + l = PageAddItem(page, (Item) itup, IndexTupleSize(itup), + FirstOffsetNumber, + LP_USED); + } else { + l = PageAddItem(page, (Item) itup, IndexTupleSize(itup), + OffsetNumberNext(PageGetMaxOffsetNumber(page)), + LP_USED); + } + + WriteBuffer(buffer); + + datum = (((char *) itup) + sizeof(IndexTupleData)); + + /* now expand the page boundary in the parent to include the new child */ + rttighten(r, stack, datum, + (IndexTupleSize(itup) - sizeof(IndexTupleData)), rtstate); + freestack(stack); + + /* build and return an InsertIndexResult for this insertion */ + res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData)); + ItemPointerSet(&(res->pointerData), blk, l); + + return (res); +} + +static void +rttighten(Relation r, + RTSTACK *stk, + char *datum, + int att_size, + RTSTATE *rtstate) +{ + char *oldud; + char *tdatum; + Page p; + float old_size, newd_size; + Buffer b; + + if (stk == (RTSTACK *) NULL) + return; + + b = ReadBuffer(r, stk->rts_blk); + p = BufferGetPage(b); + + oldud = (char *) PageGetItem(p, PageGetItemId(p, stk->rts_child)); + oldud += sizeof(IndexTupleData); + + (*rtstate->sizeFn)(oldud, &old_size); + datum = (char *) (*rtstate->unionFn)(oldud, datum); + + (*rtstate->sizeFn)(datum, &newd_size); + + if (newd_size != old_size) { + TupleDesc td = RelationGetTupleDescriptor(r); + + if (td->attrs[0]->attlen < 0) { + /* + * This is an internal page, so 'oldud' had better be a + * union (constant-length) key, too. (See comment below.) + */ + Assert(VARSIZE(datum) == VARSIZE(oldud)); + memmove(oldud, datum, VARSIZE(datum)); + } else { + memmove(oldud, datum, att_size); + } + WriteBuffer(b); + + /* + * The user may be defining an index on variable-sized data (like + * polygons). If so, we need to get a constant-sized datum for + * insertion on the internal page. We do this by calling the union + * proc, which is guaranteed to return a rectangle. + */ + + tdatum = (char *) (*rtstate->unionFn)(datum, datum); + rttighten(r, stk->rts_parent, tdatum, att_size, rtstate); + pfree(tdatum); + } else { + ReleaseBuffer(b); + } + pfree(datum); +} + +/* + * dosplit -- split a page in the tree. + * + * This is the quadratic-cost split algorithm Guttman describes in + * his paper. The reason we chose it is that you can implement this + * with less information about the data types on which you're operating. + */ +static InsertIndexResult +dosplit(Relation r, + Buffer buffer, + RTSTACK *stack, + IndexTuple itup, + RTSTATE *rtstate) +{ + Page p; + Buffer leftbuf, rightbuf; + Page left, right; + ItemId itemid; + IndexTuple item; + IndexTuple ltup, rtup; + OffsetNumber maxoff; + OffsetNumber i; + OffsetNumber leftoff, rightoff; + BlockNumber lbknum, rbknum; + BlockNumber bufblock; + RTreePageOpaque opaque; + int blank; + InsertIndexResult res; + char *isnull; + SPLITVEC v; + TupleDesc tupDesc; + + isnull = (char *) palloc(r->rd_rel->relnatts); + for (blank = 0; blank < r->rd_rel->relnatts; blank++) + isnull[blank] = ' '; + p = (Page) BufferGetPage(buffer); + opaque = (RTreePageOpaque) PageGetSpecialPointer(p); + + /* + * The root of the tree is the first block in the relation. If + * we're about to split the root, we need to do some hocus-pocus + * to enforce this guarantee. 
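+ *
+ * Concretely: rather than relocate the root, we copy its entries onto two
+ * brand-new pages (the left and right halves of the split) and then, in
+ * rtnewroot(), reinitialize block zero as a fresh internal page holding
+ * only the pointers to those two halves.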
+ */ + + if (BufferGetBlockNumber(buffer) == P_ROOT) { + leftbuf = ReadBuffer(r, P_NEW); + RTInitBuffer(leftbuf, opaque->flags); + lbknum = BufferGetBlockNumber(leftbuf); + left = (Page) BufferGetPage(leftbuf); + } else { + leftbuf = buffer; + IncrBufferRefCount(buffer); + lbknum = BufferGetBlockNumber(buffer); + left = (Page) PageGetTempPage(p, sizeof(RTreePageOpaqueData)); + } + + rightbuf = ReadBuffer(r, P_NEW); + RTInitBuffer(rightbuf, opaque->flags); + rbknum = BufferGetBlockNumber(rightbuf); + right = (Page) BufferGetPage(rightbuf); + + picksplit(r, p, &v, itup, rtstate); + + leftoff = rightoff = FirstOffsetNumber; + maxoff = PageGetMaxOffsetNumber(p); + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { + itemid = PageGetItemId(p, i); + item = (IndexTuple) PageGetItem(p, itemid); + + if (i == *(v.spl_left)) { + (void) PageAddItem(left, (Item) item, IndexTupleSize(item), + leftoff, LP_USED); + leftoff = OffsetNumberNext(leftoff); + v.spl_left++; /* advance in left split vector */ + } else { + (void) PageAddItem(right, (Item) item, IndexTupleSize(item), + rightoff, LP_USED); + rightoff = OffsetNumberNext(rightoff); + v.spl_right++; /* advance in right split vector */ + } + } + + /* build an InsertIndexResult for this insertion */ + res = (InsertIndexResult) palloc(sizeof(InsertIndexResultData)); + + /* now insert the new index tuple */ + if (*(v.spl_left) != FirstOffsetNumber) { + (void) PageAddItem(left, (Item) itup, IndexTupleSize(itup), + leftoff, LP_USED); + leftoff = OffsetNumberNext(leftoff); + ItemPointerSet(&(res->pointerData), lbknum, leftoff); + } else { + (void) PageAddItem(right, (Item) itup, IndexTupleSize(itup), + rightoff, LP_USED); + rightoff = OffsetNumberNext(rightoff); + ItemPointerSet(&(res->pointerData), rbknum, rightoff); + } + + if ((bufblock = BufferGetBlockNumber(buffer)) != P_ROOT) { + PageRestoreTempPage(left, p); + } + WriteBuffer(leftbuf); + WriteBuffer(rightbuf); + + /* + * Okay, the page is split. We have three things left to do: + * + * 1) Adjust any active scans on this index to cope with changes + * we introduced in its structure by splitting this page. + * + * 2) "Tighten" the bounding box of the pointer to the left + * page in the parent node in the tree, if any. Since we + * moved a bunch of stuff off the left page, we expect it + * to get smaller. This happens in the internal insertion + * routine. + * + * 3) Insert a pointer to the right page in the parent. This + * may cause the parent to split. If it does, we need to + * repeat steps one and two for each split node in the tree. 
+ */ + + /* adjust active scans */ + rtadjscans(r, RTOP_SPLIT, bufblock, FirstOffsetNumber); + + tupDesc = r->rd_att; + ltup = (IndexTuple) index_formtuple(tupDesc, + (Datum *) &(v.spl_ldatum), isnull); + rtup = (IndexTuple) index_formtuple(tupDesc, + (Datum *) &(v.spl_rdatum), isnull); + pfree(isnull); + + /* set pointers to new child pages in the internal index tuples */ + ItemPointerSet(&(ltup->t_tid), lbknum, 1); + ItemPointerSet(&(rtup->t_tid), rbknum, 1); + + rtintinsert(r, stack, ltup, rtup, rtstate); + + pfree(ltup); + pfree(rtup); + + return (res); +} + +static void +rtintinsert(Relation r, + RTSTACK *stk, + IndexTuple ltup, + IndexTuple rtup, + RTSTATE *rtstate) +{ + IndexTuple old; + Buffer b; + Page p; + char *ldatum, *rdatum, *newdatum; + InsertIndexResult res; + + if (stk == (RTSTACK *) NULL) { + rtnewroot(r, ltup, rtup); + return; + } + + b = ReadBuffer(r, stk->rts_blk); + p = BufferGetPage(b); + old = (IndexTuple) PageGetItem(p, PageGetItemId(p, stk->rts_child)); + + /* + * This is a hack. Right now, we force rtree keys to be constant size. + * To fix this, need delete the old key and add both left and right + * for the two new pages. The insertion of left may force a split if + * the new left key is bigger than the old key. + */ + + if (IndexTupleSize(old) != IndexTupleSize(ltup)) + elog(WARN, "Variable-length rtree keys are not supported."); + + /* install pointer to left child */ + memmove(old, ltup,IndexTupleSize(ltup)); + + if (nospace(p, rtup)) { + newdatum = (((char *) ltup) + sizeof(IndexTupleData)); + rttighten(r, stk->rts_parent, newdatum, + (IndexTupleSize(ltup) - sizeof(IndexTupleData)), rtstate); + res = dosplit(r, b, stk->rts_parent, rtup, rtstate); + WriteBuffer(b); /* don't forget to release buffer! - 01/31/94 */ + pfree(res); + } else { + (void) PageAddItem(p, (Item) rtup, IndexTupleSize(rtup), + PageGetMaxOffsetNumber(p), LP_USED); + WriteBuffer(b); + ldatum = (((char *) ltup) + sizeof(IndexTupleData)); + rdatum = (((char *) rtup) + sizeof(IndexTupleData)); + newdatum = (char *) (*rtstate->unionFn)(ldatum, rdatum); + + rttighten(r, stk->rts_parent, newdatum, + (IndexTupleSize(rtup) - sizeof(IndexTupleData)), rtstate); + + pfree(newdatum); + } +} + +static void +rtnewroot(Relation r, IndexTuple lt, IndexTuple rt) +{ + Buffer b; + Page p; + + b = ReadBuffer(r, P_ROOT); + RTInitBuffer(b, 0); + p = BufferGetPage(b); + (void) PageAddItem(p, (Item) lt, IndexTupleSize(lt), + FirstOffsetNumber, LP_USED); + (void) PageAddItem(p, (Item) rt, IndexTupleSize(rt), + OffsetNumberNext(FirstOffsetNumber), LP_USED); + WriteBuffer(b); +} + +static void +picksplit(Relation r, + Page page, + SPLITVEC *v, + IndexTuple itup, + RTSTATE *rtstate) +{ + OffsetNumber maxoff; + OffsetNumber i, j; + IndexTuple item_1, item_2; + char *datum_alpha, *datum_beta; + char *datum_l, *datum_r; + char *union_d, *union_dl, *union_dr; + char *inter_d; + bool firsttime; + float size_alpha, size_beta, size_union, size_inter; + float size_waste, waste; + float size_l, size_r; + int nbytes; + OffsetNumber seed_1 = 0, seed_2 = 0; + OffsetNumber *left, *right; + + maxoff = PageGetMaxOffsetNumber(page); + + nbytes = (maxoff + 2) * sizeof(OffsetNumber); + v->spl_left = (OffsetNumber *) palloc(nbytes); + v->spl_right = (OffsetNumber *) palloc(nbytes); + + firsttime = true; + waste = 0.0; + + for (i = FirstOffsetNumber; i < maxoff; i = OffsetNumberNext(i)) { + item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, i)); + datum_alpha = ((char *) item_1) + sizeof(IndexTupleData); + for (j = 
OffsetNumberNext(i); j <= maxoff; j = OffsetNumberNext(j)) { + item_2 = (IndexTuple) PageGetItem(page, PageGetItemId(page, j)); + datum_beta = ((char *) item_2) + sizeof(IndexTupleData); + + /* compute the wasted space by unioning these guys */ + union_d = (char *)(rtstate->unionFn)(datum_alpha, datum_beta); + (rtstate->sizeFn)(union_d, &size_union); + inter_d = (char *)(rtstate->interFn)(datum_alpha, datum_beta); + (rtstate->sizeFn)(inter_d, &size_inter); + size_waste = size_union - size_inter; + + pfree(union_d); + + if (inter_d != (char *) NULL) + pfree(inter_d); + + /* + * are these a more promising split that what we've + * already seen? + */ + + if (size_waste > waste || firsttime) { + waste = size_waste; + seed_1 = i; + seed_2 = j; + firsttime = false; + } + } + } + + left = v->spl_left; + v->spl_nleft = 0; + right = v->spl_right; + v->spl_nright = 0; + + item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, seed_1)); + datum_alpha = ((char *) item_1) + sizeof(IndexTupleData); + datum_l = (char *)(*rtstate->unionFn)(datum_alpha, datum_alpha); + (*rtstate->sizeFn)(datum_l, &size_l); + item_2 = (IndexTuple) PageGetItem(page, PageGetItemId(page, seed_2)); + datum_beta = ((char *) item_2) + sizeof(IndexTupleData); + datum_r = (char *)(*rtstate->unionFn)(datum_beta, datum_beta); + (*rtstate->sizeFn)(datum_r, &size_r); + + /* + * Now split up the regions between the two seeds. An important + * property of this split algorithm is that the split vector v + * has the indices of items to be split in order in its left and + * right vectors. We exploit this property by doing a merge in + * the code that actually splits the page. + * + * For efficiency, we also place the new index tuple in this loop. + * This is handled at the very end, when we have placed all the + * existing tuples and i == maxoff + 1. + */ + + maxoff = OffsetNumberNext(maxoff); + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { + + /* + * If we've already decided where to place this item, just + * put it on the right list. Otherwise, we need to figure + * out which page needs the least enlargement in order to + * store the item. + */ + + if (i == seed_1) { + *left++ = i; + v->spl_nleft++; + continue; + } else if (i == seed_2) { + *right++ = i; + v->spl_nright++; + continue; + } + + /* okay, which page needs least enlargement? 
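+ * For instance, if adding this item would grow the left page's
+ * bounding box by 2.0 units of area but the right page's by 5.0,
+ * the item is assigned to the left page and datum_l/size_l are
+ * replaced with the enlarged union.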
*/ + if (i == maxoff) { + item_1 = itup; + } else { + item_1 = (IndexTuple) PageGetItem(page, PageGetItemId(page, i)); + } + + datum_alpha = ((char *) item_1) + sizeof(IndexTupleData); + union_dl = (char *)(*rtstate->unionFn)(datum_l, datum_alpha); + union_dr = (char *)(*rtstate->unionFn)(datum_r, datum_alpha); + (*rtstate->sizeFn)(union_dl, &size_alpha); + (*rtstate->sizeFn)(union_dr, &size_beta); + + /* pick which page to add it to */ + if (size_alpha - size_l < size_beta - size_r) { + pfree(datum_l); + pfree(union_dr); + datum_l = union_dl; + size_l = size_alpha; + *left++ = i; + v->spl_nleft++; + } else { + pfree(datum_r); + pfree(union_dl); + datum_r = union_dr; + size_r = size_alpha; + *right++ = i; + v->spl_nright++; + } + } + *left = *right = FirstOffsetNumber; /* sentinel value, see dosplit() */ + + v->spl_ldatum = datum_l; + v->spl_rdatum = datum_r; +} + +static void +RTInitBuffer(Buffer b, uint32 f) +{ + RTreePageOpaque opaque; + Page page; + Size pageSize; + + pageSize = BufferGetPageSize(b); + + page = BufferGetPage(b); + memset(page, 0, (int) pageSize); + PageInit(page, pageSize, sizeof(RTreePageOpaqueData)); + + opaque = (RTreePageOpaque) PageGetSpecialPointer(page); + opaque->flags = f; +} + +static OffsetNumber +choose(Relation r, Page p, IndexTuple it, RTSTATE *rtstate) +{ + OffsetNumber maxoff; + OffsetNumber i; + char *ud, *id; + char *datum; + float usize, dsize; + OffsetNumber which; + float which_grow; + + id = ((char *) it) + sizeof(IndexTupleData); + maxoff = PageGetMaxOffsetNumber(p); + which_grow = -1.0; + which = -1; + + for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i)) { + datum = (char *) PageGetItem(p, PageGetItemId(p, i)); + datum += sizeof(IndexTupleData); + (*rtstate->sizeFn)(datum, &dsize); + ud = (char *) (*rtstate->unionFn)(datum, id); + (*rtstate->sizeFn)(ud, &usize); + pfree(ud); + if (which_grow < 0 || usize - dsize < which_grow) { + which = i; + which_grow = usize - dsize; + if (which_grow == 0) + break; + } + } + + return (which); +} + +static int +nospace(Page p, IndexTuple it) +{ + return (PageGetFreeSpace(p) < IndexTupleSize(it)); +} + +void +freestack(RTSTACK *s) +{ + RTSTACK *p; + + while (s != (RTSTACK *) NULL) { + p = s->rts_parent; + pfree(s); + s = p; + } +} + +char * +rtdelete(Relation r, ItemPointer tid) +{ + BlockNumber blkno; + OffsetNumber offnum; + Buffer buf; + Page page; + + /* must write-lock on delete */ + RelationSetLockForWrite(r); + + blkno = ItemPointerGetBlockNumber(tid); + offnum = ItemPointerGetOffsetNumber(tid); + + /* adjust any scans that will be affected by this deletion */ + rtadjscans(r, RTOP_DEL, blkno, offnum); + + /* delete the index tuple */ + buf = ReadBuffer(r, blkno); + page = BufferGetPage(buf); + + PageIndexTupleDelete(page, offnum); + + WriteBuffer(buf); + + /* XXX -- two-phase locking, don't release the write lock */ + return ((char *) NULL); +} + +static void initRtstate(RTSTATE *rtstate, Relation index) +{ + RegProcedure union_proc, size_proc, inter_proc; + func_ptr user_fn; + int pronargs; + + union_proc = index_getprocid(index, 1, RT_UNION_PROC); + size_proc = index_getprocid(index, 1, RT_SIZE_PROC); + inter_proc = index_getprocid(index, 1, RT_INTER_PROC); + fmgr_info(union_proc, &user_fn, &pronargs); + rtstate->unionFn = user_fn; + fmgr_info(size_proc, &user_fn, &pronargs); + rtstate->sizeFn = user_fn; + fmgr_info(inter_proc, &user_fn, &pronargs); + rtstate->interFn = user_fn; + return; +} + +#define RTDEBUG +#ifdef RTDEBUG +#include "utils/geo-decls.h" + +void +_rtdump(Relation r) +{ 
+ Buffer buf; + Page page; + OffsetNumber offnum, maxoff; + BlockNumber blkno; + BlockNumber nblocks; + RTreePageOpaque po; + IndexTuple itup; + BlockNumber itblkno; + OffsetNumber itoffno; + char *datum; + char *itkey; + + nblocks = RelationGetNumberOfBlocks(r); + for (blkno = 0; blkno < nblocks; blkno++) { + buf = ReadBuffer(r, blkno); + page = BufferGetPage(buf); + po = (RTreePageOpaque) PageGetSpecialPointer(page); + maxoff = PageGetMaxOffsetNumber(page); + printf("Page %d maxoff %d <%s>\n", blkno, maxoff, + (po->flags & F_LEAF ? "LEAF" : "INTERNAL")); + + if (PageIsEmpty(page)) { + ReleaseBuffer(buf); + continue; + } + + for (offnum = FirstOffsetNumber; + offnum <= maxoff; + offnum = OffsetNumberNext(offnum)) { + itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum)); + itblkno = ItemPointerGetBlockNumber(&(itup->t_tid)); + itoffno = ItemPointerGetOffsetNumber(&(itup->t_tid)); + datum = ((char *) itup); + datum += sizeof(IndexTupleData); + itkey = (char *) box_out((BOX *) datum); + printf("\t[%d] size %d heap <%d,%d> key:%s\n", + offnum, IndexTupleSize(itup), itblkno, itoffno, itkey); + pfree(itkey); + } + + ReleaseBuffer(buf); + } +} +#endif /* defined RTDEBUG */ + diff --git a/src/backend/access/rtree/rtscan.c b/src/backend/access/rtree/rtscan.c new file mode 100644 index 00000000000..aa68f0db70b --- /dev/null +++ b/src/backend/access/rtree/rtscan.c @@ -0,0 +1,392 @@ +/*------------------------------------------------------------------------- + * + * rtscan.c-- + * routines to manage scans on index relations + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "c.h" +#include "postgres.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/rel.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/rtree.h" +#include "access/rtstrat.h" + +/* routines defined and used here */ +static void rtregscan(IndexScanDesc s); +static void rtdropscan(IndexScanDesc s); +static void rtadjone(IndexScanDesc s, int op, BlockNumber blkno, + OffsetNumber offnum); +static void adjuststack(RTSTACK *stk, BlockNumber blkno, + OffsetNumber offnum); +static void adjustiptr(IndexScanDesc s, ItemPointer iptr, + int op, BlockNumber blkno, OffsetNumber offnum); + +/* + * Whenever we start an rtree scan in a backend, we register it in private + * space. Then if the rtree index gets updated, we check all registered + * scans and adjust them if the tuple they point at got moved by the + * update. We only need to do this in private space, because when we update + * an rtree we have a write lock on the tree, so no other process can have + * any locks at all on it. A single transaction can have write and read + * locks on the same object, so that's why we need to handle this case. 
+ */ + +typedef struct RTScanListData { + IndexScanDesc rtsl_scan; + struct RTScanListData *rtsl_next; +} RTScanListData; + +typedef RTScanListData *RTScanList; + +/* pointer to list of local scans on rtrees */ +static RTScanList RTScans = (RTScanList) NULL; + +IndexScanDesc +rtbeginscan(Relation r, + bool fromEnd, + uint16 nkeys, + ScanKey key) +{ + IndexScanDesc s; + + RelationSetLockForRead(r); + s = RelationGetIndexScan(r, fromEnd, nkeys, key); + rtregscan(s); + + return (s); +} + +void +rtrescan(IndexScanDesc s, bool fromEnd, ScanKey key) +{ + RTreeScanOpaque p; + RegProcedure internal_proc; + int i; + + if (!IndexScanIsValid(s)) { + elog(WARN, "rtrescan: invalid scan."); + return; + } + + /* + * Clear all the pointers. + */ + + ItemPointerSetInvalid(&s->previousItemData); + ItemPointerSetInvalid(&s->currentItemData); + ItemPointerSetInvalid(&s->nextItemData); + ItemPointerSetInvalid(&s->previousMarkData); + ItemPointerSetInvalid(&s->currentMarkData); + ItemPointerSetInvalid(&s->nextMarkData); + + /* + * Set flags. + */ + if (RelationGetNumberOfBlocks(s->relation) == 0) { + s->flags = ScanUnmarked; + } else if (fromEnd) { + s->flags = ScanUnmarked | ScanUncheckedPrevious; + } else { + s->flags = ScanUnmarked | ScanUncheckedNext; + } + + s->scanFromEnd = fromEnd; + + if (s->numberOfKeys > 0) { + memmove(s->keyData, + key, + s->numberOfKeys * sizeof(ScanKeyData)); + } + + p = (RTreeScanOpaque) s->opaque; + if (p != (RTreeScanOpaque) NULL) { + freestack(p->s_stack); + freestack(p->s_markstk); + p->s_stack = p->s_markstk = (RTSTACK *) NULL; + p->s_flags = 0x0; + } else { + /* initialize opaque data */ + p = (RTreeScanOpaque) palloc(sizeof(RTreeScanOpaqueData)); + p->s_internalKey = + (ScanKey) palloc(sizeof(ScanKeyData) * s->numberOfKeys); + p->s_stack = p->s_markstk = (RTSTACK *) NULL; + p->s_internalNKey = s->numberOfKeys; + p->s_flags = 0x0; + for (i = 0; i < s->numberOfKeys; i++) + p->s_internalKey[i].sk_argument = s->keyData[i].sk_argument; + s->opaque = p; + if (s->numberOfKeys > 0) { + + /* + * Scans on internal pages use different operators than they + * do on leaf pages. For example, if the user wants all boxes + * that exactly match (x1,y1,x2,y2), then on internal pages + * we need to find all boxes that contain (x1,y1,x2,y2). 
+ */ + + for (i = 0; i < s->numberOfKeys; i++) { + internal_proc = RTMapOperator(s->relation, + s->keyData[i].sk_attno, + s->keyData[i].sk_procedure); + ScanKeyEntryInitialize(&(p->s_internalKey[i]), + s->keyData[i].sk_flags, + s->keyData[i].sk_attno, + internal_proc, + s->keyData[i].sk_argument); + } + } + } +} + +void +rtmarkpos(IndexScanDesc s) +{ + RTreeScanOpaque p; + RTSTACK *o, *n, *tmp; + + s->currentMarkData = s->currentItemData; + p = (RTreeScanOpaque) s->opaque; + if (p->s_flags & RTS_CURBEFORE) + p->s_flags |= RTS_MRKBEFORE; + else + p->s_flags &= ~RTS_MRKBEFORE; + + o = (RTSTACK *) NULL; + n = p->s_stack; + + /* copy the parent stack from the current item data */ + while (n != (RTSTACK *) NULL) { + tmp = (RTSTACK *) palloc(sizeof(RTSTACK)); + tmp->rts_child = n->rts_child; + tmp->rts_blk = n->rts_blk; + tmp->rts_parent = o; + o = tmp; + n = n->rts_parent; + } + + freestack(p->s_markstk); + p->s_markstk = o; +} + +void +rtrestrpos(IndexScanDesc s) +{ + RTreeScanOpaque p; + RTSTACK *o, *n, *tmp; + + s->currentItemData = s->currentMarkData; + p = (RTreeScanOpaque) s->opaque; + if (p->s_flags & RTS_MRKBEFORE) + p->s_flags |= RTS_CURBEFORE; + else + p->s_flags &= ~RTS_CURBEFORE; + + o = (RTSTACK *) NULL; + n = p->s_markstk; + + /* copy the parent stack from the current item data */ + while (n != (RTSTACK *) NULL) { + tmp = (RTSTACK *) palloc(sizeof(RTSTACK)); + tmp->rts_child = n->rts_child; + tmp->rts_blk = n->rts_blk; + tmp->rts_parent = o; + o = tmp; + n = n->rts_parent; + } + + freestack(p->s_stack); + p->s_stack = o; +} + +void +rtendscan(IndexScanDesc s) +{ + RTreeScanOpaque p; + + p = (RTreeScanOpaque) s->opaque; + + if (p != (RTreeScanOpaque) NULL) { + freestack(p->s_stack); + freestack(p->s_markstk); + } + + rtdropscan(s); + /* XXX don't unset read lock -- two-phase locking */ +} + +static void +rtregscan(IndexScanDesc s) +{ + RTScanList l; + + l = (RTScanList) palloc(sizeof(RTScanListData)); + l->rtsl_scan = s; + l->rtsl_next = RTScans; + RTScans = l; +} + +static void +rtdropscan(IndexScanDesc s) +{ + RTScanList l; + RTScanList prev; + + prev = (RTScanList) NULL; + + for (l = RTScans; + l != (RTScanList) NULL && l->rtsl_scan != s; + l = l->rtsl_next) { + prev = l; + } + + if (l == (RTScanList) NULL) + elog(WARN, "rtree scan list corrupted -- cannot find 0x%lx", s); + + if (prev == (RTScanList) NULL) + RTScans = l->rtsl_next; + else + prev->rtsl_next = l->rtsl_next; + + pfree(l); +} + +void +rtadjscans(Relation r, int op, BlockNumber blkno, OffsetNumber offnum) +{ + RTScanList l; + Oid relid; + + relid = r->rd_id; + for (l = RTScans; l != (RTScanList) NULL; l = l->rtsl_next) { + if (l->rtsl_scan->relation->rd_id == relid) + rtadjone(l->rtsl_scan, op, blkno, offnum); + } +} + +/* + * rtadjone() -- adjust one scan for update. + * + * By here, the scan passed in is on a modified relation. Op tells + * us what the modification is, and blkno and offind tell us what + * block and offset index were affected. This routine checks the + * current and marked positions, and the current and marked stacks, + * to see if any stored location needs to be changed because of the + * update. If so, we make the change here. 
+ */ +static void +rtadjone(IndexScanDesc s, + int op, + BlockNumber blkno, + OffsetNumber offnum) +{ + RTreeScanOpaque so; + + adjustiptr(s, &(s->currentItemData), op, blkno, offnum); + adjustiptr(s, &(s->currentMarkData), op, blkno, offnum); + + so = (RTreeScanOpaque) s->opaque; + + if (op == RTOP_SPLIT) { + adjuststack(so->s_stack, blkno, offnum); + adjuststack(so->s_markstk, blkno, offnum); + } +} + +/* + * adjustiptr() -- adjust current and marked item pointers in the scan + * + * Depending on the type of update and the place it happened, we + * need to do nothing, to back up one record, or to start over on + * the same page. + */ +static void +adjustiptr(IndexScanDesc s, + ItemPointer iptr, + int op, + BlockNumber blkno, + OffsetNumber offnum) +{ + OffsetNumber curoff; + RTreeScanOpaque so; + + if (ItemPointerIsValid(iptr)) { + if (ItemPointerGetBlockNumber(iptr) == blkno) { + curoff = ItemPointerGetOffsetNumber(iptr); + so = (RTreeScanOpaque) s->opaque; + + switch (op) { + case RTOP_DEL: + /* back up one if we need to */ + if (curoff >= offnum) { + + if (curoff > FirstOffsetNumber) { + /* just adjust the item pointer */ + ItemPointerSet(iptr, blkno, OffsetNumberPrev(curoff)); + } else { + /* remember that we're before the current tuple */ + ItemPointerSet(iptr, blkno, FirstOffsetNumber); + if (iptr == &(s->currentItemData)) + so->s_flags |= RTS_CURBEFORE; + else + so->s_flags |= RTS_MRKBEFORE; + } + } + break; + + case RTOP_SPLIT: + /* back to start of page on split */ + ItemPointerSet(iptr, blkno, FirstOffsetNumber); + if (iptr == &(s->currentItemData)) + so->s_flags &= ~RTS_CURBEFORE; + else + so->s_flags &= ~RTS_MRKBEFORE; + break; + + default: + elog(WARN, "Bad operation in rtree scan adjust: %d", op); + } + } + } +} + +/* + * adjuststack() -- adjust the supplied stack for a split on a page in + * the index we're scanning. + * + * If a page on our parent stack has split, we need to back up to the + * beginning of the page and rescan it. The reason for this is that + * the split algorithm for rtrees doesn't order tuples in any useful + * way on a single page. This means on that a split, we may wind up + * looking at some heap tuples more than once. This is handled in the + * access method update code for heaps; if we've modified the tuple we + * are looking at already in this transaction, we ignore the update + * request. + */ +/*ARGSUSED*/ +static void +adjuststack(RTSTACK *stk, + BlockNumber blkno, + OffsetNumber offnum) +{ + while (stk != (RTSTACK *) NULL) { + if (stk->rts_blk == blkno) + stk->rts_child = FirstOffsetNumber; + + stk = stk->rts_parent; + } +} diff --git a/src/backend/access/rtree/rtstrat.c b/src/backend/access/rtree/rtstrat.c new file mode 100644 index 00000000000..c5d934a22a2 --- /dev/null +++ b/src/backend/access/rtree/rtstrat.c @@ -0,0 +1,239 @@ +/*------------------------------------------------------------------------- + * + * rtstrat.c-- + * strategy map data for rtrees. 
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtstrat.c,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "c.h" + +#include "utils/rel.h" + +#include "storage/bufmgr.h" +#include "storage/bufpage.h" + +#include "access/istrat.h" +#include "access/rtree.h" + +/* + * Note: negate, commute, and negatecommute all assume that operators are + * ordered as follows in the strategy map: + * + * left, left-or-overlap, overlap, right-or-overlap, right, same, + * contains, contained-by + * + * The negate, commute, and negatecommute arrays are used by the planner + * to plan indexed scans over data that appears in the qualification in + * a boolean negation, or whose operands appear in the wrong order. For + * example, if the operator "<%" means "contains", and the user says + * + * where not rel.box <% "(10,10,20,20)"::box + * + * the planner can plan an index scan by noting that rtree indices have + * an operator in their operator class for negating <%. + * + * Similarly, if the user says something like + * + * where "(10,10,20,20)"::box <% rel.box + * + * the planner can see that the rtree index on rel.box has an operator in + * its opclass for commuting <%, and plan the scan using that operator. + * This added complexity in the access methods makes the planner a lot easier + * to write. + */ + +/* if a op b, what operator tells us if (not a op b)? */ +static StrategyNumber RTNegate[RTNStrategies] = { + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy + }; + +/* if a op_1 b, what is the operator op_2 such that b op_2 a? */ +static StrategyNumber RTCommute[RTNStrategies] = { + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy + }; + +/* if a op_1 b, what is the operator op_2 such that (b !op_2 a)? */ +static StrategyNumber RTNegateCommute[RTNStrategies] = { + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy, + InvalidStrategy + }; + +/* + * Now do the TermData arrays. These exist in case the user doesn't give + * us a full set of operators for a particular operator class. The idea + * is that by making multiple comparisons using any one of the supplied + * operators, we can decide whether two n-dimensional polygons are equal. + * For example, if a contains b and b contains a, we may conclude that + * a and b are equal. + * + * The presence of the TermData arrays in all this is a historical accident. + * Early in the development of the POSTGRES access methods, it was believed + * that writing functions was harder than writing arrays. This is wrong; + * TermData is hard to understand and hard to get right. In general, when + * someone populates a new operator class, they populate it completely. If + * Mike Hirohama had forced Cimarron Taylor to populate the strategy map + * for btree int2_ops completely in 1988, you wouldn't have to deal with + * all this now. Too bad for you. + * + * Since you can't necessarily do this in all cases (for example, you can't + * do it given only "intersects" or "disjoint"), TermData arrays for some + * operators don't appear below.
+ * + * Note that if you DO supply all the operators required in a given opclass + * by inserting them into the pg_opclass system catalog, you can get away + * without doing all this TermData stuff. Since the rtree code is intended + * to be a reference for access method implementors, I'm doing TermData + * correctly here. + * + * Note on style: these are all actually of type StrategyTermData, but + * since those have variable-length data at the end of the struct we can't + * properly initialize them if we declare them to be what they are. + */ + +/* if you only have "contained-by", how do you determine equality? */ +static uint16 RTContainedByTermData[] = { + 2, /* make two comparisons */ + RTContainedByStrategyNumber, /* use "a contained-by b" */ + 0x0, /* without any magic */ + RTContainedByStrategyNumber, /* then use contained-by, */ + SK_COMMUTE /* swapping a and b */ + }; + +/* if you only have "contains", how do you determine equality? */ +static uint16 RTContainsTermData[] = { + 2, /* make two comparisons */ + RTContainsStrategyNumber, /* use "a contains b" */ + 0x0, /* without any magic */ + RTContainsStrategyNumber, /* then use contains again, */ + SK_COMMUTE /* swapping a and b */ + }; + +/* now put all that together in one place for the planner */ +static StrategyTerm RTEqualExpressionData[] = { + (StrategyTerm) RTContainedByTermData, + (StrategyTerm) RTContainsTermData, + NULL + }; + +/* + * If you were sufficiently attentive to detail, you would go through + * the ExpressionData pain above for every one of the seven strategies + * we defined. I am not. Now we declare the StrategyEvaluationData + * structure that gets shipped around to help the planner and the access + * method decide what sort of scan it should do, based on (a) what the + * user asked for, (b) what operators are defined for a particular opclass, + * and (c) the reams of information we supplied above. + * + * The idea of all of this initialized data is to make life easier on the + * user when he defines a new operator class to use this access method. + * By filling in all the data, we let him get away with leaving holes in his + * operator class, and still let him use the index. The added complexity + * in the access methods just isn't worth the trouble, though. + */ + +static StrategyEvaluationData RTEvaluationData = { + RTNStrategies, /* # of strategies */ + (StrategyTransformMap) RTNegate, /* how to do (not qual) */ + (StrategyTransformMap) RTCommute, /* how to swap operands */ + (StrategyTransformMap) RTNegateCommute, /* how to do both */ + { + NULL, /* express left */ + NULL, /* express overleft */ + NULL, /* express over */ + NULL, /* express overright */ + NULL, /* express right */ + (StrategyExpression) RTEqualExpressionData, /* express same */ + NULL, /* express contains */ + NULL, /* express contained-by */ + NULL, + NULL, + NULL + } +}; + +/* + * Okay, now something peculiar to rtrees that doesn't apply to most other + * indexing structures: When we're searching a tree for a given value, we + * can't do the same sorts of comparisons on internal node entries as we + * do at leaves. The reason is that if we're looking for (say) all boxes + * that are the same as (0,0,10,10), then we need to find all leaf pages + * that overlap that region. So internally we search for overlap, and at + * the leaf we search for equality. + * + * This array maps leaf search operators to the internal search operators. 
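+ * For example, a "same" qualification at the leaf level is carried out on
+ * internal pages with "contains", and a leaf-level "contained-by" search
+ * is carried out internally with "overlap".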
+ * We assume the normal ordering on operators: + * + * left, left-or-overlap, overlap, right-or-overlap, right, same, + * contains, contained-by + */ +static StrategyNumber RTOperMap[RTNStrategies] = { + RTOverLeftStrategyNumber, + RTOverLeftStrategyNumber, + RTOverlapStrategyNumber, + RTOverRightStrategyNumber, + RTOverRightStrategyNumber, + RTContainsStrategyNumber, + RTContainsStrategyNumber, + RTOverlapStrategyNumber + }; + +StrategyNumber +RelationGetRTStrategy(Relation r, + AttrNumber attnum, + RegProcedure proc) +{ + return (RelationGetStrategy(r, attnum, &RTEvaluationData, proc)); +} + +bool +RelationInvokeRTStrategy(Relation r, + AttrNumber attnum, + StrategyNumber s, + Datum left, + Datum right) +{ + return (RelationInvokeStrategy(r, &RTEvaluationData, attnum, s, + left, right)); +} + +RegProcedure +RTMapOperator(Relation r, + AttrNumber attnum, + RegProcedure proc) +{ + StrategyNumber procstrat; + StrategyMap strategyMap; + + procstrat = RelationGetRTStrategy(r, attnum, proc); + strategyMap = IndexStrategyGetStrategyMap(RelationGetIndexStrategy(r), + RTNStrategies, + attnum); + + return (strategyMap->entry[RTOperMap[procstrat - 1] - 1].sk_procedure); +} diff --git a/src/backend/access/rtscan.h b/src/backend/access/rtscan.h new file mode 100644 index 00000000000..a928303f3f3 --- /dev/null +++ b/src/backend/access/rtscan.h @@ -0,0 +1,17 @@ +/*------------------------------------------------------------------------- + * + * rtscan.h-- + * routines defined in access/rtree/rtscan.c + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: rtscan.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef RTSCAN_H + +void rtadjscans(Relation r, int op, BlockNumber blkno, OffsetNumber offnum); + +#endif /* RTSCAN_H */ diff --git a/src/backend/access/rtstrat.h b/src/backend/access/rtstrat.h new file mode 100644 index 00000000000..5b439e7b338 --- /dev/null +++ b/src/backend/access/rtstrat.h @@ -0,0 +1,18 @@ +/*------------------------------------------------------------------------- + * + * rtstrat.h-- + * routines defined in access/rtree/rtstrat.c + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: rtstrat.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef RTSTRAT_H + +extern RegProcedure RTMapOperator(Relation r, AttrNumber attnum, + RegProcedure proc); + +#endif /* RTSTRAT_H */ diff --git a/src/backend/access/sdir.h b/src/backend/access/sdir.h new file mode 100644 index 00000000000..030007d39c9 --- /dev/null +++ b/src/backend/access/sdir.h @@ -0,0 +1,57 @@ +/*------------------------------------------------------------------------- + * + * sdir.h-- + * POSTGRES scan direction definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: sdir.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef SDIR_H +#define SDIR_H + +#include "c.h" + +/* + * ScanDirection was an int8 for no apparent reason. I kept the original + * values because I'm not sure if I'll break anything otherwise. -ay 2/95 + */ +typedef enum ScanDirection { + BackwardScanDirection = -1, + NoMovementScanDirection = 0, + ForwardScanDirection = 1 +} ScanDirection; + +/* + * ScanDirectionIsValid -- + * True iff scan direciton is valid. 
+ */ +#define ScanDirectionIsValid(direction) \ + ((bool) (BackwardScanDirection <= direction && \ + direction <= ForwardScanDirection)) + +/* + * ScanDirectionIsBackward -- + * True iff scan direciton is backward. + */ +#define ScanDirectionIsBackward(direction) \ + ((bool) (direction == BackwardScanDirection)) + +/* + * ScanDirectionIsNoMovement -- + * True iff scan direciton indicates no movement. + */ +#define ScanDirectionIsNoMovement(direction) \ + ((bool) (direction == NoMovementScanDirection)) + +/* + * ScanDirectionIsForward -- + * True iff scan direciton is forward. + */ +#define ScanDirectionIsForward(direction) \ + ((bool) (direction == ForwardScanDirection)) + +#endif /* SDIR_H */ diff --git a/src/backend/access/skey.h b/src/backend/access/skey.h new file mode 100644 index 00000000000..3cadf348f42 --- /dev/null +++ b/src/backend/access/skey.h @@ -0,0 +1,52 @@ +/*------------------------------------------------------------------------- + * + * skey.h-- + * POSTGRES scan key definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: skey.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + * + * Note: + * Needs more accessor/assignment routines. + *------------------------------------------------------------------------- + */ +#ifndef SKEY_H +#define SKEY_H + +#include "postgres.h" +#include "access/attnum.h" + + +typedef struct ScanKeyData { + bits16 sk_flags; /* flags */ + AttrNumber sk_attno; /* domain number */ + RegProcedure sk_procedure; /* procedure OID */ + func_ptr sk_func; + int32 sk_nargs; + Datum sk_argument; /* data to compare */ +} ScanKeyData; + +typedef ScanKeyData *ScanKey; + + +#define SK_ISNULL 0x1 +#define SK_UNARY 0x2 +#define SK_NEGATE 0x4 +#define SK_COMMUTE 0x8 + +#define ScanUnmarked 0x01 +#define ScanUncheckedPrevious 0x02 +#define ScanUncheckedNext 0x04 + + +/* + * prototypes for functions in access/common/scankey.c + */ +extern void ScanKeyEntrySetIllegal(ScanKey entry); +extern void ScanKeyEntryInitialize(ScanKey entry, bits16 flags, + AttrNumber attributeNumber, RegProcedure procedure, Datum argument); + +#endif /* SKEY_H */ diff --git a/src/backend/access/strat.h b/src/backend/access/strat.h new file mode 100644 index 00000000000..4ddb2190d88 --- /dev/null +++ b/src/backend/access/strat.h @@ -0,0 +1,86 @@ +/*------------------------------------------------------------------------- + * + * strat.h-- + * index strategy type definitions + * (separated out from original istrat.h to avoid circular refs) + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: strat.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef STRAT_H +#define STRAT_H + +#include "postgres.h" +#include "access/attnum.h" +#include "access/skey.h" + +typedef uint16 StrategyNumber; + +#define InvalidStrategy 0 + +typedef struct StrategyTransformMapData { + StrategyNumber strategy[1]; /* VARIABLE LENGTH ARRAY */ +} StrategyTransformMapData; /* VARIABLE LENGTH STRUCTURE */ + +typedef StrategyTransformMapData *StrategyTransformMap; + +typedef struct StrategyOperatorData { + StrategyNumber strategy; + bits16 flags; /* scan qualification flags h/skey.h */ +} StrategyOperatorData; + +typedef StrategyOperatorData *StrategyOperator; + +typedef struct StrategyTermData { /* conjunctive term */ + uint16 degree; + StrategyOperatorData operatorData[1]; /* VARIABLE LENGTH */ +} StrategyTermData; /* VARIABLE LENGTH STRUCTURE */ + +typedef 
StrategyTermData *StrategyTerm; + +typedef struct StrategyExpressionData { /* disjunctive normal form */ + StrategyTerm term[1]; /* VARIABLE LENGTH ARRAY */ +} StrategyExpressionData; /* VARIABLE LENGTH STRUCTURE */ + +typedef StrategyExpressionData *StrategyExpression; + +typedef struct StrategyEvaluationData { + StrategyNumber maxStrategy; + StrategyTransformMap negateTransform; + StrategyTransformMap commuteTransform; + StrategyTransformMap negateCommuteTransform; + StrategyExpression expression[12]; /* XXX VARIABLE LENGTH */ +} StrategyEvaluationData; /* VARIABLE LENGTH STRUCTURE */ + +typedef StrategyEvaluationData *StrategyEvaluation; + +/* + * StrategyTransformMapIsValid -- + * Returns true iff strategy transformation map is valid. + */ +#define StrategyTransformMapIsValid(transform) PointerIsValid(transform) + + +#ifndef CorrectStrategies /* XXX this should be removable */ +#define AMStrategies(foo) 12 +#else /* !defined(CorrectStrategies) */ +#define AMStrategies(foo) (foo) +#endif /* !defined(CorrectStrategies) */ + +typedef struct StrategyMapData { + ScanKeyData entry[1]; /* VARIABLE LENGTH ARRAY */ +} StrategyMapData; /* VARIABLE LENGTH STRUCTURE */ + +typedef StrategyMapData *StrategyMap; + +typedef struct IndexStrategyData { + StrategyMapData strategyMapData[1]; /* VARIABLE LENGTH ARRAY */ +} IndexStrategyData; /* VARIABLE LENGTH STRUCTURE */ + +typedef IndexStrategyData *IndexStrategy; + +#endif /*STRAT_H */ diff --git a/src/backend/access/transam.h b/src/backend/access/transam.h new file mode 100644 index 00000000000..0f5a9724dc0 --- /dev/null +++ b/src/backend/access/transam.h @@ -0,0 +1,213 @@ +/*------------------------------------------------------------------------- + * + * transam.h-- + * postgres transaction access method support code header + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: transam.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + * NOTES + * Transaction System Version 101 now support proper oid + * generation and recording in the variable relation. + * + *------------------------------------------------------------------------- + */ +#ifndef TRANSAM_H +#define TRANSAM_H + +/* ---------------- + * transaction system version id + * + * this is stored on the first page of the log, time and variable + * relations on the first 4 bytes. This is so that if we improve + * the format of the transaction log after postgres version 2, then + * people won't have to rebuild their databases. + * + * TRANS_SYSTEM_VERSION 100 means major version 1 minor version 0. + * Two databases with the same major version should be compatible, + * even if their minor versions differ. + * ---------------- + */ +#define TRANS_SYSTEM_VERSION 101 + +/* ---------------- + * transaction id status values + * + * someday we will use "11" = 3 = XID_INVALID to mean the + * starting of run-length encoded log data. 
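+ *
+ *	for illustration: each status occupies two bits, so one log page of
+ *	BLCKSZ bytes records BLCKSZ * 4 transactions (e.g. 32768 statuses
+ *	per page with 8192-byte blocks); this is the TP_NumXidStatusPerBlock
+ *	constant defined below.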
+ * ----------------
+ */
+#define XID_COMMIT		2	/* transaction committed */
+#define XID_ABORT		1	/* transaction aborted */
+#define XID_INPROGRESS		0	/* transaction in progress */
+#define XID_INVALID		3	/* other */
+
+typedef unsigned char XidStatus;	/* (2 bits) */
+
+/* ----------------
+ *	BitIndexOf computes the index of the Nth xid on a given block
+ * ----------------
+ */
+#define BitIndexOf(N)	((N) * 2)
+
+/* ----------------
+ *	transaction page definitions
+ * ----------------
+ */
+#define TP_DataSize		BLCKSZ
+#define TP_NumXidStatusPerBlock	(TP_DataSize * 4)
+#define TP_NumTimePerBlock	(TP_DataSize / 4)
+
+/* ----------------
+ *	LogRelationContents structure
+ *
+ *	This structure describes the storage of the data in the
+ *	first 128 bytes of the log relation.  This storage is never
+ *	used for transaction status because transaction id's begin
+ *	their numbering at 512.
+ *
+ *	The first 4 bytes of this relation store the version
+ *	number of the transaction system.
+ * ----------------
+ */
+typedef struct LogRelationContentsData {
+    int		TransSystemVersion;
+} LogRelationContentsData;
+
+typedef LogRelationContentsData *LogRelationContents;
+
+/* ----------------
+ *	TimeRelationContents structure
+ *
+ *	This structure describes the storage of the data in the
+ *	first 2048 bytes of the time relation.  This storage is never
+ *	used for transaction commit times because transaction id's begin
+ *	their numbering at 512.
+ *
+ *	The first 4 bytes of this relation store the version
+ *	number of the transaction system.
+ * ----------------
+ */
+typedef struct TimeRelationContentsData {
+    int		TransSystemVersion;
+} TimeRelationContentsData;
+
+typedef TimeRelationContentsData *TimeRelationContents;
+
+/* ----------------
+ *	VariableRelationContents structure
+ *
+ *	The variable relation is a special "relation" which
+ *	is used to store various system "variables" persistently.
+ *	Unlike other relations in the system, this relation
+ *	is updated in place whenever the variables change.
+ *
+ *	The first 4 bytes of this relation store the version
+ *	number of the transaction system.
+ *
+ *	Currently, the relation has only one page and the next
+ *	available xid, the last committed xid and the next
+ *	available oid are stored there.
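+ *
+ *	As a rough sketch of that single page (assuming the usual 4-byte
+ *	int, TransactionId and Oid):
+ *
+ *	    bytes  0- 3		TransSystemVersion
+ *	    bytes  4- 7		nextXidData
+ *	    bytes  8-11		lastXidData
+ *	    bytes 12-15		nextOid
+ *
+ *	(see the VariableRelationContentsData struct just below.)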
+ * ---------------- + */ +typedef struct VariableRelationContentsData { + int TransSystemVersion; + TransactionId nextXidData; + TransactionId lastXidData; + Oid nextOid; +} VariableRelationContentsData; + +typedef VariableRelationContentsData *VariableRelationContents; + +/* ---------------- + * extern declarations + * ---------------- + */ + +/* + * prototypes for functions in transam/transam.c + */ +extern int RecoveryCheckingEnabled(); +extern void SetRecoveryCheckingEnabled(bool state); +extern bool TransactionLogTest(TransactionId transactionId, XidStatus status); +extern void TransactionLogUpdate(TransactionId transactionId, + XidStatus status); +extern AbsoluteTime TransactionIdGetCommitTime(TransactionId transactionId); +extern void TransRecover(Relation logRelation); +extern void InitializeTransactionLog(); +extern bool TransactionIdDidCommit(TransactionId transactionId); +extern bool TransactionIdDidAbort(TransactionId transactionId); +extern bool TransactionIdIsInProgress(TransactionId transactionId); +extern void TransactionIdCommit(TransactionId transactionId); +extern void TransactionIdAbort(TransactionId transactionId); +extern void TransactionIdSetInProgress(TransactionId transactionId); + +/* in transam/transsup.c */ +extern void AmiTransactionOverride(bool flag); +extern void TransComputeBlockNumber(Relation relation, + TransactionId transactionId, BlockNumber *blockNumberOutP); +extern XidStatus TransBlockGetLastTransactionIdStatus(Block tblock, + TransactionId baseXid, TransactionId *returnXidP); +extern XidStatus TransBlockGetXidStatus(Block tblock, + TransactionId transactionId); +extern void TransBlockSetXidStatus(Block tblock, + TransactionId transactionId, XidStatus xstatus); +extern AbsoluteTime TransBlockGetCommitTime(Block tblock, + TransactionId transactionId); +extern void TransBlockSetCommitTime(Block tblock, + TransactionId transactionId, AbsoluteTime commitTime); +extern XidStatus TransBlockNumberGetXidStatus(Relation relation, + BlockNumber blockNumber, TransactionId xid, bool *failP); +extern void TransBlockNumberSetXidStatus(Relation relation, + BlockNumber blockNumber, TransactionId xid, XidStatus xstatus, + bool *failP); +extern AbsoluteTime TransBlockNumberGetCommitTime(Relation relation, + BlockNumber blockNumber, TransactionId xid, bool *failP); +extern void TransBlockNumberSetCommitTime(Relation relation, + BlockNumber blockNumber, TransactionId xid, AbsoluteTime xtime, + bool *failP); +extern void TransGetLastRecordedTransaction(Relation relation, + TransactionId xid, bool *failP); + +/* in transam/varsup.c */ +extern void VariableRelationGetNextXid(TransactionId *xidP); +extern void VariableRelationGetLastXid(TransactionId *xidP); +extern void VariableRelationPutNextXid(TransactionId xid); +extern void VariableRelationPutLastXid(TransactionId xid); +extern void VariableRelationGetNextOid(Oid *oid_return); +extern void VariableRelationPutNextOid(Oid *oidP); +extern void GetNewTransactionId(TransactionId *xid); +extern void UpdateLastCommittedXid(TransactionId xid); +extern void GetNewObjectIdBlock(Oid *oid_return, int oid_block_size); +extern void GetNewObjectId(Oid *oid_return); + +/* ---------------- + * global variable extern declarations + * ---------------- + */ + +/* in transam.c */ +extern Relation LogRelation; +extern Relation TimeRelation; +extern Relation VariableRelation; + +extern TransactionId cachedGetCommitTimeXid; +extern AbsoluteTime cachedGetCommitTime; +extern TransactionId cachedTestXid; +extern XidStatus cachedTestXidStatus; 
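+
+/*
+ * Illustrative only (not part of this header): a typical visibility
+ * check in a caller combines the tests declared above roughly as
+ *
+ *	if (TransactionIdDidCommit(xmin))
+ *	    ...the inserting transaction is permanent...
+ *	else if (TransactionIdDidAbort(xmin))
+ *	    ...the tuple can be ignored...
+ *	else
+ *	    ...still open; consult TransactionIdIsInProgress()...
+ *
+ * where "xmin" stands for the inserting transaction id of some tuple.
+ */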
+ +extern TransactionId NullTransactionId; +extern TransactionId AmiTransactionId; +extern TransactionId FirstTransactionId; + +extern int RecoveryCheckingEnableState; + +/* in transsup.c */ +extern bool AMI_OVERRIDE; + +/* in varsup.c */ +extern int OidGenLockId; + +#endif /* TRAMSAM_H */ diff --git a/src/backend/access/transam/Makefile.inc b/src/backend/access/transam/Makefile.inc new file mode 100644 index 00000000000..c4f5b95a0ae --- /dev/null +++ b/src/backend/access/transam/Makefile.inc @@ -0,0 +1,14 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for access/transam +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= transam.c transsup.c varsup.c xact.c xid.c diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c new file mode 100644 index 00000000000..b3789a8c2c5 --- /dev/null +++ b/src/backend/access/transam/transam.c @@ -0,0 +1,675 @@ +/*------------------------------------------------------------------------- + * + * transam.c-- + * postgres transaction log/time interface routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/transam/transam.c,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ + * + * NOTES + * This file contains the high level access-method interface to the + * transaction system. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "machine.h" /* in port/ directory (needed for BLCKSZ) */ + +#include "access/heapam.h" +#include "storage/buf.h" +#include "storage/bufmgr.h" + +#include "utils/memutils.h" +#include "utils/mcxt.h" +#include "utils/rel.h" +#include "utils/elog.h" + +#include "utils/nabstime.h" +#include "catalog/catname.h" + +#include "access/transam.h" +#include "access/xact.h" +#include "commands/vacuum.h" /* for VacuumRunning */ + +/* ---------------- + * global variables holding pointers to relations used + * by the transaction system. These are initialized by + * InitializeTransactionLog(). + * ---------------- + */ + +Relation LogRelation = (Relation) NULL; +Relation TimeRelation = (Relation) NULL; +Relation VariableRelation = (Relation) NULL; + +/* ---------------- + * global variables holding cached transaction id's and statuses. + * ---------------- + */ +TransactionId cachedGetCommitTimeXid; +AbsoluteTime cachedGetCommitTime; +TransactionId cachedTestXid; +XidStatus cachedTestXidStatus; + +/* ---------------- + * transaction system constants + * ---------------- + */ +/* ---------------------------------------------------------------- + * transaction system constants + * + * read the comments for GetNewTransactionId in order to + * understand the initial values for AmiTransactionId and + * FirstTransactionId. -cim 3/23/90 + * ---------------------------------------------------------------- + */ +TransactionId NullTransactionId = (TransactionId) 0; + +TransactionId AmiTransactionId = (TransactionId) 512; + +TransactionId FirstTransactionId = (TransactionId) 514; + +/* ---------------- + * transaction recovery state variables + * + * When the transaction system is initialized, we may + * need to do recovery checking. 
This decision is decided + * by the postmaster or the user by supplying the backend + * with a special flag. In general, we want to do recovery + * checking whenever we are running without a postmaster + * or when the number of backends running under the postmaster + * goes from zero to one. -cim 3/21/90 + * ---------------- + */ +int RecoveryCheckingEnableState = 0; + +/* ------------------ + * spinlock for oid generation + * ----------------- + */ +extern int OidGenLockId; + +/* ---------------- + * globals that must be reset at abort + * ---------------- + */ +extern bool BuildingBtree; + + +/* ---------------- + * recovery checking accessors + * ---------------- + */ +int +RecoveryCheckingEnabled() +{ + return RecoveryCheckingEnableState; +} + +void +SetRecoveryCheckingEnabled(bool state) +{ + RecoveryCheckingEnableState = (state == true); +} + +/* ---------------------------------------------------------------- + * postgres log/time access method interface + * + * TransactionLogTest + * TransactionLogUpdate + * ======== + * these functions do work for the interface + * functions - they search/retrieve and append/update + * information in the log and time relations. + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * TransactionLogTest + * -------------------------------- + */ + +bool /* true/false: does transaction id have specified status? */ +TransactionLogTest(TransactionId transactionId, /* transaction id to test */ + XidStatus status) /* transaction status */ +{ + BlockNumber blockNumber; + XidStatus xidstatus; /* recorded status of xid */ + bool fail = false; /* success/failure */ + + /* ---------------- + * during initialization consider all transactions + * as having been committed + * ---------------- + */ + if (! RelationIsValid(LogRelation)) + return (bool) (status == XID_COMMIT); + + /* ---------------- + * before going to the buffer manager, check our single + * item cache to see if we didn't just check the transaction + * status a moment ago. + * ---------------- + */ + if (TransactionIdEquals(transactionId, cachedTestXid)) + return (bool) + (status == cachedTestXidStatus); + + /* ---------------- + * compute the item pointer corresponding to the + * page containing our transaction id. We save the item in + * our cache to speed up things if we happen to ask for the + * same xid's status more than once. + * ---------------- + */ + TransComputeBlockNumber(LogRelation, transactionId, &blockNumber); + xidstatus = TransBlockNumberGetXidStatus(LogRelation, + blockNumber, + transactionId, + &fail); + + if (! fail) { + TransactionIdStore(transactionId, &cachedTestXid); + cachedTestXidStatus = xidstatus; + return (bool) + (status == xidstatus); + } + + /* ---------------- + * here the block didn't contain the information we wanted + * ---------------- + */ + elog(WARN, "TransactionLogTest: failed to get xidstatus"); + + /* + * so lint is happy... + */ + return(false); +} + +/* -------------------------------- + * TransactionLogUpdate + * -------------------------------- + */ +void +TransactionLogUpdate(TransactionId transactionId, /* trans id to update */ + XidStatus status) /* new trans status */ +{ + BlockNumber blockNumber; + bool fail = false; /* success/failure */ + AbsoluteTime currentTime; /* time of this transaction */ + + /* ---------------- + * during initialization we don't record any updates. + * ---------------- + */ + if (! 
RelationIsValid(LogRelation)) + return; + + /* ---------------- + * get the transaction commit time + * ---------------- + */ + currentTime = getSystemTime(); + + /* ---------------- + * update the log relation + * ---------------- + */ + TransComputeBlockNumber(LogRelation, transactionId, &blockNumber); + TransBlockNumberSetXidStatus(LogRelation, + blockNumber, + transactionId, + status, + &fail); + + /* ---------------- + * update (invalidate) our single item TransactionLogTest cache. + * ---------------- + */ + TransactionIdStore(transactionId, &cachedTestXid); + cachedTestXidStatus = status; + + /* ---------------- + * now we update the time relation, if necessary + * (we only record commit times) + * ---------------- + */ + if (RelationIsValid(TimeRelation) && status == XID_COMMIT) { + TransComputeBlockNumber(TimeRelation, transactionId, &blockNumber); + TransBlockNumberSetCommitTime(TimeRelation, + blockNumber, + transactionId, + currentTime, + &fail); + /* ---------------- + * update (invalidate) our single item GetCommitTime cache. + * ---------------- + */ + TransactionIdStore(transactionId, &cachedGetCommitTimeXid); + cachedGetCommitTime = currentTime; + } + + /* ---------------- + * now we update the "last committed transaction" field + * in the variable relation if we are recording a commit. + * ---------------- + */ + if (RelationIsValid(VariableRelation) && status == XID_COMMIT) + UpdateLastCommittedXid(transactionId); +} + +/* -------------------------------- + * TransactionIdGetCommitTime + * -------------------------------- + */ + +AbsoluteTime /* commit time of transaction id */ +TransactionIdGetCommitTime(TransactionId transactionId) /* transaction id to test */ +{ + BlockNumber blockNumber; + AbsoluteTime commitTime; /* commit time */ + bool fail = false; /* success/failure */ + + /* ---------------- + * return invalid if we aren't running yet... + * ---------------- + */ + if (! RelationIsValid(TimeRelation)) + return INVALID_ABSTIME; + + /* ---------------- + * before going to the buffer manager, check our single + * item cache to see if we didn't just get the commit time + * a moment ago. + * ---------------- + */ + if (TransactionIdEquals(transactionId, cachedGetCommitTimeXid)) + return cachedGetCommitTime; + + /* ---------------- + * compute the item pointer corresponding to the + * page containing our transaction commit time + * ---------------- + */ + TransComputeBlockNumber(TimeRelation, transactionId, &blockNumber); + commitTime = TransBlockNumberGetCommitTime(TimeRelation, + blockNumber, + transactionId, + &fail); + + /* ---------------- + * update our cache and return the transaction commit time + * ---------------- + */ + if (! fail) { + TransactionIdStore(transactionId, &cachedGetCommitTimeXid); + cachedGetCommitTime = commitTime; + return commitTime; + } else + return INVALID_ABSTIME; +} + +/* ---------------------------------------------------------------- + * transaction recovery code + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * TransRecover + * + * preform transaction recovery checking. + * + * Note: this should only be preformed if no other backends + * are running. This is known by the postmaster and + * conveyed by the postmaster passing a "do recovery checking" + * flag to the backend. 
+ * + * here we get the last recorded transaction from the log, + * get the "last" and "next" transactions from the variable relation + * and then preform some integrity tests: + * + * 1) No transaction may exist higher then the "next" available + * transaction recorded in the variable relation. If this is the + * case then it means either the log or the variable relation + * has become corrupted. + * + * 2) The last committed transaction may not be higher then the + * next available transaction for the same reason. + * + * 3) The last recorded transaction may not be lower then the + * last committed transaction. (the reverse is ok - it means + * that some transactions have aborted since the last commit) + * + * Here is what the proper situation looks like. The line + * represents the data stored in the log. 'c' indicates the + * transaction was recorded as committed, 'a' indicates an + * abortted transaction and '.' represents information not + * recorded. These may correspond to in progress transactions. + * + * c c a c . . a . . . . . . . . . . + * | | + * last next + * + * Since "next" is only incremented by GetNewTransactionId() which + * is called when transactions are started. Hence if there + * are commits or aborts after "next", then it means we committed + * or aborted BEFORE we started the transaction. This is the + * rational behind constraint (1). + * + * Likewise, "last" should never greater then "next" for essentially + * the same reason - it would imply we committed before we started. + * This is the reasoning for (2). + * + * (3) implies we may never have a situation such as: + * + * c c a c . . a c . . . . . . . . . + * | | + * last next + * + * where there is a 'c' greater then "last". + * + * Recovery checking is more difficult in the case where + * several backends are executing concurrently because the + * transactions may be executing in the other backends. + * So, we only do recovery stuff when the backend is explicitly + * passed a flag on the command line. + * -------------------------------- + */ +void +TransRecover(Relation logRelation) +{ +#if 0 + /* ---------------- + * first get the last recorded transaction in the log. + * ---------------- + */ + TransGetLastRecordedTransaction(logRelation, logLastXid, &fail); + if (fail == true) + elog(WARN, "TransRecover: failed TransGetLastRecordedTransaction"); + + /* ---------------- + * next get the "last" and "next" variables + * ---------------- + */ + VariableRelationGetLastXid(&varLastXid); + VariableRelationGetNextXid(&varNextXid); + + /* ---------------- + * intregity test (1) + * ---------------- + */ + if (TransactionIdIsLessThan(varNextXid, logLastXid)) + elog(WARN, "TransRecover: varNextXid < logLastXid"); + + /* ---------------- + * intregity test (2) + * ---------------- + */ + + /* ---------------- + * intregity test (3) + * ---------------- + */ + + /* ---------------- + * here we have a valid " + * + * **** RESUME HERE **** + * ---------------- + */ + varNextXid = TransactionIdDup(varLastXid); + TransactionIdIncrement(&varNextXid); + + VarPut(var, VAR_PUT_LASTXID, varLastXid); + VarPut(var, VAR_PUT_NEXTXID, varNextXid); +#endif +} + +/* ---------------------------------------------------------------- + * Interface functions + * + * InitializeTransactionLog + * ======== + * this function (called near cinit) initializes + * the transaction log, time and variable relations. 
+ * + * TransactionId DidCommit + * TransactionId DidAbort + * TransactionId IsInProgress + * ======== + * these functions test the transaction status of + * a specified transaction id. + * + * TransactionId Commit + * TransactionId Abort + * TransactionId SetInProgress + * ======== + * these functions set the transaction status + * of the specified xid. TransactionIdCommit() also + * records the current time in the time relation + * and updates the variable relation counter. + * + * ---------------------------------------------------------------- + */ + +/* + * InitializeTransactionLog -- + * Initializes transaction logging. + */ +void +InitializeTransactionLog() +{ + Relation logRelation; + Relation timeRelation; + MemoryContext oldContext; + + /* ---------------- + * don't do anything during bootstrapping + * ---------------- + */ + if (AMI_OVERRIDE) + return; + + /* ---------------- + * disable the transaction system so the access methods + * don't interfere during initialization. + * ---------------- + */ + OverrideTransactionSystem(true); + + /* ---------------- + * make sure allocations occur within the top memory context + * so that our log management structures are protected from + * garbage collection at the end of every transaction. + * ---------------- + */ + oldContext = MemoryContextSwitchTo(TopMemoryContext); + + /* ---------------- + * first open the log and time relations + * (these are created by amiint so they are guaranteed to exist) + * ---------------- + */ + logRelation = heap_openr(LogRelationName); + timeRelation = heap_openr(TimeRelationName); + VariableRelation = heap_openr(VariableRelationName); + /* ---------------- + * XXX TransactionLogUpdate requires that LogRelation + * and TimeRelation are valid so we temporarily set + * them so we can initialize things properly. + * This could be done cleaner. + * ---------------- + */ + LogRelation = logRelation; + TimeRelation = timeRelation; + + /* ---------------- + * if we have a virgin database, we initialize the log and time + * relation by committing the AmiTransactionId (id 512) and we + * initialize the variable relation by setting the next available + * transaction id to FirstTransactionId (id 514). OID initialization + * happens as a side effect of bootstrapping in varsup.c. + * ---------------- + */ + SpinAcquire(OidGenLockId); + if (!TransactionIdDidCommit(AmiTransactionId)) { + + /* ---------------- + * SOMEDAY initialize the information stored in + * the headers of the log/time/variable relations. + * ---------------- + */ + TransactionLogUpdate(AmiTransactionId, XID_COMMIT); + VariableRelationPutNextXid(FirstTransactionId); + + } else if (RecoveryCheckingEnabled()) { + /* ---------------- + * if we have a pre-initialized database and if the + * perform recovery checking flag was passed then we + * do our database integrity checking. + * ---------------- + */ + TransRecover(logRelation); + } + LogRelation = (Relation) NULL; + TimeRelation = (Relation) NULL; + SpinRelease(OidGenLockId); + + /* ---------------- + * now re-enable the transaction system + * ---------------- + */ + OverrideTransactionSystem(false); + + /* ---------------- + * instantiate the global variables + * ---------------- + */ + LogRelation = logRelation; + TimeRelation = timeRelation; + + /* ---------------- + * restore the memory context to the previous context + * before we return from initialization. 
+ * ---------------- + */ + MemoryContextSwitchTo(oldContext); +} + +/* -------------------------------- + * TransactionId DidCommit + * TransactionId DidAbort + * TransactionId IsInProgress + * -------------------------------- + */ + +/* + * TransactionIdDidCommit -- + * True iff transaction associated with the identifier did commit. + * + * Note: + * Assumes transaction identifier is valid. + */ +bool /* true if given transaction committed */ +TransactionIdDidCommit(TransactionId transactionId) +{ + if (AMI_OVERRIDE) + return true; + + return + TransactionLogTest(transactionId, XID_COMMIT); +} + +/* + * TransactionIdDidAborted -- + * True iff transaction associated with the identifier did abort. + * + * Note: + * Assumes transaction identifier is valid. + * XXX Is this unneeded? + */ +bool /* true if given transaction aborted */ +TransactionIdDidAbort(TransactionId transactionId) +{ + if (AMI_OVERRIDE) + return false; + + return + TransactionLogTest(transactionId, XID_ABORT); +} + +bool /* true if given transaction neither committed nor aborted */ +TransactionIdIsInProgress(TransactionId transactionId) +{ + if (AMI_OVERRIDE) + return false; + + return + TransactionLogTest(transactionId, XID_INPROGRESS); +} + +/* -------------------------------- + * TransactionId Commit + * TransactionId Abort + * TransactionId SetInProgress + * -------------------------------- + */ + +/* + * TransactionIdCommit -- + * Commits the transaction associated with the identifier. + * + * Note: + * Assumes transaction identifier is valid. + */ +void +TransactionIdCommit(TransactionId transactionId) +{ + if (AMI_OVERRIDE) + return; + + /* + * Within TransactionLogUpdate we call UpdateLastCommited() + * which assumes we have exclusive access to pg_variable. + * Therefore we need to get exclusive access before calling + * TransactionLogUpdate. -mer 18 Aug 1992 + */ + SpinAcquire(OidGenLockId); + TransactionLogUpdate(transactionId, XID_COMMIT); + SpinRelease(OidGenLockId); +} + +/* + * TransactionIdAbort -- + * Aborts the transaction associated with the identifier. + * + * Note: + * Assumes transaction identifier is valid. 
+ */ +void +TransactionIdAbort(TransactionId transactionId) +{ + BuildingBtree = false; + + if (VacuumRunning) + vc_abort(); + + if (AMI_OVERRIDE) + return; + + TransactionLogUpdate(transactionId, XID_ABORT); +} + +void +TransactionIdSetInProgress(TransactionId transactionId) +{ + if (AMI_OVERRIDE) + return; + + TransactionLogUpdate(transactionId, XID_INPROGRESS); +} diff --git a/src/backend/access/transam/transsup.c b/src/backend/access/transam/transsup.c new file mode 100644 index 00000000000..a1e5b17ec13 --- /dev/null +++ b/src/backend/access/transam/transsup.c @@ -0,0 +1,663 @@ +/*------------------------------------------------------------------------- + * + * transsup.c-- + * postgres transaction access method support code + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/transsup.c,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ + * + * NOTES + * This file contains support functions for the high + * level access method interface routines found in transam.c + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "machine.h" /* in port/ directory (needed for BLCKSZ) */ + +#include "storage/buf.h" +#include "storage/bufmgr.h" + +#include "utils/rel.h" +#include "utils/elog.h" +#include "utils/memutils.h" +#include "utils/nabstime.h" + +#include "catalog/heap.h" +#include "access/transam.h" /* where the declarations go */ +#include "access/xact.h" /* where the declarations go */ + +#include "storage/smgr.h" + +/* ---------------------------------------------------------------- + * general support routines + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * AmiTransactionOverride + * + * This function is used to manipulate the bootstrap flag. + * -------------------------------- + */ +void +AmiTransactionOverride(bool flag) +{ + AMI_OVERRIDE = flag; +} + +/* -------------------------------- + * TransComputeBlockNumber + * -------------------------------- + */ +void +TransComputeBlockNumber(Relation relation, /* relation to test */ + TransactionId transactionId, /* transaction id to test */ + BlockNumber *blockNumberOutP) +{ + long itemsPerBlock; + + /* ---------------- + * we calculate the block number of our transaction + * by dividing the transaction id by the number of + * transaction things per block. + * ---------------- + */ + if (relation == LogRelation) + itemsPerBlock = TP_NumXidStatusPerBlock; + else if (relation == TimeRelation) + itemsPerBlock = TP_NumTimePerBlock; + else + elog(WARN, "TransComputeBlockNumber: unknown relation"); + + /* ---------------- + * warning! if the transaction id's get too large + * then a BlockNumber may not be large enough to hold the results + * of our division. + * + * XXX this will all vanish soon when we implement an improved + * transaction id schema -cim 3/23/90 + * + * This has vanished now that xid's are 4 bytes (no longer 5). + * -mer 5/24/92 + * ---------------- + */ + (*blockNumberOutP) = transactionId / itemsPerBlock; +} + + +/* ---------------------------------------------------------------- + * trans block support routines + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * TransBlockGetLastTransactionIdStatus + * + * This returns the status and transaction id of the last + * transaction information recorded on the given TransBlock. 
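+ *
+ *	For illustration (mirroring the loop in the body below), the status
+ *	of the Nth xid on a block is rebuilt from two adjacent bits:
+ *
+ *	    offset  = BitIndexOf(N);			-- i.e. N * 2
+ *	    xstatus = (bit(offset) << 1) | bit(offset + 1);
+ *
+ *	where bit() stands for BitArrayBitIsSet() on the block's bit array.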
+ * -------------------------------- + */ + +XidStatus +TransBlockGetLastTransactionIdStatus(Block tblock, + TransactionId baseXid, + TransactionId *returnXidP) +{ + Index index; + Index maxIndex; + bits8 bit1; + bits8 bit2; + BitIndex offset; + XidStatus xstatus; + + /* ---------------- + * sanity check + * ---------------- + */ + Assert((tblock != NULL)); + + /* ---------------- + * search downward from the top of the block data, looking + * for the first Non-in progress transaction status. Since we + * are scanning backward, this will be last recorded transaction + * status on the block. + * ---------------- + */ + maxIndex = TP_NumXidStatusPerBlock; + for (index = maxIndex-1; index>=0; index--) { + offset = BitIndexOf(index); + bit1 = ((bits8) BitArrayBitIsSet((BitArray) tblock, offset++)) << 1; + bit2 = (bits8) BitArrayBitIsSet((BitArray) tblock, offset); + + xstatus = (bit1 | bit2) ; + + /* ---------------- + * here we have the status of some transaction, so test + * if the status is recorded as "in progress". If so, then + * we save the transaction id in the place specified by the caller. + * ---------------- + */ + if (xstatus != XID_INPROGRESS) { + if (returnXidP != NULL) { + TransactionIdStore(baseXid, returnXidP); + TransactionIdAdd(returnXidP, index); + } + break; + } + } + + /* ---------------- + * if we get here and index is 0 it means we couldn't find + * a non-inprogress transaction on the block. For now we just + * return this info to the user. They can check if the return + * status is "in progress" to know this condition has arisen. + * ---------------- + */ + if (index == 0) { + if (returnXidP != NULL) + TransactionIdStore(baseXid, returnXidP); + } + + /* ---------------- + * return the status to the user + * ---------------- + */ + return xstatus; +} + +/* -------------------------------- + * TransBlockGetXidStatus + * + * This returns the status of the desired transaction + * -------------------------------- + */ + +XidStatus +TransBlockGetXidStatus(Block tblock, + TransactionId transactionId) +{ + Index index; + bits8 bit1; + bits8 bit2; + BitIndex offset; + + /* ---------------- + * sanity check + * ---------------- + */ + if (tblock == NULL) { + return XID_INVALID; + } + + /* ---------------- + * calculate the index into the transaction data where + * our transaction status is located + * + * XXX this will be replaced soon when we move to the + * new transaction id scheme -cim 3/23/90 + * + * The old system has now been replaced. -mer 5/24/92 + * ---------------- + */ + index = transactionId % TP_NumXidStatusPerBlock; + + /* ---------------- + * get the data at the specified index + * ---------------- + */ + offset = BitIndexOf(index); + bit1 = ((bits8) BitArrayBitIsSet((BitArray) tblock, offset++)) << 1; + bit2 = (bits8) BitArrayBitIsSet((BitArray) tblock, offset); + + /* ---------------- + * return the transaction status to the caller + * ---------------- + */ + return (XidStatus) + (bit1 | bit2); +} + +/* -------------------------------- + * TransBlockSetXidStatus + * + * This sets the status of the desired transaction + * -------------------------------- + */ +void +TransBlockSetXidStatus(Block tblock, + TransactionId transactionId, + XidStatus xstatus) +{ + Index index; + BitIndex offset; + + /* ---------------- + * sanity check + * ---------------- + */ + if (tblock == NULL) + return; + + /* ---------------- + * calculate the index into the transaction data where + * we sould store our transaction status. 
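+ *	(for example, with 8192-byte blocks a page holds 32768 statuses,
+ *	so xid 514 lands on block 0 at index 514, i.e. bits 1028 and 1029.)
+ *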
+ * + * XXX this will be replaced soon when we move to the + * new transaction id scheme -cim 3/23/90 + * + * The new scheme is here -mer 5/24/92 + * ---------------- + */ + index = transactionId % TP_NumXidStatusPerBlock; + + offset = BitIndexOf(index); + + /* ---------------- + * store the transaction value at the specified offset + * ---------------- + */ + switch(xstatus) { + case XID_COMMIT: /* set 10 */ + BitArraySetBit((BitArray) tblock, offset); + BitArrayClearBit((BitArray) tblock, offset + 1); + break; + case XID_ABORT: /* set 01 */ + BitArrayClearBit((BitArray) tblock, offset); + BitArraySetBit((BitArray) tblock, offset + 1); + break; + case XID_INPROGRESS: /* set 00 */ + BitArrayClearBit((BitArray) tblock, offset); + BitArrayClearBit((BitArray) tblock, offset + 1); + break; + default: + elog(NOTICE, + "TransBlockSetXidStatus: invalid status: %d (ignored)", + xstatus); + break; + } +} + +/* -------------------------------- + * TransBlockGetCommitTime + * + * This returns the transaction commit time for the + * specified transaction id in the trans block. + * -------------------------------- + */ +AbsoluteTime +TransBlockGetCommitTime(Block tblock, + TransactionId transactionId) +{ + Index index; + AbsoluteTime *timeArray; + + /* ---------------- + * sanity check + * ---------------- + */ + if (tblock == NULL) + return INVALID_ABSTIME; + + /* ---------------- + * calculate the index into the transaction data where + * our transaction commit time is located + * + * XXX this will be replaced soon when we move to the + * new transaction id scheme -cim 3/23/90 + * + * The new scheme is here. -mer 5/24/92 + * ---------------- + */ + index = transactionId % TP_NumTimePerBlock; + + /* ---------------- + * return the commit time to the caller + * ---------------- + */ + timeArray = (AbsoluteTime *) tblock; + return (AbsoluteTime) + timeArray[ index ]; +} + +/* -------------------------------- + * TransBlockSetCommitTime + * + * This sets the commit time of the specified transaction + * -------------------------------- + */ +void +TransBlockSetCommitTime(Block tblock, + TransactionId transactionId, + AbsoluteTime commitTime) +{ + Index index; + AbsoluteTime *timeArray; + + /* ---------------- + * sanity check + * ---------------- + */ + if (tblock == NULL) + return; + + + /* ---------------- + * calculate the index into the transaction data where + * we sould store our transaction status. + * + * XXX this will be replaced soon when we move to the + * new transaction id scheme -cim 3/23/90 + * + * The new scheme is here. 
-mer 5/24/92 + * ---------------- + */ + index = transactionId % TP_NumTimePerBlock; + + /* ---------------- + * store the transaction commit time at the specified index + * ---------------- + */ + timeArray = (AbsoluteTime *) tblock; + timeArray[ index ] = commitTime; +} + +/* ---------------------------------------------------------------- + * transam i/o support routines + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * TransBlockNumberGetXidStatus + * -------------------------------- + */ +XidStatus +TransBlockNumberGetXidStatus(Relation relation, + BlockNumber blockNumber, + TransactionId xid, + bool *failP) +{ + Buffer buffer; /* buffer associated with block */ + Block block; /* block containing xstatus */ + XidStatus xstatus; /* recorded status of xid */ + bool localfail; /* bool used if failP = NULL */ + + /* ---------------- + * SOMEDAY place a read lock on the log relation + * That someday is today 5 Aug 1991 -mer + * ---------------- + */ + RelationSetLockForRead(relation); + + /* ---------------- + * get the page containing the transaction information + * ---------------- + */ + buffer = ReadBuffer(relation, blockNumber); + block = BufferGetBlock(buffer); + + /* ---------------- + * get the status from the block. note, for now we always + * return false in failP. + * ---------------- + */ + if (failP == NULL) + failP = &localfail; + (*failP) = false; + + xstatus = TransBlockGetXidStatus(block, xid); + + /* ---------------- + * release the buffer and return the status + * ---------------- + */ + ReleaseBuffer(buffer); + + /* ---------------- + * SOMEDAY release our lock on the log relation + * ---------------- + */ + RelationUnsetLockForRead(relation); + + return + xstatus; +} + +/* -------------------------------- + * TransBlockNumberSetXidStatus + * -------------------------------- + */ +void +TransBlockNumberSetXidStatus(Relation relation, + BlockNumber blockNumber, + TransactionId xid, + XidStatus xstatus, + bool *failP) +{ + Buffer buffer; /* buffer associated with block */ + Block block; /* block containing xstatus */ + bool localfail; /* bool used if failP = NULL */ + + /* ---------------- + * SOMEDAY gain exclusive access to the log relation + * + * That someday is today 5 Aug 1991 -mer + * ---------------- + */ + RelationSetLockForWrite(relation); + + /* ---------------- + * get the block containing the transaction status + * ---------------- + */ + buffer = ReadBuffer(relation, blockNumber); + block = BufferGetBlock(buffer); + + /* ---------------- + * attempt to update the status of the transaction on the block. + * if we are successful, write the block. otherwise release the buffer. + * note, for now we always return false in failP. 
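+ *	(callers that do not care about failure may pass failP == NULL;
+ *	a local bool is substituted below, so a hypothetical call such as
+ *	TransBlockNumberSetXidStatus(rel, blk, xid, XID_COMMIT, NULL)
+ *	is legal.)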
+ * ---------------- + */ + if (failP == NULL) + failP = &localfail; + (*failP) = false; + + TransBlockSetXidStatus(block, xid, xstatus); + + if ((*failP) == false) + WriteBuffer(buffer); + else + ReleaseBuffer(buffer); + + /* ---------------- + * SOMEDAY release our lock on the log relation + * ---------------- + */ + RelationUnsetLockForWrite(relation); +} + +/* -------------------------------- + * TransBlockNumberGetCommitTime + * -------------------------------- + */ +AbsoluteTime +TransBlockNumberGetCommitTime(Relation relation, + BlockNumber blockNumber, + TransactionId xid, + bool *failP) +{ + Buffer buffer; /* buffer associated with block */ + Block block; /* block containing commit time */ + bool localfail; /* bool used if failP = NULL */ + AbsoluteTime xtime; /* commit time */ + + /* ---------------- + * SOMEDAY place a read lock on the time relation + * + * That someday is today 5 Aug. 1991 -mer + * ---------------- + */ + RelationSetLockForRead(relation); + + /* ---------------- + * get the block containing the transaction information + * ---------------- + */ + buffer = ReadBuffer(relation, blockNumber); + block = BufferGetBlock(buffer); + + /* ---------------- + * get the commit time from the block + * note, for now we always return false in failP. + * ---------------- + */ + if (failP == NULL) + failP = &localfail; + (*failP) = false; + + xtime = TransBlockGetCommitTime(block, xid); + + /* ---------------- + * release the buffer and return the commit time + * ---------------- + */ + ReleaseBuffer(buffer); + + /* ---------------- + * SOMEDAY release our lock on the time relation + * ---------------- + */ + RelationUnsetLockForRead(relation); + + if ((*failP) == false) + return xtime; + else + return INVALID_ABSTIME; + +} + +/* -------------------------------- + * TransBlockNumberSetCommitTime + * -------------------------------- + */ +void +TransBlockNumberSetCommitTime(Relation relation, + BlockNumber blockNumber, + TransactionId xid, + AbsoluteTime xtime, + bool *failP) +{ + Buffer buffer; /* buffer associated with block */ + Block block; /* block containing commit time */ + bool localfail; /* bool used if failP = NULL */ + + /* ---------------- + * SOMEDAY gain exclusive access to the time relation + * + * That someday is today 5 Aug. 1991 -mer + * ---------------- + */ + RelationSetLockForWrite(relation); + + /* ---------------- + * get the block containing our commit time + * ---------------- + */ + buffer = ReadBuffer(relation, blockNumber); + block = BufferGetBlock(buffer); + + /* ---------------- + * attempt to update the commit time of the transaction on the block. + * if we are successful, write the block. otherwise release the buffer. + * note, for now we always return false in failP. 
+ * ---------------- + */ + if (failP == NULL) + failP = &localfail; + (*failP) = false; + + TransBlockSetCommitTime(block, xid, xtime); + + if ((*failP) == false) + WriteBuffer(buffer); + else + ReleaseBuffer(buffer); + + /* ---------------- + * SOMEDAY release our lock on the time relation + * ---------------- + */ + RelationUnsetLockForWrite(relation); + +} + +/* -------------------------------- + * TransGetLastRecordedTransaction + * -------------------------------- + */ +void +TransGetLastRecordedTransaction(Relation relation, + TransactionId xid, /* return: transaction id */ + bool *failP) +{ + BlockNumber blockNumber; /* block number */ + Buffer buffer; /* buffer associated with block */ + Block block; /* block containing xid status */ + BlockNumber n; /* number of blocks in the relation */ + TransactionId baseXid; + + (*failP) = false; + + /* ---------------- + * SOMEDAY gain exclusive access to the log relation + * + * That someday is today 5 Aug. 1991 -mer + * It looks to me like we only need to set a read lock here, despite + * the above comment about exclusive access. The block is never + * actually written into, we only check status bits. + * ---------------- + */ + RelationSetLockForRead(relation); + + /* ---------------- + * we assume the last block of the log contains the last + * recorded transaction. If the relation is empty we return + * failure to the user. + * ---------------- + */ + n = RelationGetNumberOfBlocks(relation); + if (n == 0) { + (*failP) = true; + return; + } + + /* ---------------- + * get the block containing the transaction information + * ---------------- + */ + blockNumber = n-1; + buffer = ReadBuffer(relation, blockNumber); + block = BufferGetBlock(buffer); + + /* ---------------- + * get the last xid on the block + * ---------------- + */ + baseXid = blockNumber * TP_NumXidStatusPerBlock; + +/* XXX ???? xid won't get returned! - AY '94 */ + (void) TransBlockGetLastTransactionIdStatus(block, baseXid, &xid); + + ReleaseBuffer(buffer); + + /* ---------------- + * SOMEDAY release our lock on the log relation + * ---------------- + */ + RelationUnsetLockForRead(relation); +} diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c new file mode 100644 index 00000000000..a53cc7d35b1 --- /dev/null +++ b/src/backend/access/transam/varsup.c @@ -0,0 +1,606 @@ +/*------------------------------------------------------------------------- + * + * varsup.c-- + * postgres variable relation support routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/transam/varsup.c,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "machine.h" /* in port/ directory (needed for BLCKSZ) */ +#include "storage/buf.h" +#include "storage/bufmgr.h" +#include "storage/ipc.h" /* for OIDGENLOCKID */ + +#include "utils/rel.h" +#include "utils/elog.h" + +#include "access/heapam.h" +#include "access/transam.h" /* where the declarations go */ +#include "access/xact.h" /* where the declarations go */ + +#include "catalog/catname.h" + +/* ---------- + * note: we reserve the first 16384 object ids for internal use. + * oid's less than this appear in the .bki files. the choice of + * 16384 is completely arbitrary. 
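+ *	(when pg_variable does not yet hold a valid nextOid,
+ *	VariableRelationGetNextOid() below falls back to this
+ *	BootstrapObjectIdData value, so 16384 is the first oid
+ *	handed out at run time in that case.)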
+ * ---------- + */ +#define BootstrapObjectIdData 16384 + +/* --------------------- + * spin lock for oid generation + * --------------------- + */ +int OidGenLockId; + +/* ---------------------------------------------------------------- + * variable relation query/update routines + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * VariableRelationGetNextXid + * -------------------------------- + */ +void +VariableRelationGetNextXid(TransactionId *xidP) +{ + Buffer buf; + VariableRelationContents var; + + /* ---------------- + * We assume that a spinlock has been acquire to guarantee + * exclusive access to the variable relation. + * ---------------- + */ + + /* ---------------- + * do nothing before things are initialized + * ---------------- + */ + if (! RelationIsValid(VariableRelation)) + return; + + /* ---------------- + * read the variable page, get the the nextXid field and + * release the buffer + * ---------------- + */ + buf = ReadBuffer(VariableRelation, 0); + + if (! BufferIsValid(buf)) + { + SpinRelease(OidGenLockId); + elog(WARN, "VariableRelationGetNextXid: ReadBuffer failed"); + } + + var = (VariableRelationContents) BufferGetBlock(buf); + + TransactionIdStore(var->nextXidData, xidP); + ReleaseBuffer(buf); +} + +/* -------------------------------- + * VariableRelationGetLastXid + * -------------------------------- + */ +void +VariableRelationGetLastXid(TransactionId *xidP) +{ + Buffer buf; + VariableRelationContents var; + + /* ---------------- + * We assume that a spinlock has been acquire to guarantee + * exclusive access to the variable relation. + * ---------------- + */ + + /* ---------------- + * do nothing before things are initialized + * ---------------- + */ + if (! RelationIsValid(VariableRelation)) + return; + + /* ---------------- + * read the variable page, get the the lastXid field and + * release the buffer + * ---------------- + */ + buf = ReadBuffer(VariableRelation, 0); + + if (! BufferIsValid(buf)) + { + SpinRelease(OidGenLockId); + elog(WARN, "VariableRelationGetNextXid: ReadBuffer failed"); + } + + var = (VariableRelationContents) BufferGetBlock(buf); + + TransactionIdStore(var->lastXidData, xidP); + + ReleaseBuffer(buf); +} + +/* -------------------------------- + * VariableRelationPutNextXid + * -------------------------------- + */ +void +VariableRelationPutNextXid(TransactionId xid) +{ + Buffer buf; + VariableRelationContents var; + + /* ---------------- + * We assume that a spinlock has been acquire to guarantee + * exclusive access to the variable relation. + * ---------------- + */ + + /* ---------------- + * do nothing before things are initialized + * ---------------- + */ + if (! RelationIsValid(VariableRelation)) + return; + + /* ---------------- + * read the variable page, update the nextXid field and + * write the page back out to disk. + * ---------------- + */ + buf = ReadBuffer(VariableRelation, 0); + + if (! 
BufferIsValid(buf)) + { + SpinRelease(OidGenLockId); + elog(WARN, "VariableRelationPutNextXid: ReadBuffer failed"); + } + + var = (VariableRelationContents) BufferGetBlock(buf); + + TransactionIdStore(xid, &(var->nextXidData)); + + WriteBuffer(buf); +} + +/* -------------------------------- + * VariableRelationPutLastXid + * -------------------------------- + */ +void +VariableRelationPutLastXid(TransactionId xid) +{ + Buffer buf; + VariableRelationContents var; + + /* ---------------- + * We assume that a spinlock has been acquire to guarantee + * exclusive access to the variable relation. + * ---------------- + */ + + /* ---------------- + * do nothing before things are initialized + * ---------------- + */ + if (! RelationIsValid(VariableRelation)) + return; + + /* ---------------- + * read the variable page, update the lastXid field and + * force the page back out to disk. + * ---------------- + */ + buf = ReadBuffer(VariableRelation, 0); + + if (! BufferIsValid(buf)) + { + SpinRelease(OidGenLockId); + elog(WARN, "VariableRelationPutLastXid: ReadBuffer failed"); + } + + var = (VariableRelationContents) BufferGetBlock(buf); + + TransactionIdStore(xid, &(var->lastXidData)); + + WriteBuffer(buf); +} + +/* -------------------------------- + * VariableRelationGetNextOid + * -------------------------------- + */ +void +VariableRelationGetNextOid(Oid *oid_return) +{ + Buffer buf; + VariableRelationContents var; + + /* ---------------- + * We assume that a spinlock has been acquire to guarantee + * exclusive access to the variable relation. + * ---------------- + */ + + /* ---------------- + * if the variable relation is not initialized, then we + * assume we are running at bootstrap time and so we return + * an invalid object id -- during this time GetNextBootstrapObjectId + * should be called instead.. + * ---------------- + */ + if (! RelationIsValid(VariableRelation)) { + if (PointerIsValid(oid_return)) + (*oid_return) = InvalidOid; + return; + } + + /* ---------------- + * read the variable page, get the the nextOid field and + * release the buffer + * ---------------- + */ + buf = ReadBuffer(VariableRelation, 0); + + if (! BufferIsValid(buf)) + { + SpinRelease(OidGenLockId); + elog(WARN, "VariableRelationGetNextXid: ReadBuffer failed"); + } + + var = (VariableRelationContents) BufferGetBlock(buf); + + if (PointerIsValid(oid_return)) { + + /* ---------------- + * nothing up my sleeve... what's going on here is that this code + * is guaranteed never to be called until all files in data/base/ + * are created, and the template database exists. at that point, + * we want to append a pg_database tuple. the first time we do + * this, the oid stored in pg_variable will be bogus, so we use + * a bootstrap value defined at the top of this file. + * + * this comment no longer holds true. This code is called before + * all of the files in data/base are created and you can't rely + * on system oid's to be less than BootstrapObjectIdData. mer 9/18/91 + * ---------------- + */ + if (OidIsValid(var->nextOid)) + (*oid_return) = var->nextOid; + else + (*oid_return) = BootstrapObjectIdData; + } + + ReleaseBuffer(buf); +} + +/* -------------------------------- + * VariableRelationPutNextOid + * -------------------------------- + */ +void +VariableRelationPutNextOid(Oid *oidP) +{ + Buffer buf; + VariableRelationContents var; + + /* ---------------- + * We assume that a spinlock has been acquire to guarantee + * exclusive access to the variable relation. 
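+ *	(see GetNewObjectIdBlock() below for the intended calling pattern:
+ *	 SpinAcquire(OidGenLockId); ...get/put the next oid...;
+ *	 SpinRelease(OidGenLockId).)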
+ * ---------------- + */ + + /* ---------------- + * do nothing before things are initialized + * ---------------- + */ + if (! RelationIsValid(VariableRelation)) + return; + + /* ---------------- + * sanity check + * ---------------- + */ + if (! PointerIsValid(oidP)) + { + SpinRelease(OidGenLockId); + elog(WARN, "VariableRelationPutNextOid: invalid oid pointer"); + } + + /* ---------------- + * read the variable page, update the nextXid field and + * write the page back out to disk. + * ---------------- + */ + buf = ReadBuffer(VariableRelation, 0); + + if (! BufferIsValid(buf)) + { + SpinRelease(OidGenLockId); + elog(WARN, "VariableRelationPutNextXid: ReadBuffer failed"); + } + + var = (VariableRelationContents) BufferGetBlock(buf); + + var->nextOid = (*oidP); + + WriteBuffer(buf); +} + +/* ---------------------------------------------------------------- + * transaction id generation support + * ---------------------------------------------------------------- + */ + +/* ---------------- + * GetNewTransactionId + * + * In the version 2 transaction system, transaction id's are + * restricted in several ways. + * + * First, all transaction id's are even numbers (4, 88, 121342, etc). + * This means the binary representation of the number will never + * have the least significent bit set. This bit is reserved to + * indicate that the transaction id does not in fact hold an XID, + * but rather a commit time. This makes it possible for the + * vaccuum daemon to disgard information from the log and time + * relations for committed tuples. This is important when archiving + * tuples to an optical disk because tuples with commit times + * stored in their xid fields will not need to consult the log + * and time relations. + * + * Second, since we may someday preform compression of the data + * in the log and time relations, we cause the numbering of the + * transaction ids to begin at 512. This means that some space + * on the page of the log and time relations corresponding to + * transaction id's 0 - 510 will never be used. This space is + * in fact used to store the version number of the postgres + * transaction log and will someday store compression information + * about the log. + * + * Lastly, rather then access the variable relation each time + * a backend requests a new transction id, we "prefetch" 32 + * transaction id's by incrementing the nextXid stored in the + * var relation by 64 (remember only even xid's are legal) and then + * returning these id's one at a time until they are exhausted. + * This means we reduce the number of accesses to the variable + * relation by 32 for each backend. + * + * Note: 32 has no special significance. We don't want the + * number to be too large because if when the backend + * terminates, we lose the xid's we cached. + * + * ---------------- + */ + +#define VAR_XID_PREFETCH 32 + +static int prefetched_xid_count = 0; +static TransactionId next_prefetched_xid; + +void +GetNewTransactionId(TransactionId *xid) +{ + TransactionId nextid; + + /* ---------------- + * during bootstrap initialization, we return the special + * bootstrap transaction id. + * ---------------- + */ + if (AMI_OVERRIDE) { + TransactionIdStore(AmiTransactionId, xid); + return; + } + + /* ---------------- + * if we run out of prefetched xids, then we get some + * more before handing them out to the caller. 
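+ *
+ *	worked example (illustrative): if pg_variable currently stores
+ *	nextXid = 514, the code below caches 514 as next_prefetched_xid,
+ *	writes 514 + VAR_XID_PREFETCH = 546 back to pg_variable, and then
+ *	hands out 514, 515, ..., 545 before touching pg_variable again.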
+ * ---------------- + */ + + if (prefetched_xid_count == 0) { + /* ---------------- + * obtain exclusive access to the variable relation page + * + * get the "next" xid from the variable relation + * and save it in the prefetched id. + * ---------------- + */ + SpinAcquire(OidGenLockId); + VariableRelationGetNextXid(&nextid); + TransactionIdStore(nextid, &next_prefetched_xid); + + /* ---------------- + * now increment the variable relation's next xid + * and reset the prefetched_xid_count. We multiply + * the id by two because our xid's are always even. + * ---------------- + */ + prefetched_xid_count = VAR_XID_PREFETCH; + TransactionIdAdd(&nextid, prefetched_xid_count); + VariableRelationPutNextXid(nextid); + SpinRelease(OidGenLockId); + } + + /* ---------------- + * return the next prefetched xid in the pointer passed by + * the user and decrement the prefetch count. We add two + * to id we return the next time this is called because our + * transaction ids are always even. + * + * XXX Transaction Ids used to be even as the low order bit was + * used to determine commit status. This is no long true so + * we now use even and odd transaction ids. -mer 5/26/92 + * ---------------- + */ + TransactionIdStore(next_prefetched_xid, xid); + TransactionIdAdd(&next_prefetched_xid, 1); + prefetched_xid_count--; +} + +/* ---------------- + * UpdateLastCommittedXid + * ---------------- + */ + +void +UpdateLastCommittedXid(TransactionId xid) +{ + TransactionId lastid; + + + /* we assume that spinlock OidGenLockId has been acquired + * prior to entering this function + */ + + /* ---------------- + * get the "last committed" transaction id from + * the variable relation page. + * ---------------- + */ + VariableRelationGetLastXid(&lastid); + + /* ---------------- + * if the transaction id is greater than the last committed + * transaction then we update the last committed transaction + * in the variable relation. + * ---------------- + */ + if (TransactionIdIsLessThan(lastid, xid)) + VariableRelationPutLastXid(xid); + +} + +/* ---------------------------------------------------------------- + * object id generation support + * ---------------------------------------------------------------- + */ + +/* ---------------- + * GetNewObjectIdBlock + * + * This support function is used to allocate a block of object ids + * of the given size. applications wishing to do their own object + * id assignments should use this + * ---------------- + */ +void +GetNewObjectIdBlock(Oid *oid_return, /* place to return the new object id */ + int oid_block_size) /* number of oids desired */ +{ + Oid nextoid; + + /* ---------------- + * SOMEDAY obtain exclusive access to the variable relation page + * That someday is today -mer 6 Aug 1992 + * ---------------- + */ + SpinAcquire(OidGenLockId); + + /* ---------------- + * get the "next" oid from the variable relation + * and give it to the caller. + * ---------------- + */ + VariableRelationGetNextOid(&nextoid); + if (PointerIsValid(oid_return)) + (*oid_return) = nextoid; + + /* ---------------- + * now increment the variable relation's next oid + * field by the size of the oid block requested. + * ---------------- + */ + nextoid += oid_block_size; + VariableRelationPutNextOid(&nextoid); + + /* ---------------- + * SOMEDAY relinquish our lock on the variable relation page + * That someday is today -mer 6 Apr 1992 + * ---------------- + */ + SpinRelease(OidGenLockId); +} + +/* ---------------- + * GetNewObjectId + * + * This function allocates and parses out object ids. 
Like + * GetNewTransactionId(), it "prefetches" 32 object ids by + * incrementing the nextOid stored in the var relation by 32 and then + * returning these id's one at a time until they are exhausted. + * This means we reduce the number of accesses to the variable + * relation by 32 for each backend. + * + * Note: 32 has no special significance. We don't want the + * number to be too large because if when the backend + * terminates, we lose the oids we cached. + * + * ---------------- + */ + +#define VAR_OID_PREFETCH 32 + +static int prefetched_oid_count = 0; +static Oid next_prefetched_oid; + +void +GetNewObjectId(Oid *oid_return) /* place to return the new object id */ +{ + /* ---------------- + * if we run out of prefetched oids, then we get some + * more before handing them out to the caller. + * ---------------- + */ + + if (prefetched_oid_count == 0) { + int oid_block_size = VAR_OID_PREFETCH; + + /* ---------------- + * during bootstrap time, we want to allocate oids + * one at a time. Otherwise there might be some + * bootstrap oid's left in the block we prefetch which + * would be passed out after the variable relation was + * initialized. This would be bad. + * ---------------- + */ + if (! RelationIsValid(VariableRelation)) + VariableRelation = heap_openr(VariableRelationName); + + /* ---------------- + * get a new block of prefetched object ids. + * ---------------- + */ + GetNewObjectIdBlock(&next_prefetched_oid, oid_block_size); + + /* ---------------- + * now reset the prefetched_oid_count. + * ---------------- + */ + prefetched_oid_count = oid_block_size; + } + + /* ---------------- + * return the next prefetched oid in the pointer passed by + * the user and decrement the prefetch count. + * ---------------- + */ + if (PointerIsValid(oid_return)) + (*oid_return) = next_prefetched_oid; + + next_prefetched_oid++; + prefetched_oid_count--; +} diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c new file mode 100644 index 00000000000..1798d09d054 --- /dev/null +++ b/src/backend/access/transam/xact.c @@ -0,0 +1,1314 @@ +/*------------------------------------------------------------------------- + * + * xact.c-- + * top level transaction system support routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.1.1.1 1996/07/09 06:21:13 scrappy Exp $ + * + * NOTES + * Transaction aborts can now occur two ways: + * + * 1) system dies from some internal cause (Assert, etc..) + * 2) user types abort + * + * These two cases used to be treated identically, but now + * we need to distinguish them. Why? consider the following + * two situatuons: + * + * case 1 case 2 + * ------ ------ + * 1) user types BEGIN 1) user types BEGIN + * 2) user does something 2) user does something + * 3) user does not like what 3) system aborts for some reason + * she shes and types ABORT + * + * In case 1, we want to abort the transaction and return to the + * default state. In case 2, there may be more commands coming + * our way which are part of the same transaction block and we have + * to ignore these commands until we see an END transaction. + * + * Internal aborts are now handled by AbortTransactionBlock(), just as + * they always have been, and user aborts are now handled by + * UserAbortTransactionBlock(). Both of them rely on AbortTransaction() + * to do all the real work. 
The only difference is what state we + * enter after AbortTransaction() does it's work: + * + * * AbortTransactionBlock() leaves us in TBLOCK_ABORT and + * * UserAbortTransactionBlock() leaves us in TBLOCK_ENDABORT + * + * NOTES + * This file is an attempt at a redesign of the upper layer + * of the V1 transaction system which was too poorly thought + * out to describe. This new system hopes to be both simpler + * in design, simpler to extend and needs to contain added + * functionality to solve problems beyond the scope of the V1 + * system. (In particuler, communication of transaction + * information between parallel backends has to be supported) + * + * The essential aspects of the transaction system are: + * + * o transaction id generation + * o transaction log updating + * o memory cleanup + * o cache invalidation + * o lock cleanup + * + * Hence, the functional division of the transaction code is + * based on what of the above things need to be done during + * a start/commit/abort transaction. For instance, the + * routine AtCommit_Memory() takes care of all the memory + * cleanup stuff done at commit time. + * + * The code is layered as follows: + * + * StartTransaction + * CommitTransaction + * AbortTransaction + * UserAbortTransaction + * + * are provided to do the lower level work like recording + * the transaction status in the log and doing memory cleanup. + * above these routines are another set of functions: + * + * StartTransactionCommand + * CommitTransactionCommand + * AbortCurrentTransaction + * + * These are the routines used in the postgres main processing + * loop. They are sensitive to the current transaction block state + * and make calls to the lower level routines appropriately. + * + * Support for transaction blocks is provided via the functions: + * + * StartTransactionBlock + * CommitTransactionBlock + * AbortTransactionBlock + * + * These are invoked only in responce to a user "BEGIN", "END", + * or "ABORT" command. The tricky part about these functions + * is that they are called within the postgres main loop, in between + * the StartTransactionCommand() and CommitTransactionCommand(). + * + * For example, consider the following sequence of user commands: + * + * 1) begin + * 2) retrieve (foo.all) + * 3) append foo (bar = baz) + * 4) end + * + * in the main processing loop, this results in the following + * transaction sequence: + * + * / StartTransactionCommand(); + * 1) / ProcessUtility(); << begin + * \ StartTransactionBlock(); + * \ CommitTransactionCommand(); + * + * / StartTransactionCommand(); + * 2) < ProcessQuery(); << retrieve (foo.all) + * \ CommitTransactionCommand(); + * + * / StartTransactionCommand(); + * 3) < ProcessQuery(); << append foo (bar = baz) + * \ CommitTransactionCommand(); + * + * / StartTransactionCommand(); + * 4) / ProcessUtility(); << end + * \ CommitTransactionBlock(); + * \ CommitTransactionCommand(); + * + * The point of this example is to demonstrate the need for + * StartTransactionCommand() and CommitTransactionCommand() to + * be state smart -- they should do nothing in between the calls + * to StartTransactionBlock() and EndTransactionBlock() and + * outside these calls they need to do normal start/commit + * processing. + * + * Furthermore, suppose the "retrieve (foo.all)" caused an abort + * condition. We would then want to abort the transaction and + * ignore all subsequent commands up to the "end". 
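Written out as calls, the four-command example above looks like the following condensed trace. This is a sketch rather than code from this file: ProcessQuery() is represented by a hypothetical stub, and the block routines named are the ones actually defined later in this file (BeginTransactionBlock/EndTransactionBlock):

#include "postgres.h"
#include "access/xact.h"

extern void ProcessQuery_stub(void);    /* hypothetical stand-in for the executor */

static void
begin_query_end_trace(void)
{
    /* 1) user types "begin" */
    StartTransactionCommand();      /* blockState is TBLOCK_DEFAULT: StartTransaction()  */
    BeginTransactionBlock();        /* enter the block: blockState -> TBLOCK_INPROGRESS  */
    CommitTransactionCommand();     /* state smart: no commit while inside the block     */

    /* 2) and 3) ordinary queries inside the block */
    StartTransactionCommand();      /* TBLOCK_INPROGRESS: nothing to do                  */
    ProcessQuery_stub();
    CommitTransactionCommand();     /* TBLOCK_INPROGRESS: CommandCounterIncrement() only */

    /* 4) user types "end" */
    StartTransactionCommand();      /* still nothing to do                               */
    EndTransactionBlock();          /* blockState -> TBLOCK_END                          */
    CommitTransactionCommand();     /* TBLOCK_END: CommitTransaction(), back to default  */
}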
+ * -cim 3/23/90 + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "access/xact.h" +#include "commands/async.h" +#include "storage/bufmgr.h" +#include "storage/block.h" +#include "storage/proc.h" +#include "utils/inval.h" +#include "utils/relcache.h" +#include "access/transam.h" +#include "catalog/heap.h" + +/* ---------------- + * global variables holding the current transaction state. + * + * Note: when we are running several slave processes, the + * current transaction state data is copied into shared memory + * and the CurrentTransactionState pointer changed to + * point to the shared copy. All this occurrs in slaves.c + * ---------------- + */ +TransactionStateData CurrentTransactionStateData = { + 0, /* transaction id */ + FirstCommandId, /* command id */ + 0x0, /* start time */ + TRANS_DEFAULT, /* transaction state */ + TBLOCK_DEFAULT /* transaction block state */ + }; + +TransactionState CurrentTransactionState = + &CurrentTransactionStateData; + +/* ---------------- + * info returned when the system is desabled + * + * Note: I have no idea what the significance of the + * 1073741823 in DisabledStartTime.. I just carried + * this over when converting things from the old + * V1 transaction system. -cim 3/18/90 + * ---------------- + */ +TransactionId DisabledTransactionId = (TransactionId)-1; + +CommandId DisabledCommandId = (CommandId) -1; + +AbsoluteTime DisabledStartTime = (AbsoluteTime) 1073741823; + +/* ---------------- + * overflow flag + * ---------------- + */ +bool CommandIdCounterOverflowFlag; + +/* ---------------- + * catalog creation transaction bootstrapping flag. + * This should be eliminated and added to the transaction + * state stuff. -cim 3/19/90 + * ---------------- + */ +bool AMI_OVERRIDE = false; + +/* ---------------------------------------------------------------- + * transaction state accessors + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * TranactionFlushEnabled() + * SetTranactionFlushEnabled() + * + * These are used to test and set the "TransactionFlushState" + * varable. If this variable is true (the default), then + * the system will flush all dirty buffers to disk at the end + * of each transaction. If false then we are assuming the + * buffer pool resides in stable main memory, in which case we + * only do writes as necessary. + * -------------------------------- + */ +static int TransactionFlushState = 1; + +int +TransactionFlushEnabled() +{ + return TransactionFlushState; +} + +void +SetTransactionFlushEnabled(bool state) +{ + TransactionFlushState = (state == true); +} + +/* -------------------------------- + * IsTransactionState + * + * This returns true if we are currently running a query + * within an executing transaction. + * -------------------------------- + */ +bool +IsTransactionState() +{ + TransactionState s = CurrentTransactionState; + + switch (s->state) { + case TRANS_DEFAULT: return false; + case TRANS_START: return true; + case TRANS_INPROGRESS: return true; + case TRANS_COMMIT: return true; + case TRANS_ABORT: return true; + case TRANS_DISABLED: return false; + } + /* + * Shouldn't get here, but lint is not happy with this... + */ + return(false); +} + +/* -------------------------------- + * IsAbortedTransactionBlockState + * + * This returns true if we are currently running a query + * within an aborted transaction block. 
+ * -------------------------------- + */ +bool +IsAbortedTransactionBlockState() +{ + TransactionState s = CurrentTransactionState; + + if (s->blockState == TBLOCK_ABORT) + return true; + + return false; +} + +/* -------------------------------- + * OverrideTransactionSystem + * + * This is used to temporarily disable the transaction + * processing system in order to do initialization of + * the transaction system data structures and relations + * themselves. + * -------------------------------- + */ +int SavedTransactionState; + +void +OverrideTransactionSystem(bool flag) +{ + TransactionState s = CurrentTransactionState; + + if (flag == true) { + if (s->state == TRANS_DISABLED) + return; + + SavedTransactionState = s->state; + s->state = TRANS_DISABLED; + } else { + if (s->state != TRANS_DISABLED) + return; + + s->state = SavedTransactionState; + } +} + +/* -------------------------------- + * GetCurrentTransactionId + * + * This returns the id of the current transaction, or + * the id of the "disabled" transaction. + * -------------------------------- + */ +TransactionId +GetCurrentTransactionId() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * if the transaction system is disabled, we return + * the special "disabled" transaction id. + * ---------------- + */ + if (s->state == TRANS_DISABLED) + return (TransactionId) DisabledTransactionId; + + /* ---------------- + * otherwise return the current transaction id. + * ---------------- + */ + return (TransactionId) s->transactionIdData; +} + + +/* -------------------------------- + * GetCurrentCommandId + * -------------------------------- + */ +CommandId +GetCurrentCommandId() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * if the transaction system is disabled, we return + * the special "disabled" command id. + * ---------------- + */ + if (s->state == TRANS_DISABLED) + return (CommandId) DisabledCommandId; + + return s->commandId; +} + + +/* -------------------------------- + * GetCurrentTransactionStartTime + * -------------------------------- + */ +AbsoluteTime +GetCurrentTransactionStartTime() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * if the transaction system is disabled, we return + * the special "disabled" starting time. + * ---------------- + */ + if (s->state == TRANS_DISABLED) + return (AbsoluteTime) DisabledStartTime; + + return s->startTime; +} + + +/* -------------------------------- + * TransactionIdIsCurrentTransactionId + * -------------------------------- + */ +bool +TransactionIdIsCurrentTransactionId(TransactionId xid) +{ + TransactionState s = CurrentTransactionState; + + if (AMI_OVERRIDE) + return false; + + return (bool) + TransactionIdEquals(xid, s->transactionIdData); +} + + +/* -------------------------------- + * CommandIdIsCurrentCommandId + * -------------------------------- + */ +bool +CommandIdIsCurrentCommandId(CommandId cid) +{ + TransactionState s = CurrentTransactionState; + + if (AMI_OVERRIDE) + return false; + + return + (cid == s->commandId) ? 
true : false; +} + + +/* -------------------------------- + * ClearCommandIdCounterOverflowFlag + * -------------------------------- + */ +void +ClearCommandIdCounterOverflowFlag() +{ + CommandIdCounterOverflowFlag = false; +} + + +/* -------------------------------- + * CommandCounterIncrement + * -------------------------------- + */ +void +CommandCounterIncrement() +{ + CurrentTransactionStateData.commandId += 1; + if (CurrentTransactionStateData.commandId == FirstCommandId) { + CommandIdCounterOverflowFlag = true; + elog(WARN, "You may only have 65535 commands per transaction"); + } + + /* make cache changes visible to me */ + AtCommit_Cache(); + AtStart_Cache(); +} + +/* ---------------------------------------------------------------- + * initialization stuff + * ---------------------------------------------------------------- + */ +void +InitializeTransactionSystem() +{ + InitializeTransactionLog(); +} + +/* ---------------------------------------------------------------- + * StartTransaction stuff + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * AtStart_Cache + * -------------------------------- + */ +void +AtStart_Cache() +{ + DiscardInvalid(); +} + +/* -------------------------------- + * AtStart_Locks + * -------------------------------- + */ +void +AtStart_Locks() +{ + /* + * at present, it is unknown to me what belongs here -cim 3/18/90 + * + * There isn't anything to do at the start of a xact for locks. + * -mer 5/24/92 + */ +} + +/* -------------------------------- + * AtStart_Memory + * -------------------------------- + */ +void +AtStart_Memory() +{ + Portal portal; + MemoryContext portalContext; + + /* ---------------- + * get the blank portal and its memory context + * ---------------- + */ + portal = GetPortalByName(NULL); + portalContext = (MemoryContext) PortalGetHeapMemory(portal); + + /* ---------------- + * tell system to allocate in the blank portal context + * ---------------- + */ + (void) MemoryContextSwitchTo(portalContext); + StartPortalAllocMode(DefaultAllocMode, 0); +} + + +/* ---------------------------------------------------------------- + * CommitTransaction stuff + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * RecordTransactionCommit + * + * Note: the two calls to BufferManagerFlush() exist to ensure + * that data pages are written before log pages. These + * explicit calls should be replaced by a more efficient + * ordered page write scheme in the buffer manager + * -cim 3/18/90 + * -------------------------------- + */ +void +RecordTransactionCommit() +{ + TransactionId xid; + int leak; + + /* ---------------- + * get the current transaction id + * ---------------- + */ + xid = GetCurrentTransactionId(); + + /* ---------------- + * flush the buffer manager pages. Note: if we have stable + * main memory, dirty shared buffers are not flushed + * plai 8/7/90 + * ---------------- + */ + leak = BufferPoolCheckLeak(); + FlushBufferPool(!TransactionFlushEnabled()); + if (leak) ResetBufferPool(); + + /* ---------------- + * have the transaction access methods record the status + * of this transaction id in the pg_log / pg_time relations. + * ---------------- + */ + TransactionIdCommit(xid); + + /* ---------------- + * Now write the log/time info to the disk too. 
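Within one transaction, later commands only see the effects of earlier ones after the command counter has been advanced, which is what CommandCounterIncrement() above provides. A minimal sketch of the resulting pattern, with the two update steps as hypothetical placeholders:

#include "postgres.h"
#include "access/xact.h"

extern void add_catalog_rows(void);      /* hypothetical first step  */
extern void scan_new_rows(void);         /* hypothetical second step */

static void
two_step_sketch(void)
{
    add_catalog_rows();

    /* make the first step's changes visible before the second step runs;
     * CommitTransactionCommand() does the same thing between commands
     * inside a transaction block. */
    CommandCounterIncrement();

    scan_new_rows();
}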
+ * ---------------- + */ + leak = BufferPoolCheckLeak(); + FlushBufferPool(!TransactionFlushEnabled()); + if (leak) ResetBufferPool(); +} + + +/* -------------------------------- + * AtCommit_Cache + * -------------------------------- + */ +void +AtCommit_Cache() +{ + /* ---------------- + * Make catalog changes visible to me for the next command. + * Other backends will not process my invalidation messages until + * after I commit and free my locks--though they will do + * unnecessary work if I abort. + * ---------------- + */ + RegisterInvalid(true); +} + +/* -------------------------------- + * AtCommit_Locks + * -------------------------------- + */ +void +AtCommit_Locks() +{ + /* ---------------- + * XXX What if ProcReleaseLocks fails? (race condition?) + * + * Then you're up a creek! -mer 5/24/92 + * ---------------- + */ + ProcReleaseLocks(); +} + +/* -------------------------------- + * AtCommit_Memory + * -------------------------------- + */ +void +AtCommit_Memory() +{ + /* ---------------- + * now that we're "out" of a transaction, have the + * system allocate things in the top memory context instead + * of the blank portal memory context. + * ---------------- + */ + EndPortalAllocMode(); + (void) MemoryContextSwitchTo(TopMemoryContext); +} + +/* ---------------------------------------------------------------- + * AbortTransaction stuff + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * RecordTransactionAbort + * -------------------------------- + */ +void +RecordTransactionAbort() +{ + TransactionId xid; + + /* ---------------- + * get the current transaction id + * ---------------- + */ + xid = GetCurrentTransactionId(); + + /* ---------------- + * have the transaction access methods record the status + * of this transaction id in the pg_log / pg_time relations. + * ---------------- + */ + TransactionIdAbort(xid); + + /* ---------------- + * flush the buffer manager pages. Note: if we have stable + * main memory, dirty shared buffers are not flushed + * plai 8/7/90 + * ---------------- + */ + ResetBufferPool(); +} + +/* -------------------------------- + * AtAbort_Cache + * -------------------------------- + */ +void +AtAbort_Cache() +{ + RegisterInvalid(false); +} + +/* -------------------------------- + * AtAbort_Locks + * -------------------------------- + */ +void +AtAbort_Locks() +{ + /* ---------------- + * XXX What if ProcReleaseLocks() fails? (race condition?) + * + * Then you're up a creek without a paddle! -mer + * ---------------- + */ + ProcReleaseLocks(); +} + + +/* -------------------------------- + * AtAbort_Memory + * -------------------------------- + */ +void +AtAbort_Memory() +{ + /* ---------------- + * after doing an abort transaction, make certain the + * system uses the top memory context rather then the + * portal memory context (until the next transaction). + * ---------------- + */ + (void) MemoryContextSwitchTo(TopMemoryContext); +} + +/* ---------------------------------------------------------------- + * interface routines + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * StartTransaction + * + * -------------------------------- + */ +void +StartTransaction() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * Check the current transaction state. If the transaction system + * is switched off, or if we're already in a transaction, do nothing. 
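The AtStart_Memory()/AtCommit_Memory() pair works by switching the current allocation context between the blank portal and TopMemoryContext. Code that needs an allocation to outlive the transaction-local context uses the same switch-and-restore idiom; a sketch, assuming MemoryContextSwitchTo() returns the previously current context, as its (void)-casted uses in this file suggest:

#include "postgres.h"
#include "utils/mcxt.h"
#include "utils/palloc.h"

static char *
alloc_in_top_context(void)
{
    MemoryContext old;
    char *p;

    old = MemoryContextSwitchTo(TopMemoryContext);
    p = (char *) palloc(128);           /* not reclaimed with the portal context */
    MemoryContextSwitchTo(old);         /* restore the caller's context          */
    return p;
}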
+ * We're already in a transaction when the monitor sends a null + * command to the backend to flush the comm channel. This is a + * hacky fix to a communications problem, and we keep having to + * deal with it here. We should fix the comm channel code. mao 080891 + * ---------------- + */ + if (s->state == TRANS_DISABLED || s->state == TRANS_INPROGRESS) + return; + + /* ---------------- + * set the current transaction state information + * appropriately during start processing + * ---------------- + */ + s->state = TRANS_START; + + /* ---------------- + * generate a new transaction id + * ---------------- + */ + GetNewTransactionId(&(s->transactionIdData)); + + /* ---------------- + * initialize current transaction state fields + * ---------------- + */ + s->commandId = FirstCommandId; + s->startTime = GetCurrentAbsoluteTime(); + + /* ---------------- + * initialize the various transaction subsystems + * ---------------- + */ + AtStart_Cache(); + AtStart_Locks(); + AtStart_Memory(); + + /* -------------- + initialize temporary relations list + the tempRelList is a list of temporary relations that + are created in the course of the transactions + they need to be destroyed properly at the end of the transactions + */ + InitTempRelList(); + + /* ---------------- + * done with start processing, set current transaction + * state to "in progress" + * ---------------- + */ + s->state = TRANS_INPROGRESS; +} + +/* --------------- + * Tell me if we are currently in progress + * --------------- + */ +bool +CurrentXactInProgress() +{ + return (CurrentTransactionState->state == TRANS_INPROGRESS); +} + +/* -------------------------------- + * CommitTransaction + * + * -------------------------------- + */ +void +CommitTransaction() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * check the current transaction state + * ---------------- + */ + if (s->state == TRANS_DISABLED) + return; + + if (s->state != TRANS_INPROGRESS) + elog(NOTICE, "CommitTransaction and not in in-progress state "); + + /* ---------------- + * set the current transaction state information + * appropriately during the abort processing + * ---------------- + */ + s->state = TRANS_COMMIT; + + /* ---------------- + * do commit processing + * ---------------- + */ + DestroyTempRels(); + AtEOXact_portals(); + RecordTransactionCommit(); + RelationPurgeLocalRelation(true); + AtCommit_Cache(); + AtCommit_Locks(); + AtCommit_Memory(); + + /* ---------------- + * done with commit processing, set current transaction + * state back to default + * ---------------- + */ + s->state = TRANS_DEFAULT; + { /* want this after commit */ + if (IsNormalProcessingMode()) + Async_NotifyAtCommit(); + } +} + +/* -------------------------------- + * AbortTransaction + * + * -------------------------------- + */ +void +AbortTransaction() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * check the current transaction state + * ---------------- + */ + if (s->state == TRANS_DISABLED) + return; + + if (s->state != TRANS_INPROGRESS) + elog(NOTICE, "AbortTransaction and not in in-progress state "); + + /* ---------------- + * set the current transaction state information + * appropriately during the abort processing + * ---------------- + */ + s->state = TRANS_ABORT; + + /* ---------------- + * do abort processing + * ---------------- + */ + AtEOXact_portals(); + RecordTransactionAbort(); + RelationPurgeLocalRelation(false); + DestroyTempRels(); + AtAbort_Cache(); + AtAbort_Locks(); + AtAbort_Memory(); + + /* 
---------------- + * done with abort processing, set current transaction + * state back to default + * ---------------- + */ + s->state = TRANS_DEFAULT; + { + /* We need to do this in case another process notified us while + we are in the middle of an aborted transaction. We need to + notify our frontend after we finish the current transaction. + -- jw, 1/3/94 + */ + if (IsNormalProcessingMode()) + Async_NotifyAtAbort(); + } +} + +/* -------------------------------- + * StartTransactionCommand + * -------------------------------- + */ +void +StartTransactionCommand() +{ + TransactionState s = CurrentTransactionState; + + switch(s->blockState) { + /* ---------------- + * if we aren't in a transaction block, we + * just do our usual start transaction. + * ---------------- + */ + case TBLOCK_DEFAULT: + StartTransaction(); + break; + + /* ---------------- + * We should never experience this -- if we do it + * means the BEGIN state was not changed in the previous + * CommitTransactionCommand(). If we get it, we print + * a warning and change to the in-progress state. + * ---------------- + */ + case TBLOCK_BEGIN: + elog(NOTICE, "StartTransactionCommand: unexpected TBLOCK_BEGIN"); + s->blockState = TBLOCK_INPROGRESS; + break; + + /* ---------------- + * This is the case when are somewhere in a transaction + * block and about to start a new command. For now we + * do nothing but someday we may do command-local resource + * initialization. + * ---------------- + */ + case TBLOCK_INPROGRESS: + break; + + /* ---------------- + * As with BEGIN, we should never experience this -- + * if we do it means the END state was not changed in the + * previous CommitTransactionCommand(). If we get it, we + * print a warning, commit the transaction, start a new + * transaction and change to the default state. + * ---------------- + */ + case TBLOCK_END: + elog(NOTICE, "StartTransactionCommand: unexpected TBLOCK_END"); + s->blockState = TBLOCK_DEFAULT; + CommitTransaction(); + StartTransaction(); + break; + + /* ---------------- + * Here we are in the middle of a transaction block but + * one of the commands caused an abort so we do nothing + * but remain in the abort state. Eventually we will get + * to the "END TRANSACTION" which will set things straight. + * ---------------- + */ + case TBLOCK_ABORT: + break; + + /* ---------------- + * This means we somehow aborted and the last call to + * CommitTransactionCommand() didn't clear the state so + * we remain in the ENDABORT state and mabey next time + * we get to CommitTransactionCommand() the state will + * get reset to default. + * ---------------- + */ + case TBLOCK_ENDABORT: + elog(NOTICE, "StartTransactionCommand: unexpected TBLOCK_ENDABORT"); + break; + } +} +/* -------------------------------- + * CommitTransactionCommand + * -------------------------------- + */ +void +CommitTransactionCommand() +{ + TransactionState s = CurrentTransactionState; + + switch(s->blockState) { + /* ---------------- + * if we aren't in a transaction block, we + * just do our usual transaction commit + * ---------------- + */ + case TBLOCK_DEFAULT: + CommitTransaction(); + break; + + /* ---------------- + * This is the case right after we get a "BEGIN TRANSACTION" + * command, but the user hasn't done anything else yet, so + * we change to the "transaction block in progress" state + * and return. 
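Taken together, the state-smart routines let the outer loop stay ignorant of transaction blocks: it brackets every command the same way and lets the blockState switch decide what actually happens. A minimal sketch of such a loop; process_one_command() is a hypothetical stand-in for the real ProcessQuery()/ProcessUtility() dispatch (the real loop signals errors through elog rather than a return value):

#include "postgres.h"
#include "access/xact.h"

extern bool process_one_command(char *cmd);   /* hypothetical dispatcher */

static void
command_loop_sketch(char **commands, int ncommands)
{
    int i;

    for (i = 0; i < ncommands; i++) {
        StartTransactionCommand();       /* start a transaction, or do nothing in a block */
        if (process_one_command(commands[i]))
            CommitTransactionCommand();  /* commit, or just advance the command counter   */
        else
            AbortCurrentTransaction();   /* abort; inside a block this sets TBLOCK_ABORT  */
    }
}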
+ * ---------------- + */ + case TBLOCK_BEGIN: + s->blockState = TBLOCK_INPROGRESS; + break; + + /* ---------------- + * This is the case when we have finished executing a command + * someplace within a transaction block. We increment the + * command counter and return. Someday we may free resources + * local to the command. + * ---------------- + */ + case TBLOCK_INPROGRESS: + CommandCounterIncrement(); + break; + + /* ---------------- + * This is the case when we just got the "END TRANSACTION" + * statement, so we go back to the default state and + * commit the transaction. + * ---------------- + */ + case TBLOCK_END: + s->blockState = TBLOCK_DEFAULT; + CommitTransaction(); + break; + + /* ---------------- + * Here we are in the middle of a transaction block but + * one of the commands caused an abort so we do nothing + * but remain in the abort state. Eventually we will get + * to the "END TRANSACTION" which will set things straight. + * ---------------- + */ + case TBLOCK_ABORT: + break; + + /* ---------------- + * Here we were in an aborted transaction block which + * just processed the "END TRANSACTION" command from the + * user, so now we return the to default state. + * ---------------- + */ + case TBLOCK_ENDABORT: + s->blockState = TBLOCK_DEFAULT; + break; + } +} + +/* -------------------------------- + * AbortCurrentTransaction + * -------------------------------- + */ +void +AbortCurrentTransaction() +{ + TransactionState s = CurrentTransactionState; + + switch(s->blockState) { + /* ---------------- + * if we aren't in a transaction block, we + * just do our usual abort transaction. + * ---------------- + */ + case TBLOCK_DEFAULT: + AbortTransaction(); + break; + + /* ---------------- + * If we are in the TBLOCK_BEGIN it means something + * screwed up right after reading "BEGIN TRANSACTION" + * so we enter the abort state. Eventually an "END + * TRANSACTION" will fix things. + * ---------------- + */ + case TBLOCK_BEGIN: + s->blockState = TBLOCK_ABORT; + AbortTransaction(); + break; + + /* ---------------- + * This is the case when are somewhere in a transaction + * block which aborted so we abort the transaction and + * set the ABORT state. Eventually an "END TRANSACTION" + * will fix things and restore us to a normal state. + * ---------------- + */ + case TBLOCK_INPROGRESS: + s->blockState = TBLOCK_ABORT; + AbortTransaction(); + break; + + /* ---------------- + * Here, the system was fouled up just after the + * user wanted to end the transaction block so we + * abort the transaction and put us back into the + * default state. + * ---------------- + */ + case TBLOCK_END: + s->blockState = TBLOCK_DEFAULT; + AbortTransaction(); + break; + + /* ---------------- + * Here, we are already in an aborted transaction + * state and are waiting for an "END TRANSACTION" to + * come along and lo and behold, we abort again! + * So we just remain in the abort state. + * ---------------- + */ + case TBLOCK_ABORT: + break; + + /* ---------------- + * Here we were in an aborted transaction block which + * just processed the "END TRANSACTION" command but somehow + * aborted again.. since we must have done the abort + * processing, we return to the default state. 
+ * ---------------- + */ + case TBLOCK_ENDABORT: + s->blockState = TBLOCK_DEFAULT; + break; + } +} + +/* ---------------------------------------------------------------- + * transaction block support + * ---------------------------------------------------------------- + */ +/* -------------------------------- + * BeginTransactionBlock + * -------------------------------- + */ +void +BeginTransactionBlock() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * check the current transaction state + * ---------------- + */ + if (s->state == TRANS_DISABLED) + return; + + if (s->blockState != TBLOCK_DEFAULT) + elog(NOTICE, "BeginTransactionBlock and not in default state "); + + /* ---------------- + * set the current transaction block state information + * appropriately during begin processing + * ---------------- + */ + s->blockState = TBLOCK_BEGIN; + + /* ---------------- + * do begin processing + * ---------------- + */ + + /* ---------------- + * done with begin processing, set block state to inprogress + * ---------------- + */ + s->blockState = TBLOCK_INPROGRESS; +} + +/* -------------------------------- + * EndTransactionBlock + * -------------------------------- + */ +void +EndTransactionBlock() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * check the current transaction state + * ---------------- + */ + if (s->state == TRANS_DISABLED) + return; + + if (s->blockState == TBLOCK_INPROGRESS) { + /* ---------------- + * here we are in a transaction block which should commit + * when we get to the upcoming CommitTransactionCommand() + * so we set the state to "END". CommitTransactionCommand() + * will recognize this and commit the transaction and return + * us to the default state + * ---------------- + */ + s->blockState = TBLOCK_END; + return; + } + + if (s->blockState == TBLOCK_ABORT) { + /* ---------------- + * here, we are in a transaction block which aborted + * and since the AbortTransaction() was already done, + * we do whatever is needed and change to the special + * "END ABORT" state. The upcoming CommitTransactionCommand() + * will recognise this and then put us back in the default + * state. + * ---------------- + */ + s->blockState = TBLOCK_ENDABORT; + return; + } + + /* ---------------- + * We should not get here, but if we do, we go to the ENDABORT + * state after printing a warning. The upcoming call to + * CommitTransactionCommand() will then put us back into the + * default state. + * ---------------- + */ + elog(NOTICE, "EndTransactionBlock and not inprogress/abort state "); + s->blockState = TBLOCK_ENDABORT; +} + +/* -------------------------------- + * AbortTransactionBlock + * -------------------------------- + */ +void +AbortTransactionBlock() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * check the current transaction state + * ---------------- + */ + if (s->state == TRANS_DISABLED) + return; + + if (s->blockState == TBLOCK_INPROGRESS) { + /* ---------------- + * here we were inside a transaction block something + * screwed up inside the system so we enter the abort state, + * do the abort processing and then return. + * We remain in the abort state until we see the upcoming + * END TRANSACTION command. 
+ * ---------------- + */ + s->blockState = TBLOCK_ABORT; + + /* ---------------- + * do abort processing and return + * ---------------- + */ + AbortTransaction(); + return; + } + + /* ---------------- + * this case should not be possible, because it would mean + * the user entered an "abort" from outside a transaction block. + * So we print an error message, abort the transaction and + * enter the "ENDABORT" state so we will end up in the default + * state after the upcoming CommitTransactionCommand(). + * ---------------- + */ + elog(NOTICE, "AbortTransactionBlock and not inprogress state"); + AbortTransaction(); + s->blockState = TBLOCK_ENDABORT; +} + +/* -------------------------------- + * UserAbortTransactionBlock + * -------------------------------- + */ +void +UserAbortTransactionBlock() +{ + TransactionState s = CurrentTransactionState; + + /* ---------------- + * check the current transaction state + * ---------------- + */ + if (s->state == TRANS_DISABLED) + return; + + if (s->blockState == TBLOCK_INPROGRESS) { + /* ---------------- + * here we were inside a transaction block and we + * got an abort command from the user, so we move to + * the abort state, do the abort processing and + * then change to the ENDABORT state so we will end up + * in the default state after the upcoming + * CommitTransactionCommand(). + * ---------------- + */ + s->blockState = TBLOCK_ABORT; + + /* ---------------- + * do abort processing + * ---------------- + */ + AbortTransaction(); + + /* ---------------- + * change to the end abort state and return + * ---------------- + */ + s->blockState = TBLOCK_ENDABORT; + return; + } + + /* ---------------- + * this case should not be possible, because it would mean + * the user entered an "abort" from outside a transaction block. + * So we print an error message, abort the transaction and + * enter the "ENDABORT" state so we will end up in the default + * state after the upcoming CommitTransactionCommand(). + * ---------------- + */ + elog(NOTICE, "UserAbortTransactionBlock and not inprogress state"); + AbortTransaction(); + s->blockState = TBLOCK_ENDABORT; +} + +bool +IsTransactionBlock() +{ + TransactionState s = CurrentTransactionState; + + if (s->blockState == TBLOCK_INPROGRESS + || s->blockState == TBLOCK_ENDABORT) { + return (true); + } + + return (false); +} diff --git a/src/backend/access/transam/xid.c b/src/backend/access/transam/xid.c new file mode 100644 index 00000000000..faeeb623d58 --- /dev/null +++ b/src/backend/access/transam/xid.c @@ -0,0 +1,156 @@ +/*------------------------------------------------------------------------- + * + * xid.c-- + * POSTGRES transaction identifier code. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/access/transam/Attic/xid.c,v 1.1.1.1 1996/07/09 06:21:14 scrappy Exp $ + * + * OLD COMMENTS + * XXX WARNING + * Much of this file will change when we change our representation + * of transaction ids -cim 3/23/90 + * + * It is time to make the switch from 5 byte to 4 byte transaction ids + * This file was totally reworked. 
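Because transaction ids are plain 4-byte integers in this scheme (see xid.c just below), the helper routines compose directly. A small sketch of typical calling code, with the literal values chosen only for illustration:

#include "postgres.h"
#include "access/xact.h"
#include "utils/elog.h"

extern char *xidout(TransactionId transactionId);   /* defined in xid.c below */

static void
xid_helpers_sketch(void)
{
    TransactionId a, b;

    TransactionIdStore((TransactionId) 512, &a);    /* a = 512 */
    TransactionIdStore(a, &b);                      /* b = 512 */
    TransactionIdAdd(&b, 2);                        /* b = 514 */

    if (!TransactionIdEquals(a, b) && TransactionIdIsLessThan(a, b))
        elog(NOTICE, "xid %s precedes xid %s", xidout(a), xidout(b));
}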
-mer 5/22/92 + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" +#include "utils/palloc.h" +#include "utils/elog.h" +#include "utils/memutils.h" +#include "utils/nabstime.h" + +extern TransactionId NullTransactionId; +extern TransactionId DisabledTransactionId; +extern TransactionId AmiTransactionId; +extern TransactionId FirstTransactionId; + +/* ---------------------------------------------------------------- + * TransactionIdIsValid + * + * Macro-ize me. + * ---------------------------------------------------------------- + */ +bool +TransactionIdIsValid(TransactionId transactionId) +{ + return ((bool) (transactionId != NullTransactionId) ); +} + +/* XXX char16 name for catalogs */ +TransactionId +xidin(char *representation) +{ + return (atol(representation)); +} + +/* XXX char16 name for catalogs */ +char* +xidout(TransactionId transactionId) +{ +/* return(TransactionIdFormString(transactionId)); */ + char *representation; + + /* maximum 32 bit unsigned integer representation takes 10 chars */ + representation = palloc(11); + + (void)sprintf(representation, "%u", transactionId); + + return (representation); + +} + +/* ---------------------------------------------------------------- + * StoreInvalidTransactionId + * + * Maybe do away with Pointer types in these routines. + * Macro-ize this one. + * ---------------------------------------------------------------- + */ +void +StoreInvalidTransactionId(TransactionId *destination) +{ + *destination = NullTransactionId; +} + +/* ---------------------------------------------------------------- + * TransactionIdStore + * + * Macro-ize this one. + * ---------------------------------------------------------------- + */ +void +TransactionIdStore(TransactionId transactionId, + TransactionId *destination) +{ + *destination = transactionId; +} + +/* ---------------------------------------------------------------- + * TransactionIdEquals + * ---------------------------------------------------------------- + */ +bool +TransactionIdEquals(TransactionId id1, TransactionId id2) +{ + return ((bool) (id1 == id2)); +} + +/* ---------------------------------------------------------------- + * TransactionIdIsLessThan + * ---------------------------------------------------------------- + */ +bool +TransactionIdIsLessThan(TransactionId id1, TransactionId id2) +{ + return ((bool)(id1 < id2)); +} + +/* ---------------------------------------------------------------- + * xideq + * ---------------------------------------------------------------- + */ + +/* + * xideq - returns 1, iff xid1 == xid2 + * 0 else; + */ +bool +xideq(TransactionId xid1, TransactionId xid2) +{ + return( (bool) (xid1 == xid2) ); +} + + + +/* ---------------------------------------------------------------- + * TransactionIdIncrement + * ---------------------------------------------------------------- + */ +void +TransactionIdIncrement(TransactionId *transactionId) +{ + + (*transactionId)++; + if (*transactionId == DisabledTransactionId) + elog(FATAL, "TransactionIdIncrement: exhausted XID's"); + return; +} + +/* ---------------------------------------------------------------- + * TransactionIdAdd + * ---------------------------------------------------------------- + */ +void +TransactionIdAdd(TransactionId *xid, int value) +{ + *xid += value; + return; +} + diff --git a/src/backend/access/tupdesc.h b/src/backend/access/tupdesc.h new file mode 100644 index 00000000000..a26bbc704da --- /dev/null +++ 
b/src/backend/access/tupdesc.h @@ -0,0 +1,53 @@ +/*------------------------------------------------------------------------- + * + * tupdesc.h-- + * POSTGRES tuple descriptor definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: tupdesc.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef TUPDESC_H +#define TUPDESC_H + +#include "postgres.h" +#include "access/attnum.h" +#include "nodes/pg_list.h" /* for List */ +#include "catalog/pg_attribute.h" + +/* + * a TupleDesc is an array of AttributeTupleForms, each of which is a + * pointer to a AttributeTupleForm + */ +/* typedef AttributeTupleForm *TupleDesc; */ + +/* a TupleDesc is a pointer to a structure which includes an array of */ +/* AttributeTupleForms, i.e. pg_attribute information, and the size of */ +/* the array, i.e. the number of attributes */ +/* in short, a TupleDesc completely captures the attribute information */ +/* for a tuple */ + +typedef struct tupleDesc { + int natts; + AttributeTupleForm *attrs; +} *TupleDesc; + +extern TupleDesc CreateTemplateTupleDesc(int natts); + +extern TupleDesc CreateTupleDesc(int natts, AttributeTupleForm *attrs); + +extern TupleDesc CreateTupleDescCopy(TupleDesc tupdesc); + +extern bool TupleDescInitEntry(TupleDesc desc, + AttrNumber attributeNumber, + char *attributeName, + char *typeName, + int attdim, + bool attisset); + +extern TupleDesc BuildDescForRelation(List *schema, char *relname); + +#endif /* TUPDESC_H */ diff --git a/src/backend/access/tupmacs.h b/src/backend/access/tupmacs.h new file mode 100644 index 00000000000..9a9bcce3b41 --- /dev/null +++ b/src/backend/access/tupmacs.h @@ -0,0 +1,43 @@ +/*------------------------------------------------------------------------- + * + * tupmacs.h-- + * Tuple macros used by both index tuples and heap tuples. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: tupmacs.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef TUPMACS_H +#define TUPMACS_H + +/* + * check to see if the ATT'th bit of an array of 8-bit bytes is set. + */ +#define att_isnull(ATT, BITS) (!((BITS)[(ATT) >> 3] & (1 << ((ATT) & 0x07)))) + +/* + * given a AttributeTupleForm and a pointer into a tuple's data + * area, return the correct value or pointer. + * + * note that T must already be properly LONGALIGN/SHORTALIGN'd for + * this to work correctly. + * + * the double-cast is to stop gcc from (correctly) complaining about + * casting integer types with size < sizeof(char *) to (char *). + * sign-extension may get weird if you use an integer type that + * isn't the same size as (char *) for the first cast. (on the other + * hand, it's safe to use another type for the (foo *)(T).) + */ +#define fetchatt(A, T) \ + ((*(A))->attbyval \ + ? ((*(A))->attlen > sizeof(int16) \ + ? (char *) (long) *((int32 *)(T)) \ + : ((*(A))->attlen < sizeof(int16) \ + ? (char *) (long) *((char *)(T)) \ + : (char *) (long) *((int16 *)(T)))) \ + : (char *) (T)) + +#endif diff --git a/src/backend/access/valid.h b/src/backend/access/valid.h new file mode 100644 index 00000000000..1c5cf8cdeb3 --- /dev/null +++ b/src/backend/access/valid.h @@ -0,0 +1,37 @@ +/*------------------------------------------------------------------------- + * + * valid.h-- + * POSTGRES tuple qualification validity definitions. 
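One common way to build a descriptor with the declarations above is to allocate a template of the right width and then fill in one entry per attribute. A minimal sketch, assuming the named types can be looked up by TupleDescInitEntry(); the attribute names and types are illustrative:

#include "postgres.h"
#include "access/tupdesc.h"

static TupleDesc
make_person_tupdesc(void)
{
    TupleDesc desc;

    desc = CreateTemplateTupleDesc(2);                        /* two attributes       */
    TupleDescInitEntry(desc, (AttrNumber) 1, "name", "name",  /* attnums are 1-based  */
                       0, false);
    TupleDescInitEntry(desc, (AttrNumber) 2, "age", "int4",
                       0, false);
    return desc;
}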
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: valid.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef VALID_H +#define VALID_H + +#include "c.h" +#include "access/skey.h" +#include "storage/buf.h" +#include "utils/tqual.h" +#include "access/tupdesc.h" +#include "utils/rel.h" +#include "storage/bufpage.h" + +/* ---------------- + * extern decl's + * ---------------- + */ + +extern bool heap_keytest(HeapTuple t, TupleDesc tupdesc, + int nkeys, ScanKey keys); + +extern HeapTuple heap_tuple_satisfies(ItemId itemId, Relation relation, + PageHeader disk_page, TimeQual qual, int nKeys, ScanKey key); + +extern bool TupleUpdatedByCurXactAndCmd(HeapTuple t); + +#endif /* VALID_H */ diff --git a/src/backend/access/xact.h b/src/backend/access/xact.h new file mode 100644 index 00000000000..15f376ec5ed --- /dev/null +++ b/src/backend/access/xact.h @@ -0,0 +1,115 @@ +/*------------------------------------------------------------------------- + * + * xact.h-- + * postgres transaction system header + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: xact.h,v 1.1.1.1 1996/07/09 06:21:09 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef XACT_H +#define XACT_H + +#include + +#include "storage/ipc.h" +#include "miscadmin.h" +#include "utils/portal.h" +#include "utils/elog.h" +#include "utils/mcxt.h" +#include "utils/nabstime.h" + +/* ---------------- + * transaction state structure + * ---------------- + */ +typedef struct TransactionStateData { + TransactionId transactionIdData; + CommandId commandId; + AbsoluteTime startTime; + int state; + int blockState; +} TransactionStateData; + +/* ---------------- + * transaction states + * ---------------- + */ +#define TRANS_DEFAULT 0 +#define TRANS_START 1 +#define TRANS_INPROGRESS 2 +#define TRANS_COMMIT 3 +#define TRANS_ABORT 4 +#define TRANS_DISABLED 5 + +/* ---------------- + * transaction block states + * ---------------- + */ +#define TBLOCK_DEFAULT 0 +#define TBLOCK_BEGIN 1 +#define TBLOCK_INPROGRESS 2 +#define TBLOCK_END 3 +#define TBLOCK_ABORT 4 +#define TBLOCK_ENDABORT 5 + +typedef TransactionStateData *TransactionState; + +/* ---------------- + * extern definitions + * ---------------- + */ +extern int TransactionFlushEnabled(); +extern void SetTransactionFlushEnabled(bool state); + +extern bool IsTransactionState(void); +extern bool IsAbortedTransactionBlockState(void); +extern void OverrideTransactionSystem(bool flag); +extern TransactionId GetCurrentTransactionId(void); +extern CommandId GetCurrentCommandId(void); +extern AbsoluteTime GetCurrentTransactionStartTime(void); +extern bool TransactionIdIsCurrentTransactionId(TransactionId xid); +extern bool CommandIdIsCurrentCommandId(CommandId cid); +extern void ClearCommandIdCounterOverflowFlag(void); +extern void CommandCounterIncrement(void); +extern void InitializeTransactionSystem(void); +extern void AtStart_Cache(void); +extern void AtStart_Locks(void); +extern void AtStart_Memory(void); +extern void RecordTransactionCommit(void); +extern void AtCommit_Cache(void); +extern void AtCommit_Locks(void); +extern void AtCommit_Memory(void); +extern void RecordTransactionAbort(void); +extern void AtAbort_Cache(void); +extern void AtAbort_Locks(void); +extern void AtAbort_Memory(void); +extern void StartTransaction(void); +extern bool CurrentXactInProgress(void); +extern void 
CommitTransaction(void); +extern void AbortTransaction(void); +extern void StartTransactionCommand(void); +extern void CommitTransactionCommand(void); +extern void AbortCurrentTransaction(void); +extern void BeginTransactionBlock(void); +extern void EndTransactionBlock(void); +extern void AbortTransactionBlock(void); +extern bool IsTransactionBlock(); +extern void UserAbortTransactionBlock(); + +extern TransactionId DisabledTransactionId; + +/* defined in xid.c */ +extern bool TransactionIdIsValid(TransactionId transactionId); +extern void StoreInvalidTransactionId(TransactionId *destination); +extern void TransactionIdStore(TransactionId transactionId, + TransactionId *destination); +extern bool TransactionIdEquals(TransactionId id1, TransactionId id2); +extern bool TransactionIdIsLessThan(TransactionId id1, TransactionId id2); +extern void TransactionIdIncrement(TransactionId *transactionId); +extern void TransactionIdAdd(TransactionId *xid, int value); + +#endif /* XACT_H */ diff --git a/src/backend/bootstrap/Makefile.inc b/src/backend/bootstrap/Makefile.inc new file mode 100644 index 00000000000..72871343e83 --- /dev/null +++ b/src/backend/bootstrap/Makefile.inc @@ -0,0 +1,63 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the bootstrap module +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/bootstrap/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:14 scrappy Exp $ +# +# +# Another kinda weird Makefile.inc cause we need two +# scanner/parsers in the backend and most yaccs and lexs +# don't have the prefix option. +# +# sed files are HACK CITY! - redo... +# +#------------------------------------------------------------------------- + +bootdir= $(CURDIR)/bootstrap +VPATH:= $(VPATH):$(bootdir) + +#BOOTYACCS= bootstrap_tokens.h bootparse.c +BOOTYACCS= bootparse.c + +SRCS_BOOTSTRAP= bootparse.c bootscanner.c bootstrap.c + +$(BOOTYACCS): bootparse.y + cd $(objdir); \ + $(YACC) $(YFLAGS) $<; \ + sed -f $(bootdir)/boot.sed < y.tab.c > bootparse.c; \ + mv y.tab.h bootstrap_tokens.h; \ + rm -f y.tab.c + +$(objdir)/bootparse.o: bootparse.c + $(cc_inobjdir) + + +bootscanner.c: bootscanner.l + cd $(objdir); \ + $(LEX) $<; \ + sed -f $(bootdir)/boot.sed < lex.yy.c > bootscanner.c; \ + rm -f lex.yy.c + +$(objdir)/bootscanner.o: bootscanner.c + $(cc_inobjdir) + + + +# +# The following insures that y.tab.h gets made as bootstrap.c +# includes it +# +bootstrap.o: $(BOOTYACCS) + +POSTGRES_DEPEND+= $(BOOTYACCS) bootscanner.c + + +CLEANFILES+= bootscanner.c $(BOOTYACCS) y.tab.h y.output + +HEADERS+= bootstrap.h + diff --git a/src/backend/bootstrap/boot.sed b/src/backend/bootstrap/boot.sed new file mode 100644 index 00000000000..8ec71025cea --- /dev/null +++ b/src/backend/bootstrap/boot.sed @@ -0,0 +1,9 @@ +# +# lex.sed - sed rules to remove conflicts between the +# bootstrap backend interface LEX scanner and the +# normal backend SQL LEX scanner +# +# $Header: /cvsroot/pgsql/src/backend/bootstrap/Attic/boot.sed,v 1.1.1.1 1996/07/09 06:21:14 scrappy Exp $ +# +s/^yy/Int_yy/g +s/\([^a-zA-Z0-9_]\)yy/\1Int_yy/g diff --git a/src/backend/bootstrap/bootparse.y b/src/backend/bootstrap/bootparse.y new file mode 100644 index 00000000000..0362b302b16 --- /dev/null +++ b/src/backend/bootstrap/bootparse.y @@ -0,0 +1,293 @@ +%{ +/*------------------------------------------------------------------------- + * + * backendparse.y-- + * yacc parser grammer for the "backend" 
initialization program. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootparse.y,v 1.1.1.1 1996/07/09 06:21:14 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "access/heapam.h" +#include "access/tupdesc.h" +#include "bootstrap/bootstrap.h" +#include "utils/portal.h" +#include "storage/smgr.h" +#include "nodes/pg_list.h" +#include "catalog/catalog.h" +#include "catalog/catname.h" +#include "catalog/heap.h" +#include "catalog/index.h" +#include "commands/rename.h" +#include "commands/defrem.h" +#include "access/transam.h" +#include "access/xact.h" + +#define DO_START { StartTransactionCommand();\ + } + +#define DO_END { CommitTransactionCommand();\ + if (!Quiet) { EMITPROMPT; }\ + fflush(stdout); \ + } + +int num_tuples_read = 0; +static Oid objectid; + +%} + +%union { + List *list; + IndexElem *ielem; + char *str; + int ival; +} + +%type arg_list +%type index_params index_on +%type const ident +%type optbootstrap optoideq tuple tuplelist + +%token CONST ID +%token OPEN XCLOSE XCREATE INSERT_TUPLE +%token STRING XDEFINE +%token XDECLARE INDEX ON USING XBUILD INDICES +%token COMMA EQUALS LPAREN RPAREN +%token OBJ_ID XBOOTSTRAP NULLVAL +%start TopLevel + +%nonassoc low +%nonassoc high + +%% + +TopLevel: + Queries + | + ; + +Queries: + Query + | Queries Query + ; + +Query : + OpenStmt + | CloseStmt + | CreateStmt + | InsertStmt + | DeclareIndexStmt + | BuildIndsStmt + ; + +OpenStmt: + OPEN ident + { + DO_START; + boot_openrel(LexIDStr($2)); + DO_END; + } + ; + +CloseStmt: + XCLOSE ident %prec low + { + DO_START; + closerel(LexIDStr($2)); + DO_END; + } + | XCLOSE %prec high + { + DO_START; + closerel(NULL); + DO_END; + } + ; + +CreateStmt: + XCREATE optbootstrap ident LPAREN + { + DO_START; + numattr=(int)0; + } + typelist + { + if (!Quiet) putchar('\n'); + DO_END; + } + RPAREN + { + DO_START; + + if ($2) { + extern Relation reldesc; + TupleDesc tupdesc; + + if (reldesc) { + puts("create bootstrap: Warning, open relation"); + puts("exists, closing first"); + closerel(NULL); + } + if (DebugMode) + puts("creating bootstrap relation"); + tupdesc = CreateTupleDesc(numattr,attrtypes); + reldesc = heap_creatr(LexIDStr($3), + DEFAULT_SMGR, + tupdesc); + if (DebugMode) + puts("bootstrap relation created ok"); + } else { + Oid id; + TupleDesc tupdesc; + /* extern Oid heap_create();*/ + + tupdesc = CreateTupleDesc(numattr,attrtypes); + id = heap_create(LexIDStr($3), + NULL, + 'n', + DEFAULT_SMGR, + tupdesc); + if (!Quiet) + printf("CREATED relation %s with OID %d\n", + LexIDStr($3), id); + } + DO_END; + if (DebugMode) + puts("Commit End"); + } + ; + +InsertStmt: + INSERT_TUPLE optoideq + { + DO_START; + if (DebugMode) + printf("tuple %d<", $2); + num_tuples_read = 0; + } + LPAREN tuplelist RPAREN + { + if (num_tuples_read != numattr) + elog(WARN,"incorrect number of values for tuple"); + if (reldesc == (Relation)NULL) { + elog(WARN,"must OPEN RELATION before INSERT\n"); + err(); + } + if (DebugMode) + puts("Insert Begin"); + objectid = $2; + InsertOneTuple(objectid); + if (DebugMode) + puts("Insert End"); + if (!Quiet) { putchar('\n'); } + DO_END; + if (DebugMode) + puts("Transaction End"); + } + ; + +DeclareIndexStmt: + XDECLARE INDEX ident ON ident USING ident LPAREN index_params RPAREN + { + List *params; + + DO_START; + + params = lappend(NIL, (List*)$9); + DefineIndex(LexIDStr($5), + LexIDStr($3), + LexIDStr($7), + params, NIL, 0, NIL); + 
DO_END; + } + ; + +BuildIndsStmt: + XBUILD INDICES { build_indices(); } + +index_params: + index_on ident + { + IndexElem *n = (IndexElem*)$1; + n->class = LexIDStr($2); + $$ = n; + } + +index_on: + ident + { + IndexElem *n = makeNode(IndexElem); + n->name = LexIDStr($1); + $$ = n; + } + | ident LPAREN arg_list RPAREN + { + IndexElem *n = makeNode(IndexElem); + n->name = LexIDStr($1); + n->args = (List*)$3; + $$ = n; + } + +arg_list: + ident + { + $$ = lappend(NIL, makeString(LexIDStr($1))); + } + | arg_list COMMA ident + { + $$ = lappend((List*)$1, makeString(LexIDStr($3))); + } + +optbootstrap: + XBOOTSTRAP { $$ = 1; } + | { $$ = 0; } + ; + +typelist: + typething + | typelist COMMA typething + ; + +typething: + ident EQUALS ident + { + if(++numattr > MAXATTR) + elog(FATAL,"Too many attributes\n"); + DefineAttr(LexIDStr($1),LexIDStr($3),numattr-1); + if (DebugMode) + printf("\n"); + } + ; + +optoideq: + OBJ_ID EQUALS ident { $$ = atol(LexIDStr($3)); } + | { extern Oid newoid(); $$ = newoid(); } + ; + +tuplelist: + tuple + | tuplelist tuple + | tuplelist COMMA tuple + ; + +tuple: + ident {InsertOneValue(objectid, LexIDStr($1), num_tuples_read++); } + | const {InsertOneValue(objectid, LexIDStr($1), num_tuples_read++); } + | NULLVAL + { InsertOneNull(num_tuples_read++); } + ; + +const : + CONST { $$=yylval.ival; } + ; + +ident : + ID { $$=yylval.ival; } + ; +%% + + diff --git a/src/backend/bootstrap/bootscanner.l b/src/backend/bootstrap/bootscanner.l new file mode 100644 index 00000000000..9dbd92cb93a --- /dev/null +++ b/src/backend/bootstrap/bootscanner.l @@ -0,0 +1,108 @@ +%{ +/*------------------------------------------------------------------------- + * + * bootscanner.lex-- + * a lexical scanner for the bootstrap parser + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootscanner.l,v 1.1.1.1 1996/07/09 06:21:14 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "bootstrap/bootstrap.h" +#include "utils/portal.h" +#include "access/xact.h" +#include "parser/scansup.h" + +#include "bootstrap_tokens.h" + +/* some versions of lex define this as a macro */ +#if defined(yywrap) +#undef yywrap +#endif /* yywrap */ + +YYSTYPE yylval; +int yyline; /* keep track of the line number for error reporting */ + +%} + +D [0-9] +oct \\{D}{D}{D} +Exp [Ee][-+]?{D}+ +id ([A-Za-z0-9_]|{oct}|\-)+ +sid \"([^\"])*\" +arrayid [A-Za-z0-9_]+\[{D}*\] + +%% + +open { return(OPEN); } + +close { return(XCLOSE); } + +create { return(XCREATE); } + +OID { return(OBJ_ID); } +bootstrap { return(XBOOTSTRAP); } +_null_ { return(NULLVAL); } + +insert { return(INSERT_TUPLE); } + +"," { return(COMMA); } +"=" { return(EQUALS); } +"(" { return(LPAREN); } +")" { return(RPAREN); } + +[\n] { yyline++; } +[\t] ; +" " ; + +^\#[^\n]* ; /* drop everything after "#" for comments */ + + +"declare" { return(XDECLARE); } +"build" { return(XBUILD); } +"indices" { return(INDICES); } +"index" { return(INDEX); } +"on" { return(ON); } +"using" { return(USING); } +{arrayid} { + yylval.ival = EnterString(MapArrayTypeName((char*)yytext)); + return(ID); + } +{id} { + yylval.ival = EnterString(scanstr((char*)yytext)); + return(ID); + } +{sid} { + yylval.ival = EnterString(scanstr((char*)yytext)); + return(ID); + } + +(-)?{D}+"."{D}*({Exp})? | +(-)?{D}*"."{D}+({Exp})? | +(-)?{D}+{Exp} { + yylval.ival = EnterString((char*)yytext); + return(CONST); + } + +. 
{ + printf("syntax error %d : -> %s\n", yyline, yytext); + } + + + +%% + +yywrap() +{ + return 1; +} + +yyerror(str) + char *str; +{ + fprintf(stderr,"\tsyntax error %d : %s",yyline, str); +} diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c new file mode 100644 index 00000000000..e2df755109c --- /dev/null +++ b/src/backend/bootstrap/bootstrap.c @@ -0,0 +1,1049 @@ +/*------------------------------------------------------------------------- + * + * bootstrap.c-- + * routines to support running postgres in 'bootstrap' mode + * bootstrap mode is used to create the initial template database + * + * Copyright (c) 1994, Regents of the University of California + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.1.1.1 1996/07/09 06:21:14 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include +#include "libpq/pqsignal.h" /* substitute for */ +#if defined(PORTNAME_linux) +#ifndef __USE_POSIX +#define __USE_POSIX +#endif +#endif /* defined(PORTNAME_linux) */ +#include + +#define BOOTSTRAP_INCLUDE /* mask out stuff in tcop/tcopprot.h */ + +#include "bootstrap/bootstrap.h" +#include "postgres.h" +#include "miscadmin.h" +#include "tcop/tcopprot.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/tupdesc.h" +#include "utils/builtins.h" +#include "utils/rel.h" +#include "utils/tqual.h" +#include "utils/lsyscache.h" +#include "access/xact.h" +#include "utils/exc.h" /* for ExcAbort and */ +#include "fmgr.h" +#include "utils/palloc.h" +#include "utils/mcxt.h" +#include "storage/smgr.h" +#include "commands/defrem.h" + +#include "catalog/pg_type.h" +#include "catalog/catname.h" +#include "catalog/indexing.h" +#include "catalog/index.h" + +#define ALLOC(t, c) (t *)calloc((unsigned)(c), sizeof(t)) +#define FIRST_TYPE_OID 16 /* OID of the first type */ + +/* ---------------- + * global variables + * ---------------- + */ +/* + * In the lexical analyzer, we need to get the reference number quickly from + * the string, and the string from the reference number. Thus we have + * as our data structure a hash table, where the hashing key taken from + * the particular string. The hash table is chained. One of the fields + * of the hash table node is an index into the array of character pointers. + * The unique index number that every string is assigned is simply the + * position of its string pointer in the array of string pointers. + */ + +#define STRTABLESIZE 10000 +#define HASHTABLESIZE 503 + +/* Hash function numbers */ +#define NUM 23 +#define NUMSQR 529 +#define NUMCUBE 12167 + +char *strtable [STRTABLESIZE]; +hashnode *hashtable [HASHTABLESIZE]; + +static int strtable_end = -1; /* Tells us last occupied string space */ + +/*- + * Basic information associated with each type. This is used before + * pg_type is created. + * + * XXX several of these input/output functions do catalog scans + * (e.g., F_REGPROCIN scans pg_proc). this obviously creates some + * order dependencies in the catalog creation process. 
+ */ +struct typinfo { + char name[NAMEDATALEN]; + Oid oid; + Oid elem; + int16 len; + Oid inproc; + Oid outproc; +}; + +static struct typinfo Procid[] = { + { "bool", 16, 0, 1, F_BOOLIN, F_BOOLOUT }, + { "bytea", 17, 0, -1, F_BYTEAIN, F_BYTEAOUT }, + { "char", 18, 0, 1, F_CHARIN, F_CHAROUT }, + { "name", 19, 0, NAMEDATALEN, F_NAMEIN, F_NAMEOUT }, + { "char16", 20, 0, 16, F_CHAR16IN, F_CHAR16OUT}, +/* { "dt", 20, 0, 4, F_DTIN, F_DTOUT}, */ + { "int2", 21, 0, 2, F_INT2IN, F_INT2OUT }, + { "int28", 22, 0, 16, F_INT28IN, F_INT28OUT }, + { "int4", 23, 0, 4, F_INT4IN, F_INT4OUT }, + { "regproc", 24, 0, 4, F_REGPROCIN, F_REGPROCOUT }, + { "text", 25, 0, -1, F_TEXTIN, F_TEXTOUT }, + { "oid", 26, 0, 4, F_INT4IN, F_INT4OUT }, + { "tid", 27, 0, 6, F_TIDIN, F_TIDOUT }, + { "xid", 28, 0, 5, F_XIDIN, F_XIDOUT }, + { "iid", 29, 0, 1, F_CIDIN, F_CIDOUT }, + { "oid8", 30, 0, 32, F_OID8IN, F_OID8OUT }, + { "smgr", 210, 0, 2, F_SMGRIN, F_SMGROUT }, + { "_int4", 1007, 23, -1, F_ARRAY_IN, F_ARRAY_OUT }, + { "_aclitem", 1034, 1033, -1, F_ARRAY_IN, F_ARRAY_OUT } +}; + +static int n_types = sizeof(Procid) / sizeof(struct typinfo); + +struct typmap { /* a hack */ + Oid am_oid; + TypeTupleFormData am_typ; +}; + +static struct typmap **Typ = (struct typmap **)NULL; +static struct typmap *Ap = (struct typmap *)NULL; + +static int Warnings = 0; +static char Blanks[MAXATTR]; + +Relation reldesc; /* current relation descriptor */ +static char *relname; /* current relation name */ + +AttributeTupleForm attrtypes[MAXATTR]; /* points to attribute info */ +static char *values[MAXATTR]; /* cooresponding attribute values */ +int numattr; /* number of attributes for cur. rel */ + +#if defined(WIN32) || defined(PORTNAME_next) +static jmp_buf Warn_restart; +#define sigsetjmp(x,y) setjmp(x) +#define siglongjmp longjmp +#else +static sigjmp_buf Warn_restart; +#endif + +int DebugMode; +static GlobalMemory nogc = (GlobalMemory) NULL; /* special no-gc mem context */ + +extern int optind; +extern char *optarg; + +/* + * At bootstrap time, we first declare all the indices to be built, and + * then build them. The IndexList structure stores enough information + * to allow us to build the indices after they've been declared. 
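 *
 *	(Editor's illustrative aside, not part of the original source: in
 *	outline, each bootstrap "declare index" command ends up in a call
 *	of the form
 *
 *	    index_register(heapName, indexName, natts, attnos,
 *	                   nparams, params, finfo, predInfo);
 *
 *	which pushes a new IndexList node onto ILHead, and the closing
 *	"build indices" command calls build_indices(), which walks ILHead
 *	and calls index_build() for each entry.  Both functions are defined
 *	near the end of this file; the argument names above are just
 *	placeholders for the values supplied by the index declaration code.)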
+ */ + +typedef struct _IndexList { + char* il_heap; + char* il_ind; + int il_natts; + AttrNumber *il_attnos; + uint16 il_nparams; + Datum * il_params; + FuncIndexInfo *il_finfo; + PredInfo *il_predInfo; + struct _IndexList *il_next; +} IndexList; + +static IndexList *ILHead = (IndexList *) NULL; + +typedef void (*sig_func)(); + + + +/* ---------------------------------------------------------------- + * misc functions + * ---------------------------------------------------------------- + */ + +/* ---------------- + * error handling / abort routines + * ---------------- + */ +#if !defined(PORTNAME_bsdi) +void err() +{ + Warnings++; + cleanup(); +} +#endif + +/* usage: + usage help for the bootstrap backen +*/ +static void +usage() +{ + fprintf(stderr,"Usage: postgres -boot [-d] [-C] [-O] [-Q] [-P portno] [dbName]\n"); + fprintf(stderr," d: debug mode\n"); + fprintf(stderr," C: disable version checking\n"); + fprintf(stderr," O: set BootstrapProcessing mode\n"); + fprintf(stderr," P portno: specify port number\n"); + + exitpg(1); +} + +/* ---------------------------------------------------------------- + * BootstrapMain + * the main loop for handling the backend in bootstrap mode + * the bootstrap mode is used to initialize the template database + * the bootstrap backend doesn't speak SQL, but instead expects + * commands in a special bootstrap language. + * they are a special bootstrap language. + * + * the arguments passed in to BootstrapMain are the run-time arguments + * without the argument '-boot', the caller is required to have + * removed -boot from the run-time args + * ---------------------------------------------------------------- + */ +int +BootstrapMain(int argc, char *argv[]) +{ + int i; + int portFd = -1; + char *dbName; + int flag; + int override = 1; /* use BootstrapProcessing or InitProcessing mode */ + + extern int optind; + extern char *optarg; + + /* ---------------- + * initialize signal handlers + * ---------------- + */ + signal(SIGINT, (sig_func) die); +#ifndef WIN32 + signal(SIGHUP, (sig_func) die); + signal(SIGTERM, (sig_func) die); +#endif /* WIN32 */ + + /* -------------------- + * initialize globals + * ------------------- + */ + + InitGlobals(); + + /* ---------------- + * process command arguments + * ---------------- + */ + Quiet = 0; + Noversion = 0; + dbName = NULL; + + while ((flag = getopt(argc, argv, "dCOQP")) != EOF) { + switch (flag) { + case 'd': + DebugMode = 1; /* print out debuggin info while parsing */ + break; + case 'C': + Noversion = 1; + break; + case 'O': + override = true; + break; + case 'Q': + Quiet = 1; + break; + case 'P':/* specify port */ + portFd = atoi(optarg); + break; + default: + usage(); + break; + } + } /* while */ + + if (argc - optind > 1) { + usage(); + } else + if (argc - optind == 1) { + dbName = argv[optind]; + } + + if (dbName == NULL) { + dbName = getenv("USER"); + if (dbName == NULL) { + fputs("bootstrap backend: failed, no db name specified\n", stderr); + fputs(" and no USER enviroment variable\n", stderr); + exitpg(1); + } + } + + /* ---------------- + * initialize input fd + * ---------------- + */ + if (IsUnderPostmaster == true && portFd < 0) { + fputs("backend: failed, no -P option with -postmaster opt.\n", stderr); + exitpg(1); + } + +#ifdef WIN32 + _nt_init(); + _nt_attach(); +#endif /* WIN32 */ + + + /* ---------------- + * backend initialization + * ---------------- + */ + SetProcessingMode((override) ? 
BootstrapProcessing : InitProcessing); + InitPostgres(dbName); + LockDisable(true); + + for (i = 0 ; i < MAXATTR; i++) { + attrtypes[i]=(AttributeTupleForm )NULL; + Blanks[i] = ' '; + } + for(i = 0; i < STRTABLESIZE; ++i) + strtable[i] = NULL; + for(i = 0; i < HASHTABLESIZE; ++i) + hashtable[i] = NULL; + + /* ---------------- + * abort processing resumes here - What to do in WIN32? + * ---------------- + */ +#ifndef WIN32 + signal(SIGHUP, handle_warn); + + if (sigsetjmp(Warn_restart, 1) != 0) { +#else + if (setjmp(Warn_restart) != 0) { +#endif /* WIN32 */ + Warnings++; + AbortCurrentTransaction(); + } + + /* ---------------- + * process input. + * ---------------- + */ + + /* the sed script boot.sed renamed yyparse to Int_yyparse + for the bootstrap parser to avoid conflicts with the normal SQL + parser */ + Int_yyparse(); + + /* clean up processing */ + StartTransactionCommand(); + cleanup(); + + /* not reached, here to make compiler happy */ + return 0; + +} + +/* ---------------------------------------------------------------- + * MANUAL BACKEND INTERACTIVE INTERFACE COMMANDS + * ---------------------------------------------------------------- + */ + +/* ---------------- + * boot_openrel + * ---------------- + */ +void +boot_openrel(char *relname) +{ + int i; + struct typmap **app; + Relation rdesc; + HeapScanDesc sdesc; + HeapTuple tup; + + if (strlen(relname) > 15) + relname[15] ='\000'; + + if (Typ == (struct typmap **)NULL) { + StartPortalAllocMode(DefaultAllocMode, 0); + rdesc = heap_openr(TypeRelationName); + sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 0, (ScanKey)NULL); + for (i=0; PointerIsValid(tup=heap_getnext(sdesc,0,(Buffer *)NULL)); ++i); + heap_endscan(sdesc); + app = Typ = ALLOC(struct typmap *, i + 1); + while (i-- > 0) + *app++ = ALLOC(struct typmap, 1); + *app = (struct typmap *)NULL; + sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 0, (ScanKey)NULL); + app = Typ; + while (PointerIsValid(tup = heap_getnext(sdesc, 0, (Buffer *)NULL))) { + (*app)->am_oid = tup->t_oid; + memmove((char *)&(*app++)->am_typ, + (char *)GETSTRUCT(tup), + sizeof ((*app)->am_typ)); + } + heap_endscan(sdesc); + heap_close(rdesc); + EndPortalAllocMode(); + } + + if (reldesc != NULL) { + closerel(NULL); + } + + if (!Quiet) + printf("Amopen: relation %s. attrsize %d\n", relname, + ATTRIBUTE_TUPLE_SIZE); + + reldesc = heap_openr(relname); + Assert(reldesc); + numattr = reldesc->rd_rel->relnatts; + for (i = 0; i < numattr; i++) { + if (attrtypes[i] == NULL) { + attrtypes[i] = AllocateAttribute(); + } + memmove((char *)attrtypes[i], + (char *)reldesc->rd_att->attrs[i], + ATTRIBUTE_TUPLE_SIZE); + + /* Some old pg_attribute tuples might not have attisset. */ + /* If the attname is attisset, don't look for it - it may + not be defined yet. 
+ */ + if (namestrcmp(&attrtypes[i]->attname, "attisset") == 0) + attrtypes[i]->attisset = get_attisset(reldesc->rd_id, + attrtypes[i]->attname.data); + else + attrtypes[i]->attisset = false; + + if (DebugMode) { + AttributeTupleForm at = attrtypes[i]; + printf("create attribute %d name %.*s len %d num %d type %d\n", + i, NAMEDATALEN, at->attname.data, at->attlen, at->attnum, + at->atttypid + ); + fflush(stdout); + } + } +} + +/* ---------------- + * closerel + * ---------------- + */ +void +closerel(char *name) +{ + if (name) { + if (reldesc) { + if (namestrcmp(RelationGetRelationName(reldesc), name) != 0) + elog(WARN,"closerel: close of '%s' when '%s' was expected", + name, relname); + } else + elog(WARN,"closerel: close of '%s' before any relation was opened", + name); + + } + + if (reldesc == NULL) { + elog(WARN,"Warning: no opened relation to close.\n"); + } else { + if (!Quiet) printf("Amclose: relation %s.\n", relname); + heap_close(reldesc); + reldesc = (Relation)NULL; + } +} + + +/* ---------------- + * DEFINEATTR() + * + * define a pair + * if there are n fields in a relation to be created, this routine + * will be called n times + * ---------------- + */ +void +DefineAttr(char *name, char *type, int attnum) +{ + int attlen; + int t; + + if (reldesc != NULL) { + fputs("Warning: no open relations allowed with 't' command.\n",stderr); + closerel(relname); + } + + t = gettype(type); + if (attrtypes[attnum] == (AttributeTupleForm )NULL) + attrtypes[attnum] = AllocateAttribute(); + if (Typ != (struct typmap **)NULL) { + attrtypes[attnum]->atttypid = Ap->am_oid; + namestrcpy(&attrtypes[attnum]->attname, name); + if (!Quiet) printf("<%.*s %s> ", NAMEDATALEN, + attrtypes[attnum]->attname.data, type); + attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */ + attlen = attrtypes[attnum]->attlen = Ap->am_typ.typlen; + attrtypes[attnum]->attbyval = Ap->am_typ.typbyval; + } else { + attrtypes[attnum]->atttypid = Procid[t].oid; + namestrcpy(&attrtypes[attnum]->attname,name); + if (!Quiet) printf("<%.*s %s> ", NAMEDATALEN, + attrtypes[attnum]->attname.data, type); + attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */ + attlen = attrtypes[attnum]->attlen = Procid[t].len; + attrtypes[attnum]->attbyval = (attlen==1) || (attlen==2)||(attlen==4); + } +} + + +/* ---------------- + * InsertOneTuple + * assumes that 'oid' will not be zero. + * ---------------- + */ +void +InsertOneTuple(Oid objectid) +{ + HeapTuple tuple; + TupleDesc tupDesc; + + int i; + + if (DebugMode) { + printf("InsertOneTuple oid %d, %d attrs\n", objectid, numattr); + fflush(stdout); + } + + tupDesc = CreateTupleDesc(numattr,attrtypes); + tuple = heap_formtuple(tupDesc,(Datum*)values,Blanks); + pfree(tupDesc); /* just free's tupDesc, not the attrtypes */ + + if(objectid !=(Oid)0) { + tuple->t_oid=objectid; + } + heap_insert(reldesc, tuple); + pfree(tuple); + if (DebugMode) { + printf("End InsertOneTuple, objectid=%d\n", objectid); + fflush(stdout); + } + /* + * Reset blanks for next tuple + */ + for (i = 0; i= MAXATTR) { + printf("i out of range: %d\n", i); + Assert(0); + } + + if (Typ != (struct typmap **)NULL) { + struct typmap *ap; + if (DebugMode) + puts("Typ != NULL"); + app = Typ; + while (*app && (*app)->am_oid != reldesc->rd_att->attrs[i]->atttypid) + ++app; + ap = *app; + if (ap == NULL) { + printf("Unable to find atttypid in Typ list! 
%d\n", + reldesc->rd_att->attrs[i]->atttypid + ); + Assert(0); + } + values[i] = fmgr(ap->am_typ.typinput, + value, + ap->am_typ.typelem, + -1); /* shouldn't have char() or varchar() types + during boostrapping but just to be safe */ + prt = fmgr(ap->am_typ.typoutput, values[i], + ap->am_typ.typelem); + if (!Quiet) printf("%s ", prt); + pfree(prt); + } else { + typeindex = attrtypes[i]->atttypid - FIRST_TYPE_OID; + if (DebugMode) + printf("Typ == NULL, typeindex = %d idx = %d\n", typeindex, i); + values[i] = fmgr(Procid[typeindex].inproc, value, + Procid[typeindex].elem, -1); + prt = fmgr(Procid[typeindex].outproc, values[i], + Procid[typeindex].elem); + if (!Quiet) printf("%s ", prt); + pfree(prt); + } + if (DebugMode) { + puts("End InsertValue"); + fflush(stdout); + } +} + +/* ---------------- + * InsertOneNull + * ---------------- + */ +void +InsertOneNull(int i) +{ + if (DebugMode) + printf("Inserting null\n"); + if (i < 0 || i >= MAXATTR) { + elog(FATAL, "i out of range (too many attrs): %d\n", i); + } + values[i] = (char *)NULL; + Blanks[i] = 'n'; +} + +#define MORE_THAN_THE_NUMBER_OF_CATALOGS 256 + +bool +BootstrapAlreadySeen(Oid id) +{ + static Oid seenArray[MORE_THAN_THE_NUMBER_OF_CATALOGS]; + static int nseen = 0; + bool seenthis; + int i; + + seenthis = false; + + for (i=0; i < nseen; i++) { + if (seenArray[i] == id) { + seenthis = true; + break; + } + } + if (!seenthis) { + seenArray[nseen] = id; + nseen++; + } + return (seenthis); +} + +/* ---------------- + * cleanup + * ---------------- + */ +void +cleanup() +{ + static int beenhere = 0; + + if (!beenhere) + beenhere = 1; + else { + elog(FATAL,"Memory manager fault: cleanup called twice.\n", stderr); + exitpg(1); + } + if (reldesc != (Relation)NULL) { + heap_close(reldesc); + } + CommitTransactionCommand(); + exitpg(Warnings); +} + +/* ---------------- + * gettype + * ---------------- + */ +int +gettype(char *type) +{ + int i; + Relation rdesc; + HeapScanDesc sdesc; + HeapTuple tup; + struct typmap **app; + + if (Typ != (struct typmap **)NULL) { + for (app = Typ; *app != (struct typmap *)NULL; app++) { + if (strncmp((*app)->am_typ.typname.data, type, NAMEDATALEN) == 0) { + Ap = *app; + return((*app)->am_oid); + } + } + } else { + for (i = 0; i <= n_types; i++) { + if (strncmp(type, Procid[i].name, NAMEDATALEN) == 0) { + return(i); + } + } + if (DebugMode) + printf("bootstrap.c: External Type: %.*s\n", NAMEDATALEN, type); + rdesc = heap_openr(TypeRelationName); + sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 0, (ScanKey)NULL); + i = 0; + while (PointerIsValid(tup = heap_getnext(sdesc, 0, (Buffer *)NULL))) + ++i; + heap_endscan(sdesc); + app = Typ = ALLOC(struct typmap *, i + 1); + while (i-- > 0) + *app++ = ALLOC(struct typmap, 1); + *app = (struct typmap *)NULL; + sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 0, (ScanKey)NULL); + app = Typ; + while (PointerIsValid(tup = heap_getnext(sdesc, 0, (Buffer *)NULL))) { + (*app)->am_oid = tup->t_oid; + memmove((char *)&(*app++)->am_typ, + (char *)GETSTRUCT(tup), + sizeof ((*app)->am_typ)); + } + heap_endscan(sdesc); + heap_close(rdesc); + return(gettype(type)); + } + elog(WARN, "Error: unknown type '%s'.\n", type); + err(); + /* not reached, here to make compiler happy */ + return 0; +} + +/* ---------------- + * AllocateAttribute + * ---------------- + */ +AttributeTupleForm /* XXX */ +AllocateAttribute() +{ + AttributeTupleForm attribute = + (AttributeTupleForm)malloc(ATTRIBUTE_TUPLE_SIZE); + + if (!PointerIsValid(attribute)) { + elog(FATAL, "AllocateAttribute: malloc failed"); + 
} + memset(attribute, 0, ATTRIBUTE_TUPLE_SIZE); + + return (attribute); +} + +/* ---------------- + * MapArrayTypeName + * XXX arrays of "basetype" are always "_basetype". + * this is an evil hack inherited from rel. 3.1. + * XXX array dimension is thrown away because we + * don't support fixed-dimension arrays. again, + * sickness from 3.1. + * + * the string passed in must have a '[' character in it + * + * the string returned is a pointer to static storage and should NOT + * be freed by the CALLER. + * ---------------- + */ +char* +MapArrayTypeName(char *s) +{ + int i, j; + static char newStr[NAMEDATALEN]; /* array type names < NAMEDATALEN long */ + + if (s == NULL || s[0] == '\0') + return s; + + j = 1; + newStr[0] = '_'; + for (i=0; istrnum); + } else { + node = AddStr(str, len, 0); + return (node->strnum); + } +} + +/* ---------------- + * LexIDStr + * when given an idnum into the 'string-table' return the string + * associated with the idnum + * ---------------- + */ +char * +LexIDStr(int ident_num) +{ + return(strtable[ident_num]); +} + + +/* ---------------- + * CompHash + * + * Compute a hash function for a given string. We look at the first, + * the last, and the middle character of a string to try to get spread + * the strings out. The function is rather arbitrary, except that we + * are mod'ing by a prime number. + * ---------------- + */ +int +CompHash(char *str, int len) +{ + register int result; + + result =(NUM * str[0] + NUMSQR * str[len-1] + NUMCUBE * str[(len-1)/2]); + + return (result % HASHTABLESIZE); + +} + +/* ---------------- + * FindStr + * + * This routine looks for the specified string in the hash + * table. It returns a pointer to the hash node found, + * or NULL if the string is not in the table. + * ---------------- + */ +hashnode * +FindStr(char *str, int length, hashnode *mderef) +{ + hashnode *node; + node = hashtable [CompHash (str, length)]; + while (node != NULL) { + /* + * We must differentiate between string constants that + * might have the same value as a identifier + * and the identifier itself. + */ + if (!strcmp(str, strtable[node->strnum])) { + return(node); /* no need to check */ + } else { + node = node->next; + } + } + /* Couldn't find it in the list */ + return (NULL); +} + +/* ---------------- + * AddStr + * + * This function adds the specified string, along with its associated + * data, to the hash table and the string table. We return the node + * so that the calling routine can find out the unique id that AddStr + * has assigned to this string. + * ---------------- + */ +hashnode * +AddStr(char *str, int strlength, int mderef) +{ + hashnode *temp, *trail, *newnode; + int hashresult; + int len; + + if (++strtable_end == STRTABLESIZE) { + /* Error, string table overflow, so we Punt */ + elog(FATAL, + "There are too many string constants and identifiers for the compiler to handle."); + + + } + + /* + * Some of the utilites (eg, define type, create relation) assume + * that the string they're passed is a NAMEDATALEN. We get array bound + * read violations from purify if we don't allocate at least NAMEDATALEN + * bytes for strings of this sort. Because we're lazy, we allocate + * at least NAMEDATALEN bytes all the time. 
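 *
 *	(Editor's illustrative aside, not part of the original source: the
 *	intended round trip through the string table built here is simply
 *
 *	    int   id = EnterString("pg_example");
 *	    char *s  = LexIDStr(id);
 *
 *	after which s points at the stored copy of the hypothetical string
 *	"pg_example"; EnterString() and LexIDStr() are defined earlier in
 *	this file.)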
+ */ + + if ((len = strlength + 1) < NAMEDATALEN) + len = NAMEDATALEN; + + strtable [strtable_end] = malloc((unsigned) len); + strcpy (strtable[strtable_end], str); + + /* Now put a node in the hash table */ + + newnode = (hashnode*)malloc(sizeof(hashnode)*1); + newnode->strnum = strtable_end; + newnode->next = NULL; + + /* Find out where it goes */ + + hashresult = CompHash (str, strlength); + if (hashtable [hashresult] == NULL) { + hashtable [hashresult] = newnode; + } else { /* There is something in the list */ + trail = hashtable [hashresult]; + temp = trail->next; + while (temp != NULL) { + trail = temp; + temp = temp->next; + } + trail->next = newnode; + } + return (newnode); +} + + + +/* + * index_register() -- record an index that has been set up for building + * later. + * + * At bootstrap time, we define a bunch of indices on system catalogs. + * We postpone actually building the indices until just before we're + * finished with initialization, however. This is because more classes + * and indices may be defined, and we want to be sure that all of them + * are present in the index. + */ +void +index_register(char *heap, + char *ind, + int natts, + AttrNumber *attnos, + uint16 nparams, + Datum *params, + FuncIndexInfo *finfo, + PredInfo *predInfo) +{ + Datum *v; + IndexList *newind; + int len; + MemoryContext oldcxt; + + /* + * XXX mao 10/31/92 -- don't gc index reldescs, associated info + * at bootstrap time. we'll declare the indices now, but want to + * create them later. + */ + + if (nogc == (GlobalMemory) NULL) + nogc = CreateGlobalMemory("BootstrapNoGC"); + + oldcxt = MemoryContextSwitchTo((MemoryContext) nogc); + + newind = (IndexList *) palloc(sizeof(IndexList)); + newind->il_heap = pstrdup(heap); + newind->il_ind = pstrdup(ind); + newind->il_natts = natts; + + if (PointerIsValid(finfo)) + len = FIgetnArgs(finfo) * sizeof(AttrNumber); + else + len = natts * sizeof(AttrNumber); + + newind->il_attnos = (AttrNumber *) palloc(len); + memmove(newind->il_attnos, attnos, len); + + if ((newind->il_nparams = nparams) > 0) { + v = newind->il_params = (Datum *) palloc(2 * nparams * sizeof(Datum)); + nparams *= 2; + while (nparams-- > 0) { + *v = (Datum) palloc(strlen((char *)(*params)) + 1); + strcpy((char *) *v++, (char *) *params++); + } + } else { + newind->il_params = (Datum *) NULL; + } + + if (finfo != (FuncIndexInfo *) NULL) { + newind->il_finfo = (FuncIndexInfo *) palloc(sizeof(FuncIndexInfo)); + memmove(newind->il_finfo, finfo, sizeof(FuncIndexInfo)); + } else { + newind->il_finfo = (FuncIndexInfo *) NULL; + } + + if (predInfo != NULL) { + newind->il_predInfo = (PredInfo*)palloc(sizeof(PredInfo)); + newind->il_predInfo->pred = predInfo->pred; + newind->il_predInfo->oldPred = predInfo->oldPred; + } else { + newind->il_predInfo = NULL; + } + + newind->il_next = ILHead; + + ILHead = newind; + + (void) MemoryContextSwitchTo(oldcxt); +} + +void +build_indices() +{ + Relation heap; + Relation ind; + + for ( ; ILHead != (IndexList *) NULL; ILHead = ILHead->il_next) { + heap = heap_openr(ILHead->il_heap); + ind = index_openr(ILHead->il_ind); + index_build(heap, ind, ILHead->il_natts, ILHead->il_attnos, + ILHead->il_nparams, ILHead->il_params, ILHead->il_finfo, + ILHead->il_predInfo); + + /* + * All of the rest of this routine is needed only because in bootstrap + * processing we don't increment xact id's. The normal DefineIndex + * code replaces a pg_class tuple with updated info including the + * relhasindex flag (which we need to have updated). 
Unfortunately, + * there are always two indices defined on each catalog causing us to + * update the same pg_class tuple twice for each catalog getting an + * index during bootstrap resulting in the ghost tuple problem (see + * heap_replace). To get around this we change the relhasindex + * field ourselves in this routine keeping track of what catalogs we + * already changed so that we don't modify those tuples twice. The + * normal mechanism for updating pg_class is disabled during bootstrap. + * + * -mer + */ + heap = heap_openr(ILHead->il_heap); + + if (!BootstrapAlreadySeen(heap->rd_id)) + UpdateStats(heap->rd_id, 0, true); + } +} + diff --git a/src/backend/bootstrap/bootstrap.h b/src/backend/bootstrap/bootstrap.h new file mode 100644 index 00000000000..8ade7664f1f --- /dev/null +++ b/src/backend/bootstrap/bootstrap.h @@ -0,0 +1,78 @@ +/*------------------------------------------------------------------------- + * + * bootstrap.h-- + * include file for the bootstrapping code + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: bootstrap.h,v 1.1.1.1 1996/07/09 06:21:14 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef BOOTSTRAP_H +#define BOOTSTRAP_H + +#include +#include +#include +#include +#include + +#include "access/htup.h" +#include "access/itup.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "utils/tqual.h" +#include "storage/buf.h" +#include "storage/bufmgr.h" /* for BufferManagerFlush */ +#include "utils/portal.h" +#include "utils/elog.h" +#include "utils/rel.h" + +#define MAXATTR 40 /* max. number of attributes in a relation */ + +typedef struct hashnode { + int strnum; /* Index into string table */ + struct hashnode *next; +} hashnode; + +#define EMITPROMPT printf("> ") + +extern Relation reldesc; +extern AttributeTupleForm attrtypes[MAXATTR]; +extern int numattr; +extern int DebugMode; + +extern int BootstrapMain(int ac, char *av[]); +extern void index_register(char *heap, + char *ind, + int natts, + AttrNumber *attnos, + uint16 nparams, + Datum *params, + FuncIndexInfo *finfo, + PredInfo *predInfo); + +extern void err(void); +extern void InsertOneTuple(Oid objectid); +extern void closerel(char *name); +extern void boot_openrel(char *name); +extern char *LexIDStr(int ident_num); + +extern void DefineAttr(char *name, char *type, int attnum); +extern void InsertOneValue(Oid objectid, char *value, int i); +extern void InsertOneNull(int i); +extern bool BootstrapAlreadySeen(Oid id); +extern void cleanup(void); +extern int gettype(char *type); +extern AttributeTupleForm AllocateAttribute(void); +extern char* MapArrayTypeName(char *s); +extern char* CleanUpStr(char *s); +extern int EnterString (char *str); +extern int CompHash (char *str, int len); +extern hashnode *FindStr (char *str, int length, hashnode *mderef); +extern hashnode *AddStr(char *str, int strlength, int mderef); +extern void build_indices(void); + +#endif /* BOOTSTRAP_H */ diff --git a/src/backend/catalog/Makefile.inc b/src/backend/catalog/Makefile.inc new file mode 100644 index 00000000000..b29a0bfad25 --- /dev/null +++ b/src/backend/catalog/Makefile.inc @@ -0,0 +1,69 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the system catalogs module +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/catalog/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:14 scrappy Exp $ 
+# +#------------------------------------------------------------------------- + +catdir=$(CURDIR)/catalog +VPATH:=$(VPATH):$(catdir) + + +SRCS_CATALOG= catalog.c heap.c index.c indexing.c \ + pg_aggregate.c pg_operator.c pg_proc.c pg_type.c + +HEADERS+= catalog.h catname.h heap.h index.h indexing.h pg_aggregate.h \ + pg_am.h pg_amop.h pg_amproc.h pg_attribute.h pg_database.h \ + pg_defaults.h pg_demon.h pg_group.h pg_index.h pg_inheritproc.h \ + pg_inherits.h pg_ipl.h pg_language.h pg_listener.h \ + pg_log.h pg_magic.h pg_opclass.h pg_operator.h pg_parg.h \ + pg_proc.h pg_class.h \ + pg_rewrite.h pg_server.h pg_statistic.h pg_time.h pg_type.h \ + pg_user.h pg_variable.h pg_version.h + +# +# The following is to create the .bki files. +# TODO: sort headers, (figure some automatic way of of determining +# the bki sources?) +# +# XXX - more grot. includes names and uid's in the header file. FIX THIS +# (not sure if i got this right - which do i need - or should i +# burn the whole damned thing) +# +ifdef ALLOW_PG_GROUP +BKIOPTS= -DALLOW_PG_GROUP +endif + +GENBKI= $(catdir)/genbki.sh +BKIFILES= global1.bki local1_template1.bki + +GLOBALBKI_SRCS= pg_database.h pg_demon.h pg_magic.h pg_defaults.h \ + pg_variable.h pg_server.h pg_user.h pg_hosts.h \ + pg_group.h pg_log.h pg_time.h + +LOCALBKI_SRCS= pg_proc.h pg_type.h pg_attribute.h pg_class.h \ + pg_inherits.h pg_index.h pg_version.h pg_statistic.h pg_operator.h \ + pg_opclass.h pg_am.h pg_amop.h pg_amproc.h pg_language.h pg_parg.h \ + pg_aggregate.h pg_ipl.h pg_inheritproc.h \ + pg_rewrite.h pg_listener.h indexing.h + +global1.bki: $(GENBKI) $(GLOBALBKI_SRCS) + sh $(SHOPTS) $(GENBKI) $(BKIOPTS) \ + $(patsubst $(GENBKI),,$^) > $(objdir)/$(@F) + + +local1_template1.bki: $(GENBKI) $(LOCALBKI_SRCS) + sh $(SHOPTS) $(GENBKI) $(BKIOPTS) \ + $(patsubst $(GENBKI),,$^) > $(objdir)/$(@F) + + +#${PROG}: ${BKIFILES} +# + +CLEANFILES+= ${BKIFILES} diff --git a/src/backend/catalog/README b/src/backend/catalog/README new file mode 100644 index 00000000000..5bfc359e382 --- /dev/null +++ b/src/backend/catalog/README @@ -0,0 +1,66 @@ +$Header: /cvsroot/pgsql/src/backend/catalog/README,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ + +This directory contains .c files that manipulate the system catalogs +as well as .h files that define the structure of the system catalogs. + +When the compile-time scripts (such as Gen_fmgrtab.sh and genbki.sh) +execute, they grep the DATA statements out of the .h files and munge +these in order to generate the .bki files. The .bki files are then +used as input to initdb (which is just a wrapper around postgres +running single-user in bootstrapping mode) in order to generate the +initial (template) system catalog relation files. + +----------------------------------------------------------------- + +People who are going to hose around with the .h files should be aware +of the following facts: + +- It is very important that the DATA statements be properly formatted +(e.g., no broken lines, proper use of white-space and _null_). The +scripts are line-oriented and break easily. In addition, the only +documentation on the proper format for them is the code in the +bootstrap/ directory. Just be careful when adding new DATA +statements. + +- Some catalogs require that OIDs be preallocated to tuples because +certain catalogs contain circular references. For example, pg_type +contains pointers into pg_proc (pg_type.typinput), and pg_proc +contains back-pointers into pg_type (pg_proc.proargtypes). 
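(Editor's illustrative aside, not part of the original README: schematically,
a DATA line carrying a preallocated OID is a single line of the form

    DATA(insert OID = 999 ( example_value 16 _null_ ));

with purely hypothetical values; genbki.sh strips the DATA( ... ) wrapper and
passes the enclosed "insert OID = 999 ( ... )" statement through to the .bki
file for the bootstrap parser to execute.)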
In these +cases, the references may be explicitly set by use of the "OID =" +clause of the .bki insert statement. If no such pointers are required +to a given tuple, then the OID may be set to the wildcard value 0 +(i.e., the system generates a random OID in the usual way). + +If you need to find a valid OID for a set of tuples that refer to each +other, use the unused_oids script. It generates inclusive ranges of +*unused* OIDs (i.e., the line "45-900" means OIDs 45 through 900 have +not been allocated yet). However, you should not rely 100% on this +script, since it only looks at the .h files in the catalog/ directory. +Do a pg_grepsrc (recursive grep) of the source tree to insure that +there aren't any hidden crocks (i.e., explicit use of a numeric OID) +anywhere in the code. + +----------------------------------------------------------------- + +When munging the .c files, you should be aware of certain conventions: + +- The system catalog cache code (and most catalog-munging code in +general) assumes that the fixed-length portion of all system catalog +tuples are in fact present. That is, only the variable-length +portions of a catalog tuple are assumed to be permitted to be +non-NULL. For example, if you set pg_type.typdelim to be NULL, a +piece of code will likely perform "typetup->typdelim" (or, worse, +"typetyp->typelem", which follows typdelim). This will result in +random errors or even segmentation violations. Hence, do NOT insert +catalog tuples that contain NULL attributes except in their +variable-length portions! + +- Modification of the catalogs must be performed with the proper +updating of catalog indexes! That is, several catalogs have indexes +on them; when you munge them using the executor, the executor will +take care of doing the index updates, but if you make direct access +method calls to insert new or modified tuples into a heap, you must +also make the calls to insert the tuple into ALL of its indexes! If +not, the new tuple will generally be "invisible" to the system because +most of the accesses to the catalogs in question will be through the +associated indexes. diff --git a/src/backend/catalog/catalog.c b/src/backend/catalog/catalog.c new file mode 100644 index 00000000000..25588c0f88b --- /dev/null +++ b/src/backend/catalog/catalog.c @@ -0,0 +1,205 @@ +/*------------------------------------------------------------------------- + * + * catalog.c-- + * + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/catalog/catalog.c,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include /* XXX */ +#include "postgres.h" +#include "miscadmin.h" /* for DataDir */ +#include "access/htup.h" +#include "storage/buf.h" +#include "utils/elog.h" +#include "utils/palloc.h" + +#include "utils/syscache.h" +#include "catalog/catname.h" /* NameIs{,Shared}SystemRelationName */ +#include "catalog/pg_attribute.h" +#include "catalog/pg_type.h" +#include "catalog/catalog.h" +#include "storage/bufmgr.h" +#include "access/transam.h" + + +#ifndef MAXPATHLEN +#define MAXPATHLEN 80 +#endif + +/* + * relpath - path to the relation + * Perhaps this should be in-line code in relopen(). 
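 *
 *	(Editor's illustrative aside, not part of the original source: for
 *	a name listed in SharedSystemRelationNames the result is a freshly
 *	palloc'd "DataDir/relname" string, e.g. something like
 *	"/usr/local/pgsql/data/pg_log" under a hypothetical DataDir,
 *	assuming pg_log is among the shared catalogs; any other name is
 *	returned unchanged.)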
+ */ +char * +relpath(char relname[]) +{ + char *path; + + if (IsSharedSystemRelationName(relname)) { + path = (char *) palloc(strlen(DataDir) + sizeof(NameData) + 2); + sprintf(path, "%s/%.*s", DataDir, NAMEDATALEN, relname); + return (path); + } + return(relname); +} + +/* + * issystem - returns non-zero iff relname is a system catalog + * + * We now make a new requirement where system catalog relns must begin + * with pg_ while user relns are forbidden to do so. Make the test + * trivial and instantaneous. + * + * XXX this is way bogus. -- pma + */ +bool +issystem(char relname[]) +{ + if (relname[0] && relname[1] && relname[2]) + return (relname[0] == 'p' && + relname[1] == 'g' && + relname[2] == '_'); + else + return FALSE; +} + +/* + * IsSystemRelationName -- + * True iff name is the name of a system catalog relation. + * + * We now make a new requirement where system catalog relns must begin + * with pg_ while user relns are forbidden to do so. Make the test + * trivial and instantaneous. + * + * XXX this is way bogus. -- pma + */ +bool +IsSystemRelationName(char *relname) +{ + if (relname[0] && relname[1] && relname[2]) + return (relname[0] == 'p' && + relname[1] == 'g' && + relname[2] == '_'); + else + return FALSE; +} + +/* + * IsSharedSystemRelationName -- + * True iff name is the name of a shared system catalog relation. + */ +bool +IsSharedSystemRelationName(char *relname) +{ + int i; + + /* + * Quick out: if it's not a system relation, it can't be a shared + * system relation. + */ + if (!IsSystemRelationName(relname)) + return FALSE; + + i = 0; + while ( SharedSystemRelationNames[i] != NULL) { + if (strcmp(SharedSystemRelationNames[i],relname) == 0) + return TRUE; + i++; + } + return FALSE; +} + +/* + * newoid - returns a unique identifier across all catalogs. + * + * Object Id allocation is now done by GetNewObjectID in + * access/transam/varsup.c. oids are now allocated correctly. + * + * old comments: + * This needs to change soon, it fails if there are too many more + * than one call per second when postgres restarts after it dies. + * + * The distribution of OID's should be done by the POSTMASTER. + * Also there needs to be a facility to preallocate OID's. Ie., + * for a block of OID's to be declared as invalid ones to allow + * user programs to use them for temporary object identifiers. + */ +Oid newoid() +{ + Oid lastoid; + + GetNewObjectId(&lastoid); + if (! OidIsValid(lastoid)) + elog(WARN, "newoid: GetNewObjectId returns invalid oid"); + return lastoid; +} + +/* + * fillatt - fills the ATTRIBUTE relation fields from the TYP + * + * Expects that the atttypid domain is set for each att[]. + * Returns with the attnum, and attlen domains set. + * attnum, attproc, atttyparg, ... should be set by the user. + * + * In the future, attnum may not be set?!? or may be passed as an arg?!? + * + * Current implementation is very inefficient--should cashe the + * information if this is at all possible. + * + * Check to see if this is really needed, and especially in the case + * of index tuples. 
+ */ +void +fillatt(TupleDesc tupleDesc) +{ + AttributeTupleForm *attributeP; + register TypeTupleForm typp; + HeapTuple tuple; + int i; + int natts = tupleDesc->natts; + AttributeTupleForm *att = tupleDesc->attrs; + + if (natts < 0 || natts > MaxHeapAttributeNumber) + elog(WARN, "fillatt: %d attributes is too large", natts); + if (natts == 0) { + elog(DEBUG, "fillatt: called with natts == 0"); + return; + } + + attributeP = &att[0]; + + for (i = 0; i < natts;) { + tuple = SearchSysCacheTuple(TYPOID, + Int32GetDatum((*attributeP)->atttypid), + 0,0,0); + if (!HeapTupleIsValid(tuple)) { + elog(WARN, "fillatt: unknown atttypid %ld", + (*attributeP)->atttypid); + } else { + (*attributeP)->attnum = (int16) ++i; + /* Check if the attr is a set before messing with the length + and byval, since those were already set in + TupleDescInitEntry. In fact, this seems redundant + here, but who knows what I'll break if I take it out... + + same for char() and varchar() stuff. I share the same + sentiments. This function is poorly written anyway. -ay 6/95 + */ + if (!(*attributeP)->attisset && + (*attributeP)->atttypid!=BPCHAROID && + (*attributeP)->atttypid!=VARCHAROID) { + + typp = (TypeTupleForm) GETSTRUCT(tuple); /* XXX */ + (*attributeP)->attlen = typp->typlen; + (*attributeP)->attbyval = typp->typbyval; + } + } + attributeP += 1; + } +} diff --git a/src/backend/catalog/catalog.h b/src/backend/catalog/catalog.h new file mode 100644 index 00000000000..9a54e833b11 --- /dev/null +++ b/src/backend/catalog/catalog.h @@ -0,0 +1,24 @@ +/*------------------------------------------------------------------------- + * + * catalog.h-- + * prototypes for functions in lib/catalog/catalog.c + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: catalog.h,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef CATALOG_H +#define CATALOG_H + +#include "access/tupdesc.h" + +extern char *relpath(char relname[]); +extern bool IsSystemRelationName(char *relname); +extern bool IsSharedSystemRelationName(char *relname); +extern Oid newoid(void); +extern void fillatt(TupleDesc att); + +#endif /* CATALOG_H */ diff --git a/src/backend/catalog/catname.h b/src/backend/catalog/catname.h new file mode 100644 index 00000000000..8d965419493 --- /dev/null +++ b/src/backend/catalog/catname.h @@ -0,0 +1,52 @@ +/*------------------------------------------------------------------------- + * + * catname.h-- + * POSTGRES system catalog relation name definitions. 
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: catname.h,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef CATNAME_H +#define CATNAME_H + +#include "postgres.h" + + +#define AggregateRelationName "pg_aggregate" +#define AccessMethodRelationName "pg_am" +#define AccessMethodOperatorRelationName "pg_amop" +#define AccessMethodProcedureRelationName "pg_amproc" +#define AttributeRelationName "pg_attribute" +#define DatabaseRelationName "pg_database" +#define DefaultsRelationName "pg_defaults" +#define DemonRelationName "pg_demon" +#define GroupRelationName "pg_group" +#define HostsRelationName "pg_hosts" +#define IndexRelationName "pg_index" +#define InheritProcedureRelationName "pg_inheritproc" +#define InheritsRelationName "pg_inherits" +#define InheritancePrecidenceListRelationName "pg_ipl" +#define LanguageRelationName "pg_language" +#define ListenerRelationName "pg_listener" +#define LogRelationName "pg_log" +#define MagicRelationName "pg_magic" +#define OperatorClassRelationName "pg_opclass" +#define OperatorRelationName "pg_operator" +#define ProcedureRelationName "pg_proc" +#define RelationRelationName "pg_class" +#define RewriteRelationName "pg_rewrite" +#define ServerRelationName "pg_server" +#define StatisticRelationName "pg_statistic" +#define TimeRelationName "pg_time" +#define TypeRelationName "pg_type" +#define UserRelationName "pg_user" +#define VariableRelationName "pg_variable" +#define VersionRelationName "pg_version" + +extern char *SharedSystemRelationNames[]; + +#endif /* CATNAME_H */ diff --git a/src/backend/catalog/genbki.sh b/src/backend/catalog/genbki.sh new file mode 100644 index 00000000000..2f7e4025b83 --- /dev/null +++ b/src/backend/catalog/genbki.sh @@ -0,0 +1,218 @@ +#!/bin/sh +#------------------------------------------------------------------------- +# +# genbki.sh-- +# shell script which generates .bki files from specially formatted .h +# files. These .bki files are used to initialize the postgres template +# database. +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/catalog/Attic/genbki.sh,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ +# +# NOTES +# non-essential whitespace is removed from the generated file. +# if this is ever a problem, then the sed script at the very +# end can be changed into another awk script or something smarter.. +# +#------------------------------------------------------------------------- + +PATH=$PATH:/lib:/usr/ccs/lib # to find cpp +BKIOPTS='' +if [ $? != 0 ] +then + echo `basename $0`: Bad option + exit 1 +fi + +for opt in $* +do + case $opt in + -D) BKIOPTS="$BKIOPTS -D$2"; shift; shift;; + -D*) BKIOPTS="$BKIOPTS $1";shift;; + --) shift; break;; + esac +done + +# ---------------- +# collect nodefiles +# ---------------- +SYSFILES='' +x=1 +numargs=$# +while test $x -le $numargs ; do + SYSFILES="$SYSFILES $1" + x=`expr $x + 1` + shift +done + +# ---------------- +# strip comments and trash from .h before we generate +# the .bki file... +# ---------------- +# also, change Oid to oid. -- AY 8/94. +# also, change NameData to name. -- jolly 8/21/95. +# +cat $SYSFILES | \ +sed -e 's/\/\*.*\*\///g' \ + -e 's/;[ ]*$//g' \ + -e 's/\ Oid/\ oid/g' \ + -e 's/\ NameData/\ name/g' \ + -e 's/(NameData/(name/g' \ + -e 's/(Oid/(oid/g' | \ +awk ' +# ---------------- +# now use awk to process remaining .h file.. 
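#
# (Editorial illustrative aside, not part of the original script: given a
#  hypothetical header line such as
#
#      DATA(insert OID = 999 ( 0 example_name ));
#
#  the script strips the DATA( ... ) wrapper (the sed pass above has already
#  dropped the trailing semicolon), leaving
#
#      insert OID = 999 ( 0 example_name )
#
#  for the .bki file, while CATALOG(pg_example) blocks become
#  "create pg_example ( ... )" statements, or "create bootstrap ..." when
#  the BOOTSTRAP keyword is present, and DECLARE_INDEX()/BUILD_INDICES
#  lines become "declare index ..." and "build indices" commands.)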
+# +# nc is the number of catalogs +# inside is a variable set to 1 when we are scanning the +# contents of a catalog definition. +# inserting_data is a flag indicating when we are processing DATA lines. +# (i.e. have a relation open and need to close it) +# ---------------- +BEGIN { + inside = 0; + raw = 0; + bootstrap = 0; + nc = 0; + reln_open = 0; +} + +# ---------------- +# anything in a BKI_BEGIN .. BKI_END block should be passed +# along without interpretation. +# ---------------- +/^BKI_BEGIN/ { raw = 1; next; } +/^BKI_END/ { raw = 0; next; } +raw == 1 { print; next; } + +# ---------------- +# DATA() statements should get passed right through after +# stripping off the DATA( and the ) on the end. +# ---------------- +/^DATA\(/ { + data = substr($0, 6, length($0) - 6); + print data; + next; +} + +/^DECLARE_INDEX\(/ { +# ---- +# end any prior catalog data insertions before starting a define index +# ---- + if (reln_open == 1) { +# print "show"; + print "close " catalog; + reln_open = 0; + } + + data = substr($0, 15, length($0) - 15); + print "declare index " data +} + +/^BUILD_INDICES/ { print "build indices"; } + +# ---------------- +# CATALOG() definitions take some more work. +# ---------------- +/^CATALOG\(/ { +# ---- +# end any prior catalog data insertions before starting a new one.. +# ---- + if (reln_open == 1) { +# print "show"; + print "close " catalog; + reln_open = 0; + } + +# ---- +# get the name of the new catalog +# ---- + pos = index($1,")"); + catalog = substr($1,9,pos-9); + + if ($0 ~ /BOOTSTRAP/) { + bootstrap = 1; + } + + i = 1; + inside = 1; + nc++; + next; +} + +# ---------------- +# process the contents of the catalog definition +# +# attname[ x ] contains the attribute name for attribute x +# atttype[ x ] contains the attribute type fot attribute x +# ---------------- +inside == 1 { +# ---- +# ignore a leading brace line.. +# ---- + if ($1 ~ /\{/) + next; + +# ---- +# if this is the last line, then output the bki catalog stuff. +# ---- + if ($1 ~ /}/) { + if (bootstrap) { + print "create bootstrap " catalog; + } else { + print "create " catalog; + } + print "\t("; + + for (j=1; j /* for sprintf() */ +#include +#include + +#include "postgres.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/htup.h" +#include "access/istrat.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "utils/tqual.h" /* for NowTimeQual */ +#include "storage/buf.h" +#include "storage/bufmgr.h" +#include "storage/itemptr.h" +#include "lib/hasht.h" +#include "miscadmin.h" +#include "fmgr.h" +#include "utils/builtins.h" +#include "utils/elog.h" /* XXX */ +#include "utils/mcxt.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/relcache.h" + +#include "catalog/catname.h" +#include "catalog/pg_class.h" +#include "catalog/pg_attribute.h" +#include "catalog/pg_index.h" +#include "catalog/pg_inherits.h" +#include "catalog/pg_ipl.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "catalog/index.h" +#include "catalog/indexing.h" + +#include "catalog/catalog.h" +#include "parser/catalog_utils.h" + +#include "storage/lmgr.h" + +#include "rewrite/rewriteRemove.h" + +static void AddNewAttributeTuples(Oid new_rel_oid, TupleDesc tupdesc); +static void CheckAttributeNames(TupleDesc tupdesc); + +/* ---------------------------------------------------------------- + * XXX UGLY HARD CODED BADNESS FOLLOWS XXX + * + * these should all be moved to someplace in the lib/catalog + * module, if not obliterated first. 
+ * ---------------------------------------------------------------- + */ + + +/* + * Note: + * Should the executor special case these attributes in the future? + * Advantage: consume 1/2 the space in the ATTRIBUTE relation. + * Disadvantage: having rules to compute values in these tuples may + * be more difficult if not impossible. + */ + +static FormData_pg_attribute a1 = { + 0xffffffff, {"ctid"}, 27l, 0l, 0l, 0l, sizeof (ItemPointerData), + SelfItemPointerAttributeNumber, 0, '\0', '\001', 0l, 'i' +}; + +static FormData_pg_attribute a2 = { + 0xffffffff, {"oid"}, 26l, 0l, 0l, 0l, sizeof(Oid), + ObjectIdAttributeNumber, 0, '\001', '\001', 0l, 'i' +}; + +static FormData_pg_attribute a3 = { + 0xffffffff, {"xmin"}, 28l, 0l, 0l, 0l, sizeof (TransactionId), + MinTransactionIdAttributeNumber, 0, '\0', '\001', 0l, 'i', +}; + +static FormData_pg_attribute a4 = { + 0xffffffff, {"cmin"}, 29l, 0l, 0l, 0l, sizeof (CommandId), + MinCommandIdAttributeNumber, 0, '\001', '\001', 0l, 's' +}; + +static FormData_pg_attribute a5 = { + 0xffffffff, {"xmax"}, 28l, 0l, 0l, 0l, sizeof (TransactionId), + MaxTransactionIdAttributeNumber, 0, '\0', '\001', 0l, 'i' +}; + +static FormData_pg_attribute a6 = { + 0xffffffff, {"cmax"}, 29l, 0l, 0l, 0l, sizeof (CommandId), + MaxCommandIdAttributeNumber, 0, '\001', '\001', 0l, 's' +}; + +static FormData_pg_attribute a7 = { + 0xffffffff, {"chain"}, 27l, 0l, 0l, 0l, sizeof (ItemPointerData), + ChainItemPointerAttributeNumber, 0, '\0', '\001', 0l, 'i', +}; + +static FormData_pg_attribute a8 = { + 0xffffffff, {"anchor"}, 27l, 0l, 0l, 0l, sizeof (ItemPointerData), + AnchorItemPointerAttributeNumber, 0, '\0', '\001', 0l, 'i' +}; + +static FormData_pg_attribute a9 = { + 0xffffffff, {"tmin"}, 20l, 0l, 0l, 0l, sizeof (AbsoluteTime), + MinAbsoluteTimeAttributeNumber, 0, '\001', '\001', 0l, 'i' +}; + +static FormData_pg_attribute a10 = { + 0xffffffff, {"tmax"}, 20l, 0l, 0l, 0l, sizeof (AbsoluteTime), + MaxAbsoluteTimeAttributeNumber, 0, '\001', '\001', 0l, 'i' +}; + +static FormData_pg_attribute a11 = { + 0xffffffff, {"vtype"}, 18l, 0l, 0l, 0l, sizeof (char), + VersionTypeAttributeNumber, 0, '\001', '\001', 0l, 'c' +}; + +static AttributeTupleForm HeapAtt[] = +{ &a1, &a2, &a3, &a4, &a5, &a6, &a7, &a8, &a9, &a10, &a11 }; + +/* ---------------------------------------------------------------- + * XXX END OF UGLY HARD CODED BADNESS XXX + * ---------------------------------------------------------------- + */ + +/* the tempRelList holds + the list of temporary uncatalogued relations that are created. + these relations should be destroyed at the end of transactions +*/ +typedef struct tempRelList { + Relation *rels; /* array of relation descriptors */ + int num; /* number of temporary relations */ + int size; /* size of space allocated for the rels array */ +} TempRelList; + +#define TEMP_REL_LIST_SIZE 32 + +static TempRelList *tempRels = NULL; + + +/* ---------------------------------------------------------------- + * heap_creatr - Create an uncataloged heap relation + * + * Fields relpages, reltuples, reltuples, relkeys, relhistory, + * relisindexed, and relkind of rdesc->rd_rel are initialized + * to all zeros, as are rd_last and rd_hook. Rd_refcnt is set to 1. + * + * Remove the system relation specific code to elsewhere eventually. + * + * Eventually, must place information about this temporary relation + * into the transaction context block. 
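 *
 *	(Editor's illustrative aside, not part of the original source: a
 *	minimal call, assuming a tuple descriptor td already built with
 *	CreateTupleDesc(), is just
 *
 *	    Relation r = heap_creatr("example_rel", DEFAULT_SMGR, td);
 *
 *	with "example_rel" a hypothetical name; the bootstrap parser makes
 *	exactly this kind of call when it processes a "create bootstrap"
 *	statement.)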
+ * + * + * if heap_creatr is called with "" as the name, then heap_creatr will create a + * temporary name "temp_$RELOID" for the relation + * ---------------------------------------------------------------- + */ +Relation +heap_creatr(char *name, + unsigned smgr, + TupleDesc tupDesc) +{ + register unsigned i; + Oid relid; + Relation rdesc; + int len; + bool nailme = false; + char* relname = name; + char tempname[40]; + int isTemp = 0; + int natts = tupDesc->natts; +/* AttributeTupleForm *att = tupDesc->attrs; */ + + extern GlobalMemory CacheCxt; + MemoryContext oldcxt; + + /* ---------------- + * sanity checks + * ---------------- + */ + AssertArg(natts > 0); + + if (IsSystemRelationName(relname) && IsNormalProcessingMode()) + { + elog(WARN, + "Illegal class name: %s -- pg_ is reserved for system catalogs", + relname); + } + + /* ---------------- + * switch to the cache context so that we don't lose + * allocations at the end of this transaction, I guess. + * -cim 6/14/90 + * ---------------- + */ + if (!CacheCxt) + CacheCxt = CreateGlobalMemory("Cache"); + + oldcxt = MemoryContextSwitchTo((MemoryContext)CacheCxt); + + /* ---------------- + * real ugly stuff to assign the proper relid in the relation + * descriptor follows. + * ---------------- + */ + if (! strcmp(RelationRelationName,relname)) + { + relid = RelOid_pg_class; + nailme = true; + } + else if (! strcmp(AttributeRelationName,relname)) + { + relid = RelOid_pg_attribute; + nailme = true; + } + else if (! strcmp(ProcedureRelationName, relname)) + { + relid = RelOid_pg_proc; + nailme = true; + } + else if (! strcmp(TypeRelationName,relname)) + { + relid = RelOid_pg_type; + nailme = true; + } + else + { + relid = newoid(); + + if (name[0] == '\0') + { + sprintf(tempname, "temp_%d", relid); + relname = tempname; + isTemp = 1; + }; + } + + /* ---------------- + * allocate a new relation descriptor. + * + * XXX the length computation may be incorrect, handle elsewhere + * ---------------- + */ + len = sizeof(RelationData); + + rdesc = (Relation) palloc(len); + memset((char *)rdesc, 0,len); + + /* ---------- + create a new tuple descriptor from the one passed in + */ + rdesc->rd_att = CreateTupleDescCopy(tupDesc); + + /* ---------------- + * initialize the fields of our new relation descriptor + * ---------------- + */ + + /* ---------------- + * nail the reldesc if this is a bootstrap create reln and + * we may need it in the cache later on in the bootstrap + * process so we don't ever want it kicked out. e.g. pg_attribute!!! + * ---------------- + */ + if (nailme) + rdesc->rd_isnailed = true; + + RelationSetReferenceCount(rdesc, 1); + + rdesc->rd_rel = (Form_pg_class)palloc(sizeof *rdesc->rd_rel); + + memset((char *)rdesc->rd_rel, 0, + sizeof *rdesc->rd_rel); + namestrcpy(&(rdesc->rd_rel->relname), relname); + rdesc->rd_rel->relkind = RELKIND_UNCATALOGED; + rdesc->rd_rel->relnatts = natts; + rdesc->rd_rel->relsmgr = smgr; + + for (i = 0; i < natts; i++) { + rdesc->rd_att->attrs[i]->attrelid = relid; + } + + rdesc->rd_id = relid; + + if (nailme) { + /* for system relations, set the reltype field here */ + rdesc->rd_rel->reltype = relid; + } + + /* ---------------- + * have the storage manager create the relation. 
+ * ---------------- + */ + + rdesc->rd_fd = (File)smgrcreate(smgr, rdesc); + + RelationRegisterRelation(rdesc); + + MemoryContextSwitchTo(oldcxt); + + /* add all temporary relations to the tempRels list + so they can be properly disposed of at the end of transaction + */ + if (isTemp) + AddToTempRelList(rdesc); + + return (rdesc); +} + + +/* ---------------------------------------------------------------- + * heap_create - Create a cataloged relation + * + * this is done in 6 steps: + * + * 1) CheckAttributeNames() is used to make certain the tuple + * descriptor contains a valid set of attribute names + * + * 2) pg_class is opened and RelationAlreadyExists() + * preforms a scan to ensure that no relation with the + * same name already exists. + * + * 3) heap_creater() is called to create the new relation on + * disk. + * + * 4) TypeDefine() is called to define a new type corresponding + * to the new relation. + * + * 5) AddNewAttributeTuples() is called to register the + * new relation's schema in pg_attribute. + * + * 6) AddPgRelationTuple() is called to register the + * relation itself in the catalogs. + * + * 7) the relations are closed and the new relation's oid + * is returned. + * + * old comments: + * A new relation is inserted into the RELATION relation + * with the specified attribute(s) (newly inserted into + * the ATTRIBUTE relation). How does concurrency control + * work? Is it automatic now? Expects the caller to have + * attname, atttypid, atttyparg, attproc, and attlen domains filled. + * Create fills the attnum domains sequentually from zero, + * fills the attnvals domains with zeros, and fills the + * attrelid fields with the relid. + * + * scan relation catalog for name conflict + * scan type catalog for typids (if not arg) + * create and insert attribute(s) into attribute catalog + * create new relation + * insert new relation into attribute catalog + * + * Should coordinate with heap_creater(). Either it should + * not be called or there should be a way to prevent + * the relation from being removed at the end of the + * transaction if it is successful ('u'/'r' may be enough). + * Also, if the transaction does not commit, then the + * relation should be removed. + * + * XXX amcreate ignores "off" when inserting (for now). + * XXX amcreate (like the other utilities) needs to understand indexes. + * + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * CheckAttributeNames + * + * this is used to make certain the tuple descriptor contains a + * valid set of attribute names. a problem simply generates + * elog(WARN) which aborts the current transaction. 
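 *
 *	(Editor's illustrative aside, not part of the original source: for
 *	example, a tuple descriptor that names a user column "oid" or
 *	"ctid" collides with the hard-coded system attributes above and
 *	draws the "create: system attribute named" error, while two user
 *	columns with the same name draw the "create: repeated attribute"
 *	error.)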
+ * -------------------------------- + */ +static void +CheckAttributeNames(TupleDesc tupdesc) +{ + unsigned i; + unsigned j; + int natts = tupdesc->natts; + + /* ---------------- + * first check for collision with system attribute names + * ---------------- + * + * also, warn user if attribute to be created has + * an unknown typid (usually as a result of a 'retrieve into' + * - jolly + */ + for (i = 0; i < natts; i += 1) { + for (j = 0; j < sizeof HeapAtt / sizeof HeapAtt[0]; j += 1) { + if (nameeq(&(HeapAtt[j]->attname), + &(tupdesc->attrs[i]->attname))) { + elog(WARN, + "create: system attribute named \"%s\"", + HeapAtt[j]->attname.data); + } + } + if (tupdesc->attrs[i]->atttypid == UNKNOWNOID) + { + elog(NOTICE, + "create: attribute named \"%s\" has an unknown type", + tupdesc->attrs[i]->attname.data); + } + } + + /* ---------------- + * next check for repeated attribute names + * ---------------- + */ + for (i = 1; i < natts; i += 1) { + for (j = 0; j < i; j += 1) { + if (nameeq(&(tupdesc->attrs[j]->attname), + &(tupdesc->attrs[i]->attname))) { + elog(WARN, + "create: repeated attribute \"%s\"", + tupdesc->attrs[j]->attname.data); + } + } + } +} + +/* -------------------------------- + * RelationAlreadyExists + * + * this preforms a scan of pg_class to ensure that + * no relation with the same name already exists. The caller + * has to open pg_class and pass an open descriptor. + * -------------------------------- + */ +int +RelationAlreadyExists(Relation pg_class_desc, char relname[]) +{ + ScanKeyData key; + HeapScanDesc pg_class_scan; + HeapTuple tup; + + /* + * If this is not bootstrap (initdb) time, use the catalog index + * on pg_class. + */ + + if (!IsBootstrapProcessingMode()) { + tup = ClassNameIndexScan(pg_class_desc, relname); + if (HeapTupleIsValid(tup)) { + pfree(tup); + return ((int) true); + } else + return ((int) false); + } + + /* ---------------- + * At bootstrap time, we have to do this the hard way. Form the + * scan key. + * ---------------- + */ + ScanKeyEntryInitialize(&key, + 0, + (AttrNumber)Anum_pg_class_relname, + (RegProcedure)NameEqualRegProcedure, + (Datum) relname); + + /* ---------------- + * begin the scan + * ---------------- + */ + pg_class_scan = heap_beginscan(pg_class_desc, + 0, + NowTimeQual, + 1, + &key); + + /* ---------------- + * get a tuple. if the tuple is NULL then it means we + * didn't find an existing relation. + * ---------------- + */ + tup = heap_getnext(pg_class_scan, 0, (Buffer *)NULL); + + /* ---------------- + * end the scan and return existance of relation. + * ---------------- + */ + heap_endscan(pg_class_scan); + + return + (PointerIsValid(tup) == true); +} + +/* -------------------------------- + * AddNewAttributeTuples + * + * this registers the new relation's schema by adding + * tuples to pg_attribute. + * -------------------------------- + */ +static void +AddNewAttributeTuples(Oid new_rel_oid, + TupleDesc tupdesc) +{ + AttributeTupleForm *dpp; + unsigned i; + HeapTuple tup; + Relation rdesc; + bool hasindex; + Relation idescs[Num_pg_attr_indices]; + int natts = tupdesc->natts; + + /* ---------------- + * open pg_attribute + * ---------------- + */ + rdesc = heap_openr(AttributeRelationName); + + /* ----------------- + * Check if we have any indices defined on pg_attribute. 
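+ * if it does, every tuple inserted into pg_attribute below has to be
+ * inserted into those indices too; CatalogOpenIndices(),
+ * CatalogIndexInsert() and CatalogCloseIndices() take care of that.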
+ * ----------------- + */ + Assert(rdesc); + Assert(rdesc->rd_rel); + hasindex = RelationGetRelationTupleForm(rdesc)->relhasindex; + if (hasindex) + CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs); + + /* ---------------- + * initialize tuple descriptor. Note we use setheapoverride() + * so that we can see the effects of our TypeDefine() done + * previously. + * ---------------- + */ + setheapoverride(true); + fillatt(tupdesc); + setheapoverride(false); + + /* ---------------- + * first we add the user attributes.. + * ---------------- + */ + dpp = tupdesc->attrs; + for (i = 0; i < natts; i++) { + (*dpp)->attrelid = new_rel_oid; + (*dpp)->attnvals = 0l; + + tup = heap_addheader(Natts_pg_attribute, + ATTRIBUTE_TUPLE_SIZE, + (char *) *dpp); + + heap_insert(rdesc, tup); + + if (hasindex) + CatalogIndexInsert(idescs, Num_pg_attr_indices, rdesc, tup); + + pfree(tup); + dpp++; + } + + /* ---------------- + * next we add the system attributes.. + * ---------------- + */ + dpp = HeapAtt; + for (i = 0; i < -1 - FirstLowInvalidHeapAttributeNumber; i++) { + (*dpp)->attrelid = new_rel_oid; + /* (*dpp)->attnvals = 0l; unneeded */ + + tup = heap_addheader(Natts_pg_attribute, + ATTRIBUTE_TUPLE_SIZE, + (char *)*dpp); + + heap_insert(rdesc, tup); + + if (hasindex) + CatalogIndexInsert(idescs, Num_pg_attr_indices, rdesc, tup); + + pfree(tup); + dpp++; + } + + heap_close(rdesc); + + /* + * close pg_attribute indices + */ + if (hasindex) + CatalogCloseIndices(Num_pg_attr_indices, idescs); +} + +/* -------------------------------- + * AddPgRelationTuple + * + * this registers the new relation in the catalogs by + * adding a tuple to pg_class. + * -------------------------------- + */ +void +AddPgRelationTuple(Relation pg_class_desc, + Relation new_rel_desc, + Oid new_rel_oid, + int arch, + unsigned natts) +{ + Form_pg_class new_rel_reltup; + HeapTuple tup; + Relation idescs[Num_pg_class_indices]; + bool isBootstrap; + + /* ---------------- + * first we munge some of the information in our + * uncataloged relation's relation descriptor. + * ---------------- + */ + new_rel_reltup = new_rel_desc->rd_rel; + + /* CHECK should get new_rel_oid first via an insert then use XXX */ + /* new_rel_reltup->reltuples = 1; */ /* XXX */ + + new_rel_reltup->relowner = GetUserId(); + new_rel_reltup->relkind = RELKIND_RELATION; + new_rel_reltup->relarch = arch; + new_rel_reltup->relnatts = natts; + + /* ---------------- + * now form a tuple to add to pg_class + * XXX Natts_pg_class_fixed is a hack - see pg_class.h + * ---------------- + */ + tup = heap_addheader(Natts_pg_class_fixed, + CLASS_TUPLE_SIZE, + (char *) new_rel_reltup); + tup->t_oid = new_rel_oid; + + /* ---------------- + * finally insert the new tuple and free it. + * + * Note: I have no idea why we do a + * SetProcessingMode(BootstrapProcessing); + * here -cim 6/14/90 + * ---------------- + */ + isBootstrap = IsBootstrapProcessingMode() ? true : false; + + SetProcessingMode(BootstrapProcessing); + + heap_insert(pg_class_desc, tup); + + if (! isBootstrap) { + /* + * First, open the catalog indices and insert index tuples for + * the new relation. 
+ */ + + CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_class_indices, pg_class_desc, tup); + CatalogCloseIndices(Num_pg_class_indices, idescs); + + /* now restore processing mode */ + SetProcessingMode(NormalProcessing); + } + + pfree(tup); +} + + +/* -------------------------------- + * addNewRelationType - + * + * define a complex type corresponding to the new relation + * -------------------------------- + */ +void +addNewRelationType(char *typeName, Oid new_rel_oid) +{ + Oid new_type_oid; + + /* The sizes are set to oid size because it makes implementing sets MUCH + * easier, and no one (we hope) uses these fields to figure out + * how much space to allocate for the type. + * An oid is the type used for a set definition. When a user + * requests a set, what they actually get is the oid of a tuple in + * the pg_proc catalog, so the size of the "set" is the size + * of an oid. + * Similarly, byval being true makes sets much easier, and + * it isn't used by anything else. + * Note the assumption that OIDs are the same size as int4s. + */ + new_type_oid = TypeCreate(typeName, /* type name */ + new_rel_oid, /* relation oid */ + tlen(type("oid")), /* internal size */ + tlen(type("oid")), /* external size */ + 'c', /* type-type (catalog) */ + ',', /* default array delimiter */ + "int4in", /* input procedure */ + "int4out", /* output procedure */ + "int4in", /* send procedure */ + "int4out", /* receive procedure */ + NULL, /* array element type - irrelevent */ + "-", /* default type value */ + (bool) 1, /* passed by value */ + 'i'); /* default alignment */ +} + +/* -------------------------------- + * heap_create + * + * creates a new cataloged relation. see comments above. + * -------------------------------- + */ +Oid +heap_create(char relname[], + char *typename, /* not used currently */ + int arch, + unsigned smgr, + TupleDesc tupdesc) +{ + Relation pg_class_desc; + Relation new_rel_desc; + Oid new_rel_oid; +/* NameData typeNameData; */ + int natts = tupdesc->natts; + + /* ---------------- + * sanity checks + * ---------------- + */ + AssertState(IsNormalProcessingMode() || IsBootstrapProcessingMode()); + if (natts == 0 || natts > MaxHeapAttributeNumber) + elog(WARN, "amcreate: from 1 to %d attributes must be specified", + MaxHeapAttributeNumber); + + CheckAttributeNames(tupdesc); + + /* ---------------- + * open pg_class and see that the relation doesn't + * already exist. + * ---------------- + */ + pg_class_desc = heap_openr(RelationRelationName); + + if (RelationAlreadyExists(pg_class_desc, relname)) { + heap_close(pg_class_desc); + elog(WARN, "amcreate: %s relation already exists", relname); + } + + /* ---------------- + * ok, relation does not already exist so now we + * create an uncataloged relation and pull its relation oid + * from the newly formed relation descriptor. + * + * Note: The call to heap_creatr() does all the "real" work + * of creating the disk file for the relation. + * ---------------- + */ + new_rel_desc = heap_creatr(relname, smgr, tupdesc); + new_rel_oid = new_rel_desc->rd_att->attrs[0]->attrelid; + + /* ---------------- + * since defining a relation also defines a complex type, + * we add a new system type corresponding to the new relation. 
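+ * (this pg_type entry is what later allows other relations to declare
+ * attributes of this relation's type; DeletePgTypeTuple() refuses to
+ * destroy a relation while such an attribute exists, and removes the
+ * entry again otherwise.)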
+ * ---------------- + */ +/* namestrcpy(&typeNameData, relname);*/ +/* addNewRelationType(&typeNameData, new_rel_oid);*/ + addNewRelationType(relname, new_rel_oid); + + /* ---------------- + * now add tuples to pg_attribute for the attributes in + * our new relation. + * ---------------- + */ + AddNewAttributeTuples(new_rel_oid, tupdesc); + + /* ---------------- + * now update the information in pg_class. + * ---------------- + */ + AddPgRelationTuple(pg_class_desc, + new_rel_desc, + new_rel_oid, + arch, + natts); + + /* ---------------- + * ok, the relation has been cataloged, so close our relations + * and return the oid of the newly created relation. + * + * SOMEDAY: fill the STATISTIC relation properly. + * ---------------- + */ + heap_close(new_rel_desc); + heap_close(pg_class_desc); + + return new_rel_oid; +} + + +/* ---------------------------------------------------------------- + * heap_destroy - removes all record of named relation from catalogs + * + * 1) open relation, check for existence, etc. + * 2) remove inheritance information + * 3) remove indexes + * 4) remove pg_class tuple + * 5) remove pg_attribute tuples + * 6) remove pg_type tuples + * 7) unlink relation + * + * old comments + * Except for vital relations, removes relation from + * relation catalog, and related attributes from + * attribute catalog (needed?). (Anything else???) + * + * get proper relation from relation catalog (if not arg) + * check if relation is vital (strcmp()/reltype???) + * scan attribute catalog deleting attributes of reldesc + * (necessary?) + * delete relation from relation catalog + * (How are the tuples of the relation discarded???) + * + * XXX Must fix to work with indexes. + * There may be a better order for doing things. + * Problems with destroying a deleted database--cannot create + * a struct reldesc without having an open file descriptor. + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * RelationRemoveInheritance + * + * Note: for now, we cause an exception if relation is a + * superclass. Someday, we may want to allow this and merge + * the type info into subclass procedures.... this seems like + * lots of work. + * -------------------------------- + */ +void +RelationRemoveInheritance(Relation relation) +{ + Relation catalogRelation; + HeapTuple tuple; + HeapScanDesc scan; + ScanKeyData entry; + + /* ---------------- + * open pg_inherits + * ---------------- + */ + catalogRelation = heap_openr(InheritsRelationName); + + /* ---------------- + * form a scan key for the subclasses of this class + * and begin scanning + * ---------------- + */ + ScanKeyEntryInitialize(&entry, 0x0, Anum_pg_inherits_inhparent, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(RelationGetRelationId(relation))); + + scan = heap_beginscan(catalogRelation, + false, + NowTimeQual, + 1, + &entry); + + /* ---------------- + * if any subclasses exist, then we disallow the deletion. + * ---------------- + */ + tuple = heap_getnext(scan, 0, (Buffer *)NULL); + if (HeapTupleIsValid(tuple)) { + heap_endscan(scan); + heap_close(catalogRelation); + + elog(WARN, "relation <%d> inherits \"%s\"", + ((InheritsTupleForm) GETSTRUCT(tuple))->inhrel, + RelationGetRelationName(relation)); + } + + /* ---------------- + * If we get here, it means the relation has no subclasses + * so we can trash it. First we remove dead INHERITS tuples. 
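+ * (the scan key initialized above is reused for this: only sk_attno
+ * changes, from inhparent to inhrel, so the second scan finds the
+ * pg_inherits entries that name this relation as the child.)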
+ * ---------------- + */ + entry.sk_attno = Anum_pg_inherits_inhrel; + + scan = heap_beginscan(catalogRelation, + false, + NowTimeQual, + 1, + &entry); + + for (;;) { + tuple = heap_getnext(scan, 0, (Buffer *)NULL); + if (!HeapTupleIsValid(tuple)) { + break; + } + heap_delete(catalogRelation, &tuple->t_ctid); + } + + heap_endscan(scan); + heap_close(catalogRelation); + + /* ---------------- + * now remove dead IPL tuples + * ---------------- + */ + catalogRelation = + heap_openr(InheritancePrecidenceListRelationName); + + entry.sk_attno = Anum_pg_ipl_iplrel; + + scan = heap_beginscan(catalogRelation, + false, + NowTimeQual, + 1, + &entry); + + for (;;) { + tuple = heap_getnext(scan, 0, (Buffer *)NULL); + if (!HeapTupleIsValid(tuple)) { + break; + } + heap_delete(catalogRelation, &tuple->t_ctid); + } + + heap_endscan(scan); + heap_close(catalogRelation); +} + +/* -------------------------------- + * RelationRemoveIndexes + * + * -------------------------------- + */ +void +RelationRemoveIndexes(Relation relation) +{ + Relation indexRelation; + HeapTuple tuple; + HeapScanDesc scan; + ScanKeyData entry; + + indexRelation = heap_openr(IndexRelationName); + + ScanKeyEntryInitialize(&entry, 0x0, Anum_pg_index_indrelid, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(RelationGetRelationId(relation))); + + scan = heap_beginscan(indexRelation, + false, + NowTimeQual, + 1, + &entry); + + for (;;) { + tuple = heap_getnext(scan, 0, (Buffer *)NULL); + if (!HeapTupleIsValid(tuple)) { + break; + } + + index_destroy(((IndexTupleForm)GETSTRUCT(tuple))->indexrelid); + } + + heap_endscan(scan); + heap_close(indexRelation); +} + +/* -------------------------------- + * DeletePgRelationTuple + * + * -------------------------------- + */ +void +DeletePgRelationTuple(Relation rdesc) +{ + Relation pg_class_desc; + HeapScanDesc pg_class_scan; + ScanKeyData key; + HeapTuple tup; + + /* ---------------- + * open pg_class + * ---------------- + */ + pg_class_desc = heap_openr(RelationRelationName); + + /* ---------------- + * create a scan key to locate the relation oid of the + * relation to delete + * ---------------- + */ + ScanKeyEntryInitialize(&key, 0, ObjectIdAttributeNumber, + F_INT4EQ, rdesc->rd_att->attrs[0]->attrelid); + + pg_class_scan = heap_beginscan(pg_class_desc, + 0, + NowTimeQual, + 1, + &key); + + /* ---------------- + * use heap_getnext() to fetch the pg_class tuple. If this + * tuple is not valid then something's wrong. + * ---------------- + */ + tup = heap_getnext(pg_class_scan, 0, (Buffer *) NULL); + + if (! PointerIsValid(tup)) { + heap_endscan(pg_class_scan); + heap_close(pg_class_desc); + elog(WARN, "DeletePgRelationTuple: %s relation nonexistent", + &rdesc->rd_rel->relname); + } + + /* ---------------- + * delete the relation tuple from pg_class, and finish up. + * ---------------- + */ + heap_endscan(pg_class_scan); + heap_delete(pg_class_desc, &tup->t_ctid); + + heap_close(pg_class_desc); +} + +/* -------------------------------- + * DeletePgAttributeTuples + * + * -------------------------------- + */ +void +DeletePgAttributeTuples(Relation rdesc) +{ + Relation pg_attribute_desc; + HeapScanDesc pg_attribute_scan; + ScanKeyData key; + HeapTuple tup; + + /* ---------------- + * open pg_attribute + * ---------------- + */ + pg_attribute_desc = heap_openr(AttributeRelationName); + + /* ---------------- + * create a scan key to locate the attribute tuples to delete + * and begin the scan. 
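+ * (rd_att->attrs[0]->attrelid serves as the relation's oid here:
+ * heap_creatr() stamped every attribute of the descriptor with the
+ * relation oid, and heap_create() fetches new_rel_oid the same way.)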
+ * ---------------- + */ + ScanKeyEntryInitialize(&key, 0, Anum_pg_attribute_attrelid, + F_INT4EQ, rdesc->rd_att->attrs[0]->attrelid); + + /* ----------------- + * Get a write lock _before_ getting the read lock in the scan + * ---------------- + */ + RelationSetLockForWrite(pg_attribute_desc); + + pg_attribute_scan = heap_beginscan(pg_attribute_desc, + 0, + NowTimeQual, + 1, + &key); + + /* ---------------- + * use heap_getnext() / amdelete() until all attribute tuples + * have been deleted. + * ---------------- + */ + while (tup = heap_getnext(pg_attribute_scan, 0, (Buffer *)NULL), + PointerIsValid(tup)) { + + heap_delete(pg_attribute_desc, &tup->t_ctid); + } + + /* ---------------- + * finish up. + * ---------------- + */ + heap_endscan(pg_attribute_scan); + + /* ---------------- + * Release the write lock + * ---------------- + */ + RelationUnsetLockForWrite(pg_attribute_desc); + heap_close(pg_attribute_desc); +} + + +/* -------------------------------- + * DeletePgTypeTuple + * + * If the user attempts to destroy a relation and there + * exists attributes in other relations of type + * "relation we are deleting", then we have to do something + * special. presently we disallow the destroy. + * -------------------------------- + */ +void +DeletePgTypeTuple(Relation rdesc) +{ + Relation pg_type_desc; + HeapScanDesc pg_type_scan; + Relation pg_attribute_desc; + HeapScanDesc pg_attribute_scan; + ScanKeyData key; + ScanKeyData attkey; + HeapTuple tup; + HeapTuple atttup; + Oid typoid; + + /* ---------------- + * open pg_type + * ---------------- + */ + pg_type_desc = heap_openr(TypeRelationName); + + /* ---------------- + * create a scan key to locate the type tuple corresponding + * to this relation. + * ---------------- + */ + ScanKeyEntryInitialize(&key, 0, Anum_pg_type_typrelid, F_INT4EQ, + rdesc->rd_att->attrs[0]->attrelid); + + pg_type_scan = heap_beginscan(pg_type_desc, + 0, + NowTimeQual, + 1, + &key); + + /* ---------------- + * use heap_getnext() to fetch the pg_type tuple. If this + * tuple is not valid then something's wrong. + * ---------------- + */ + tup = heap_getnext(pg_type_scan, 0, (Buffer *)NULL); + + if (! PointerIsValid(tup)) { + heap_endscan(pg_type_scan); + heap_close(pg_type_desc); + elog(WARN, "DeletePgTypeTuple: %s type nonexistent", + &rdesc->rd_rel->relname); + } + + /* ---------------- + * now scan pg_attribute. if any other relations have + * attributes of the type of the relation we are deleteing + * then we have to disallow the deletion. should talk to + * stonebraker about this. -cim 6/19/90 + * ---------------- + */ + typoid = tup->t_oid; + + pg_attribute_desc = heap_openr(AttributeRelationName); + + ScanKeyEntryInitialize(&attkey, + 0, Anum_pg_attribute_atttypid, F_INT4EQ, + typoid); + + pg_attribute_scan = heap_beginscan(pg_attribute_desc, + 0, + NowTimeQual, + 1, + &attkey); + + /* ---------------- + * try and get a pg_attribute tuple. if we succeed it means + * we cant delete the relation because something depends on + * the schema. 
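+ * (for example, if some other class has an attribute whose type is
+ * the class being destroyed, that attribute's pg_attribute tuple
+ * shows up in this scan and the elog(WARN) below aborts the destroy.)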
+ * ---------------- + */ + atttup = heap_getnext(pg_attribute_scan, 0, (Buffer *)NULL); + + if (PointerIsValid(atttup)) { + Oid relid = ((AttributeTupleForm) GETSTRUCT(atttup))->attrelid; + + heap_endscan(pg_type_scan); + heap_close(pg_type_desc); + heap_endscan(pg_attribute_scan); + heap_close(pg_attribute_desc); + + elog(WARN, "DeletePgTypeTuple: att of type %s exists in relation %d", + &rdesc->rd_rel->relname, relid); + } + heap_endscan(pg_attribute_scan); + heap_close(pg_attribute_desc); + + /* ---------------- + * Ok, it's safe so we delete the relation tuple + * from pg_type and finish up. But first end the scan so that + * we release the read lock on pg_type. -mer 13 Aug 1991 + * ---------------- + */ + heap_endscan(pg_type_scan); + heap_delete(pg_type_desc, &tup->t_ctid); + + heap_close(pg_type_desc); +} + +/* -------------------------------- + * heap_destroy + * + * -------------------------------- + */ +void +heap_destroy(char *relname) +{ + Relation rdesc; + + /* ---------------- + * first open the relation. if the relation does exist, + * heap_openr() returns NULL. + * ---------------- + */ + rdesc = heap_openr(relname); + if (rdesc == NULL) + elog(WARN,"Relation %s Does Not Exist!", relname); + + /* ---------------- + * prevent deletion of system relations + * ---------------- + */ + if (IsSystemRelationName(RelationGetRelationName(rdesc)->data)) + elog(WARN, "amdestroy: cannot destroy %s relation", + &rdesc->rd_rel->relname); + + /* ---------------- + * remove inheritance information + * ---------------- + */ + RelationRemoveInheritance(rdesc); + + /* ---------------- + * remove indexes if necessary + * ---------------- + */ + if (rdesc->rd_rel->relhasindex) { + RelationRemoveIndexes(rdesc); + } + + /* ---------------- + * remove rules if necessary + * ---------------- + */ + if (rdesc->rd_rules != NULL) { + RelationRemoveRules(rdesc->rd_id); + } + + /* ---------------- + * delete attribute tuples + * ---------------- + */ + DeletePgAttributeTuples(rdesc); + + /* ---------------- + * delete type tuple. here we want to see the effects + * of the deletions we just did, so we use setheapoverride(). + * ---------------- + */ + setheapoverride(true); + DeletePgTypeTuple(rdesc); + setheapoverride(false); + + /* ---------------- + * delete relation tuple + * ---------------- + */ + DeletePgRelationTuple(rdesc); + + /* ---------------- + * flush the relation from the relcache + * ---------------- + */ + RelationIdInvalidateRelationCacheByRelationId(rdesc->rd_id); + + /* ---------------- + * unlink the relation and finish up. + * ---------------- + */ + (void) smgrunlink(rdesc->rd_rel->relsmgr, rdesc); + heap_close(rdesc); +} + +/* + * heap_destroyr + * destroy and close temporary relations + * + */ + +void +heap_destroyr(Relation rdesc) +{ + ReleaseTmpRelBuffers(rdesc); + (void) smgrunlink(rdesc->rd_rel->relsmgr, rdesc); + heap_close(rdesc); + RemoveFromTempRelList(rdesc); +} + + +/************************************************************** + functions to deal with the list of temporary relations +**************************************************************/ + +/* -------------- + InitTempRellist(): + + initialize temporary relations list + the tempRelList is a list of temporary relations that + are created in the course of the transactions + they need to be destroyed properly at the end of the transactions + + MODIFIES the global variable tempRels + + >> NOTE << + + malloc is used instead of palloc because we KNOW when we are + going to free these things. 
Keeps us away from the memory context + hairiness + +*/ +void +InitTempRelList() +{ + if (tempRels) { + free(tempRels->rels); + free(tempRels); + }; + + tempRels = (TempRelList*)malloc(sizeof(TempRelList)); + tempRels->size = TEMP_REL_LIST_SIZE; + tempRels->rels = (Relation*)malloc(sizeof(Relation) * tempRels->size); + memset(tempRels->rels, 0, sizeof(Relation) * tempRels->size); + tempRels->num = 0; +} + +/* + removes a relation from the TempRelList + + MODIFIES the global variable tempRels + we don't really remove it, just mark it as NULL + and DestroyTempRels will look for NULLs +*/ +void +RemoveFromTempRelList(Relation r) +{ + int i; + + if (!tempRels) + return; + + for (i=0; i < tempRels->num; i++) { + if (tempRels->rels[i] == r) { + tempRels->rels[i] = NULL; + break; + } + } +} + +/* + add a temporary relation to the TempRelList + + MODIFIES the global variable tempRels +*/ +void +AddToTempRelList(Relation r) +{ + if (!tempRels) + return; + + if (tempRels->num == tempRels->size) { + tempRels->size += TEMP_REL_LIST_SIZE; + tempRels->rels = realloc(tempRels->rels, sizeof(Relation) * tempRels->size); + } + tempRels->rels[tempRels->num] = r; + tempRels->num++; +} + +/* + go through the tempRels list and destroy each of the relations +*/ +void +DestroyTempRels() +{ + int i; + Relation rdesc; + + if (!tempRels) + return; + + for (i=0; i < tempRels->num; i++) { + rdesc = tempRels->rels[i]; + /* rdesc may be NULL if it has been removed from the list already */ + if (rdesc) + heap_destroyr(rdesc); + } + free(tempRels->rels); + free(tempRels); + tempRels = NULL; +} + diff --git a/src/backend/catalog/heap.h b/src/backend/catalog/heap.h new file mode 100644 index 00000000000..edcd5bf5ed8 --- /dev/null +++ b/src/backend/catalog/heap.h @@ -0,0 +1,42 @@ +/*------------------------------------------------------------------------- + * + * heap.h-- + * prototypes for functions in lib/catalog/heap.c + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: heap.h,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef HEAP_H +#define HEAP_H + +extern Relation heap_creatr(char *relname, unsigned smgr, TupleDesc att); + +extern int RelationAlreadyExists(Relation pg_class_desc, char relname[]); +extern void addNewRelationType(char *typeName, Oid new_rel_oid); + +extern void AddPgRelationTuple(Relation pg_class_desc, + Relation new_rel_desc, Oid new_rel_oid, int arch, unsigned natts); + +extern Oid heap_create(char relname[], + char *typename, + int arch, + unsigned smgr, TupleDesc tupdesc); + +extern void RelationRemoveInheritance(Relation relation); +extern void RelationRemoveIndexes(Relation relation); +extern void DeletePgRelationTuple(Relation rdesc); +extern void DeletePgAttributeTuples(Relation rdesc); +extern void DeletePgTypeTuple(Relation rdesc); +extern void heap_destroy(char relname[]); +extern void heap_destroyr(Relation r); + +extern void InitTempRelList(); +extern void AddToTempRelList(Relation r); +extern void RemoveFromTempRelList(Relation r); +extern void DestroyTempRels(); + +#endif /* HEAP_H */ diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c new file mode 100644 index 00000000000..b04010bf95b --- /dev/null +++ b/src/backend/catalog/index.c @@ -0,0 +1,1655 @@ +/*------------------------------------------------------------------------- + * + * index.c-- + * code to create and destroy POSTGRES index relations + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION +
* $Header: /cvsroot/pgsql/src/backend/catalog/index.c,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ + * + * + * INTERFACE ROUTINES + * index_create() - Create a cataloged index relation + * index_destroy() - Removes index relation from catalogs + * + * NOTES + * Much of this code uses hardcoded sequential heap relation scans + * to fetch information from the catalogs. These should all be + * rewritten to use the system caches lookup routines like + * SearchSysCacheTuple, which can do efficient lookup and + * caching. + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/attnum.h" +#include "access/genam.h" +#include "access/heapam.h" +#include "access/itup.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "utils/builtins.h" +#include "utils/tqual.h" +#include "access/tupdesc.h" +#include "access/funcindex.h" +#include "access/xact.h" + +#include "storage/smgr.h" +#include "miscadmin.h" +#include "utils/mcxt.h" +#include "utils/palloc.h" +#include "utils/rel.h" +#include "utils/relcache.h" +#include "utils/elog.h" + +#include "bootstrap/bootstrap.h" + +#include "catalog/catname.h" +#include "catalog/catalog.h" +#include "utils/syscache.h" +#include "catalog/pg_attribute.h" +#include "catalog/pg_index.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_class.h" +#include "catalog/pg_type.h" +#include "catalog/indexing.h" + +#include "catalog/heap.h" + +#include "nodes/execnodes.h" +#include "nodes/plannodes.h" + +#include "catalog/index.h" + +#include "executor/executor.h" +#include "executor/tuptable.h" + +#include "optimizer/clauses.h" +#include "optimizer/prep.h" + +#include "parser/catalog_utils.h" + +#include "machine.h" + +/* + * macros used in guessing how many tuples are on a page. + */ +#define AVG_TUPLE_SIZE 8 +#define NTUPLES_PER_PAGE(natts) (BLCKSZ/((natts)*AVG_TUPLE_SIZE)) + +/* non-export function prototypes */ +static Oid RelationNameGetObjectId(char *relationName, Relation pg_class, + bool setHasIndexAttribute); +static Oid GetHeapRelationOid(char *heapRelationName, char *indexRelationName); +static TupleDesc BuildFuncTupleDesc(FuncIndexInfo *funcInfo); +static TupleDesc ConstructTupleDescriptor(Oid heapoid, Relation heapRelation, + int numatts, AttrNumber attNums[]); + +static void ConstructIndexReldesc(Relation indexRelation, Oid amoid); +static Oid UpdateRelationRelation(Relation indexRelation); +static void InitializeAttributeOids(Relation indexRelation, + int numatts, + Oid indexoid); +static void +AppendAttributeTuples(Relation indexRelation, int numatts); +static void UpdateIndexRelation(Oid indexoid, Oid heapoid, + FuncIndexInfo *funcInfo, int natts, + AttrNumber attNums[], Oid classOids[], Node *predicate); +static void DefaultBuild(Relation heapRelation, Relation indexRelation, + int numberOfAttributes, AttrNumber attributeNumber[], + IndexStrategy indexStrategy, uint16 parameterCount, + Datum parameter[], FuncIndexInfoPtr funcInfo, PredInfo *predInfo); + +/* ---------------------------------------------------------------- + * sysatts is a structure containing attribute tuple forms + * for system attributes (numbered -1, -2, ...). This really + * should be generated or eliminated or moved elsewhere. 
-cim 1/19/91 + * + * typedef struct FormData_pg_attribute { + * Oid attrelid; + * NameData attname; + * Oid atttypid; + * Oid attdefrel; + * uint32 attnvals; + * Oid atttyparg; type arg for arrays/spquel/procs + * int16 attlen; + * AttrNumber attnum; + * uint16 attbound; + * bool attbyval; + * bool attcanindex; + * Oid attproc; spquel? + * } FormData_pg_attribute; + * + * The data in this table was taken from local1_template.ami + * but tmin and tmax were switched because local1 was incorrect. + * ---------------------------------------------------------------- + */ +static FormData_pg_attribute sysatts[] = { + { 0l, {"ctid"}, 27l, 0l, 0l, 0l, 6, -1, 0, '\0', '\001', 0l, 'i' }, + { 0l, {"oid"}, 26l, 0l, 0l, 0l, 4, -2, 0, '\001', '\001', 0l, 'i' }, + { 0l, {"xmin"}, 28l, 0l, 0l, 0l, 5, -3, 0, '\0', '\001', 0l, 'i' }, + { 0l, {"cmin"}, 29l, 0l, 0l, 0l, 1, -4, 0, '\001', '\001', 0l, 's' }, + { 0l, {"xmax"}, 28l, 0l, 0l, 0l, 5, -5, 0, '\0', '\001', 0l, 'i' }, + { 0l, {"cmax"}, 29l, 0l, 0l, 0l, 1, -6, 0, '\001', '\001', 0l, 's' }, + { 0l, {"chain"}, 27l, 0l, 0l, 0l, 6, -7, 0, '\0', '\001', 0l, 'i' }, + { 0l, {"anchor"}, 27l, 0l, 0l, 0l, 6, -8, 0, '\0', '\001', 0l, 'i' }, + { 0l, {"tmin"}, 20l, 0l, 0l, 0l, 4, -9, 0, '\001', '\001', 0l, 'i' }, + { 0l, {"tmax"}, 20l, 0l, 0l, 0l, 4, -10, 0, '\001', '\001', 0l, 'i' }, + { 0l, {"vtype"}, 18l, 0l, 0l, 0l, 1, -11, 0, '\001', '\001', 0l, 'c' }, +}; + +/* ---------------------------------------------------------------- + * RelationNameGetObjectId -- + * Returns the object identifier for a relation given its name. + * + * > The HASINDEX attribute for the relation with this name will + * > be set if it exists and if it is indicated by the call argument. + * What a load of bull. This setHasIndexAttribute is totally ignored. + * This is yet another silly routine to scan the catalogs which should + * probably be replaced by SearchSysCacheTuple. -cim 1/19/91 + * + * Note: + * Assumes relation name is valid. + * Assumes relation descriptor is valid. + * ---------------------------------------------------------------- + */ +static Oid +RelationNameGetObjectId(char *relationName, + Relation pg_class, + bool setHasIndexAttribute) +{ + HeapScanDesc pg_class_scan; + HeapTuple pg_class_tuple; + Oid relationObjectId; + Buffer buffer; + ScanKeyData key; + + /* + * If this isn't bootstrap time, we can use the system catalogs to + * speed this up. + */ + + if (!IsBootstrapProcessingMode()) { + pg_class_tuple = ClassNameIndexScan(pg_class, relationName); + if (HeapTupleIsValid(pg_class_tuple)) { + relationObjectId = pg_class_tuple->t_oid; + pfree(pg_class_tuple); + } else + relationObjectId = InvalidOid; + + return (relationObjectId); + } + + /* ---------------- + * Bootstrap time, do this the hard way. + * begin a scan of pg_class for the named relation + * ---------------- + */ + ScanKeyEntryInitialize(&key, 0, Anum_pg_class_relname, + NameEqualRegProcedure, + PointerGetDatum(relationName)); + + pg_class_scan = heap_beginscan(pg_class, 0, NowTimeQual, 1, &key); + + /* ---------------- + * if we find the named relation, fetch its relation id + * (the oid of the tuple we found). + * ---------------- + */ + pg_class_tuple = heap_getnext(pg_class_scan, 0, &buffer); + + if (! 
HeapTupleIsValid(pg_class_tuple)) { + relationObjectId = InvalidOid; + } else { + relationObjectId = pg_class_tuple->t_oid; + ReleaseBuffer(buffer); + } + + /* ---------------- + * cleanup and return results + * ---------------- + */ + heap_endscan(pg_class_scan); + + return + relationObjectId; +} + + +/* ---------------------------------------------------------------- + * GetHeapRelationOid + * ---------------------------------------------------------------- + */ +static Oid +GetHeapRelationOid(char *heapRelationName, char *indexRelationName) +{ + Relation pg_class; + Oid indoid; + Oid heapoid; + + /* ---------------- + * XXX ADD INDEXING HERE + * ---------------- + */ + /* ---------------- + * open pg_class and get the oid of the relation + * corresponding to the name of the index relation. + * ---------------- + */ + pg_class = heap_openr(RelationRelationName); + + indoid = RelationNameGetObjectId(indexRelationName, + pg_class, + false); + + if (OidIsValid(indoid)) + elog(WARN, "Cannot create index: '%s' already exists", + indexRelationName); + + /* ---------------- + * get the object id of the heap relation + * ---------------- + */ + heapoid = RelationNameGetObjectId(heapRelationName, + pg_class, + true); + + /* ---------------- + * check that the heap relation exists.. + * ---------------- + */ + if (! OidIsValid(heapoid)) + elog(WARN, "Cannot create index on '%s': relation does not exist", + heapRelationName); + + /* ---------------- + * close pg_class and return the heap relation oid + * ---------------- + */ + heap_close(pg_class); + + return heapoid; +} + +static TupleDesc +BuildFuncTupleDesc(FuncIndexInfo *funcInfo) +{ + HeapTuple tuple; + TupleDesc funcTupDesc; + Oid retType; + char *funcname; + int4 nargs; + Oid *argtypes; + + /* + * Allocate and zero a tuple descriptor. + */ + funcTupDesc = CreateTemplateTupleDesc(1); + funcTupDesc->attrs[0] = (AttributeTupleForm) palloc(ATTRIBUTE_TUPLE_SIZE); + memset(funcTupDesc->attrs[0], 0, ATTRIBUTE_TUPLE_SIZE); + + /* + * Lookup the function for the return type. + */ + funcname = FIgetname(funcInfo); + nargs = FIgetnArgs(funcInfo); + argtypes = FIgetArglist(funcInfo); + tuple = SearchSysCacheTuple(PRONAME, + PointerGetDatum(funcname), + Int32GetDatum(nargs), + PointerGetDatum(argtypes), + 0); + + if (!HeapTupleIsValid(tuple)) + func_error("BuildFuncTupleDesc", funcname, nargs, (int*)argtypes); + + retType = ((Form_pg_proc)GETSTRUCT(tuple))->prorettype; + + /* + * Look up the return type in pg_type for the type length. + */ + tuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(retType), + 0,0,0); + if (!HeapTupleIsValid(tuple)) + elog(WARN,"Function %s return type does not exist",FIgetname(funcInfo)); + + /* + * Assign some of the attributes values. Leave the rest as 0. 
+ */ + funcTupDesc->attrs[0]->attlen = ((TypeTupleForm)GETSTRUCT(tuple))->typlen; + funcTupDesc->attrs[0]->atttypid = retType; + funcTupDesc->attrs[0]->attnum = 1; + funcTupDesc->attrs[0]->attbyval = ((TypeTupleForm)GETSTRUCT(tuple))->typbyval; + funcTupDesc->attrs[0]->attcanindex = 0; + + /* + * make the attributes name the same as the functions + */ + namestrcpy(&funcTupDesc->attrs[0]->attname, funcname); + + return (funcTupDesc); +} + +/* ---------------------------------------------------------------- + * ConstructTupleDescriptor + * ---------------------------------------------------------------- + */ +static TupleDesc +ConstructTupleDescriptor(Oid heapoid, + Relation heapRelation, + int numatts, + AttrNumber attNums[]) +{ + TupleDesc heapTupDesc; + TupleDesc indexTupDesc; + AttrNumber atnum; /* attributeNumber[attributeOffset] */ + AttrNumber atind; + int natts; /* RelationTupleForm->relnatts */ + char *from; /* used to simplify memcpy below */ + char *to; /* used to simplify memcpy below */ + int i; + + /* ---------------- + * allocate the new tuple descriptor + * ---------------- + */ + natts = RelationGetRelationTupleForm(heapRelation)->relnatts; + + indexTupDesc = CreateTemplateTupleDesc(numatts); + + /* ---------------- + * + * ---------------- + */ + + /* ---------------- + * for each attribute we are indexing, obtain its attribute + * tuple form from either the static table of system attribute + * tuple forms or the relation tuple descriptor + * ---------------- + */ + for (i = 0; i < numatts; i += 1) { + + /* ---------------- + * get the attribute number and make sure it's valid + * ---------------- + */ + atnum = attNums[i]; + if (atnum > natts) + elog(WARN, "Cannot create index: attribute %d does not exist", + atnum); + + indexTupDesc->attrs[i] = (AttributeTupleForm) palloc(ATTRIBUTE_TUPLE_SIZE); + + /* ---------------- + * determine which tuple descriptor to copy + * ---------------- + */ + if (!AttrNumberIsForUserDefinedAttr(atnum)) { + + /* ---------------- + * here we are indexing on a system attribute (-1...-12) + * so we convert atnum into a usable index 0...11 so we can + * use it to dereference the array sysatts[] which stores + * tuple descriptor information for system attributes. + * ---------------- + */ + if (atnum <= FirstLowInvalidHeapAttributeNumber || atnum >= 0 ) + elog(WARN, "Cannot create index on system attribute: attribute number out of range (%d)", atnum); + atind = (-atnum) - 1; + + from = (char *) (& sysatts[atind]); + + } else { + /* ---------------- + * here we are indexing on a normal attribute (1...n) + * ---------------- + */ + + heapTupDesc = RelationGetTupleDescriptor(heapRelation); + atind = AttrNumberGetAttrOffset(atnum); + + from = (char *) (heapTupDesc->attrs[ atind ]); + } + + /* ---------------- + * now that we've determined the "from", let's copy + * the tuple desc data... + * ---------------- + */ + + to = (char *) (indexTupDesc->attrs[ i ]); + memcpy(to, from, ATTRIBUTE_TUPLE_SIZE); + + /* ---------------- + * now we have to drop in the proper relation descriptor + * into the copied tuple form's attrelid and we should be + * all set. + * ---------------- + */ + ((AttributeTupleForm) to)->attrelid = heapoid; + } + + return indexTupDesc; +} + +/* ---------------------------------------------------------------- + * AccessMethodObjectIdGetAccessMethodTupleForm -- + * Returns the formated access method tuple given its object identifier. + * + * XXX ADD INDEXING + * + * Note: + * Assumes object identifier is valid. 
+ * ---------------------------------------------------------------- + */ +Form_pg_am +AccessMethodObjectIdGetAccessMethodTupleForm(Oid accessMethodObjectId) +{ + Relation pg_am_desc; + HeapScanDesc pg_am_scan; + HeapTuple pg_am_tuple; + ScanKeyData key; + Form_pg_am form; + + /* ---------------- + * form a scan key for the pg_am relation + * ---------------- + */ + ScanKeyEntryInitialize(&key, 0, ObjectIdAttributeNumber, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(accessMethodObjectId)); + + /* ---------------- + * fetch the desired access method tuple + * ---------------- + */ + pg_am_desc = heap_openr(AccessMethodRelationName); + pg_am_scan = heap_beginscan(pg_am_desc, 0, NowTimeQual, 1, &key); + + pg_am_tuple = heap_getnext(pg_am_scan, 0, (Buffer *)NULL); + + /* ---------------- + * return NULL if not found + * ---------------- + */ + if (! HeapTupleIsValid(pg_am_tuple)) { + heap_endscan(pg_am_scan); + heap_close(pg_am_desc); + return (NULL); + } + + /* ---------------- + * if found am tuple, then copy the form and return the copy + * ---------------- + */ + form = (Form_pg_am)palloc(sizeof *form); + memcpy(form, GETSTRUCT(pg_am_tuple), sizeof *form); + + heap_endscan(pg_am_scan); + heap_close(pg_am_desc); + + return (form); +} + +/* ---------------------------------------------------------------- + * ConstructIndexReldesc + * ---------------------------------------------------------------- + */ +static void +ConstructIndexReldesc(Relation indexRelation, Oid amoid) +{ + extern GlobalMemory CacheCxt; + MemoryContext oldcxt; + + /* ---------------- + * here we make certain to allocate the access method + * tuple within the cache context lest it vanish when the + * context changes + * ---------------- + */ + if (!CacheCxt) + CacheCxt = CreateGlobalMemory("Cache"); + + oldcxt = MemoryContextSwitchTo((MemoryContext)CacheCxt); + + indexRelation->rd_am = + AccessMethodObjectIdGetAccessMethodTupleForm(amoid); + + MemoryContextSwitchTo(oldcxt); + + /* ---------------- + * XXX missing the initialization of some other fields + * ---------------- + */ + + indexRelation->rd_rel->relowner = GetUserId(); + + indexRelation->rd_rel->relam = amoid; + indexRelation->rd_rel->reltuples = 1; /* XXX */ + indexRelation->rd_rel->relexpires = 0; /* XXX */ + indexRelation->rd_rel->relpreserved = 0; /* XXX */ + indexRelation->rd_rel->relkind = RELKIND_INDEX; + indexRelation->rd_rel->relarch = 'n'; /* XXX */ +} + +/* ---------------------------------------------------------------- + * UpdateRelationRelation + * ---------------------------------------------------------------- + */ +static Oid +UpdateRelationRelation(Relation indexRelation) +{ + Relation pg_class; + HeapTuple tuple; + Oid tupleOid; + Relation idescs[Num_pg_class_indices]; + + pg_class = heap_openr(RelationRelationName); + + /* XXX Natts_pg_class_fixed is a hack - see pg_class.h */ + tuple = heap_addheader(Natts_pg_class_fixed, + sizeof(*indexRelation->rd_rel), + (char *) indexRelation->rd_rel); + + /* ---------------- + * the new tuple must have the same oid as the relcache entry for the + * index. sure would be embarassing to do this sort of thing in polite + * company. + * ---------------- + */ + tuple->t_oid = indexRelation->rd_id; + heap_insert(pg_class, tuple); + + /* + * During normal processing, we need to make sure that the system + * catalog indices are correct. Bootstrap (initdb) time doesn't + * require this, because we make sure that the indices are correct + * just before exiting. 
+ */ + + if (!IsBootstrapProcessingMode()) { + CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_class_indices, pg_class, tuple); + CatalogCloseIndices(Num_pg_class_indices, idescs); + } + + tupleOid = tuple->t_oid; + pfree(tuple); + heap_close(pg_class); + + return(tupleOid); +} + +/* ---------------------------------------------------------------- + * InitializeAttributeOids + * ---------------------------------------------------------------- + */ +static void +InitializeAttributeOids(Relation indexRelation, + int numatts, + Oid indexoid) +{ + TupleDesc tupleDescriptor; + int i; + + tupleDescriptor = RelationGetTupleDescriptor(indexRelation); + + for (i = 0; i < numatts; i += 1) + tupleDescriptor->attrs[i]->attrelid = indexoid; +} + +/* ---------------------------------------------------------------- + * AppendAttributeTuples + * + * XXX For now, only change the ATTNUM attribute value + * ---------------------------------------------------------------- + */ +static void +AppendAttributeTuples(Relation indexRelation, int numatts) +{ + Relation pg_attribute; + HeapTuple tuple; + HeapTuple newtuple; + bool hasind; + Relation idescs[Num_pg_attr_indices]; + + Datum value[ Natts_pg_attribute ]; + char nullv[ Natts_pg_attribute ]; + char replace[ Natts_pg_attribute ]; + + TupleDesc indexTupDesc; + int i; + + /* ---------------- + * open the attribute relation + * XXX ADD INDEXING + * ---------------- + */ + pg_attribute = heap_openr(AttributeRelationName); + + /* ---------------- + * initialize null[], replace[] and value[] + * ---------------- + */ + (void) memset(nullv, ' ', Natts_pg_attribute); + (void) memset(replace, ' ', Natts_pg_attribute); + + /* ---------------- + * create the first attribute tuple. + * XXX For now, only change the ATTNUM attribute value + * ---------------- + */ + replace[ Anum_pg_attribute_attnum - 1 ] = 'r'; + + value[ Anum_pg_attribute_attnum - 1 ] = Int16GetDatum(1); + + tuple = heap_addheader(Natts_pg_attribute, + sizeof *(indexRelation->rd_att->attrs[0]), + (char *)(indexRelation->rd_att->attrs[0])); + + hasind = false; + if (!IsBootstrapProcessingMode() && pg_attribute->rd_rel->relhasindex) { + hasind = true; + CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs); + } + + /* ---------------- + * insert the first attribute tuple. + * ---------------- + */ + tuple = heap_modifytuple(tuple, + InvalidBuffer, + pg_attribute, + value, + nullv, + replace); + + heap_insert(pg_attribute, tuple); + if (hasind) + CatalogIndexInsert(idescs, Num_pg_attr_indices, pg_attribute, tuple); + + /* ---------------- + * now we use the information in the index tuple + * descriptor to form the remaining attribute tuples. + * ---------------- + */ + indexTupDesc = RelationGetTupleDescriptor(indexRelation); + + for (i = 1; i < numatts; i += 1) { + /* ---------------- + * process the remaining attributes... + * ---------------- + */ + memmove(GETSTRUCT(tuple), + (char *)indexTupDesc->attrs[i], + sizeof (AttributeTupleForm)); + + value[ Anum_pg_attribute_attnum - 1 ] = Int16GetDatum(i + 1); + + newtuple = heap_modifytuple(tuple, + InvalidBuffer, + pg_attribute, + value, + nullv, + replace); + + heap_insert(pg_attribute, newtuple); + if (hasind) + CatalogIndexInsert(idescs, Num_pg_attr_indices, pg_attribute, newtuple); + + /* ---------------- + * ModifyHeapTuple returns a new copy of a tuple + * so we free the original and use the copy.. 
+ * ---------------- + */ + pfree(tuple); + tuple = newtuple; + } + + /* ---------------- + * close the attribute relation and free the tuple + * ---------------- + */ + heap_close(pg_attribute); + + if (hasind) + CatalogCloseIndices(Num_pg_attr_indices, idescs); + + pfree(tuple); +} + +/* ---------------------------------------------------------------- + * UpdateIndexRelation + * ---------------------------------------------------------------- + */ +static void +UpdateIndexRelation(Oid indexoid, + Oid heapoid, + FuncIndexInfo *funcInfo, + int natts, + AttrNumber attNums[], + Oid classOids[], + Node *predicate) +{ + IndexTupleForm indexForm; + char *predString; + text *predText; + int predLen, itupLen; + Relation pg_index; + HeapTuple tuple; + int i; + + /* ---------------- + * allocate an IndexTupleForm big enough to hold the + * index-predicate (if any) in string form + * ---------------- + */ + if (predicate != NULL) { + predString = nodeToString(predicate); + predText = (text *)fmgr(F_TEXTIN, predString); + pfree(predString); + } else { + predText = (text *)fmgr(F_TEXTIN, ""); + } + predLen = VARSIZE(predText); + itupLen = predLen + sizeof(FormData_pg_index); + indexForm = (IndexTupleForm) palloc(itupLen); + + memmove((char *)& indexForm->indpred, (char *)predText, predLen); + + /* ---------------- + * store the oid information into the index tuple form + * ---------------- + */ + indexForm->indrelid = heapoid; + indexForm->indexrelid = indexoid; + indexForm->indproc = (PointerIsValid(funcInfo)) ? + FIgetProcOid(funcInfo) : InvalidOid; + + memset((char *)& indexForm->indkey[0], 0, sizeof indexForm->indkey); + memset((char *)& indexForm->indclass[0], 0, sizeof indexForm->indclass); + + /* ---------------- + * copy index key and op class information + * ---------------- + */ + for (i = 0; i < natts; i += 1) { + indexForm->indkey[i] = attNums[i]; + indexForm->indclass[i] = classOids[i]; + } + /* + * If we have a functional index, add all attribute arguments + */ + if (PointerIsValid(funcInfo)) + { + for (i=1; i < FIgetnArgs(funcInfo); i++) + indexForm->indkey[i] = attNums[i]; + } + + indexForm->indisclustered = '\0'; /* XXX constant */ + indexForm->indisarchived = '\0'; /* XXX constant */ + + /* ---------------- + * open the system catalog index relation + * ---------------- + */ + pg_index = heap_openr(IndexRelationName); + + /* ---------------- + * form a tuple to insert into pg_index + * ---------------- + */ + tuple = heap_addheader(Natts_pg_index, + itupLen, + (char *)indexForm); + + /* ---------------- + * insert the tuple into the pg_index + * XXX ADD INDEX TUPLES TOO + * ---------------- + */ + heap_insert(pg_index, tuple); + + /* ---------------- + * close the relation and free the tuple + * ---------------- + */ + heap_close(pg_index); + pfree(predText); + pfree(indexForm); + pfree(tuple); +} + +/* ---------------------------------------------------------------- + * UpdateIndexPredicate + * ---------------------------------------------------------------- + */ +void +UpdateIndexPredicate(Oid indexoid, Node *oldPred, Node *predicate) +{ + Node *newPred; + char *predString; + text *predText; + Relation pg_index; + HeapTuple tuple; + HeapTuple newtup; + ScanKeyData entry; + HeapScanDesc scan; + Buffer buffer; + int i; + Datum values[Natts_pg_index]; + char nulls[Natts_pg_index]; + char replace[Natts_pg_index]; + + /* + * Construct newPred as a CNF expression equivalent to the OR of the + * original partial-index predicate ("oldPred") and the extension + * predicate ("predicate"). 
+ * + * This should really try to process the result to change things like + * "a>2 OR a>1" to simply "a>1", but for now all it does is make sure + * that if the extension predicate is NULL (i.e., it is being extended + * to be a complete index), then newPred will be NULL - in effect, + * changing "a>2 OR TRUE" to "TRUE". --Nels, Jan '93 + */ + newPred = NULL; + if (predicate != NULL) { + newPred = + (Node*)make_orclause(lcons(make_andclause((List*)predicate), + lcons(make_andclause((List*)oldPred), + NIL))); + newPred = (Node*)cnfify((Expr*)newPred, true); + } + + /* translate the index-predicate to string form */ + if (newPred != NULL) { + predString = nodeToString(newPred); + predText = (text *)fmgr(F_TEXTIN, predString); + pfree(predString); + } else { + predText = (text *)fmgr(F_TEXTIN, ""); + } + + /* open the index system catalog relation */ + pg_index = heap_openr(IndexRelationName); + + ScanKeyEntryInitialize(&entry, 0x0, Anum_pg_index_indexrelid, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(indexoid)); + + scan = heap_beginscan(pg_index, 0, NowTimeQual, 1, &entry); + tuple = heap_getnext(scan, 0, &buffer); + heap_endscan(scan); + + for (i = 0; i < Natts_pg_index; i++) { + nulls[i] = heap_attisnull(tuple, i+1) ? 'n' : ' '; + replace[i] = ' '; + values[i] = (Datum) NULL; + } + + replace[Anum_pg_index_indpred - 1] = 'r'; + values[Anum_pg_index_indpred - 1] = (Datum) predText; + + newtup = heap_modifytuple(tuple, buffer, pg_index, values, nulls, replace); + + (void) heap_replace(pg_index, &(newtup->t_ctid), newtup); + + heap_close(pg_index); + pfree(predText); +} + +/* ---------------------------------------------------------------- + * InitIndexStrategy + * ---------------------------------------------------------------- + */ +void +InitIndexStrategy(int numatts, + Relation indexRelation, + Oid accessMethodObjectId) +{ + IndexStrategy strategy; + RegProcedure *support; + uint16 amstrategies; + uint16 amsupport; + Oid attrelid; + Size strsize; + extern GlobalMemory CacheCxt; + + /* ---------------- + * get information from the index relation descriptor + * ---------------- + */ + attrelid = indexRelation->rd_att->attrs[0]->attrelid; + amstrategies = indexRelation->rd_am->amstrategies; + amsupport = indexRelation->rd_am->amsupport; + + /* ---------------- + * get the size of the strategy + * ---------------- + */ + strsize = AttributeNumberGetIndexStrategySize(numatts, amstrategies); + + /* ---------------- + * allocate the new index strategy structure + * + * the index strategy has to be allocated in the same + * context as the relation descriptor cache or else + * it will be lost at the end of the transaction. + * ---------------- + */ + if (!CacheCxt) + CacheCxt = CreateGlobalMemory("Cache"); + + strategy = (IndexStrategy) + MemoryContextAlloc((MemoryContext)CacheCxt, strsize); + + if (amsupport > 0) { + strsize = numatts * (amsupport * sizeof(RegProcedure)); + support = (RegProcedure *) MemoryContextAlloc((MemoryContext)CacheCxt, + strsize); + } else { + support = (RegProcedure *) NULL; + } + + /* ---------------- + * fill in the index strategy structure with information + * from the catalogs. Note: we use heap override mode + * in order to be allowed to see the correct information in the + * catalogs, even though our transaction has not yet committed. 
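+ * (the same setheapoverride() device is used in heap.c, around
+ * fillatt() in AddNewAttributeTuples() and around DeletePgTypeTuple()
+ * in heap_destroy(), for the same not-yet-committed reason.)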
+ * ---------------- + */ + setheapoverride(1); + + IndexSupportInitialize(strategy, support, + attrelid, accessMethodObjectId, + amstrategies, amsupport, numatts); + + setheapoverride(0); + + /* ---------------- + * store the strategy information in the index reldesc + * ---------------- + */ + RelationSetIndexSupport(indexRelation, strategy, support); +} + + +/* ---------------------------------------------------------------- + * index_create + * ---------------------------------------------------------------- + */ +void +index_create(char *heapRelationName, + char *indexRelationName, + FuncIndexInfo *funcInfo, + Oid accessMethodObjectId, + int numatts, + AttrNumber attNums[], + Oid classObjectId[], + uint16 parameterCount, + Datum parameter[], + Node *predicate) +{ + Relation heapRelation; + Relation indexRelation; + TupleDesc indexTupDesc; + Oid heapoid; + Oid indexoid; + PredInfo *predInfo; + + /* ---------------- + * check parameters + * ---------------- + */ + if (numatts < 1) + elog(WARN, "must index at least one attribute"); + + /* ---------------- + * get heap relation oid and open the heap relation + * XXX ADD INDEXING + * ---------------- + */ + heapoid = GetHeapRelationOid(heapRelationName, indexRelationName); + + heapRelation = heap_open(heapoid); + + /* ---------------- + * write lock heap to guarantee exclusive access + * ---------------- + */ + + RelationSetLockForWrite(heapRelation); + + /* ---------------- + * construct new tuple descriptor + * ---------------- + */ + if (PointerIsValid(funcInfo)) + indexTupDesc = BuildFuncTupleDesc(funcInfo); + else + indexTupDesc = ConstructTupleDescriptor(heapoid, + heapRelation, + numatts, + attNums); + + /* ---------------- + * create the index relation + * ---------------- + */ + indexRelation = heap_creatr(indexRelationName, + DEFAULT_SMGR, + indexTupDesc); + + /* ---------------- + * construct the index relation descriptor + * + * XXX should have a proper way to create cataloged relations + * ---------------- + */ + ConstructIndexReldesc(indexRelation, accessMethodObjectId); + + /* ---------------- + * add index to catalogs + * (append RELATION tuple) + * ---------------- + */ + indexoid = UpdateRelationRelation(indexRelation); + + /* ---------------- + * Now get the index procedure (only relevant for functional indices). + * ---------------- + */ + + if (PointerIsValid(funcInfo)) + { + HeapTuple proc_tup; + + proc_tup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(FIgetname(funcInfo)), + Int32GetDatum(FIgetnArgs(funcInfo)), + PointerGetDatum(FIgetArglist(funcInfo)), + 0); + + if (!HeapTupleIsValid(proc_tup)) { + func_error("index_create", FIgetname(funcInfo), + FIgetnArgs(funcInfo), + (int*) FIgetArglist(funcInfo)); + } + FIgetProcOid(funcInfo) = proc_tup->t_oid; + } + + /* ---------------- + * now update the object id's of all the attribute + * tuple forms in the index relation's tuple descriptor + * ---------------- + */ + InitializeAttributeOids(indexRelation, numatts, indexoid); + + /* ---------------- + * append ATTRIBUTE tuples + * ---------------- + */ + AppendAttributeTuples(indexRelation, numatts); + + /* ---------------- + * update pg_index + * (append INDEX tuple) + * + * Note that this stows away a representation of "predicate". 
+ * (Or, could define a rule to maintain the predicate) --Nels, Feb '92 + * ---------------- + */ + UpdateIndexRelation(indexoid, heapoid, funcInfo, + numatts, attNums, classObjectId, predicate); + + predInfo = (PredInfo*)palloc(sizeof(PredInfo)); + predInfo->pred = predicate; + predInfo->oldPred = NULL; + + /* ---------------- + * initialize the index strategy + * ---------------- + */ + InitIndexStrategy(numatts, indexRelation, accessMethodObjectId); + + /* + * If this is bootstrap (initdb) time, then we don't actually + * fill in the index yet. We'll be creating more indices and classes + * later, so we delay filling them in until just before we're done + * with bootstrapping. Otherwise, we call the routine that constructs + * the index. The heap and index relations are closed by index_build(). + */ + if (IsBootstrapProcessingMode()) { + index_register(heapRelationName, indexRelationName, numatts, attNums, + parameterCount, parameter, funcInfo, predInfo); + } else { + heapRelation = heap_openr(heapRelationName); + index_build(heapRelation, indexRelation, numatts, attNums, + parameterCount, parameter, funcInfo, predInfo); + } +} + +/* ---------------------------------------------------------------- + * index_destroy + * + * XXX break into modules like index_create + * ---------------------------------------------------------------- + */ +void +index_destroy(Oid indexId) +{ + Relation indexRelation; + Relation catalogRelation; + HeapTuple tuple; + HeapScanDesc scan; + ScanKeyData entry; + + Assert(OidIsValid(indexId)); + + indexRelation = index_open(indexId); + + /* ---------------- + * fix RELATION relation + * ---------------- + */ + catalogRelation = heap_openr(RelationRelationName); + + ScanKeyEntryInitialize(&entry, 0x0, ObjectIdAttributeNumber, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(indexId));; + + scan = heap_beginscan(catalogRelation, 0, NowTimeQual, 1, &entry); + tuple = heap_getnext(scan, 0, (Buffer *)NULL); + + AssertState(HeapTupleIsValid(tuple)); + + heap_delete(catalogRelation, &tuple->t_ctid); + heap_endscan(scan); + heap_close(catalogRelation); + + /* ---------------- + * fix ATTRIBUTE relation + * ---------------- + */ + catalogRelation = heap_openr(AttributeRelationName); + + entry.sk_attno = Anum_pg_attribute_attrelid; + + scan = heap_beginscan(catalogRelation, 0, NowTimeQual, 1, &entry); + + while (tuple = heap_getnext(scan, 0, (Buffer *)NULL), + HeapTupleIsValid(tuple)) { + + heap_delete(catalogRelation, &tuple->t_ctid); + } + heap_endscan(scan); + heap_close(catalogRelation); + + /* ---------------- + * fix INDEX relation + * ---------------- + */ + catalogRelation = heap_openr(IndexRelationName); + + entry.sk_attno = Anum_pg_index_indexrelid; + + scan = heap_beginscan(catalogRelation, 0, NowTimeQual, 1, &entry); + tuple = heap_getnext(scan, 0, (Buffer *)NULL); + if (! 
HeapTupleIsValid(tuple)) { + elog(NOTICE, "IndexRelationDestroy: %s's INDEX tuple missing", + RelationGetRelationName(indexRelation)); + } + heap_delete(catalogRelation, &tuple->t_ctid); + heap_endscan(scan); + heap_close(catalogRelation); + + /* + * physically remove the file + */ + if (FileNameUnlink(relpath(indexRelation->rd_rel->relname.data)) < 0) + elog(WARN, "amdestroyr: unlink: %m"); + + index_close(indexRelation); +} + +/* ---------------------------------------------------------------- + * index_build support + * ---------------------------------------------------------------- + */ +/* ---------------- + * FormIndexDatum + * ---------------- + */ +void +FormIndexDatum(int numberOfAttributes, + AttrNumber attributeNumber[], + HeapTuple heapTuple, + TupleDesc heapDescriptor, + Buffer buffer, + Datum *datum, + char *nullv, + FuncIndexInfoPtr fInfo) +{ + AttrNumber i; + int offset; + bool isNull; + + /* ---------------- + * for each attribute we need from the heap tuple, + * get the attribute and stick it into the datum and + * null arrays. + * ---------------- + */ + + for (i = 1; i <= numberOfAttributes; i += 1) { + offset = AttrNumberGetAttrOffset(i); + + datum[ offset ] = + PointerGetDatum( GetIndexValue(heapTuple, + heapDescriptor, + offset, + attributeNumber, + fInfo, + &isNull, + buffer) ); + + nullv[ offset ] = (isNull) ? 'n' : ' '; + } +} + + +/* ---------------- + * UpdateStats + * ---------------- + */ +void +UpdateStats(Oid relid, long reltuples, bool hasindex) +{ + Relation whichRel; + Relation pg_class; + HeapScanDesc pg_class_scan; + HeapTuple htup; + HeapTuple newtup; + long relpages; + Buffer buffer; + int i; + Form_pg_class rd_rel; + Relation idescs[Num_pg_class_indices]; + + static ScanKeyData key[1] = { + { 0, ObjectIdAttributeNumber, ObjectIdEqualRegProcedure } + }; + Datum values[Natts_pg_class]; + char nulls[Natts_pg_class]; + char replace[Natts_pg_class]; + + fmgr_info(ObjectIdEqualRegProcedure, (func_ptr *) &key[0].sk_func, + &key[0].sk_nargs); + + /* ---------------- + * This routine handles updates for both the heap and index relation + * statistics. In order to guarantee that we're able to *see* the index + * relation tuple, we bump the command counter id here. The index + * relation tuple was created in the current transaction. + * ---------------- + */ + CommandCounterIncrement(); + + /* ---------------- + * CommandCounterIncrement() flushes invalid cache entries, including + * those for the heap and index relations for which we're updating + * statistics. Now that the cache is flushed, it's safe to open the + * relation again. We need the relation open in order to figure out + * how many blocks it contains. + * ---------------- + */ + + whichRel = RelationIdGetRelation(relid); + + if (!RelationIsValid(whichRel)) + elog(WARN, "UpdateStats: cannot open relation id %d", relid); + + /* ---------------- + * Find the RELATION relation tuple for the given relation. + * ---------------- + */ + pg_class = heap_openr(RelationRelationName); + if (! RelationIsValid(pg_class)) { + elog(WARN, "UpdateStats: could not open RELATION relation"); + } + key[0].sk_argument = ObjectIdGetDatum(relid); + + pg_class_scan = + heap_beginscan(pg_class, 0, NowTimeQual, 1, key); + + if (! 
HeapScanIsValid(pg_class_scan)) { + heap_close(pg_class); + elog(WARN, "UpdateStats: cannot scan RELATION relation"); + } + + /* if the heap_open above succeeded, then so will this heap_getnext() */ + htup = heap_getnext(pg_class_scan, 0, &buffer); + heap_endscan(pg_class_scan); + + /* ---------------- + * update statistics + * ---------------- + */ + relpages = RelationGetNumberOfBlocks(whichRel); + + /* + * We shouldn't have to do this, but we do... Modify the reldesc + * in place with the new values so that the cache contains the + * latest copy. + */ + + whichRel->rd_rel->relhasindex = hasindex; + whichRel->rd_rel->relpages = relpages; + whichRel->rd_rel->reltuples = reltuples; + + for (i = 0; i < Natts_pg_class; i++) { + nulls[i] = heap_attisnull(htup, i+1) ? 'n' : ' '; + replace[i] = ' '; + values[i] = (Datum) NULL; + } + + /* + * If reltuples wasn't supplied take an educated guess. + */ + if (reltuples == 0) + reltuples = relpages*NTUPLES_PER_PAGE(whichRel->rd_rel->relnatts); + + if (IsBootstrapProcessingMode()) { + + /* + * At bootstrap time, we don't need to worry about concurrency + * or visibility of changes, so we cheat. + */ + + rd_rel = (Form_pg_class) GETSTRUCT(htup); + rd_rel->relpages = relpages; + rd_rel->reltuples = reltuples; + rd_rel->relhasindex = hasindex; + } else { + /* during normal processing, work harder */ + replace[Anum_pg_class_relpages - 1] = 'r'; + values[Anum_pg_class_relpages - 1] = (Datum)relpages; + replace[Anum_pg_class_reltuples - 1] = 'r'; + values[Anum_pg_class_reltuples - 1] = (Datum)reltuples; + replace[Anum_pg_class_relhasindex - 1] = 'r'; + values[Anum_pg_class_relhasindex - 1] = CharGetDatum(hasindex); + + newtup = heap_modifytuple(htup, buffer, pg_class, values, + nulls, replace); + (void) heap_replace(pg_class, &(newtup->t_ctid), newtup); + CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_class_indices, pg_class, newtup); + CatalogCloseIndices(Num_pg_class_indices, idescs); + } + + heap_close(pg_class); + heap_close(whichRel); +} + + +/* ------------------------- + * FillDummyExprContext + * Sets up dummy ExprContext and TupleTableSlot objects for use + * with ExecQual. 
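The values/nulls/replace arrays handed to heap_modifytuple() above follow a simple convention: 'r' marks a column whose value is being replaced, 'n' marks a NULL, and ' ' keeps what is already in the tuple. A sketch of that convention under the same assumptions as UpdateStats() (the pg_class relation, tuple and buffer come from a scan like the one above); the helper name and the new relpages value are illustrative:

/*
 * Sketch only: update one pg_class column in place, as UpdateStats() does.
 */
static void
update_relpages_sketch(Relation pg_class, HeapTuple htup, Buffer buffer,
                       long newRelpages)
{
    Datum     values[Natts_pg_class];
    char      nulls[Natts_pg_class];
    char      replace[Natts_pg_class];
    HeapTuple newtup;
    int       i;

    for (i = 0; i < Natts_pg_class; i++) {
        values[i]  = (Datum) NULL;
        nulls[i]   = ' ';        /* ' ' means not null            */
        replace[i] = ' ';        /* ' ' means keep the old value  */
    }
    replace[Anum_pg_class_relpages - 1] = 'r';   /* 'r' = replace column */
    values[Anum_pg_class_relpages - 1]  = (Datum) newRelpages;

    newtup = heap_modifytuple(htup, buffer, pg_class, values, nulls, replace);
    (void) heap_replace(pg_class, &(newtup->t_ctid), newtup);
    /* the pg_class indices must then be updated too -- see indexing.c */
}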
+ * ------------------------- + */ +void +FillDummyExprContext(ExprContext *econtext, + TupleTableSlot *slot, + TupleDesc tupdesc, + Buffer buffer) +{ + econtext->ecxt_scantuple = slot; + econtext->ecxt_innertuple = NULL; + econtext->ecxt_outertuple = NULL; + econtext->ecxt_param_list_info = NULL; + econtext->ecxt_range_table = NULL; + + slot->ttc_tupleDescriptor = tupdesc; + slot->ttc_buffer = buffer; + slot->ttc_shouldFree = false; + +} + + +/* ---------------- + * DefaultBuild + * ---------------- + */ +static void +DefaultBuild(Relation heapRelation, + Relation indexRelation, + int numberOfAttributes, + AttrNumber attributeNumber[], + IndexStrategy indexStrategy, /* not used */ + uint16 parameterCount, /* not used */ + Datum parameter[], /* not used */ + FuncIndexInfoPtr funcInfo, + PredInfo *predInfo) +{ + HeapScanDesc scan; + HeapTuple heapTuple; + Buffer buffer; + + IndexTuple indexTuple; + TupleDesc heapDescriptor; + TupleDesc indexDescriptor; + Datum *datum; + char *nullv; + long reltuples, indtuples; + ExprContext *econtext; + TupleTable tupleTable; + TupleTableSlot *slot; + Node *predicate; + Node *oldPred; + + InsertIndexResult insertResult; + + /* ---------------- + * more & better checking is needed + * ---------------- + */ + Assert(OidIsValid(indexRelation->rd_rel->relam)); /* XXX */ + + /* ---------------- + * get the tuple descriptors from the relations so we know + * how to form the index tuples.. + * ---------------- + */ + heapDescriptor = RelationGetTupleDescriptor(heapRelation); + indexDescriptor = RelationGetTupleDescriptor(indexRelation); + + /* ---------------- + * datum and null are arrays in which we collect the index attributes + * when forming a new index tuple. + * ---------------- + */ + datum = (Datum *) palloc(numberOfAttributes * sizeof *datum); + nullv = (char *) palloc(numberOfAttributes * sizeof *nullv); + + /* + * If this is a predicate (partial) index, we will need to evaluate the + * predicate using ExecQual, which requires the current tuple to be in a + * slot of a TupleTable. In addition, ExecQual must have an ExprContext + * referring to that slot. Here, we initialize dummy TupleTable and + * ExprContext objects for this purpose. --Nels, Feb '92 + */ + + predicate = predInfo->pred; + oldPred = predInfo->oldPred; + +#ifndef OMIT_PARTIAL_INDEX + if (predicate != NULL || oldPred != NULL) { + tupleTable = ExecCreateTupleTable(1); + slot = ExecAllocTableSlot(tupleTable); + econtext = makeNode(ExprContext); + FillDummyExprContext(econtext, slot, heapDescriptor, buffer); + } +#endif /* OMIT_PARTIAL_INDEX */ + + /* ---------------- + * Ok, begin our scan of the base relation. + * ---------------- + */ + scan = heap_beginscan(heapRelation, /* relation */ + 0, /* start at end */ + NowTimeQual, /* time range */ + 0, /* number of keys */ + (ScanKey) NULL); /* scan key */ + + reltuples = indtuples = 0; + + /* ---------------- + * for each tuple in the base relation, we create an index + * tuple and add it to the index relation. We keep a running + * count of the number of tuples so that we can update pg_class + * with correct statistics when we're done building the index. 
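As a toy model of the bookkeeping in the scan loop that follows: reltuples counts every heap tuple seen, indtuples counts only the tuples that end up in the index, tuples already covered by oldPred (the EXTEND INDEX case) are counted but not re-inserted, and tuples failing the partial-index predicate are skipped. Plain C, no backend code; the predicate is an arbitrary stand-in for ExecQual:

/* Toy model of DefaultBuild()'s counting and partial-index skip logic. */
#include <stdio.h>
#include <stdbool.h>

typedef bool (*pred_fn)(int);            /* stand-in for ExecQual */

static bool is_even(int v) { return v % 2 == 0; }

int
main(void)
{
    int     heap[] = {1, 2, 3, 4, 5, 6};
    long    reltuples = 0, indtuples = 0;
    pred_fn predicate = is_even;         /* partial-index predicate   */
    pred_fn oldPred   = NULL;            /* non-NULL only for EXTEND  */
    int     i;

    for (i = 0; i < 6; i++) {
        reltuples++;
        if (oldPred && oldPred(heap[i])) {
            indtuples++;                 /* already in the old index  */
            continue;
        }
        if (predicate && !predicate(heap[i]))
            continue;                    /* fails the predicate       */
        indtuples++;                     /* "index_insert" goes here  */
    }
    printf("reltuples=%ld indtuples=%ld\n", reltuples, indtuples);
    return 0;
}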
+ * ---------------- + */ + while (heapTuple = heap_getnext(scan, 0, &buffer), + HeapTupleIsValid(heapTuple)) { + + reltuples++; + + /* + * If oldPred != NULL, this is an EXTEND INDEX command, so skip + * this tuple if it was already in the existing partial index + */ + if (oldPred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + /*SetSlotContents(slot, heapTuple); */ + slot->val = heapTuple; + if (ExecQual((List*)oldPred, econtext) == true) { + indtuples++; + continue; + } +#endif /* OMIT_PARTIAL_INDEX */ + } + + /* Skip this tuple if it doesn't satisfy the partial-index predicate */ + if (predicate != NULL) { +#ifndef OMIT_PARTIAL_INDEX + /*SetSlotContents(slot, heapTuple); */ + slot->val = heapTuple; + if (ExecQual((List*)predicate, econtext) == false) + continue; +#endif /* OMIT_PARTIAL_INDEX */ + } + + indtuples++; + + /* ---------------- + * FormIndexDatum fills in its datum and null parameters + * with attribute information taken from the given heap tuple. + * ---------------- + */ + FormIndexDatum(numberOfAttributes, /* num attributes */ + attributeNumber, /* array of att nums to extract */ + heapTuple, /* tuple from base relation */ + heapDescriptor, /* heap tuple's descriptor */ + buffer, /* buffer used in the scan */ + datum, /* return: array of attributes */ + nullv, /* return: array of char's */ + funcInfo); + + indexTuple = index_formtuple(indexDescriptor, + datum, + nullv); + + indexTuple->t_tid = heapTuple->t_ctid; + + insertResult = index_insert(indexRelation, indexTuple); + + if (insertResult) pfree(insertResult); + pfree(indexTuple); + } + + heap_endscan(scan); + + if (predicate != NULL || oldPred != NULL) { +#ifndef OMIT_PARTIAL_INDEX + ExecDestroyTupleTable(tupleTable, false); +#endif /* OMIT_PARTIAL_INDEX */ + } + + pfree(nullv); + pfree(datum); + + /* + * Okay, now update the reltuples and relpages statistics for both + * the heap relation and the index. These statistics are used by + * the planner to choose a scan type. They are maintained generally + * by the vacuum daemon, but we update them here to make the index + * useful as soon as possible. + */ + UpdateStats(heapRelation->rd_id, reltuples, true); + UpdateStats(indexRelation->rd_id, indtuples, false); + if (oldPred != NULL) { + if (indtuples == reltuples) predicate = NULL; + UpdateIndexPredicate(indexRelation->rd_id, oldPred, predicate); + } +} + +/* ---------------- + * index_build + * ---------------- + */ +void +index_build(Relation heapRelation, + Relation indexRelation, + int numberOfAttributes, + AttrNumber attributeNumber[], + uint16 parameterCount, + Datum parameter[], + FuncIndexInfo *funcInfo, + PredInfo *predInfo) +{ + RegProcedure procedure; + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(RelationIsValid(indexRelation)); + Assert(PointerIsValid(indexRelation->rd_am)); + + procedure = indexRelation->rd_am->ambuild; + + /* ---------------- + * use the access method build procedure if supplied.. 
+ * ---------------- + */ + if (RegProcedureIsValid(procedure)) + (void) fmgr(procedure, + heapRelation, + indexRelation, + numberOfAttributes, + attributeNumber, + RelationGetIndexStrategy(indexRelation), + parameterCount, + parameter, + funcInfo, + predInfo); + else + DefaultBuild(heapRelation, + indexRelation, + numberOfAttributes, + attributeNumber, + RelationGetIndexStrategy(indexRelation), + parameterCount, + parameter, + funcInfo, + predInfo); +} + + diff --git a/src/backend/catalog/index.h b/src/backend/catalog/index.h new file mode 100644 index 00000000000..1734f866a07 --- /dev/null +++ b/src/backend/catalog/index.h @@ -0,0 +1,59 @@ +/*------------------------------------------------------------------------- + * + * index.h-- + * prototypes for index.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: index.h,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef INDEX_H +#define INDEX_H + +#include "access/funcindex.h" +#include "access/itup.h" +#include "nodes/execnodes.h" + + +extern Form_pg_am +AccessMethodObjectIdGetAccessMethodTupleForm(Oid accessMethodObjectId); + +extern void +UpdateIndexPredicate(Oid indexoid, Node *oldPred, Node *predicate); + +extern void InitIndexStrategy(int numatts, + Relation indexRelation, + Oid accessMethodObjectId); + +extern void index_create(char *heapRelationName, + char* indexRelationName, + FuncIndexInfo *funcInfo, + Oid accessMethodObjectId, + int numatts, + AttrNumber attNums[], + Oid classObjectId[], + uint16 parameterCount, + Datum parameter[], + Node *predicate); + +extern void index_destroy(Oid indexId); + +extern void FormIndexDatum(int numberOfAttributes, + AttrNumber attributeNumber[], HeapTuple heapTuple, + TupleDesc heapDescriptor, Buffer buffer, Datum *datum, + char *nullv, FuncIndexInfoPtr fInfo); + +extern void UpdateStats(Oid relid, long reltuples, bool hasindex); + +extern void FillDummyExprContext(ExprContext *econtext, TupleTableSlot *slot, + TupleDesc tupdesc, Buffer buffer); + +extern void index_build(Relation heapRelation, Relation indexRelation, + int numberOfAttributes, AttrNumber attributeNumber[], + uint16 parameterCount, Datum parameter[], FuncIndexInfo *funcInfo, + PredInfo *predInfo); + +#endif /* INDEX_H */ diff --git a/src/backend/catalog/indexing.c b/src/backend/catalog/indexing.c new file mode 100644 index 00000000000..74bf48a443b --- /dev/null +++ b/src/backend/catalog/indexing.c @@ -0,0 +1,561 @@ +/*------------------------------------------------------------------------- + * + * indexing.c-- + * This file contains routines to support indices defined on system + * catalogs. 
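For orientation, a sketch of what a caller might pass to index_create() for a plain one-column index; the relation and index names, the attribute number, and the access-method and operator-class oids are placeholders rather than values taken from these sources:

/*
 * Hypothetical caller sketch for index_create() as declared in index.h.
 */
static void
create_plain_index_sketch(Oid btreeAmOid, Oid opclassOid)
{
    AttrNumber attNums[1];
    Oid        classObjectId[1];

    attNums[0]       = 1;                 /* first attribute of "emp"  */
    classObjectId[0] = opclassOid;        /* looked up elsewhere       */

    index_create("emp",                   /* heap relation name        */
                 "emp_name_index",        /* index relation name       */
                 (FuncIndexInfo *) NULL,  /* not a functional index    */
                 btreeAmOid,              /* access method oid         */
                 1,                       /* number of key attributes  */
                 attNums,
                 classObjectId,
                 0,                       /* parameterCount            */
                 (Datum *) NULL,          /* parameter[]               */
                 (Node *) NULL);          /* no partial-index predicate */
}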
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/catalog/indexing.c,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "utils/builtins.h" +#include "utils/rel.h" +#include "utils/elog.h" +#include "utils/oidcompos.h" +#include "utils/palloc.h" +#include "access/htup.h" +#include "access/heapam.h" +#include "access/genam.h" +#include "access/attnum.h" +#include "access/funcindex.h" +#include "access/skey.h" +#include "storage/buf.h" +#include "storage/bufmgr.h" +#include "nodes/execnodes.h" +#include "catalog/catalog.h" +#include "catalog/catname.h" +#include "catalog/pg_index.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "catalog/pg_class.h" +#include "catalog/pg_attribute.h" +#include "utils/syscache.h" +#include "catalog/indexing.h" +#include "catalog/index.h" + +/* + * Names of indices on the following system catalogs: + * + * pg_attribute + * pg_proc + * pg_type + * pg_naming + * pg_class + */ +/* +static NameData AttributeNameIndexData = { "pg_attnameind" }; +static NameData AttributeNumIndexData = { "pg_attnumind" }; +static NameData AttributeRelidIndexData= { "pg_attrelidind" }; +static NameData ProcedureNameIndexData = { "pg_procnameind" }; +static NameData ProcedureOidIndexData = { "pg_procidind" }; +static NameData ProcedureSrcIndexData = { "pg_procsrcind" }; +static NameData TypeNameIndexData = { "pg_typenameind" }; +static NameData TypeOidIndexData = { "pg_typeidind" }; +static NameData ClassNameIndexData = { "pg_classnameind" }; +static NameData ClassOidIndexData = { "pg_classoidind" }; + +Name AttributeNameIndex = &AttributeNameIndexData; +Name AttributeNumIndex = &AttributeNumIndexData; +Name AttributeRelidIndex= &AttributeRelidIndexData; +Name ProcedureNameIndex = &ProcedureNameIndexData; +Name ProcedureOidIndex = &ProcedureOidIndexData; +Name ProcedureSrcIndex = &ProcedureSrcIndexData; +Name TypeNameIndex = &TypeNameIndexData; +Name TypeOidIndex = &TypeOidIndexData; +Name ClassNameIndex = &ClassNameIndexData; +Name ClassOidIndex = &ClassOidIndexData; +char *Name_pg_attr_indices[Num_pg_attr_indices] = {AttributeNameIndexData.data, + AttributeNumIndexData.data, + AttributeRelidIndexData.data}; +char *Name_pg_proc_indices[Num_pg_proc_indices] = {ProcedureNameIndexData.data, + ProcedureOidIndexData.data, + ProcedureSrcIndexData.data};char *Name_pg_type_indices[Num_pg_type_indices] = {TypeNameIndexData.data, + TypeOidIndexData.data}; +char *Name_pg_class_indices[Num_pg_class_indices]= {ClassNameIndexData.data, + ClassOidIndexData.data}; +*/ + +char *Name_pg_attr_indices[Num_pg_attr_indices] = {AttributeNameIndex, + AttributeNumIndex, + AttributeRelidIndex}; +char *Name_pg_proc_indices[Num_pg_proc_indices] = { ProcedureNameIndex, + ProcedureOidIndex, + ProcedureSrcIndex}; +char *Name_pg_type_indices[Num_pg_type_indices] = { TypeNameIndex, + TypeOidIndex}; +char *Name_pg_class_indices[Num_pg_class_indices]= { ClassNameIndex, + ClassOidIndex}; + + +static HeapTuple CatalogIndexFetchTuple(Relation heapRelation, + Relation idesc, + ScanKey skey); + + +/* + * Changes (appends) to catalogs can (and does) happen at various places + * throughout the code. We need a generic routine that will open all of + * the indices defined on a given catalog a return the relation descriptors + * associated with them. 
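The calling convention this comment describes, sketched for pg_proc: open the catalog's indices, insert an entry pointing at the freshly appended heap tuple, and close them again. The relation and tuple arguments are assumed to come from the caller's own heap_openr()/heap_insert() sequence:

/*
 * Sketch of the caller-side convention for keeping a catalog's indices
 * in step with a newly appended tuple.
 */
static void
append_pg_proc_indices_sketch(Relation pg_proc_rel, HeapTuple newtup)
{
    Relation idescs[Num_pg_proc_indices];

    CatalogOpenIndices(Num_pg_proc_indices, Name_pg_proc_indices, idescs);
    CatalogIndexInsert(idescs, Num_pg_proc_indices, pg_proc_rel, newtup);
    CatalogCloseIndices(Num_pg_proc_indices, idescs);
}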
+ */ +void +CatalogOpenIndices(int nIndices, char *names[], Relation idescs[]) +{ + int i; + + for (i=0; ird_id), + 0,0,0); + Assert(pgIndexTup); + pgIndexP = (IndexTupleForm)GETSTRUCT(pgIndexTup); + + /* + * Compute the number of attributes we are indexing upon. + * very important - can't assume one if this is a functional + * index. + */ + for (attnumP=(&pgIndexP->indkey[0]), natts=0; + *attnumP != InvalidAttrNumber; + attnumP++, natts++) + ; + + if (pgIndexP->indproc != InvalidOid) + { + FIgetnArgs(&finfo) = natts; + natts = 1; + FIgetProcOid(&finfo) = pgIndexP->indproc; + *(FIgetname(&finfo)) = '\0'; + finfoP = &finfo; + } + else + finfoP = (FuncIndexInfo *)NULL; + + FormIndexDatum(natts, + (AttrNumber *)&pgIndexP->indkey[0], + heapTuple, + heapDescriptor, + InvalidBuffer, + &datum, + nulls, + finfoP); + + newIndxTup = (IndexTuple)index_formtuple(indexDescriptor, + &datum,nulls); + Assert(newIndxTup); + /* + * Doing this structure assignment makes me quake in my boots when I + * think about portability. + */ + newIndxTup->t_tid = heapTuple->t_ctid; + + indexRes = index_insert(idescs[i], newIndxTup); + if (indexRes) pfree(indexRes); + } +} + +/* + * This is needed at initialization when reldescs for some of the crucial + * system catalogs are created and nailed into the cache. + */ +bool +CatalogHasIndex(char *catName, Oid catId) +{ + Relation pg_class; + HeapTuple htup; + Form_pg_class pgRelP; + int i; + + Assert(IsSystemRelationName(catName)); + + /* + * If we're bootstraping we don't have pg_class (or any indices). + */ + if (IsBootstrapProcessingMode()) + return false; + + if (IsInitProcessingMode()) { + for (i = 0; IndexedCatalogNames[i] != NULL; i++) { + if ( strcmp(IndexedCatalogNames[i], catName) == 0) + return (true); + } + return (false); + } + + pg_class = heap_openr(RelationRelationName); + htup = ClassOidIndexScan(pg_class, catId); + heap_close(pg_class); + + if (! HeapTupleIsValid(htup)) { + elog(NOTICE, "CatalogHasIndex: no relation with oid %d", catId); + return false; + } + + pgRelP = (Form_pg_class)GETSTRUCT(htup); + return (pgRelP->relhasindex); +} + +/* + * CatalogIndexFetchTuple() -- Get a tuple that satisfies a scan key + * from a catalog relation. + * + * Since the index may contain pointers to dead tuples, we need to + * iterate until we find a tuple that's valid and satisfies the scan + * key. + */ +static HeapTuple +CatalogIndexFetchTuple(Relation heapRelation, + Relation idesc, + ScanKey skey) +{ + IndexScanDesc sd; + RetrieveIndexResult indexRes; + HeapTuple tuple; + Buffer buffer; + + sd = index_beginscan(idesc, false, 1, skey); + tuple = (HeapTuple)NULL; + + do { + indexRes = index_getnext(sd, ForwardScanDirection); + if (indexRes) { + ItemPointer iptr; + + iptr = &indexRes->heap_iptr; + tuple = heap_fetch(heapRelation, NowTimeQual, iptr, &buffer); + pfree(indexRes); + } else + break; + } while (!HeapTupleIsValid(tuple)); + + if (HeapTupleIsValid(tuple)) { + tuple = heap_copytuple(tuple); + ReleaseBuffer(buffer); + } + + index_endscan(sd); + if (sd->opaque) + pfree(sd->opaque); + pfree(sd); + return (tuple); +} + +/* + * The remainder of the file is for individual index scan routines. Each + * index should be scanned according to how it was defined during bootstrap + * (that is, functional or normal) and what arguments the cache lookup + * requires. Each routine returns the heap tuple that qualifies. 
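A usage sketch for the per-catalog scan routines that follow, mirroring what CatalogHasIndex() does above: fetch a pg_class tuple by relation oid through its oid index. The helper name and the relid argument are illustrative:

/*
 * Sketch: look up pg_class through its oid index and read one field.
 */
static bool
relation_has_index_sketch(Oid relid)
{
    Relation  pg_class;
    HeapTuple htup;
    bool      result = false;

    pg_class = heap_openr(RelationRelationName);
    htup = ClassOidIndexScan(pg_class, relid);
    if (HeapTupleIsValid(htup)) {
        result = ((Form_pg_class) GETSTRUCT(htup))->relhasindex;
        pfree(htup);    /* scan routines hand back a heap_copytuple() copy */
    }
    heap_close(pg_class);
    return result;
}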
+ */ +HeapTuple +AttributeNameIndexScan(Relation heapRelation, + Oid relid, + char *attname) +{ + Relation idesc; + ScanKeyData skey; + OidName keyarg; + HeapTuple tuple; + + keyarg = mkoidname(relid, attname); + ScanKeyEntryInitialize(&skey, + (bits16)0x0, + (AttrNumber)1, + (RegProcedure)OidNameEqRegProcedure, + (Datum)keyarg); + + idesc = index_openr(AttributeNameIndex); + tuple = CatalogIndexFetchTuple(heapRelation, idesc, &skey); + + index_close(idesc); + pfree(keyarg); + + return tuple; +} + +HeapTuple +AttributeNumIndexScan(Relation heapRelation, + Oid relid, + AttrNumber attnum) +{ + Relation idesc; + ScanKeyData skey; + OidInt2 keyarg; + HeapTuple tuple; + + keyarg = mkoidint2(relid, (uint16)attnum); + ScanKeyEntryInitialize(&skey, + (bits16)0x0, + (AttrNumber)1, + (RegProcedure)OidInt2EqRegProcedure, + (Datum)keyarg); + + idesc = index_openr(AttributeNumIndex); + tuple = CatalogIndexFetchTuple(heapRelation, idesc, &skey); + + index_close(idesc); + pfree(keyarg); + + return tuple; +} + +HeapTuple +ProcedureOidIndexScan(Relation heapRelation, Oid procId) +{ + Relation idesc; + ScanKeyData skey; + HeapTuple tuple; + + ScanKeyEntryInitialize(&skey, + (bits16)0x0, + (AttrNumber)1, + (RegProcedure)ObjectIdEqualRegProcedure, + (Datum)procId); + + idesc = index_openr(ProcedureOidIndex); + tuple = CatalogIndexFetchTuple(heapRelation, idesc, &skey); + + index_close(idesc); + + return tuple; +} + +HeapTuple +ProcedureNameIndexScan(Relation heapRelation, + char *procName, + int nargs, + Oid *argTypes) +{ + Relation idesc; + ScanKeyData skey; + HeapTuple tuple; + IndexScanDesc sd; + RetrieveIndexResult indexRes; + Buffer buffer; + Form_pg_proc pgProcP; + bool bufferUsed = FALSE; + + ScanKeyEntryInitialize(&skey, + (bits16)0x0, + (AttrNumber)1, + (RegProcedure)NameEqualRegProcedure, + (Datum)procName); + + idesc = index_openr(ProcedureNameIndex); + + sd = index_beginscan(idesc, false, 1, &skey); + + /* + * for now, we do the work usually done by CatalogIndexFetchTuple + * by hand, so that we can check that the other keys match. when + * multi-key indices are added, they will be used here. 
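A usage sketch for ProcedureNameIndexScan(): resolve a two-argument function by name and argument-type vector. The type oid 23 is assumed here to be int4, and the helper name and relation argument are illustrative:

/*
 * Sketch: fetch the pg_proc tuple for int4pl(int4, int4) by name.
 */
static Oid
lookup_int4pl_sketch(Relation pg_proc_rel)
{
    Oid       argTypes[8];      /* pg_proc.proargtypes is an oid8 vector */
    HeapTuple tup;

    memset(argTypes, 0, sizeof(argTypes));
    argTypes[0] = 23;           /* assumed: 23 is int4's type oid */
    argTypes[1] = 23;

    tup = ProcedureNameIndexScan(pg_proc_rel, "int4pl", 2, argTypes);
    return HeapTupleIsValid(tup) ? tup->t_oid : InvalidOid;
}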
+ */ + do { + tuple = (HeapTuple)NULL; + if (bufferUsed) { + ReleaseBuffer(buffer); + bufferUsed = FALSE; + } + + indexRes = index_getnext(sd, ForwardScanDirection); + if (indexRes) { + ItemPointer iptr; + + iptr = &indexRes->heap_iptr; + tuple = heap_fetch(heapRelation, NowTimeQual, iptr, &buffer); + pfree(indexRes); + if (HeapTupleIsValid(tuple)) { + pgProcP = (Form_pg_proc)GETSTRUCT(tuple); + bufferUsed = TRUE; + } + } else + break; + } while (!HeapTupleIsValid(tuple) || + pgProcP->pronargs != nargs || + !oid8eq(&(pgProcP->proargtypes[0]), argTypes)); + + if (HeapTupleIsValid(tuple)) { + tuple = heap_copytuple(tuple); + ReleaseBuffer(buffer); + } + + index_endscan(sd); + index_close(idesc); + + return tuple; +} + +HeapTuple +ProcedureSrcIndexScan(Relation heapRelation, text *procSrc) +{ + Relation idesc; + IndexScanDesc sd; + ScanKeyData skey; + RetrieveIndexResult indexRes; + HeapTuple tuple; + Buffer buffer; + + ScanKeyEntryInitialize(&skey, + (bits16)0x0, + (AttrNumber)Anum_pg_proc_prosrc, + (RegProcedure)TextEqualRegProcedure, + (Datum)procSrc); + + idesc = index_openr(ProcedureSrcIndex); + sd = index_beginscan(idesc, false, 1, &skey); + + indexRes = index_getnext(sd, ForwardScanDirection); + if (indexRes) { + ItemPointer iptr; + + iptr = &indexRes->heap_iptr; + tuple = heap_fetch(heapRelation, NowTimeQual, iptr, &buffer); + pfree(indexRes); + } else + tuple = (HeapTuple)NULL; + + if (HeapTupleIsValid(tuple)) { + tuple = heap_copytuple(tuple); + ReleaseBuffer(buffer); + } + + index_endscan(sd); + + return tuple; +} + +HeapTuple +TypeOidIndexScan(Relation heapRelation, Oid typeId) +{ + Relation idesc; + ScanKeyData skey; + HeapTuple tuple; + + ScanKeyEntryInitialize(&skey, + (bits16)0x0, + (AttrNumber)1, + (RegProcedure)ObjectIdEqualRegProcedure, + (Datum)typeId); + + idesc = index_openr(TypeOidIndex); + tuple = CatalogIndexFetchTuple(heapRelation, idesc, &skey); + + index_close(idesc); + + return tuple; +} + +HeapTuple +TypeNameIndexScan(Relation heapRelation, char *typeName) +{ + Relation idesc; + ScanKeyData skey; + HeapTuple tuple; + + ScanKeyEntryInitialize(&skey, + (bits16)0x0, + (AttrNumber)1, + (RegProcedure)NameEqualRegProcedure, + (Datum)typeName); + + idesc = index_openr(TypeNameIndex); + tuple = CatalogIndexFetchTuple(heapRelation, idesc, &skey); + + index_close(idesc); + + return tuple; +} + +HeapTuple +ClassNameIndexScan(Relation heapRelation, char *relName) +{ + Relation idesc; + ScanKeyData skey; + HeapTuple tuple; + + ScanKeyEntryInitialize(&skey, + (bits16)0x0, + (AttrNumber)1, + (RegProcedure)NameEqualRegProcedure, + (Datum)relName); + + idesc = index_openr(ClassNameIndex); + + tuple = CatalogIndexFetchTuple(heapRelation, idesc, &skey); + + index_close(idesc); + return tuple; +} + +HeapTuple +ClassOidIndexScan(Relation heapRelation, Oid relId) +{ + Relation idesc; + ScanKeyData skey; + HeapTuple tuple; + + ScanKeyEntryInitialize(&skey, + (bits16)0x0, + (AttrNumber)1, + (RegProcedure)ObjectIdEqualRegProcedure, + (Datum)relId); + + idesc = index_openr(ClassOidIndex); + tuple = CatalogIndexFetchTuple(heapRelation, idesc, &skey); + + index_close(idesc); + + return tuple; +} diff --git a/src/backend/catalog/indexing.h b/src/backend/catalog/indexing.h new file mode 100644 index 00000000000..c1a83cbaf34 --- /dev/null +++ b/src/backend/catalog/indexing.h @@ -0,0 +1,103 @@ +/*------------------------------------------------------------------------- + * + * indexing.h-- + * This include provides some definitions to support indexing + * on system catalogs + * + * + * 
Copyright (c) 1994, Regents of the University of California + * + * $Id: indexing.h,v 1.1.1.1 1996/07/09 06:21:15 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef INDEXING_H +#define INDEXING_H + +#include "utils/rel.h" + +/* + * Some definitions for indices on pg_attribute + */ +#define Num_pg_attr_indices 3 +#define Num_pg_proc_indices 3 +#define Num_pg_type_indices 2 +#define Num_pg_class_indices 2 + + +/* + * Names of indices on system catalogs + */ +#define AttributeNameIndex "pg_attnameind" +#define AttributeNumIndex "pg_attnumind" +#define AttributeRelidIndex "pg_attrelidind" +#define ProcedureNameIndex "pg_procnameind" +#define ProcedureOidIndex "pg_procidind" +#define ProcedureSrcIndex "pg_procsrcind" +#define TypeNameIndex "pg_typenameind" +#define TypeOidIndex "pg_typeidind" +#define ClassNameIndex "pg_classnameind" +#define ClassOidIndex "pg_classoidind" + +extern char *Name_pg_attr_indices[]; +extern char *Name_pg_proc_indices[]; +extern char *Name_pg_type_indices[]; +extern char *Name_pg_class_indices[]; + +extern char *IndexedCatalogNames[]; + +/* + * indexing.c prototypes + * + * Functions for each index to perform the necessary scan on a cache miss. + */ +extern void CatalogOpenIndices(int nIndices, char *names[], Relation idescs[]); +extern void CatalogCloseIndices(int nIndices, Relation *idescs); +extern void CatalogIndexInsert(Relation *idescs, + int nIndices, + Relation heapRelation, + HeapTuple heapTuple); +extern bool CatalogHasIndex(char *catName, Oid catId); + +extern HeapTuple AttributeNameIndexScan(Relation heapRelation, + Oid relid, + char *attname); + +extern HeapTuple AttributeNumIndexScan(Relation heapRelation, + Oid relid, + AttrNumber attnum); +extern HeapTuple ProcedureOidIndexScan(Relation heapRelation, Oid procId); +extern HeapTuple ProcedureNameIndexScan(Relation heapRelation, + char *procName, int nargs, Oid *argTypes); +extern HeapTuple ProcedureSrcIndexScan(Relation heapRelation, text *procSrc); +extern HeapTuple TypeOidIndexScan(Relation heapRelation, Oid typeId); +extern HeapTuple TypeNameIndexScan(Relation heapRelation, char *typeName); +extern HeapTuple ClassNameIndexScan(Relation heapRelation, char *relName); +extern HeapTuple ClassOidIndexScan(Relation heapRelation, Oid relId); + + +/* + * What follows are lines processed by genbki.sh to create the statements + * the bootstrap parser will turn into DefineIndex commands. + * + * The keyword is DECLARE_INDEX every thing after that is just like in a + * normal specification of the 'define index' POSTQUEL command. 
+ */ +DECLARE_INDEX(pg_attnameind on pg_attribute using btree (mkoidname(attrelid, attname) oidname_ops)); +DECLARE_INDEX(pg_attnumind on pg_attribute using btree (mkoidint2(attrelid, attnum) oidint2_ops)); +DECLARE_INDEX(pg_attrelidind on pg_attribute using btree (attrelid oid_ops)); + +DECLARE_INDEX(pg_procidind on pg_proc using btree (Oid oid_ops)); +DECLARE_INDEX(pg_procnameind on pg_proc using btree (proname name_ops)); +DECLARE_INDEX(pg_procsrcind on pg_proc using btree (prosrc text_ops)); + +DECLARE_INDEX(pg_typeidind on pg_type using btree (Oid oid_ops)); +DECLARE_INDEX(pg_typenameind on pg_type using btree (typname name_ops)); + +DECLARE_INDEX(pg_classnameind on pg_class using btree (relname name_ops)); +DECLARE_INDEX(pg_classoidind on pg_class using btree (Oid oid_ops)); + +/* now build indices in the initialization scripts */ +BUILD_INDICES + +#endif /* INDEXING_H */ diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c new file mode 100644 index 00000000000..7fe895e0f0c --- /dev/null +++ b/src/backend/catalog/pg_aggregate.c @@ -0,0 +1,325 @@ +/*------------------------------------------------------------------------- + * + * pg_aggregate.c-- + * routines to support manipulation of the pg_aggregate relation + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/catalog/pg_aggregate.c,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "access/htup.h" +#include "access/tupdesc.h" +#include "utils/rel.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/builtins.h" +#include "fmgr.h" + +#include "catalog/catname.h" +#include "utils/syscache.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "catalog/pg_aggregate.h" + +/* ---------------- + * AggregateCreate + * + * aggregates overloading has been added. Instead of the full + * overload support we have for functions, aggregate overloading only + * applies to exact basetype matches. That is, we don't check the + * the inheritance hierarchy + * + * OLD COMMENTS: + * Currently, redefining aggregates using the same name is not + * supported. In such a case, a warning is printed that the + * aggregate already exists. If such is not the case, a new tuple + * is created and inserted in the aggregate relation. The fields + * of this tuple are aggregate name, owner id, 2 transition functions + * (called aggtransfn1 and aggtransfn2), final function (aggfinalfn), + * type of data on which aggtransfn1 operates (aggbasetype), return + * types of the two transition functions (aggtranstype1 and + * aggtranstype2), final return type (aggfinaltype), and initial values + * for the two state transition functions (agginitval1 and agginitval2). + * All types and functions must have been defined + * prior to defining the aggregate. 
+ * + * --------------- + */ +void +AggregateCreate(char *aggName, + char *aggtransfn1Name, + char *aggtransfn2Name, + char *aggfinalfnName, + char *aggbasetypeName, + char *aggtransfn1typeName, + char *aggtransfn2typeName, + char *agginitval1, + char *agginitval2) +{ + register i; + Relation aggdesc; + HeapTuple tup; + char nulls[Natts_pg_aggregate]; + Datum values[Natts_pg_aggregate]; + Form_pg_proc proc; + Oid xfn1 = InvalidOid; + Oid xfn2 = InvalidOid; + Oid ffn = InvalidOid; + Oid xbase = InvalidOid; + Oid xret1 = InvalidOid; + Oid xret2 = InvalidOid; + Oid fret = InvalidOid; + Oid fnArgs[8]; + TupleDesc tupDesc; + + memset(fnArgs, 0, 8 * sizeof(Oid)); + + /* sanity checks */ + if (!aggName) + elog(WARN, "AggregateCreate: no aggregate name supplied"); + + if (!aggtransfn1Name && !aggtransfn2Name) + elog(WARN, "AggregateCreate: aggregate must have at least one transition function"); + + tup = SearchSysCacheTuple(TYPNAME, + PointerGetDatum(aggbasetypeName), + 0,0,0); + if(!HeapTupleIsValid(tup)) + elog(WARN, "AggregateCreate: Type '%s' undefined",aggbasetypeName); + xbase = tup->t_oid; + + if (aggtransfn1Name) { + tup = SearchSysCacheTuple(TYPNAME, + PointerGetDatum(aggtransfn1typeName), + 0,0,0); + if(!HeapTupleIsValid(tup)) + elog(WARN, "AggregateCreate: Type '%s' undefined", + aggtransfn1typeName); + xret1 = tup->t_oid; + + fnArgs[0] = xret1; + fnArgs[1] = xbase; + tup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(aggtransfn1Name), + Int32GetDatum(2), + PointerGetDatum(fnArgs), + 0); + if(!HeapTupleIsValid(tup)) + elog(WARN, "AggregateCreate: '%s('%s', '%s') does not exist", + aggtransfn1Name, aggtransfn1typeName, aggbasetypeName); + if (((Form_pg_proc) GETSTRUCT(tup))->prorettype != xret1) + elog(WARN, "AggregateCreate: return type of '%s' is not '%s'", + aggtransfn1Name, + aggtransfn1typeName); + xfn1 = tup->t_oid; + if (!OidIsValid(xfn1) || !OidIsValid(xret1) || + !OidIsValid(xbase)) + elog(WARN, "AggregateCreate: bogus function '%s'", aggfinalfnName); + } + + if (aggtransfn2Name) { + tup = SearchSysCacheTuple(TYPNAME, + PointerGetDatum(aggtransfn2typeName), + 0,0,0); + if(!HeapTupleIsValid(tup)) + elog(WARN, "AggregateCreate: Type '%s' undefined", + aggtransfn2typeName); + xret2 = tup->t_oid; + + fnArgs[0] = xret2; + fnArgs[1] = 0; + tup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(aggtransfn2Name), + Int32GetDatum(1), + PointerGetDatum(fnArgs), + 0); + if(!HeapTupleIsValid(tup)) + elog(WARN, "AggregateCreate: '%s'('%s') does not exist", + aggtransfn2Name, aggtransfn2typeName); + if (((Form_pg_proc) GETSTRUCT(tup))->prorettype != xret2) + elog(WARN, "AggregateCreate: return type of '%s' is not '%s'", + aggtransfn2Name, aggtransfn2typeName); + xfn2 = tup->t_oid; + if (!OidIsValid(xfn2) || !OidIsValid(xret2)) + elog(WARN, "AggregateCreate: bogus function '%s'",aggfinalfnName); + } + + tup = SearchSysCacheTuple(AGGNAME, PointerGetDatum(aggName), + ObjectIdGetDatum(xbase), + 0,0); + if (HeapTupleIsValid(tup)) + elog(WARN, + "AggregateCreate: aggregate '%s' with base type '%s' already exists", + aggName, aggbasetypeName); + + /* more sanity checks */ + if (aggtransfn1Name && aggtransfn2Name && !aggfinalfnName) + elog(WARN, "AggregateCreate: Aggregate must have final function with both transition functions"); + + if ((!aggtransfn1Name || !aggtransfn2Name) && aggfinalfnName) + elog(WARN, "AggregateCreate: Aggregate cannot have final function without both transition functions"); + + if (aggfinalfnName) { + fnArgs[0] = xret1; + fnArgs[1] = xret2; + tup = 
SearchSysCacheTuple(PRONAME, + PointerGetDatum(aggfinalfnName), + Int32GetDatum(2), + PointerGetDatum(fnArgs), + 0); + if(!HeapTupleIsValid(tup)) + elog(WARN, "AggregateCreate: '%s'('%s','%s') does not exist", + aggfinalfnName, aggtransfn1typeName, aggtransfn2typeName); + ffn = tup->t_oid; + proc = (Form_pg_proc) GETSTRUCT(tup); + fret = proc->prorettype; + if (!OidIsValid(ffn) || !OidIsValid(fret)) + elog(WARN, "AggregateCreate: bogus function '%s'", aggfinalfnName); + } + + /* + * If transition function 2 is defined, it must have an initial value, + * whereas transition function 1 does not, which allows man and min + * aggregates to return NULL if they are evaluated on empty sets. + */ + if (OidIsValid(xfn2) && !agginitval2) + elog(WARN, "AggregateCreate: transition function 2 MUST have an initial value"); + + /* initialize nulls and values */ + for(i=0; i < Natts_pg_aggregate; i++) { + nulls[i] = ' '; + values[i] = (Datum)NULL; + } + values[Anum_pg_aggregate_aggname-1] = PointerGetDatum(aggName); + values[Anum_pg_aggregate_aggowner-1] = + Int32GetDatum(GetUserId()); + values[Anum_pg_aggregate_aggtransfn1-1] = + ObjectIdGetDatum(xfn1); + values[Anum_pg_aggregate_aggtransfn2-1] = + ObjectIdGetDatum(xfn2); + values[Anum_pg_aggregate_aggfinalfn-1] = + ObjectIdGetDatum(ffn); + + values[Anum_pg_aggregate_aggbasetype-1] = + ObjectIdGetDatum(xbase); + if (!OidIsValid(xfn1)) { + values[Anum_pg_aggregate_aggtranstype1-1] = + ObjectIdGetDatum(InvalidOid); + values[Anum_pg_aggregate_aggtranstype2-1] = + ObjectIdGetDatum(xret2); + values[Anum_pg_aggregate_aggfinaltype-1] = + ObjectIdGetDatum(xret2); + } + else if (!OidIsValid(xfn2)) { + values[Anum_pg_aggregate_aggtranstype1-1] = + ObjectIdGetDatum(xret1); + values[Anum_pg_aggregate_aggtranstype2-1] = + ObjectIdGetDatum(InvalidOid); + values[Anum_pg_aggregate_aggfinaltype-1] = + ObjectIdGetDatum(xret1); + } + else { + values[Anum_pg_aggregate_aggtranstype1-1] = + ObjectIdGetDatum(xret1); + values[Anum_pg_aggregate_aggtranstype2-1] = + ObjectIdGetDatum(xret2); + values[Anum_pg_aggregate_aggfinaltype-1] = + ObjectIdGetDatum(fret); + } + + if (agginitval1) + values[Anum_pg_aggregate_agginitval1-1] = PointerGetDatum(textin(agginitval1)); + else + nulls[Anum_pg_aggregate_agginitval1-1] = 'n'; + + if (agginitval2) + values[Anum_pg_aggregate_agginitval2-1] = PointerGetDatum(textin(agginitval2)); + else + nulls[Anum_pg_aggregate_agginitval2-1] = 'n'; + + if (!RelationIsValid(aggdesc = heap_openr(AggregateRelationName))) + elog(WARN, "AggregateCreate: could not open '%s'", + AggregateRelationName); + + tupDesc = aggdesc->rd_att; + if (!HeapTupleIsValid(tup = heap_formtuple(tupDesc, + values, + nulls))) + elog(WARN, "AggregateCreate: heap_formtuple failed"); + if (!OidIsValid(heap_insert(aggdesc, tup))) + elog(WARN, "AggregateCreate: heap_insert failed"); + heap_close(aggdesc); + +} + +char * +AggNameGetInitVal(char *aggName, Oid basetype, int xfuncno, bool *isNull) +{ + HeapTuple tup; + Relation aggRel; + int initValAttno; + Oid transtype; + text *textInitVal; + char *strInitVal, *initVal; + extern char *textout(); + + Assert(PointerIsValid(aggName)); + Assert(PointerIsValid(isNull)); + Assert(xfuncno == 1 || xfuncno == 2); + + tup = SearchSysCacheTuple(AGGNAME, + PointerGetDatum(aggName), + PointerGetDatum(basetype), + 0,0); + if (!HeapTupleIsValid(tup)) + elog(WARN, "AggNameGetInitVal: cache lookup failed for aggregate '%s'", + aggName); + if (xfuncno == 1) { + transtype = ((Form_pg_aggregate) GETSTRUCT(tup))->aggtranstype1; + initValAttno = 
Anum_pg_aggregate_agginitval1; + } + else if (xfuncno == 2) { + transtype = ((Form_pg_aggregate) GETSTRUCT(tup))->aggtranstype2; + initValAttno = Anum_pg_aggregate_agginitval2; + } + + aggRel = heap_openr(AggregateRelationName); + if (!RelationIsValid(aggRel)) + elog(WARN, "AggNameGetInitVal: could not open \"%-.*s\"", + AggregateRelationName); + /* + * must use fastgetattr in case one or other of the init values is NULL + */ + textInitVal = (text *) fastgetattr(tup, initValAttno, + RelationGetTupleDescriptor(aggRel), + isNull); + if (!PointerIsValid(textInitVal)) + *isNull = true; + if (*isNull) { + heap_close(aggRel); + return((char *) NULL); + } + strInitVal = textout(textInitVal); + heap_close(aggRel); + + tup = SearchSysCacheTuple(TYPOID, ObjectIdGetDatum(transtype), + 0,0,0); + if (!HeapTupleIsValid(tup)) { + pfree(strInitVal); + elog(WARN, "AggNameGetInitVal: cache lookup failed on aggregate transition function return type"); + } + initVal = fmgr(((TypeTupleForm) GETSTRUCT(tup))->typinput, strInitVal, -1); + pfree(strInitVal); + return(initVal); +} diff --git a/src/backend/catalog/pg_aggregate.h b/src/backend/catalog/pg_aggregate.h new file mode 100644 index 00000000000..7ed983506b0 --- /dev/null +++ b/src/backend/catalog/pg_aggregate.h @@ -0,0 +1,132 @@ +/*------------------------------------------------------------------------- + * + * pg_aggregate.h-- + * definition of the system "aggregate" relation (pg_aggregate) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_aggregate.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_AGGREGATE_H +#define PG_AGGREGATE_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------------------------------------------------------- + * pg_aggregate definition. + * + * cpp turns this into typedef struct FormData_pg_aggregate + * + * aggname name of the aggregate + * aggtransfn1 transition function 1 + * aggtransfn2 transition function 2 + * aggfinalfn final function + * aggbasetype type of data on which aggregate operates + * aggtranstype1 output types for xition func 1 + * aggtranstype2 output types for xition func 2 + * aggfinaltype output type for final func + * agginitval1 initial aggregate value + * agginitval2 initial value for transition state 2 + * ---------------------------------------------------------------- + */ +CATALOG(pg_aggregate) { + NameData aggname; + Oid aggowner; + regproc aggtransfn1; + regproc aggtransfn2; + regproc aggfinalfn; + Oid aggbasetype; + Oid aggtranstype1; + Oid aggtranstype2; + Oid aggfinaltype; + text agginitval1; /* VARIABLE LENGTH FIELD */ + text agginitval2; /* VARIABLE LENGTH FIELD */ +} FormData_pg_aggregate; + +/* ---------------- + * Form_pg_aggregate corresponds to a pointer to a tuple with + * the format of pg_aggregate relation. 
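To make the two-transition-function scheme concrete: for the int4 avg row, state 1 accumulates the running sum (int4pl), state 2 counts the inputs (int4inc), and the final function divides the two (int4div). A self-contained toy in plain C, with stand-ins for the three functions:

/* Toy model of evaluating an avg-style aggregate with two transition
 * functions and a final function, as described in the comments above. */
#include <stdio.h>

static int add(int a, int b)  { return a + b; }   /* like int4pl  */
static int inc(int a)         { return a + 1; }   /* like int4inc */
static int divf(int a, int b) { return a / b; }   /* like int4div */

int
main(void)
{
    int input[] = {3, 5, 10};
    int state1 = 0;             /* agginitval1 */
    int state2 = 0;             /* agginitval2 */
    int i;

    for (i = 0; i < 3; i++) {
        state1 = add(state1, input[i]);   /* aggtransfn1(state1, value) */
        state2 = inc(state2);             /* aggtransfn2(state2)        */
    }
    printf("avg = %d\n", divf(state1, state2));   /* aggfinalfn(s1, s2) */
    return 0;
}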
+ * ---------------- + */ +typedef FormData_pg_aggregate *Form_pg_aggregate; + +/* ---------------- + * compiler constants for pg_aggregate + * ---------------- + */ + +#define Natts_pg_aggregate 11 +#define Anum_pg_aggregate_aggname 1 +#define Anum_pg_aggregate_aggowner 2 +#define Anum_pg_aggregate_aggtransfn1 3 +#define Anum_pg_aggregate_aggtransfn2 4 +#define Anum_pg_aggregate_aggfinalfn 5 +#define Anum_pg_aggregate_aggbasetype 6 +#define Anum_pg_aggregate_aggtranstype1 7 +#define Anum_pg_aggregate_aggtranstype2 8 +#define Anum_pg_aggregate_aggfinaltype 9 +#define Anum_pg_aggregate_agginitval1 10 +#define Anum_pg_aggregate_agginitval2 11 + + +/* ---------------- + * initial contents of pg_aggregate + * --------------- + */ + +DATA(insert OID = 0 ( avg PGUID int4pl int4inc int4div 23 23 23 23 0 0 )); +DATA(insert OID = 0 ( avg PGUID int2pl int2inc int2div 21 21 21 21 0 0 )); +DATA(insert OID = 0 ( avg PGUID float4pl float4inc float4div 700 700 700 700 0.0 0.0 )); +DATA(insert OID = 0 ( avg PGUID float8pl float8inc float8div 701 701 701 701 0.0 0.0 )); + +DATA(insert OID = 0 ( sum PGUID int4pl - - 23 23 0 23 0 _null_ )); +DATA(insert OID = 0 ( sum PGUID int2pl - - 21 21 0 21 0 _null_ )); +DATA(insert OID = 0 ( sum PGUID float4pl - - 700 700 0 700 0.0 _null_ )); +DATA(insert OID = 0 ( sum PGUID float8pl - - 701 701 0 701 0.0 _null_ )); + +DATA(insert OID = 0 ( max PGUID int4larger - - 23 23 0 23 _null_ _null_ )); +DATA(insert OID = 0 ( max PGUID int2larger - - 21 21 0 21 _null_ _null_ )); +DATA(insert OID = 0 ( max PGUID float4larger - - 700 700 0 700 _null_ _null_ )); +DATA(insert OID = 0 ( max PGUID float8larger - - 701 701 0 701 _null_ _null_ )); + +DATA(insert OID = 0 ( min PGUID int4smaller - - 23 23 0 23 _null_ _null_ )); +DATA(insert OID = 0 ( min PGUID int2smaller - - 21 21 0 21 _null_ _null_ )); +DATA(insert OID = 0 ( min PGUID float4smaller - - 700 700 0 700 _null_ _null_ )); +DATA(insert OID = 0 ( min PGUID float8smaller - - 701 701 0 701 _null_ _null_ )); + +DATA(insert OID = 0 ( count PGUID - int4inc - 0 0 23 23 _null_ 0 )); + +/* + * prototypes for fucnctions in pg_aggregate.c + */ +extern void AggregateCreate(char *aggName, + char *aggtransfn1Name, + char *aggtransfn2Name, + char *aggfinalfnName, + char *aggbasetypeName, + char *aggtransfn1typeName, + char *aggtransfn2typeName, + char *agginitval1, + char *agginitval2); +extern char *AggNameGetInitVal(char *aggName, Oid basetype, + int xfuncno, bool *isNull); + +#endif /* PG_AGGREGATE_H */ + + + + diff --git a/src/backend/catalog/pg_am.h b/src/backend/catalog/pg_am.h new file mode 100644 index 00000000000..0f36e7c4332 --- /dev/null +++ b/src/backend/catalog/pg_am.h @@ -0,0 +1,115 @@ +/*------------------------------------------------------------------------- + * + * pg_am.h-- + * definition of the system "am" relation (pg_am) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_am.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + * XXX do NOT break up DATA() statements into multiple lines! + * the scripts are not as smart as you might think... 
+ * + *------------------------------------------------------------------------- + */ +#ifndef PG_AM_H +#define PG_AM_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_am definition. cpp turns this into + * typedef struct FormData_pg_am + * ---------------- + */ +CATALOG(pg_am) { + NameData amname; + Oid amowner; + char amkind; + int2 amstrategies; + int2 amsupport; + regproc amgettuple; + regproc aminsert; + regproc amdelete; + regproc amgetattr; + regproc amsetlock; + regproc amsettid; + regproc amfreetuple; + regproc ambeginscan; + regproc amrescan; + regproc amendscan; + regproc ammarkpos; + regproc amrestrpos; + regproc amopen; + regproc amclose; + regproc ambuild; + regproc amcreate; + regproc amdestroy; +} FormData_pg_am; + +/* ---------------- + * Form_pg_am corresponds to a pointer to a tuple with + * the format of pg_am relation. + * ---------------- + */ +typedef FormData_pg_am *Form_pg_am; + +/* ---------------- + * compiler constants for pg_am + * ---------------- + */ +#define Natts_pg_am 22 +#define Anum_pg_am_amname 1 +#define Anum_pg_am_amowner 2 +#define Anum_pg_am_amkind 3 +#define Anum_pg_am_amstrategies 4 +#define Anum_pg_am_amsupport 5 +#define Anum_pg_am_amgettuple 6 +#define Anum_pg_am_aminsert 7 +#define Anum_pg_am_amdelete 8 +#define Anum_pg_am_amgetattr 9 +#define Anum_pg_am_amsetlock 10 +#define Anum_pg_am_amsettid 11 +#define Anum_pg_am_amfreetuple 12 +#define Anum_pg_am_ambeginscan 13 +#define Anum_pg_am_amrescan 14 +#define Anum_pg_am_amendscan 15 +#define Anum_pg_am_ammarkpos 16 +#define Anum_pg_am_amrestrpos 17 +#define Anum_pg_am_amopen 18 +#define Anum_pg_am_amclose 19 +#define Anum_pg_am_ambuild 20 +#define Anum_pg_am_amcreate 21 +#define Anum_pg_am_amdestroy 22 + +/* ---------------- + * initial contents of pg_am + * ---------------- + */ + +DATA(insert OID = 405 ( hash PGUID "o" 1 1 hashgettuple hashinsert hashdelete - - - - hashbeginscan hashrescan hashendscan hashmarkpos hashrestrpos - - hashbuild - - )); +DATA(insert OID = 402 ( rtree PGUID "o" 8 3 rtgettuple rtinsert rtdelete - - - - rtbeginscan rtrescan rtendscan rtmarkpos rtrestrpos - - rtbuild - - )); +DATA(insert OID = 403 ( btree PGUID "o" 5 1 btgettuple btinsert btdelete - - - - btbeginscan btrescan btendscan btmarkpos btrestrpos - - btbuild - - )); +#define BTREE_AM_OID 403 + +BKI_BEGIN +#ifdef NOBTREE +BKI_END +DATA(insert OID = 404 ( nobtree PGUID "o" 5 1 nobtgettuple nobtinsert nobtdelete - - - - nobtbeginscan nobtrescan nobtendscan nobtmarkpos nobtrestrpos - - nobtbuild - - )); +BKI_BEGIN +#endif /* NOBTREE */ +BKI_END + +#endif /* PG_AM_H */ diff --git a/src/backend/catalog/pg_amop.h b/src/backend/catalog/pg_amop.h new file mode 100644 index 00000000000..e9d12127aab --- /dev/null +++ b/src/backend/catalog/pg_amop.h @@ -0,0 +1,546 @@ +/*------------------------------------------------------------------------- + * + * pg_amop.h-- + * definition of the system "amop" relation (pg_amop) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_amop.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. 
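In the pg_amop rows that follow, amopstrategy is the slot an operator fills for its access method. For btree (amstrategies = 5 in pg_am) the conventional numbering runs from less-than to greater-than; the names below mirror the btree strategy constants in the nbtree headers and are stated here as an assumption, since only bare numbers appear in the DATA lines. For example, the int4_ops row "403 426 96 3" puts operator 96 in the equal slot of that operator class.

/*
 * Assumed btree strategy numbering (consistent with amstrategies = 5 for
 * the btree row in pg_am); each pg_amop DATA line below fills one of
 * these slots for one operator class.
 */
#define BTLessStrategyNumber            1
#define BTLessEqualStrategyNumber       2
#define BTEqualStrategyNumber           3
#define BTGreaterEqualStrategyNumber    4
#define BTGreaterStrategyNumber         5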
+ * + *------------------------------------------------------------------------- + */ +#ifndef PG_AMOP_H +#define PG_AMOP_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" +#include "access/istrat.h" + +/* ---------------- + * pg_amop definition. cpp turns this into + * typedef struct FormData_pg_amop + * ---------------- + */ +CATALOG(pg_amop) { + Oid amopid; + Oid amopclaid; + Oid amopopr; + int2 amopstrategy; + regproc amopselect; + regproc amopnpages; +} FormData_pg_amop; + +/* ---------------- + * Form_pg_amop corresponds to a pointer to a tuple with + * the format of pg_amop relation. + * ---------------- + */ +typedef FormData_pg_amop *Form_pg_amop; + +/* ---------------- + * compiler constants for pg_amop + * ---------------- + */ +/* #define Name_pg_amop "pg_amop" */ +#define Natts_pg_amop 6 +#define Anum_pg_amop_amopid 1 +#define Anum_pg_amop_amopclaid 2 +#define Anum_pg_amop_amopopr 3 +#define Anum_pg_amop_amopstrategy 4 +#define Anum_pg_amop_amopselect 5 +#define Anum_pg_amop_amopnpages 6 + +/* ---------------- + * initial contents of pg_amop + * ---------------- + */ + +/* + * rtree box_ops + */ + +DATA(insert OID = 0 ( 402 422 493 1 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 422 494 2 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 422 500 3 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 422 495 4 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 422 496 5 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 422 499 6 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 422 498 7 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 422 497 8 rtsel rtnpage )); + +/* + * rtree bigbox_ops + */ + +DATA(insert OID = 0 ( 402 433 493 1 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 433 494 2 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 433 500 3 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 433 495 4 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 433 496 5 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 433 499 6 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 433 498 7 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 433 497 8 rtsel rtnpage )); + +/* + * rtree poly_ops (supports polygons) + */ + +DATA(insert OID = 0 ( 402 434 485 1 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 434 486 2 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 434 487 3 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 434 488 4 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 434 489 5 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 434 490 6 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 434 491 7 rtsel rtnpage )); +DATA(insert OID = 0 ( 402 434 492 8 rtsel rtnpage )); + +/* + * nbtree int2_ops + */ + +DATA(insert OID = 0 ( 403 421 95 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 421 522 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 421 94 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 421 524 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 421 520 5 btreesel btreenpage )); + +/* + * nbtree float8_ops + */ + +DATA(insert OID = 0 ( 403 423 672 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 423 673 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 423 670 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 423 675 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 423 674 5 btreesel btreenpage )); + +/* + * nbtree int24_ops + */ + +DATA(insert OID = 0 ( 403 424 534 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 424 540 2 btreesel 
btreenpage )); +DATA(insert OID = 0 ( 403 424 532 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 424 542 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 424 536 5 btreesel btreenpage )); + +/* + * nbtree int42_ops + */ + +DATA(insert OID = 0 ( 403 425 535 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 425 541 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 425 533 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 425 543 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 425 537 5 btreesel btreenpage )); + +/* + * nbtree int4_ops + */ + +DATA(insert OID = 0 ( 403 426 97 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 426 523 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 426 96 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 426 525 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 426 521 5 btreesel btreenpage )); + +/* + * nbtree oid_ops + */ + +DATA(insert OID = 0 ( 403 427 609 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 427 611 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 427 607 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 427 612 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 427 610 5 btreesel btreenpage )); + +/* + * nbtree float4_ops + */ + +DATA(insert OID = 0 ( 403 428 622 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 428 624 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 428 620 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 428 625 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 428 623 5 btreesel btreenpage )); + +/* + * nbtree char_ops + */ + +DATA(insert OID = 0 ( 403 429 631 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 429 632 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 429 92 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 429 634 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 429 633 5 btreesel btreenpage )); + +/* + * nbtree char2_ops + */ + +DATA(insert OID = 0 ( 403 406 418 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 406 457 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 406 412 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 406 463 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 406 460 5 btreesel btreenpage )); + +/* + * nbtree char4_ops + */ + +DATA(insert OID = 0 ( 403 407 419 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 407 458 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 407 413 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 407 464 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 407 461 5 btreesel btreenpage )); + +/* + * nbtree char8_ops + */ + +DATA(insert OID = 0 ( 403 408 420 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 408 459 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 408 414 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 408 465 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 408 462 5 btreesel btreenpage )); + +/* + * nbtree name_ops + */ + +DATA(insert OID = 0 ( 403 409 660 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 409 661 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 409 93 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 409 663 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 409 662 5 btreesel btreenpage )); + +/* + * nbtree char16_ops + */ + +DATA(insert OID = 0 ( 403 430 645 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 430 646 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 430 99 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 430 648 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 430 647 
5 btreesel btreenpage )); + +/* + * nbtree text_ops + */ + +DATA(insert OID = 0 ( 403 431 664 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 431 665 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 431 98 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 431 667 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 431 666 5 btreesel btreenpage )); + +/* + * nbtree abstime_ops + */ + +DATA(insert OID = 0 ( 403 432 562 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 432 564 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 432 560 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 432 565 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 432 563 5 btreesel btreenpage )); + +/* + * nbtree oidint4_ops + */ + +DATA(insert OID = 0 ( 403 435 930 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 435 931 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 435 932 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 435 933 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 435 934 5 btreesel btreenpage )); + +/* + * nbtree oidint2_ops + */ + +DATA(insert OID = 0 ( 403 437 830 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 437 831 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 437 832 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 437 833 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 437 834 5 btreesel btreenpage )); + +/* + * nbtree oidname_ops + */ + +DATA(insert OID = 0 ( 403 436 676 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 436 677 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 436 678 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 436 679 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 436 680 5 btreesel btreenpage )); + +/* + * nbtree bpchar_ops + */ + +DATA(insert OID = 0 ( 403 1076 1058 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1076 1059 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1076 1054 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1076 1061 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1076 1060 5 btreesel btreenpage )); + +/* + * nbtree varchar_ops + */ + +DATA(insert OID = 0 ( 403 1077 1066 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1077 1067 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1077 1062 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1077 1069 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1077 1068 5 btreesel btreenpage )); + +/* + * nbtree date_ops + */ + +DATA(insert OID = 0 ( 403 1114 1095 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1114 1096 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1114 1093 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1114 1098 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1114 1097 5 btreesel btreenpage )); + + +/* + * nbtree time_ops + */ + +DATA(insert OID = 0 ( 403 1115 1110 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1115 1111 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1115 1108 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1115 1113 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 403 1115 1112 5 btreesel btreenpage )); + +BKI_BEGIN +#ifdef NOBTREE +BKI_END +/* + * nobtree int2_ops + */ + +DATA(insert OID = 0 ( 404 421 95 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 421 522 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 421 94 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 421 524 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 421 520 5 btreesel btreenpage )); + +/* + * nobtree float8_ops + */ + +DATA(insert OID = 
0 ( 404 423 672 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 423 673 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 423 670 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 423 675 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 423 674 5 btreesel btreenpage )); + +/* + * nobtree int24_ops + */ + +DATA(insert OID = 0 ( 404 424 534 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 424 540 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 424 532 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 424 542 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 424 536 5 btreesel btreenpage )); + +/* + * nobtree int42_ops + */ + +DATA(insert OID = 0 ( 404 425 535 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 425 541 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 425 533 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 425 543 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 425 537 5 btreesel btreenpage )); + +/* + * nobtree int4_ops + */ + +DATA(insert OID = 0 ( 404 426 97 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 426 523 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 426 96 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 426 525 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 426 521 5 btreesel btreenpage )); + +/* + * nobtree oid_ops + */ + +DATA(insert OID = 0 ( 404 427 609 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 427 611 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 427 607 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 427 612 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 427 610 5 btreesel btreenpage )); + +/* + * nobtree float4_ops + */ + +DATA(insert OID = 0 ( 404 428 622 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 428 624 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 428 620 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 428 625 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 428 623 5 btreesel btreenpage )); + +/* + * nobtree char_ops + */ + +DATA(insert OID = 0 ( 404 429 631 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 429 632 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 429 92 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 429 634 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 429 633 5 btreesel btreenpage )); + +/* + * nobtree char2_ops + */ + +DATA(insert OID = 0 ( 404 406 418 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 406 457 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 406 412 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 406 463 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 406 460 5 btreesel btreenpage )); + +/* + * nobtree char4_ops + */ + +DATA(insert OID = 0 ( 404 407 419 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 407 458 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 407 413 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 407 464 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 407 461 5 btreesel btreenpage )); + +/* + * nobtree char8_ops + */ + +DATA(insert OID = 0 ( 404 408 420 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 408 459 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 408 414 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 408 465 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 408 462 5 btreesel btreenpage )); + +/* + * nobtree char16_ops + */ + +DATA(insert OID = 0 ( 404 430 645 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 430 646 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 430 99 3 btreesel btreenpage 
)); +DATA(insert OID = 0 ( 404 430 648 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 430 647 5 btreesel btreenpage )); + +/* + * nobtree name_ops + */ + +DATA(insert OID = 0 ( 404 409 660 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 409 661 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 409 93 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 409 663 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 409 662 5 btreesel btreenpage )); + +/* + * nobtree text_ops + */ + +DATA(insert OID = 0 ( 404 431 664 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 431 665 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 431 98 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 431 667 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 431 666 5 btreesel btreenpage )); + +/* + * nobtree abstime_ops + */ + +DATA(insert OID = 0 ( 404 432 562 1 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 432 564 2 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 432 560 3 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 432 565 4 btreesel btreenpage )); +DATA(insert OID = 0 ( 404 432 563 5 btreesel btreenpage )); + +BKI_BEGIN +#endif /* NOBTREE */ +BKI_END + +/* + * hash table int2_ops + */ +DATA(insert OID = 0 ( 405 421 94 1 hashsel hashnpage )); +/* + * hash table float8_ops + */ +DATA(insert OID = 0 ( 405 423 670 1 hashsel hashnpage )); +/* + * hash table int4_ops + */ +DATA(insert OID = 0 ( 405 426 96 1 hashsel hashnpage )); +/* + * hash table oid_ops + */ +DATA(insert OID = 0 ( 405 427 607 1 hashsel hashnpage )); +/* + * hash table float4_ops + */ +DATA(insert OID = 0 ( 405 428 620 1 hashsel hashnpage )); +/* + * hash table char_ops + */ +DATA(insert OID = 0 ( 405 429 92 1 hashsel hashnpage )); +/* + * hash table char2_ops + */ +DATA(insert OID = 0 ( 405 406 412 1 hashsel hashnpage )); +/* + * hash table char4_ops + */ +DATA(insert OID = 0 ( 405 407 413 1 hashsel hashnpage )); +/* + * hash table char8_ops + */ +DATA(insert OID = 0 ( 405 408 414 1 hashsel hashnpage )); +/* + * hash table char16_ops + */ +DATA(insert OID = 0 ( 405 430 99 1 hashsel hashnpage )); +/* + * hash table name_ops + */ +DATA(insert OID = 0 ( 405 409 93 1 hashsel hashnpage )); +/* + * hash table text_ops + */ +DATA(insert OID = 0 ( 405 431 98 1 hashsel hashnpage )); + +/* + * hash table bpchar_ops + */ +DATA(insert OID = 0 ( 405 1076 1054 1 hashsel hashnpage )); + +/* + * hash table varchar_ops + */ +DATA(insert OID = 0 ( 405 1077 1062 1 hashsel hashnpage )); + + +#endif /* PG_AMOP_H */ diff --git a/src/backend/catalog/pg_amproc.h b/src/backend/catalog/pg_amproc.h new file mode 100644 index 00000000000..cacc2b72701 --- /dev/null +++ b/src/backend/catalog/pg_amproc.h @@ -0,0 +1,134 @@ +/*------------------------------------------------------------------------- + * + * pg_amproc.h-- + * definition of the system "amproc" relation (pg_amproc) + * along with the relation's initial contents. The amproc + * catalog is used to store procedures used by index access + * methods that aren't associated with operators. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_amproc.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements.
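To make the dual-reader convention concrete: genbki.sh pattern-matches the CATALOG()/DATA()/BKI_ lines as plain text in order to emit .bki bootstrap commands, while the C compiler sees only ordinary declarations. A minimal sketch of how the sugar words can be defined for the compiler path follows; these particular definitions are simplified assumptions for illustration (the real ones live in postgres.h and the common C headers), not definitions shipped by this patch.

/*
 * Simplified, assumed definitions of the catalog "sugar" words as the C
 * compiler might see them; genbki.sh never expands these, it scans the
 * raw text of the same lines instead.
 */
#define CppConcat(a, b)   a##b

#define CATALOG(name)     typedef struct CppConcat(FormData_, name)
#define BOOTSTRAP                             /* marker consumed only by genbki.sh */
#define DATA(x)           extern int no_such_variable  /* swallows the trailing ';' */
#define BKI_BEGIN         /* text between BKI_BEGIN and BKI_END is for genbki.sh only */
#define BKI_END

/*
 * With definitions of this kind, the compiler reads
 *
 *     CATALOG(pg_amproc) { ... } FormData_pg_amproc;
 * as  typedef struct FormData_pg_amproc { ... } FormData_pg_amproc;
 *
 * and a line such as
 *
 *     DATA(insert OID = 0 (403 421 350 1));
 * as  extern int no_such_variable;   -- i.e. it is effectively ignored.
 */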
+ * + *------------------------------------------------------------------------- + */ +#ifndef PG_AMPROC_H +#define PG_AMPROC_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_amproc definition. cpp turns this into + * typedef struct FormData_pg_amproc + * ---------------- + */ +CATALOG(pg_amproc) { + Oid amid; + Oid amopclaid; + Oid amproc; + int2 amprocnum; +} FormData_pg_amproc; + +/* ---------------- + * Form_pg_amproc corresponds to a pointer to a tuple with + * the format of pg_amproc relation. + * ---------------- + */ +typedef FormData_pg_amproc *Form_pg_amproc; + +/* ---------------- + * compiler constants for pg_amproc + * ---------------- + */ +#define Natts_pg_amproc 4 +#define Anum_pg_amproc_amid 1 +#define Anum_pg_amproc_amopclaid 2 +#define Anum_pg_amproc_amproc 3 +#define Anum_pg_amproc_amprocnum 4 + +/* ---------------- + * initial contents of pg_amproc + * ---------------- + */ + +DATA(insert OID = 0 (402 422 193 1)); +DATA(insert OID = 0 (402 422 194 2)); +DATA(insert OID = 0 (402 422 195 3)); +DATA(insert OID = 0 (402 433 193 1)); +DATA(insert OID = 0 (402 433 194 2)); +DATA(insert OID = 0 (402 433 196 3)); +DATA(insert OID = 0 (402 434 197 1)); +DATA(insert OID = 0 (402 434 198 2)); +DATA(insert OID = 0 (402 434 199 3)); +DATA(insert OID = 0 (403 421 350 1)); +DATA(insert OID = 0 (403 423 355 1)); +DATA(insert OID = 0 (403 424 353 1)); +DATA(insert OID = 0 (403 425 352 1)); +DATA(insert OID = 0 (403 426 351 1)); +DATA(insert OID = 0 (403 427 356 1)); +DATA(insert OID = 0 (403 428 354 1)); +DATA(insert OID = 0 (403 429 358 1)); +DATA(insert OID = 0 (403 406 689 1)); +DATA(insert OID = 0 (403 407 690 1)); +DATA(insert OID = 0 (403 408 691 1)); +DATA(insert OID = 0 (403 409 359 1)); +DATA(insert OID = 0 (403 430 374 1)); +DATA(insert OID = 0 (403 431 360 1)); +DATA(insert OID = 0 (403 432 357 1)); +DATA(insert OID = 0 (403 435 928 1)); +DATA(insert OID = 0 (403 436 948 1)); +DATA(insert OID = 0 (403 437 828 1)); +DATA(insert OID = 0 (403 1076 1078 1)); +DATA(insert OID = 0 (403 1077 1079 1)); +DATA(insert OID = 0 (403 1114 1092 1)); +DATA(insert OID = 0 (403 1115 1107 1)); + +BKI_BEGIN +#ifdef NOBTREE +BKI_END +DATA(insert OID = 0 (404 421 350 1)); +DATA(insert OID = 0 (404 423 355 1)); +DATA(insert OID = 0 (404 424 353 1)); +DATA(insert OID = 0 (404 425 352 1)); +DATA(insert OID = 0 (404 426 351 1)); +DATA(insert OID = 0 (404 427 356 1)); +DATA(insert OID = 0 (404 428 354 1)); +DATA(insert OID = 0 (404 429 358 1)); +DATA(insert OID = 0 (404 406 689 1)); +DATA(insert OID = 0 (404 407 690 1)); +DATA(insert OID = 0 (404 408 691 1)); +DATA(insert OID = 0 (404 409 359 1)); +DATA(insert OID = 0 (404 430 374 1)); +DATA(insert OID = 0 (404 431 360 1)); +DATA(insert OID = 0 (404 432 357 1)); +BKI_BEGIN +#endif /* NOBTREE */ +BKI_END + +DATA(insert OID = 0 (405 421 449 1)); +DATA(insert OID = 0 (405 423 452 1)); +DATA(insert OID = 0 (405 426 450 1)); +DATA(insert OID = 0 (405 427 453 1)); +DATA(insert OID = 0 (405 428 451 1)); +DATA(insert OID = 0 (405 429 454 1)); +DATA(insert OID = 0 (405 406 692 1)); +DATA(insert OID = 0 (405 407 693 1)); +DATA(insert OID = 0 (405 408 694 1)); +DATA(insert OID = 0 (405 409 455 1)); +DATA(insert OID = 0 (405 430 499 1)); +DATA(insert OID = 0 (405 431 456 1)); +DATA(insert OID = 0 (405 1076 1080 1)); +DATA(insert OID = 0 
(405 1077 1081 1)); + +#endif /* PG_AMPROC_H */ diff --git a/src/backend/catalog/pg_attribute.h b/src/backend/catalog/pg_attribute.h new file mode 100644 index 00000000000..d8133177d52 --- /dev/null +++ b/src/backend/catalog/pg_attribute.h @@ -0,0 +1,512 @@ +/*------------------------------------------------------------------------- + * + * pg_attribute.h-- + * definition of the system "attribute" relation (pg_attribute) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_attribute.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + * utils/cache/relcache.c requires some hard-coded tuple descriptors + * for some of the system catalogs so if the schema for any of + * these changes, be sure and change the appropriate Schema_xxx + * macros! -cim 2/5/91 + * + * fastgetattr() now uses attcacheoff to cache byte offsets of + * attributes in heap tuples. The data actually stored in + * pg_attribute (-1) indicates no cached value. But when we copy + * these tuples into a tuple descriptor, we may then update attcacheoff + * in the copies. This speeds up the attribute walking process. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_ATTRIBUTE_H +#define PG_ATTRIBUTE_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" +#include "access/attnum.h" + +/* ---------------- + * pg_attribute definition. cpp turns this into + * typedef struct FormData_pg_attribute + * + * If you change the following, make sure you change the structs for + * system attributes in heap.c and index.c also. + * ---------------- + */ +CATALOG(pg_attribute) BOOTSTRAP { + Oid attrelid; + NameData attname; + Oid atttypid; + Oid attdefrel; + int4 attnvals; + Oid atttyparg; /* type arg for arrays/spquel/procs */ + int2 attlen; + int2 attnum; + int2 attbound; + bool attbyval; + bool attcanindex; + Oid attproc; /* spquel? */ + int4 attnelems; + int4 attcacheoff; + bool attisset; + char attalign; /* alignment (c=char, s=short, i=int, d=double) */ +} FormData_pg_attribute; + +/* + * someone should figure out how to do this properly. (The problem is + * the size of the C struct is not the same as the size of the tuple.) + */ +#define ATTRIBUTE_TUPLE_SIZE \ + (offsetof(FormData_pg_attribute,attalign) + sizeof(char)) + +/* ---------------- + * Form_pg_attribute corresponds to a pointer to a tuple with + * the format of pg_attribute relation. 
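The attcacheoff mechanism described in the NOTES above amounts to memoizing byte offsets: the value stored in the catalog is -1 (unknown), and the in-memory tuple descriptor copies may be filled in once an offset has been computed, so later lookups skip the walk over the preceding attributes. A toy, self-contained sketch of that idea (it assumes fixed-length, never-null attributes and is not the backend's fastgetattr()):

#include <stdio.h>

#define NATTS 4

/* toy per-attribute state: length in bytes, cached byte offset (-1 = unknown) */
static int attlen[NATTS]      = { 4, 2, 8, 4 };
static int attcacheoff[NATTS] = { -1, -1, -1, -1 };

/* Return the byte offset of attribute attnum (0-based), caching as we go. */
static int att_offset(int attnum)
{
    int i, off;

    if (attcacheoff[attnum] >= 0)
        return attcacheoff[attnum];     /* cache hit: no walk needed */

    off = 0;
    for (i = 0; i < attnum; i++)
        off += attlen[i];               /* walk over the preceding attributes */

    attcacheoff[attnum] = off;          /* remember it for next time */
    return off;
}

int main(void)
{
    printf("offset of attribute 3: %d\n", att_offset(3));  /* walks: 4+2+8 = 14 */
    printf("offset of attribute 3: %d\n", att_offset(3));  /* cached: 14 */
    return 0;
}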
+ * ---------------- + */ +typedef FormData_pg_attribute *AttributeTupleForm; + +/* ---------------- + * compiler constants for pg_attribute + * ---------------- + */ + +#define Natts_pg_attribute 16 +#define Anum_pg_attribute_attrelid 1 +#define Anum_pg_attribute_attname 2 +#define Anum_pg_attribute_atttypid 3 +#define Anum_pg_attribute_attdefrel 4 +#define Anum_pg_attribute_attnvals 5 +#define Anum_pg_attribute_atttyparg 6 +#define Anum_pg_attribute_attlen 7 +#define Anum_pg_attribute_attnum 8 +#define Anum_pg_attribute_attbound 9 +#define Anum_pg_attribute_attbyval 10 +#define Anum_pg_attribute_attcanindex 11 +#define Anum_pg_attribute_attproc 12 +#define Anum_pg_attribute_attnelems 13 +#define Anum_pg_attribute_attcacheoff 14 +#define Anum_pg_attribute_attisset 15 +#define Anum_pg_attribute_attalign 16 + + +/* ---------------- + * SCHEMA_ macros for declaring hardcoded tuple descriptors. + * these are used in utils/cache/relcache.c + * ---------------- +#define SCHEMA_NAME(x) CppConcat(Name_,x) +#define SCHEMA_DESC(x) CppConcat(Desc_,x) +#define SCHEMA_NATTS(x) CppConcat(Natts_,x) +#define SCHEMA_DEF(x) \ + FormData_pg_attribute \ + SCHEMA_DESC(x) [ SCHEMA_NATTS(x) ] = \ + { \ + CppConcat(Schema_,x) \ + } + */ + +/* ---------------- + * initial contents of pg_attribute + * ---------------- + */ + +/* ---------------- + * pg_type schema + * ---------------- + */ +#define Schema_pg_type \ +{ 71l, {"typname"}, 19l, 71l, 0l, 0l, NAMEDATALEN, 1, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 71l, {"typowner"}, 26l, 71l, 0l, 0l, 4, 2, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 71l, {"typlen"}, 21l, 71l, 0l, 0l, 2, 3, 0, '\001', '\001', 0l, 0l, -1l, '\0', 's' }, \ +{ 71l, {"typprtlen"}, 21l, 71l, 0l, 0l, 2, 4, 0, '\001', '\001', 0l, 0l, -1l, '\0', 's' }, \ +{ 71l, {"typbyval"}, 16l, 71l, 0l, 0l, 1, 5, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 71l, {"typtype"}, 18l, 71l, 0l, 0l, 1, 6, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 71l, {"typisdefined"}, 16l, 71l, 0l, 0l, 1, 7, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 71l, {"typdelim"}, 18l, 71l, 0l, 0l, 1, 8, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 71l, {"typrelid"}, 26l, 71l, 0l, 0l, 4, 9, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 71l, {"typelem"}, 26l, 71l, 0l, 0l, 4, 10, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 71l, {"typinput"}, 24l, 71l, 0l, 0l, 4, 11, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 71l, {"typoutput"}, 24l, 71l, 0l, 0l, 4, 12, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 71l, {"typreceive"}, 24l, 71l, 0l, 0l, 4, 13, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 71l, {"typsend"}, 24l, 71l, 0l, 0l, 4, 14, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 71l, {"typalign"}, 18l, 71l, 0l, 0l, 1, 15, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 71l, {"typdefault"}, 25l, 71l, 0l, 0l, -1, 16, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' } + +DATA(insert OID = 0 ( 71 typname 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 typowner 26 0 0 0 4 2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 typlen 21 0 0 0 2 3 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 71 typprtlen 21 0 0 0 2 4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 71 typbyval 16 0 0 0 1 5 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 71 typtype 18 0 0 0 1 6 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 71 typisdefined 16 0 0 0 1 7 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 71 typdelim 18 0 0 0 1 8 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 71 typrelid 26 0 0 0 4 9 0 t t 0 0 -1 f i)); +DATA(insert 
OID = 0 ( 71 typelem 26 0 0 0 4 10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 typinput 26 0 0 0 4 11 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 typoutput 26 0 0 0 4 12 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 typreceive 26 0 0 0 4 13 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 typsend 26 0 0 0 4 14 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 typalign 18 0 0 0 1 15 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 71 typdefault 25 0 0 0 -1 16 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 71 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 71 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 71 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_database + * ---------------- + */ +DATA(insert OID = 0 ( 88 datname 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 datdba 26 0 0 0 4 2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 datpath 25 0 0 0 -1 3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 88 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 88 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 88 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_demon + * ---------------- + */ +DATA(insert OID = 0 ( 76 demserid 26 0 0 0 4 1 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 demname 19 0 0 0 NAMEDATALEN 2 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 demowner 26 0 0 0 4 3 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 demcode 24 0 0 0 4 4 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); + +DATA(insert OID = 0 ( 76 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 76 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 76 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 76 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_proc + * ---------------- + */ +#define Schema_pg_proc \ +{ 81l, {"proname"}, 19l, 81l, 0l, 0l, NAMEDATALEN, 1, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 81l, {"proowner"}, 26l, 81l, 0l, 0l, 4, 2, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 81l, 
{"prolang"}, 26l, 81l, 0l, 0l, 4, 3, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 81l, {"proisinh"}, 16l, 81l, 0l, 0l, 1, 4, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 81l, {"proistrusted"}, 16l, 81l, 0l, 0l, 1, 5, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 81l, {"proiscachable"}, 16l, 81l, 0l, 0l, 1, 6, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 81l, {"pronargs"}, 21l, 81l, 0l, 0l, 2, 7, 0, '\001', '\001', 0l, 0l, -1l, '\0', 's' }, \ +{ 81l, {"proretset"}, 16l, 81l, 0l, 0l, 1, 8, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 81l, {"prorettype"}, 26l, 81l, 0l, 0l, 4, 9, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 81l, {"proargtypes"}, 30l, 81l, 0l, 0l, 32, 10, 0, '\0', '\001', 0l, 0l, \ + -1l, '\0', 'i' }, \ +{ 81l, {"probyte_pct"}, 23l, 81l, 0l, 0l, 4, 11, 0, '\001', '\001', 0l, 0l, \ + -1l, '\0', 'i' }, \ +{ 81l, {"properbyte_cpu"}, 23l, 81l, 0l, 0l, 4, 12, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 81l, {"propercall_cpu"}, 23l, 81l, 0l, 0l, 4, 13, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 81l, {"prooutin_ratio"}, 23l, 81l, 0l, 0l, 4, 14, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 81l, {"prosrc"}, 25l, 81l, 0l, 0l, -1, 15, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 81l, {"probin"}, 17l, 81l, 0l, 0l, -1, 16, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' } + +DATA(insert OID = 0 ( 81 proname 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 proowner 26 0 0 0 4 2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 prolang 26 0 0 0 4 3 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 proisinh 16 0 0 0 1 4 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 81 proistrusted 16 0 0 0 1 5 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 81 proiscachable 16 0 0 0 1 6 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 81 pronargs 21 0 0 0 2 7 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 81 proretset 16 0 0 0 1 8 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 81 prorettype 26 0 0 0 4 9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 proargtypes 30 0 0 0 32 10 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 probyte_pct 23 0 0 0 4 11 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 properbyte_cpu 23 0 0 0 4 12 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 propercall_cpu 23 0 0 0 4 13 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 prooutin_ratio 23 0 0 0 4 14 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 prosrc 25 0 0 0 -1 15 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 probin 17 0 0 0 -1 16 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 81 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 81 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 81 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_server + * ---------------- + */ +DATA(insert OID = 0 ( 82 sername 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 82 serpid 21 0 0 0 2 2 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 82 serport 21 0 0 0 2 3 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 82 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 
( 82 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 82 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 82 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 82 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 82 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 82 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 82 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 82 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 82 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 82 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_user + * ---------------- + */ +DATA(insert OID = 0 ( 86 usename 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 86 usesysid 23 0 0 0 4 2 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 86 usecreatedb 16 0 0 0 1 3 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 86 usetrace 16 0 0 0 1 4 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 86 usesuper 16 0 0 0 1 5 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 86 usecatupd 16 0 0 0 1 6 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 86 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 86 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 86 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 86 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 86 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 86 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 86 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 86 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 86 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 86 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 86 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_group + * ---------------- + */ +DATA(insert OID = 0 ( 87 groname 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 grosysid 23 0 0 0 4 2 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 87 grolist 1007 0 0 0 -1 3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 87 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 87 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 87 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_attribute + * ---------------- + */ +#define Schema_pg_attribute \ +{ 75l, {"attrelid"}, 26l, 75l, 0l, 0l, 4, 1, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 75l, {"attname"}, 19l, 75l, 0l, 0l, NAMEDATALEN, 2, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 75l, {"atttypid"}, 26l, 75l, 0l, 0l, 4, 3, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 75l, {"attdefrel"}, 26l, 75l, 0l, 0l, 4, 4, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 75l, {"attnvals"}, 23l, 75l, 0l, 0l, 4, 5, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 75l, {"atttyparg"}, 26l, 75l, 0l, 0l, 4, 6, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 75l, {"attlen"}, 21l, 75l, 0l, 0l, 2, 7, 0, 
'\001', '\001', 0l, 0l, -1l, '\0', 's' }, \ +{ 75l, {"attnum"}, 21l, 75l, 0l, 0l, 2, 8, 0, '\001', '\001', 0l, 0l, -1l, '\0', 's' }, \ +{ 75l, {"attbound"}, 21l, 75l, 0l, 0l, 2, 9, 0, '\001', '\001', 0l, 0l, -1l, '\0', 's' }, \ +{ 75l, {"attbyval"}, 16l, 75l, 0l, 0l, 1, 10, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 75l, {"attcanindex"}, 16l, 75l, 0l, 0l, 1, 11, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 75l, {"attproc"}, 26l, 75l, 0l, 0l, 4, 12, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 75l, {"attnelems"}, 23l, 75l, 0l, 0l, 4, 13, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 75l, {"attcacheoff"}, 23l, 75l, 0l, 0l, 4, 14, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 75l, {"attisset"}, 16l, 75l, 0l, 0l, 1, 15, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 75l, {"attalign"}, 18l, 75l, 0l, 0l, 1, 16, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' } + +DATA(insert OID = 0 ( 75 attrelid 26 0 0 0 4 1 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 attname 19 0 0 0 NAMEDATALEN 2 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 atttypid 26 0 0 0 4 3 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 attdefrel 26 0 0 0 4 4 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 attnvals 23 0 0 0 4 5 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 atttyparg 26 0 0 0 4 6 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 attlen 21 0 0 0 2 7 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 75 attnum 21 0 0 0 2 8 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 75 attbound 21 0 0 0 2 9 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 75 attbyval 16 0 0 0 1 10 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 75 attcanindex 16 0 0 0 1 11 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 75 attproc 26 0 0 0 4 12 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 attnelems 23 0 0 0 4 13 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 attcacheoff 23 0 0 0 4 14 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 attisset 16 0 0 0 1 15 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 75 attalign 18 0 0 0 1 16 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 75 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 75 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 75 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 75 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_class + * ---------------- + */ +#define Schema_pg_class \ +{ 83l, {"relname"}, 19l, 83l, 0l, 0l, NAMEDATALEN, 1, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 83l, {"reltype"}, 26l, 83l, 0l, 0l, 4, 2, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 83l, {"relowner"}, 26l, 83l, 0l, 0l, 4, 2, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 83l, {"relam"}, 26l, 83l, 0l, 0l, 4, 3, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 83l, {"relpages"}, 23, 83l, 0l, 0l, 4, 4, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 83l, {"reltuples"}, 23, 83l, 0l, 0l, 4, 5, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 83l, {"relexpires"}, 702, 83l, 0l, 0l, 4, 6, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 83l, {"relpreserved"}, 703, 83l, 0l, 0l, 4, 7, 0, '\001', '\001', 0l, 0l, -1l, '\0', 
'i' }, \ +{ 83l, {"relhasindex"}, 16, 83l, 0l, 0l, 1, 8, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 83l, {"relisshared"}, 16, 83l, 0l, 0l, 1, 9, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 83l, {"relkind"}, 18, 83l, 0l, 0l, 1, 10, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 83l, {"relarch"}, 18, 83l, 0l, 0l, 1, 11, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 83l, {"relnatts"}, 21, 83l, 0l, 0l, 2, 12, 0, '\001', '\001', 0l, 0l, -1l, '\0', 's' }, \ +{ 83l, {"relsmgr"}, 210l, 83l, 0l, 0l, 2, 13, 0, '\001', '\001', 0l, 0l, -1l, '\0', 's' }, \ +{ 83l, {"relkey"}, 22, 83l, 0l, 0l, 16, 14, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 83l, {"relkeyop"}, 30, 83l, 0l, 0l, 32, 15, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' }, \ +{ 83l, {"relhasrules"}, 16, 83l, 0l, 0l, 1, 16, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'c' }, \ +{ 83l, {"relacl"}, 1034l, 83l, 0l, 0l, -1, 17, 0, '\0', '\001', 0l, 0l, -1l, '\0', 'i' } + +DATA(insert OID = 0 ( 83 relname 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 reltype 26 0 0 0 4 2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 relowner 26 0 0 0 4 2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 relam 26 0 0 0 4 3 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 relpages 23 0 0 0 4 4 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 reltuples 23 0 0 0 4 5 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 relexpires 702 0 0 0 4 6 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 relpreserved 702 0 0 0 4 7 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 relhasindex 16 0 0 0 1 8 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 83 relisshared 16 0 0 0 1 9 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 83 relkind 18 0 0 0 1 10 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 83 relarch 18 0 0 0 1 11 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 83 relnatts 21 0 0 0 2 12 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 83 relsmgr 210 0 0 0 2 13 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 83 relkey 22 0 0 0 16 14 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 relkeyop 30 0 0 0 32 15 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 relhasrules 16 0 0 0 1 16 0 t t 0 0 -1 f c)); +DATA(insert OID = 0 ( 83 relacl 1034 0 0 0 -1 17 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 83 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 83 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 83 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_magic + * ---------------- + */ +DATA(insert OID = 0 ( 80 magname 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 magvalue 19 0 0 0 NAMEDATALEN 2 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 80 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 80 chain 
27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 80 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + +/* ---------------- + * pg_defaults + * ---------------- + */ +DATA(insert OID = 0 ( 89 defname 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 defvalue 19 0 0 0 NAMEDATALEN 2 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 ctid 27 0 0 0 6 -1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 oid 26 0 0 0 4 -2 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 xmin 28 0 0 0 4 -3 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 cmin 29 0 0 0 2 -4 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 89 xmax 28 0 0 0 4 -5 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 cmax 29 0 0 0 2 -6 0 t t 0 0 -1 f s)); +DATA(insert OID = 0 ( 89 chain 27 0 0 0 6 -7 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 anchor 27 0 0 0 6 -8 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 tmax 702 0 0 0 4 -9 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 tmin 702 0 0 0 4 -10 0 t t 0 0 -1 f i)); +DATA(insert OID = 0 ( 89 vtype 18 0 0 0 1 -11 0 t t 0 0 -1 f c)); + + +/* ---------------- + * pg_hosts - this relation is used to store host based authentication + * info + * + * ---------------- + */ +DATA(insert OID = 0 ( 101 dbName 19 0 0 0 NAMEDATALEN 1 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 101 address 25 0 0 0 -1 2 0 f t 0 0 -1 f i)); +DATA(insert OID = 0 ( 101 mask 25 0 0 0 -1 3 0 f t 0 0 -1 f i)); + +/* ---------------- + * pg_variable - this relation is modified by special purpose access + * method code. The following is garbage but is needed + * so that the reldesc code works properly. + * ---------------- + */ +#define Schema_pg_variable \ +{ 90l, {"varfoo"}, 26l, 90l, 0l, 0l, 4, 1, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' } + +DATA(insert OID = 0 ( 90 varfoo 26 0 0 0 4 1 0 t t 0 0 -1 f i)); + +/* ---------------- + * pg_log - this relation is modified by special purpose access + * method code. The following is garbage but is needed + * so that the reldesc code works properly. + * ---------------- + */ +#define Schema_pg_log \ +{ 99l, {"logfoo"}, 26l, 99l, 0l, 0l, 4, 1, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' } + +DATA(insert OID = 0 ( 99 logfoo 26 0 0 0 4 1 0 t t 0 0 -1 f i)); + +/* ---------------- + * pg_time - this relation is modified by special purpose access + * method code. The following is garbage but is needed + * so that the reldesc code works properly. + * ---------------- + */ +#define Schema_pg_time \ +{ 100l, {"timefoo"}, 26l, 100l, 0l, 0l, 4, 1, 0, '\001', '\001', 0l, 0l, -1l, '\0', 'i' } + +DATA(insert OID = 0 ( 100 timefoo 26 0 0 0 4 1 0 t t 0 0 -1 f i)); + +#endif /* PG_ATTRIBUTE_H */ diff --git a/src/backend/catalog/pg_class.h b/src/backend/catalog/pg_class.h new file mode 100644 index 00000000000..b1adb68be47 --- /dev/null +++ b/src/backend/catalog/pg_class.h @@ -0,0 +1,162 @@ +/*------------------------------------------------------------------------- + * + * pg_class.h-- + * definition of the system "relation" relation (pg_class) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_class.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * ``pg_relation'' is being replaced by ``pg_class''. currently + * we are only changing the name in the catalogs but someday the + * code will be changed too. 
-cim 2/26/90 + * [it finally happens. -ay 11/5/94] + * + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_RELATION_H +#define PG_RELATION_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" +#include "utils/nabstime.h" + +/* ---------------- + * pg_class definition. cpp turns this into + * typedef struct FormData_pg_class + * + * Note: the #if 0, #endif around the BKI_BEGIN.. END block + * below keeps cpp from seeing what is meant for the + * genbki script: pg_relation is now called pg_class, but + * only in the catalogs -cim 2/26/90 + * ---------------- + */ + +/* ---------------- + * This structure is actually variable-length (the last attribute is + * a POSTGRES array). Hence, sizeof(FormData_pg_class) does not + * describe the fixed-length or actual size of the structure. + * FormData_pg_class.relacl may not be correctly aligned, either, + * if aclitem and struct varlena don't align together. Hence, + * you MUST use heap_getattr() to get the relacl field. + * ---------------- + */ +CATALOG(pg_class) BOOTSTRAP { + NameData relname; + Oid reltype; + Oid relowner; + Oid relam; + int4 relpages; + int4 reltuples; + int4 relexpires; /* really used as a abstime, but fudge it for now*/ + int4 relpreserved;/*really used as a reltime, but fudge it for now*/ + bool relhasindex; + bool relisshared; + char relkind; + char relarch; /* 'h' = heavy, 'l' = light, 'n' = no archival*/ + int2 relnatts; + int2 relsmgr; + int28 relkey; /* not used */ + oid8 relkeyop; /* not used */ + bool relhasrules; + aclitem relacl[1]; /* this is here for the catalog */ +} FormData_pg_class; + +#define CLASS_TUPLE_SIZE \ + (offsetof(FormData_pg_class,relhasrules) + sizeof(bool)) + +/* ---------------- + * Form_pg_class corresponds to a pointer to a tuple with + * the format of pg_class relation. + * ---------------- + */ +typedef FormData_pg_class *Form_pg_class; + +/* ---------------- + * compiler constants for pg_class + * ---------------- + */ + +/* ---------------- + * Natts_pg_class_fixed is used to tell routines that insert new + * pg_class tuples (as opposed to replacing old ones) that there's no + * relacl field. 
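CLASS_TUPLE_SIZE above uses the same trick as ATTRIBUTE_TUPLE_SIZE in pg_attribute.h: take offsetof() of the last fixed-width field and add its size, so the variable-length tail (relacl here) is never counted. A stand-alone illustration of the technique, using a made-up struct rather than the real catalog layout:

#include <stddef.h>
#include <stdio.h>

/* made-up catalog-like row: fixed fields followed by a variable-length tail */
typedef struct ToyRow {
    int   a;
    short b;
    char  haskids;      /* last fixed-width field */
    char  tail[1];      /* variable-length part, not counted in the "tuple size" */
} ToyRow;

/* count everything up to and including the last fixed-width field */
#define TOY_TUPLE_SIZE  (offsetof(ToyRow, haskids) + sizeof(char))

int main(void)
{
    printf("sizeof(ToyRow) = %lu\n", (unsigned long) sizeof(ToyRow));  /* includes tail + padding */
    printf("TOY_TUPLE_SIZE = %lu\n", (unsigned long) TOY_TUPLE_SIZE);  /* fixed portion only */
    return 0;
}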
+ * ---------------- + */ +#define Natts_pg_class_fixed 17 +#define Natts_pg_class 18 +#define Anum_pg_class_relname 1 +#define Anum_pg_class_reltype 2 +#define Anum_pg_class_relowner 3 +#define Anum_pg_class_relam 4 +#define Anum_pg_class_relpages 5 +#define Anum_pg_class_reltuples 6 +#define Anum_pg_class_relexpires 7 +#define Anum_pg_class_relpreserved 8 +#define Anum_pg_class_relhasindex 9 +#define Anum_pg_class_relisshared 10 +#define Anum_pg_class_relkind 11 +#define Anum_pg_class_relarch 12 +#define Anum_pg_class_relnatts 13 +#define Anum_pg_class_relsmgr 14 +#define Anum_pg_class_relkey 15 +#define Anum_pg_class_relkeyop 16 +#define Anum_pg_class_relhasrules 17 +#define Anum_pg_class_relacl 18 + +/* ---------------- + * initial contents of pg_class + * ---------------- + */ + +DATA(insert OID = 71 ( pg_type 71 PGUID 0 0 0 0 0 f f r n 16 0 - - f _null_ )); +DATA(insert OID = 75 ( pg_attribute 75 PGUID 0 0 0 0 0 f f r n 16 0 - - f _null_ )); +DATA(insert OID = 76 ( pg_demon 76 PGUID 0 0 0 0 0 f t r n 4 0 - - f _null_ )); +DATA(insert OID = 80 ( pg_magic 80 PGUID 0 0 0 0 0 f t r n 2 0 - - f _null_ )); +DATA(insert OID = 81 ( pg_proc 81 PGUID 0 0 0 0 0 f f r n 16 0 - - f _null_ )); +DATA(insert OID = 82 ( pg_server 82 PGUID 0 0 0 0 0 f t r n 3 0 - - f _null_ )); +DATA(insert OID = 83 ( pg_class 83 PGUID 0 0 0 0 0 f f r n 17 0 - - f _null_ )); +DATA(insert OID = 86 ( pg_user 86 PGUID 0 0 0 0 0 f t r n 6 0 - - f _null_ )); +DATA(insert OID = 87 ( pg_group 87 PGUID 0 0 0 0 0 f t s n 3 0 - - f _null_ )); +DATA(insert OID = 88 ( pg_database 88 PGUID 0 0 0 0 0 f t r n 3 0 - - f _null_ )); +DATA(insert OID = 89 ( pg_defaults 89 PGUID 0 0 0 0 0 f t r n 2 0 - - f _null_ )); +DATA(insert OID = 90 ( pg_variable 90 PGUID 0 0 0 0 0 f t s n 2 0 - - f _null_ )); +DATA(insert OID = 99 ( pg_log 99 PGUID 0 0 0 0 0 f t s n 1 0 - - f _null_ )); +DATA(insert OID = 100 ( pg_time 100 PGUID 0 0 0 0 0 f t s n 1 0 - - f _null_ )); +DATA(insert OID = 101 ( pg_hosts 101 PGUID 0 0 0 0 0 f t s n 3 0 - - f _null_ )); + +#define RelOid_pg_type 71 +#define RelOid_pg_demon 76 +#define RelOid_pg_attribute 75 +#define RelOid_pg_magic 80 +#define RelOid_pg_proc 81 +#define RelOid_pg_server 82 +#define RelOid_pg_class 83 +#define RelOid_pg_user 86 +#define RelOid_pg_group 87 +#define RelOid_pg_database 88 +#define RelOid_pg_defaults 89 +#define RelOid_pg_variable 90 +#define RelOid_pg_log 99 +#define RelOid_pg_time 100 +#define RelOid_pg_hosts 101 + +#define MAX_SYSTEM_RELOID 101 + +#define RELKIND_INDEX 'i' /* secondary index */ +#define RELKIND_RELATION 'r' /* cataloged heap */ +#define RELKIND_SPECIAL 's' /* special (non-heap) */ +#define RELKIND_UNCATALOGED 'u' /* temporary heap */ + +#endif /* PG_RELATION_H */ diff --git a/src/backend/catalog/pg_database.h b/src/backend/catalog/pg_database.h new file mode 100644 index 00000000000..78a657e8d3b --- /dev/null +++ b/src/backend/catalog/pg_database.h @@ -0,0 +1,57 @@ +/*------------------------------------------------------------------------- + * + * pg_database.h-- + * definition of the system "database" relation (pg_database) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_database.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. 
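The RelOid_* values, MAX_SYSTEM_RELOID, and the RELKIND_* codes above lend themselves to simple checks like the hypothetical helpers below; neither helper name is defined by this patch, and both assume pg_class.h has been included.

/* Hypothetical helpers built only on the constants defined above. */

/* the bootstrap-time system catalogs were assigned the low, hand-picked OIDs */
#define IsBootstrapSystemRelOid(reloid)  ((reloid) <= MAX_SYSTEM_RELOID)

static const char *relkind_name(char relkind)
{
    switch (relkind)
    {
        case RELKIND_INDEX:        return "secondary index";
        case RELKIND_RELATION:     return "cataloged heap";
        case RELKIND_SPECIAL:      return "special (non-heap)";
        case RELKIND_UNCATALOGED:  return "temporary heap";
        default:                   return "unknown";
    }
}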
+ * + *------------------------------------------------------------------------- + */ +#ifndef PG_DATABASE_H +#define PG_DATABASE_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_database definition. cpp turns this into + * typedef struct FormData_pg_database + * ---------------- + */ +CATALOG(pg_database) BOOTSTRAP { + NameData datname; + Oid datdba; + text datpath; /* VARIABLE LENGTH FIELD */ +} FormData_pg_database; + +/* ---------------- + * Form_pg_database corresponds to a pointer to a tuple with + * the format of pg_database relation. + * ---------------- + */ +typedef FormData_pg_database *Form_pg_database; + +/* ---------------- + * compiler constants for pg_database + * ---------------- + */ +#define Natts_pg_database 3 +#define Anum_pg_database_datname 1 +#define Anum_pg_database_datdba 2 +#define Anum_pg_database_datpath 3 + + +#endif /* PG_DATABASE_H */ diff --git a/src/backend/catalog/pg_defaults.h b/src/backend/catalog/pg_defaults.h new file mode 100644 index 00000000000..66efb7b3d4b --- /dev/null +++ b/src/backend/catalog/pg_defaults.h @@ -0,0 +1,55 @@ +/*------------------------------------------------------------------------- + * + * pg_defaults.h-- + * definition of the system "defaults" relation (pg_defaults) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_defaults.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_DEFAULTS_H +#define PG_DEFAULTS_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_defaults definition. cpp turns this into + * typedef struct FormData_pg_defaults + * ---------------- + */ +CATALOG(pg_defaults) BOOTSTRAP { + NameData defname; + NameData defvalue; +} FormData_pg_defaults; + +/* ---------------- + * Form_pg_defaults corresponds to a pointer to a tuple with + * the format of pg_defaults relation. + * ---------------- + */ +typedef FormData_pg_defaults *Form_pg_defaults; + +/* ---------------- + * compiler constants for pg_defaults + * ---------------- + */ +#define Natts_pg_defaults 2 +#define Anum_pg_defaults_defname 1 +#define Anum_pg_defaults_defvalue 2 + + +#endif /* PG_DEFAULTS_H */ diff --git a/src/backend/catalog/pg_demon.h b/src/backend/catalog/pg_demon.h new file mode 100644 index 00000000000..1089f571527 --- /dev/null +++ b/src/backend/catalog/pg_demon.h @@ -0,0 +1,58 @@ +/*------------------------------------------------------------------------- + * + * pg_demon.h-- + * definition of the system "demon" relation (pg_demon) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_demon.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. 
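Each catalog header pairs its FormData_ struct with a Form_ pointer typedef, such as Form_pg_database or Form_pg_defaults above; backend code casts the data portion of a fetched heap tuple to that pointer and reads the columns as ordinary struct fields. A minimal sketch, assuming tuple_data already points at the fixed part of a pg_database tuple (in the backend that pointer normally comes from a tuple-access macro, omitted here) and that NameData wraps a fixed-size character array:

#include <stdio.h>

/* assumes postgres.h and pg_database.h are already included */
static void show_database(void *tuple_data)
{
    /* interpret the tuple body using the catalog's struct layout */
    Form_pg_database db = (Form_pg_database) tuple_data;

    printf("database \"%s\" owned by user oid %u\n",
           db->datname.data,            /* NameData: fixed-size name buffer */
           (unsigned) db->datdba);

    /* db->datpath is a variable-length text field; unlike the fixed
     * columns it cannot, in general, simply be read through the struct. */
}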
+ * + *------------------------------------------------------------------------- + */ +#ifndef PG_DEMON_H +#define PG_DEMON_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_demon definition. cpp turns this into + * typedef struct FormData_pg_demon + * ---------------- + */ +CATALOG(pg_demon) BOOTSTRAP { + Oid demserid; + NameData demname; + Oid demowner; + regproc demcode; +} FormData_pg_demon; + +/* ---------------- + * Form_pg_demon corresponds to a pointer to a tuple with + * the format of pg_demon relation. + * ---------------- + */ +typedef FormData_pg_demon *Form_pg_demon; + +/* ---------------- + * compiler constants for pg_demon + * ---------------- + */ +#define Natts_pg_demon 4 +#define Anum_pg_demon_demserid 1 +#define Anum_pg_demon_demname 2 +#define Anum_pg_demon_demowner 3 +#define Anum_pg_demon_demcode 4 + +#endif /* PG_DEMON_H */ diff --git a/src/backend/catalog/pg_group.h b/src/backend/catalog/pg_group.h new file mode 100644 index 00000000000..76d51bec4b3 --- /dev/null +++ b/src/backend/catalog/pg_group.h @@ -0,0 +1,42 @@ +/*------------------------------------------------------------------------- + * + * pg_group.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_group.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_GROUP_H +#define PG_GROUP_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +CATALOG(pg_group) BOOTSTRAP { + NameData groname; + int4 grosysid; + int4 grolist[1]; +} FormData_pg_group; +/* VARIABLE LENGTH STRUCTURE */ + +typedef FormData_pg_group *Form_pg_group; + +#define Natts_pg_group 1 +#define Anum_pg_group_groname 1 +#define Anum_pg_group_grosysid 2 +#define Anum_pg_group_grolist 3 + +#endif /* PG_GROUP_H */ diff --git a/src/backend/catalog/pg_hosts.h b/src/backend/catalog/pg_hosts.h new file mode 100644 index 00000000000..3924c264d00 --- /dev/null +++ b/src/backend/catalog/pg_hosts.h @@ -0,0 +1,44 @@ +/*------------------------------------------------------------------------- + * + * pg_hosts.h-- + * + * the pg_hosts system catalog provides host-based access to the + * backend. Only those hosts that are in the pg_hosts + * + * currently, this table is not used, instead file-based host authentication + * is used + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_hosts.h,v 1.1.1.1 1996/07/09 06:21:16 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + *------------------------------------------------------------------------- + */ + +#ifndef PG_HOSTS_H +#define PG_HOSTS_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. 
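pg_group above is flagged VARIABLE LENGTH STRUCTURE because grolist is declared as a one-element array that really runs past the end of the struct. The usual allocation pattern for that idiom looks like the following stand-alone sketch (a generic illustration of the [1]-array trick, not the backend's code for building pg_group tuples):

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

/* shape of a pg_group-style row: fixed header plus an open-ended int array */
typedef struct ToyGroup {
    char name[32];
    int  sysid;
    int  list[1];       /* declared with one element, used with 'nmembers' */
} ToyGroup;

static ToyGroup *make_group(const char *name, int sysid,
                            const int *members, int nmembers)
{
    /* allocate the fixed part plus room for all members */
    ToyGroup *g = malloc(offsetof(ToyGroup, list) + nmembers * sizeof(int));

    if (g == NULL)
        return NULL;
    strncpy(g->name, name, sizeof(g->name) - 1);
    g->name[sizeof(g->name) - 1] = '\0';
    g->sysid = sysid;
    memcpy(g->list, members, nmembers * sizeof(int));
    return g;
}

int main(void)
{
    int ids[3] = { 10, 11, 12 };
    ToyGroup *g = make_group("staff", 42, ids, 3);

    free(g);
    return 0;
}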
+ * ---------------- + */ +#include "postgres.h" + +CATALOG(pg_hosts) BOOTSTRAP { + NameData dbName; + text address; + text mask; +} FormData_pg_hosts; + +typedef FormData_pg_hosts *Form_pg_hosts; +#define Natts_pg_hosts 3 +#define Anum_pg_hosts_dbName 1 +#define Anum_pg_hosts_address 2 +#define Anum_pg_hosts_mask 3 + +#endif /* PG_HOSTS_H */ diff --git a/src/backend/catalog/pg_index.h b/src/backend/catalog/pg_index.h new file mode 100644 index 00000000000..da75b025bcb --- /dev/null +++ b/src/backend/catalog/pg_index.h @@ -0,0 +1,71 @@ +/*------------------------------------------------------------------------- + * + * pg_index.h-- + * definition of the system "index" relation (pg_index) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_index.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_INDEX_H +#define PG_INDEX_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_index definition. cpp turns this into + * typedef struct FormData_pg_index. The oid of the index relation + * is stored in indexrelid; the oid of the indexed relation is stored + * in indrelid. + * ---------------- + */ +CATALOG(pg_index) { + Oid indexrelid; + Oid indrelid; + Oid indproc; /* registered procedure for functional index */ + int28 indkey; + oid8 indclass; + bool indisclustered; + bool indisarchived; + text indpred; /* query plan for partial index predicate */ +} FormData_pg_index; + +#define INDEX_MAX_KEYS 8 /* maximum number of keys in an index definition */ + +/* ---------------- + * Form_pg_index corresponds to a pointer to a tuple with + * the format of pg_index relation. + * ---------------- + */ +typedef FormData_pg_index *IndexTupleForm; + +/* ---------------- + * compiler constants for pg_index + * ---------------- + */ +#define Natts_pg_index 8 +#define Anum_pg_index_indexrelid 1 +#define Anum_pg_index_indrelid 2 +#define Anum_pg_index_indproc 3 +#define Anum_pg_index_indkey 4 +#define Anum_pg_index_indclass 5 +#define Anum_pg_index_indisclustered 6 +#define Anum_pg_index_indisarchived 7 +#define Anum_pg_index_indpred 8 + + +#endif /* PG_INDEX_H */ diff --git a/src/backend/catalog/pg_inheritproc.h b/src/backend/catalog/pg_inheritproc.h new file mode 100644 index 00000000000..1527e992868 --- /dev/null +++ b/src/backend/catalog/pg_inheritproc.h @@ -0,0 +1,59 @@ +/*------------------------------------------------------------------------- + * + * pg_inheritproc.h-- + * definition of the system "inheritproc" relation (pg_inheritproc) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_inheritproc.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. 
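The key columns of an index live in the fixed-width indkey vector above, which always has INDEX_MAX_KEYS slots; by convention the unused trailing slots hold attribute number 0, so readers stop at the first zero. A small sketch of that scan, treating int28 as an array of eight 16-bit attribute numbers (both the element width and the zero-fill convention are assumptions here rather than something this header spells out):

#include <stdio.h>

#define INDEX_MAX_KEYS 8      /* mirrors the value defined in pg_index.h */

/* count the key columns actually used by one index definition */
static int count_index_keys(const short indkey[INDEX_MAX_KEYS])
{
    int n = 0;

    while (n < INDEX_MAX_KEYS && indkey[n] != 0)
        n++;
    return n;
}

int main(void)
{
    /* a two-column index on attributes 3 and 1; remaining slots unused */
    short indkey[INDEX_MAX_KEYS] = { 3, 1, 0, 0, 0, 0, 0, 0 };

    printf("index uses %d key column(s)\n", count_index_keys(indkey));
    return 0;
}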
+ * + *------------------------------------------------------------------------- + */ +#ifndef PG_INHERITPROC_H +#define PG_INHERITPROC_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_inheritproc definition. cpp turns this into + * typedef struct FormData_pg_inheritproc + * ---------------- + */ +CATALOG(pg_inheritproc) { + NameData inhproname; + Oid inhargrel; + Oid inhdefrel; + Oid inhproc; +} FormData_pg_inheritproc; + +/* ---------------- + * Form_pg_inheritproc corresponds to a pointer to a tuple with + * the format of pg_inheritproc relation. + * ---------------- + */ +typedef FormData_pg_inheritproc *Form_pg_inheritproc; + +/* ---------------- + * compiler constants for pg_inheritproc + * ---------------- + */ +#define Natts_pg_inheritproc 4 +#define Anum_pg_inheritproc_inhproname 1 +#define Anum_pg_inheritproc_inhargrel 2 +#define Anum_pg_inheritproc_inhdefrel 3 +#define Anum_pg_inheritproc_inhproc 4 + + +#endif /* PG_INHERITPROC_H */ diff --git a/src/backend/catalog/pg_inherits.h b/src/backend/catalog/pg_inherits.h new file mode 100644 index 00000000000..1caa1cd0178 --- /dev/null +++ b/src/backend/catalog/pg_inherits.h @@ -0,0 +1,57 @@ +/*------------------------------------------------------------------------- + * + * pg_inherits.h-- + * definition of the system "inherits" relation (pg_inherits) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_inherits.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_INHERITS_H +#define PG_INHERITS_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_inherits definition. cpp turns this into + * typedef struct FormData_pg_inherits + * ---------------- + */ +CATALOG(pg_inherits) { + Oid inhrel; + Oid inhparent; + int4 inhseqno; +} FormData_pg_inherits; + +/* ---------------- + * Form_pg_inherits corresponds to a pointer to a tuple with + * the format of pg_inherits relation. + * ---------------- + */ +typedef FormData_pg_inherits *InheritsTupleForm; + +/* ---------------- + * compiler constants for pg_inherits + * ---------------- + */ +#define Natts_pg_inherits 3 +#define Anum_pg_inherits_inhrel 1 +#define Anum_pg_inherits_inhparent 2 +#define Anum_pg_inherits_inhseqno 3 + + +#endif /* PG_INHERITS_H */ diff --git a/src/backend/catalog/pg_ipl.h b/src/backend/catalog/pg_ipl.h new file mode 100644 index 00000000000..df90cd42cef --- /dev/null +++ b/src/backend/catalog/pg_ipl.h @@ -0,0 +1,57 @@ +/*------------------------------------------------------------------------- + * + * pg_ipl.h-- + * definition of the system "ipl" relation (pg_ipl) + * along with the relation's initial contents. 
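pg_inherits above stores one row per direct (child, parent) link, with inhseqno recording the position of that parent in the child's declaration order. As a toy model of reading such rows back, working on an in-memory array instead of going through the catalog access routines:

#include <stdio.h>

typedef struct ToyInherits {
    unsigned inhrel;        /* oid of the child relation */
    unsigned inhparent;     /* oid of one of its parents */
    int      inhseqno;      /* position of that parent in the child's list */
} ToyInherits;

/* print the direct parents of 'child' in declaration order,
 * assuming the rows are already sorted by (inhrel, inhseqno) */
static void print_parents(const ToyInherits *rows, int nrows, unsigned child)
{
    int i;

    for (i = 0; i < nrows; i++)
        if (rows[i].inhrel == child)
            printf("parent #%d: oid %u\n", rows[i].inhseqno, rows[i].inhparent);
}

int main(void)
{
    ToyInherits rows[] = {
        { 5001, 4001, 1 },      /* 5001 inherits first from 4001 ... */
        { 5001, 4002, 2 },      /* ... and then from 4002 */
        { 5002, 4001, 1 },
    };

    print_parents(rows, 3, 5001);
    return 0;
}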
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_ipl.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_IPL_H +#define PG_IPL_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_ipl definition. cpp turns this into + * typedef struct FormData_pg_ipl + * ---------------- + */ +CATALOG(pg_ipl) { + Oid iplrel; + Oid iplipl; + int4 iplseqno; +} FormData_pg_ipl; + +/* ---------------- + * Form_pg_ipl corresponds to a pointer to a tuple with + * the format of pg_ipl relation. + * ---------------- + */ +typedef FormData_pg_ipl *Form_pg_ipl; + +/* ---------------- + * compiler constants for pg_ipl + * ---------------- + */ +#define Natts_pg_ipl 3 +#define Anum_pg_ipl_iplrel 1 +#define Anum_pg_ipl_iplipl 2 +#define Anum_pg_ipl_iplseqno 3 + + +#endif /* PG_IPL_H */ diff --git a/src/backend/catalog/pg_language.h b/src/backend/catalog/pg_language.h new file mode 100644 index 00000000000..7e5a31af7ae --- /dev/null +++ b/src/backend/catalog/pg_language.h @@ -0,0 +1,75 @@ +/*------------------------------------------------------------------------- + * + * pg_language.h-- + * definition of the system "language" relation (pg_language) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_language.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_LANGUAGE_H +#define PG_LANGUAGE_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_language definition. cpp turns this into + * typedef struct FormData_pg_language + * ---------------- + */ +CATALOG(pg_language) { + NameData lanname; + text lancompiler; /* VARIABLE LENGTH FIELD */ +} FormData_pg_language; + +/* ---------------- + * Form_pg_language corresponds to a pointer to a tuple with + * the format of pg_language relation. 
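The initial-contents block just below pins the OIDs of the built-in languages (11 through 14) so that other backend code can test a row's language field against the #define'd constants instead of doing a name lookup. A minimal sketch of that pattern, not part of the patch; the helper is hypothetical and only shows how INTERNALlanguageId, ClanguageId and SQLlanguageId are meant to be used:

    /* Hypothetical helper, for illustration only. */
    #include "postgres.h"
    #include "catalog/pg_language.h"

    /* Does this pg_proc prolang value name one of the wired-in languages? */
    static bool
    language_is_builtin(Oid prolang)
    {
        return prolang == INTERNALlanguageId ||
               prolang == ClanguageId ||
               prolang == SQLlanguageId;
    }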
+ * ---------------- + */ +typedef FormData_pg_language *Form_pg_language; + +/* ---------------- + * compiler constants for pg_language + * ---------------- + */ +#define Natts_pg_language 2 +#define Anum_pg_language_lanname 1 +#define Anum_pg_language_lancompiler 2 + +/* ---------------- + * initial contents of pg_language + * ---------------- + */ + +DATA(insert OID = 11 ( internal "n/a" )); +#define INTERNALlanguageId 11 +DATA(insert OID = 12 ( lisp "/usr/ucb/liszt" )); +DATA(insert OID = 13 ( "C" "/bin/cc" )); +#define ClanguageId 13 +DATA(insert OID = 14 ( "sql" "postgres")); +#define SQLlanguageId 14 + + +#endif /* PG_LANGUAGE_H */ + + + + + + + diff --git a/src/backend/catalog/pg_listener.h b/src/backend/catalog/pg_listener.h new file mode 100644 index 00000000000..05e077ec53b --- /dev/null +++ b/src/backend/catalog/pg_listener.h @@ -0,0 +1,56 @@ +/*------------------------------------------------------------------------- + * + * pg_listener.h-- + * Asynchronous notification + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_listener.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_LISTENER_H +#define PG_LISTENER_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------------------------------------------------------- + * pg_listener definition. + * + * cpp turns this into typedef struct FormData_pg_listener + * ---------------------------------------------------------------- + */ + +CATALOG(pg_listener) { + NameData relname; + int4 listenerpid; + int4 notification; +} FormData_pg_listener; + +/* ---------------- + * compiler constants for pg_listener + * ---------------- + */ +#define Natts_pg_listener 3 +#define Anum_pg_listener_relname 1 +#define Anum_pg_listener_pid 2 +#define Anum_pg_listener_notify 3 + +/* ---------------- + * initial contents of pg_listener are NOTHING. + * ---------------- + */ + + +#endif /* PG_LISTENER_H */ diff --git a/src/backend/catalog/pg_log.h b/src/backend/catalog/pg_log.h new file mode 100644 index 00000000000..987825a7769 --- /dev/null +++ b/src/backend/catalog/pg_log.h @@ -0,0 +1,40 @@ +/*------------------------------------------------------------------------- + * + * pg_log.h-- + * the system log relation "pg_log" is not a "heap" relation. + * it is automatically created by the transam/ code and the + * information here is all bogus and is just here to make the + * relcache code happy. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_log.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * The structures and macros used by the transam/ code + * to access pg_log should some day go here -cim 6/18/90 + * + *------------------------------------------------------------------------- + */ +#ifndef PG_LOG_H +#define PG_LOG_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. 
+ * ---------------- + */ +#include "postgres.h" + +CATALOG(pg_log) BOOTSTRAP { + Oid logfoo; +} FormData_pg_log; + +typedef FormData_pg_log *Form_pg_log; + +#define Natts_pg_log 1 +#define Anum_pg_log_logfoo 1 + +#endif /* PG_LOG_H */ diff --git a/src/backend/catalog/pg_magic.h b/src/backend/catalog/pg_magic.h new file mode 100644 index 00000000000..c5e0d98491d --- /dev/null +++ b/src/backend/catalog/pg_magic.h @@ -0,0 +1,54 @@ +/*------------------------------------------------------------------------- + * + * pg_magic.h-- + * definition of the system "magic" relation (pg_magic) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_magic.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_MAGIC_H +#define PG_MAGIC_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_magic definition. cpp turns this into + * typedef struct FormData_pg_magic + * ---------------- + */ +CATALOG(pg_magic) BOOTSTRAP { + NameData magname; + NameData magvalue; +} FormData_pg_magic; + +/* ---------------- + * Form_pg_magic corresponds to a pointer to a tuple with + * the format of pg_magic relation. + * ---------------- + */ +typedef FormData_pg_magic *Form_pg_magic; + +/* ---------------- + * compiler constants for pg_magic + * ---------------- + */ +#define Natts_pg_magic 2 +#define Anum_pg_magic_magname 1 +#define Anum_pg_magic_magvalue 2 + +#endif /* PG_MAGIC_H */ diff --git a/src/backend/catalog/pg_opclass.h b/src/backend/catalog/pg_opclass.h new file mode 100644 index 00000000000..46aecd35c62 --- /dev/null +++ b/src/backend/catalog/pg_opclass.h @@ -0,0 +1,85 @@ +/*------------------------------------------------------------------------- + * + * pg_opclass.h-- + * definition of the system "opclass" relation (pg_opclass) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_opclass.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_OPCLASS_H +#define PG_OPCLASS_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_opclass definition. cpp turns this into + * typedef struct FormData_pg_opclass + * ---------------- + */ + +CATALOG(pg_opclass) { + NameData opcname; +} FormData_pg_opclass; + +/* ---------------- + * Form_pg_opclass corresponds to a pointer to a tuple with + * the format of pg_opclass relation. 
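Rows of pg_opclass are what the oid8 indclass vector of pg_index (earlier in this patch) points at: one operator-class OID per key column, parallel to the int28 indkey vector of attribute numbers, with both vectors bounded by INDEX_MAX_KEYS. The sketch below is illustrative only; the attribute numbers are invented, and the opclass OIDs are taken from the initial contents that follow (int4_ops = 426, name_ops = 409):

    /* Hypothetical two-column index over attributes 2 and 5 of some relation,
     * keyed with int4_ops and name_ops; unused slots stay zero.
     *
     *   indkey   = { 2, 5, 0, 0, 0, 0, 0, 0 }      int28: attribute numbers
     *   indclass = { 426, 409, 0, 0, 0, 0, 0, 0 }  oid8:  pg_opclass row OIDs
     *                ^^^  ^^^
     *                INT4_OPS_OID   name_ops
     */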
+ * ---------------- + */ +typedef FormData_pg_opclass *Form_pg_opclass; + +/* ---------------- + * compiler constants for pg_opclass + * ---------------- + */ +#define Natts_pg_opclass 1 +#define Anum_pg_opclass_opcname 1 + +/* ---------------- + * initial contents of pg_opclass + * ---------------- + */ + +DATA(insert OID = 406 ( char2_ops )); +DATA(insert OID = 407 ( char4_ops )); +DATA(insert OID = 408 ( char8_ops )); +DATA(insert OID = 409 ( name_ops )); +DATA(insert OID = 421 ( int2_ops )); +DATA(insert OID = 422 ( box_ops )); +DATA(insert OID = 423 ( float8_ops )); +DATA(insert OID = 424 ( int24_ops )); +DATA(insert OID = 425 ( int42_ops )); +DATA(insert OID = 426 ( int4_ops )); +#define INT4_OPS_OID 426 +DATA(insert OID = 427 ( oid_ops )); +DATA(insert OID = 428 ( float4_ops )); +DATA(insert OID = 429 ( char_ops )); +DATA(insert OID = 430 ( char16_ops )); +DATA(insert OID = 431 ( text_ops )); +DATA(insert OID = 432 ( abstime_ops )); +DATA(insert OID = 433 ( bigbox_ops)); +DATA(insert OID = 434 ( poly_ops)); +DATA(insert OID = 435 ( oidint4_ops)); +DATA(insert OID = 436 ( oidname_ops)); +DATA(insert OID = 437 ( oidint2_ops)); +DATA(insert OID = 1076 ( bpchar_ops)); +DATA(insert OID = 1077 ( varchar_ops)); +DATA(insert OID = 1114 ( date_ops)); +DATA(insert OID = 1115 ( time_ops)); + +#endif /* PG_OPCLASS_H */ diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c new file mode 100644 index 00000000000..27842971299 --- /dev/null +++ b/src/backend/catalog/pg_operator.c @@ -0,0 +1,1077 @@ +/*------------------------------------------------------------------------- + * + * pg_operator.c-- + * routines to support manipulation of the pg_operator relation + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/catalog/pg_operator.c,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * these routines moved here from commands/define.c and somewhat cleaned up. 
+ * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "access/htup.h" +#include "utils/rel.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "parser/catalog_utils.h" + +#include "catalog/catname.h" +#include "utils/syscache.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_proc.h" +#include "storage/bufmgr.h" + +#include "fmgr.h" + +static Oid OperatorGetWithOpenRelation(Relation pg_operator_desc, + char *operatorName, + Oid leftObjectId, + Oid rightObjectId ); +static Oid OperatorGet(char *operatorName, + char *leftTypeName, + char *rightTypeName ); + +static Oid OperatorShellMakeWithOpenRelation(Relation pg_operator_desc, + char *operatorName, + Oid leftObjectId, + Oid rightObjectId ); +static Oid OperatorShellMake(char *operatorName, + char *leftTypeName, + char *rightTypeName ); + +static void OperatorDef(char *operatorName, + int definedOK, + char *leftTypeName, + char *rightTypeName, + char *procedureName, + uint16 precedence, + bool isLeftAssociative, + char *commutatorName, + char *negatorName, + char *restrictionName, + char *oinName, + bool canHash, + char *leftSortName, + char *rightSortName ); +static void OperatorUpd(Oid baseId , Oid commId , Oid negId ); + +/* ---------------------------------------------------------------- + * OperatorGetWithOpenRelation + * + * preforms a scan on pg_operator for an operator tuple + * with given name and left/right type oids. + * ---------------------------------------------------------------- + * pg_operator_desc -- reldesc for pg_operator + * operatorName -- name of operator to fetch + * leftObjectId -- left oid of operator to fetch + * rightObjectId -- right oid of operator to fetch + */ +static Oid +OperatorGetWithOpenRelation(Relation pg_operator_desc, + char *operatorName, + Oid leftObjectId, + Oid rightObjectId) +{ + HeapScanDesc pg_operator_scan; + Oid operatorObjectId; + HeapTuple tup; + + static ScanKeyData opKey[3] = { + { 0, Anum_pg_operator_oprname, NameEqualRegProcedure }, + { 0, Anum_pg_operator_oprleft, ObjectIdEqualRegProcedure }, + { 0, Anum_pg_operator_oprright, ObjectIdEqualRegProcedure }, + }; + + fmgr_info(NameEqualRegProcedure, + &opKey[0].sk_func, &opKey[0].sk_nargs); + fmgr_info(ObjectIdEqualRegProcedure, + &opKey[1].sk_func, &opKey[1].sk_nargs); + fmgr_info(ObjectIdEqualRegProcedure, + &opKey[2].sk_func, &opKey[2].sk_nargs); + + /* ---------------- + * form scan key + * ---------------- + */ + opKey[0].sk_argument = PointerGetDatum(operatorName); + opKey[1].sk_argument = ObjectIdGetDatum(leftObjectId); + opKey[2].sk_argument = ObjectIdGetDatum(rightObjectId); + + /* ---------------- + * begin the scan + * ---------------- + */ + pg_operator_scan = heap_beginscan(pg_operator_desc, + 0, + SelfTimeQual, + 3, + opKey); + + /* ---------------- + * fetch the operator tuple, if it exists, and determine + * the proper return oid value. + * ---------------- + */ + tup = heap_getnext(pg_operator_scan, 0, (Buffer *) 0); + operatorObjectId = HeapTupleIsValid(tup) ? tup->t_oid : InvalidOid; + + /* ---------------- + * close the scan and return the oid. + * ---------------- + */ + heap_endscan(pg_operator_scan); + + return + operatorObjectId; +} + +/* ---------------------------------------------------------------- + * OperatorGet + * + * finds the operator associated with the specified name + * and left and right type names. 
+ * ---------------------------------------------------------------- + */ +static Oid +OperatorGet(char *operatorName, + char *leftTypeName, + char *rightTypeName) +{ + Relation pg_operator_desc; + + Oid operatorObjectId; + Oid leftObjectId = InvalidOid; + Oid rightObjectId = InvalidOid; + bool leftDefined = false; + bool rightDefined = false; + + /* ---------------- + * look up the operator types. + * + * Note: types must be defined before operators + * ---------------- + */ + if (leftTypeName) { + leftObjectId = TypeGet(leftTypeName, &leftDefined); + + if (!OidIsValid(leftObjectId) || !leftDefined) + elog(WARN, "OperatorGet: left type '%s' nonexistent",leftTypeName); + } + + if (rightTypeName) { + rightObjectId = TypeGet(rightTypeName, &rightDefined); + + if (!OidIsValid(rightObjectId) || !rightDefined) + elog(WARN, "OperatorGet: right type '%s' nonexistent", + rightTypeName); + } + + if (!((OidIsValid(leftObjectId) && leftDefined) || + (OidIsValid(rightObjectId) && rightDefined))) + elog(WARN, "OperatorGet: no argument types??"); + + /* ---------------- + * open the pg_operator relation + * ---------------- + */ + pg_operator_desc = heap_openr(OperatorRelationName); + + /* ---------------- + * get the oid for the operator with the appropriate name + * and left/right types. + * ---------------- + */ + operatorObjectId = OperatorGetWithOpenRelation(pg_operator_desc, + operatorName, + leftObjectId, + rightObjectId); + + /* ---------------- + * close the relation and return the operator oid. + * ---------------- + */ + heap_close(pg_operator_desc); + + return + operatorObjectId; +} + +/* ---------------------------------------------------------------- + * OperatorShellMakeWithOpenRelation + * + * ---------------------------------------------------------------- + */ +static Oid +OperatorShellMakeWithOpenRelation(Relation pg_operator_desc, + char *operatorName, + Oid leftObjectId, + Oid rightObjectId) +{ + register int i; + HeapTuple tup; + Datum values[ Natts_pg_operator ]; + char nulls[ Natts_pg_operator ]; + Oid operatorObjectId; + TupleDesc tupDesc; + + /* ---------------- + * initialize our nulls[] and values[] arrays + * ---------------- + */ + for (i = 0; i < Natts_pg_operator; ++i) { + nulls[i] = ' '; + values[i] = (Datum)NULL; /* redundant, but safe */ + } + + /* ---------------- + * initialize values[] with the type name and + * ---------------- + */ + i = 0; + values[i++] = PointerGetDatum(operatorName); + values[i++] = ObjectIdGetDatum(InvalidOid); + values[i++] = (Datum) (uint16) 0; + + values[i++] = (Datum)'b'; /* fill oprkind with a bogus value */ + + values[i++] = (Datum) (bool) 0; + values[i++] = (Datum) (bool) 0; + values[i++] = ObjectIdGetDatum(leftObjectId); /* <-- left oid */ + values[i++] = ObjectIdGetDatum(rightObjectId); /* <-- right oid */ + values[i++] = ObjectIdGetDatum(InvalidOid); + values[i++] = ObjectIdGetDatum(InvalidOid); + values[i++] = ObjectIdGetDatum(InvalidOid); + values[i++] = ObjectIdGetDatum(InvalidOid); + values[i++] = ObjectIdGetDatum(InvalidOid); + values[i++] = ObjectIdGetDatum(InvalidOid); + values[i++] = ObjectIdGetDatum(InvalidOid); + values[i++] = ObjectIdGetDatum(InvalidOid); + + /* ---------------- + * create a new operator tuple + * ---------------- + */ + tupDesc = pg_operator_desc->rd_att; + + tup = heap_formtuple(tupDesc, + values, + nulls); + + /* ---------------- + * insert our "shell" operator tuple and + * close the relation + * ---------------- + */ + heap_insert(pg_operator_desc, tup); + operatorObjectId = tup->t_oid; + + /* 
---------------- + * free the tuple and return the operator oid + * ---------------- + */ + pfree(tup); + + return + operatorObjectId; +} + +/* ---------------------------------------------------------------- + * OperatorShellMake + * + * Specify operator name and left and right type names, + * fill an operator struct with this info and NULL's, + * call heap_insert and return the Oid + * to the caller. + * ---------------------------------------------------------------- + */ +static Oid +OperatorShellMake(char *operatorName, + char *leftTypeName, + char *rightTypeName) +{ + Relation pg_operator_desc; + Oid operatorObjectId; + + Oid leftObjectId = InvalidOid; + Oid rightObjectId = InvalidOid; + bool leftDefined = false; + bool rightDefined = false; + + /* ---------------- + * get the left and right type oid's for this operator + * ---------------- + */ + if (leftTypeName) + leftObjectId = TypeGet(leftTypeName, &leftDefined); + + if (rightTypeName) + rightObjectId = TypeGet(rightTypeName, &rightDefined); + + if (!((OidIsValid(leftObjectId) && leftDefined) || + (OidIsValid(rightObjectId) && rightDefined))) + elog(WARN, "OperatorShellMake: no valid argument types??"); + + /* ---------------- + * open pg_operator + * ---------------- + */ + pg_operator_desc = heap_openr(OperatorRelationName); + + /* ---------------- + * add a "shell" operator tuple to the operator relation + * and recover the shell tuple's oid. + * ---------------- + */ + operatorObjectId = + OperatorShellMakeWithOpenRelation(pg_operator_desc, + operatorName, + leftObjectId, + rightObjectId); + /* ---------------- + * close the operator relation and return the oid. + * ---------------- + */ + heap_close(pg_operator_desc); + + return + operatorObjectId; +} + +/* -------------------------------- + * OperatorDef + * + * This routine gets complicated because it allows the user to + * specify operators that do not exist. For example, if operator + * "op" is being defined, the negator operator "negop" and the + * commutator "commop" can also be defined without specifying + * any information other than their names. Since in order to + * add "op" to the PG_OPERATOR catalog, all the Oid's for these + * operators must be placed in the fields of "op", a forward + * declaration is done on the commutator and negator operators. + * This is called creating a shell, and its main effect is to + * create a tuple in the PG_OPERATOR catalog with minimal + * information about the operator (just its name and types). + * Forward declaration is used only for this purpose, it is + * not available to the user as it is for type definition. 
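Concretely, a single OperatorCreate() call that names a commutator with no existing pg_operator row leaves two rows behind: a complete one for the operator being defined and a shell for the commutator, which a second OperatorDef() call (with definedOK set) then rewrites in place. The call below is hypothetical and not part of the patch; the operator names are invented, while the type, procedure and selectivity names come from the initial catalog contents:

    /* Hypothetical: "<<<" and its commutator ">>>" as new int4 operators. */
    OperatorCreate("<<<", "int4", "int4", "int4lt", 0, true,
                   ">>>",                  /* commutator: no such row yet */
                   (char *) NULL,          /* no negator                  */
                   "intltsel", "intltjoinsel",
                   false,                  /* canHash                     */
                   (char *) NULL, (char *) NULL);
    /*
     * 1. OperatorDef("<<<", definedOK = 0, ...) sees that ">>>" is missing,
     *    calls OperatorShellMake(">>>", ...), and stores the shell's OID in
     *    the oprcom field of the new "<<<" row.
     * 2. Back in OperatorCreate(), commObjectId was invalid, so OperatorDef()
     *    runs again for ">>>" with definedOK = 1; that call finds the shell
     *    by (name, oprleft, oprright) and overwrites it in place with
     *    heap_modifytuple() and heap_replace().
     */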
+ * + * Algorithm: + * + * check if operator already defined + * if so issue error if not definedOk, this is a duplicate + * but if definedOk, save the Oid -- filling in a shell + * get the attribute types from relation descriptor for pg_operator + * assign values to the fields of the operator: + * operatorName + * owner id (simply the user id of the caller) + * precedence + * operator "kind" either "b" for binary or "l" for left unary + * isLeftAssociative boolean + * canHash boolean + * leftTypeObjectId -- type must already be defined + * rightTypeObjectId -- this is optional, enter ObjectId=0 if none specified + * resultType -- defer this, since it must be determined from + * the pg_procedure catalog + * commutatorObjectId -- if this is NULL, enter ObjectId=0 + * else if this already exists, enter it's ObjectId + * else if this does not yet exist, and is not + * the same as the main operatorName, then create + * a shell and enter the new ObjectId + * else if this does not exist but IS the same + * name as the main operator, set the ObjectId=0. + * Later OperatorCreate will make another call + * to OperatorDef which will cause this field + * to be filled in (because even though the names + * will be switched, they are the same name and + * at this point this ObjectId will then be defined) + * negatorObjectId -- same as for commutatorObjectId + * leftSortObjectId -- same as for commutatorObjectId + * rightSortObjectId -- same as for commutatorObjectId + * operatorProcedure -- must access the pg_procedure catalog to get the + * ObjectId of the procedure that actually does the operator + * actions this is required. Do an amgetattr to find out the + * return type of the procedure + * restrictionProcedure -- must access the pg_procedure catalog to get + * the ObjectId but this is optional + * joinProcedure -- same as restrictionProcedure + * now either insert or replace the operator into the pg_operator catalog + * if the operator shell is being filled in + * access the catalog in order to get a valid buffer + * create a tuple using ModifyHeapTuple + * get the t_ctid from the modified tuple and call RelationReplaceHeapTuple + * else if a new operator is being created + * create a tuple using heap_formtuple + * call heap_insert + * -------------------------------- + * "X" indicates an optional argument (i.e. one that can be NULL) + * operatorName; -- operator name + * definedOK; -- operator can already have an oid? + * leftTypeName; -- X left type name + * rightTypeName; -- X right type name + * procedureName; -- procedure oid for operator code + * precedence; -- operator precedence + * isLeftAssociative; -- operator is left associative? + * commutatorName; -- X commutator operator name + * negatorName; -- X negator operator name + * restrictionName; -- X restriction sel. procedure name + * joinName; -- X join sel. procedure name + * canHash; -- possible hash operator? 
+ * leftSortName; -- X left sort operator + * rightSortName; -- X right sort operator + */ +static void +OperatorDef(char *operatorName, + int definedOK, + char *leftTypeName, + char *rightTypeName, + char *procedureName, + uint16 precedence, + bool isLeftAssociative, + char *commutatorName, + char *negatorName, + char *restrictionName, + char *joinName, + bool canHash, + char *leftSortName, + char *rightSortName) +{ + register i, j; + Relation pg_operator_desc; + + HeapScanDesc pg_operator_scan; + HeapTuple tup; + Buffer buffer; + ItemPointerData itemPointerData; + char nulls[ Natts_pg_operator ]; + char replaces[ Natts_pg_operator ]; + Datum values[ Natts_pg_operator ]; + Oid other_oid; + Oid operatorObjectId; + Oid leftTypeId = InvalidOid; + Oid rightTypeId = InvalidOid; + Oid commutatorId = InvalidOid; + Oid negatorId = InvalidOid; + bool leftDefined = false; + bool rightDefined = false; + char *name[4]; + Oid typeId[8]; + int nargs; + TupleDesc tupDesc; + + static ScanKeyData opKey[3] = { + { 0, Anum_pg_operator_oprname, NameEqualRegProcedure }, + { 0, Anum_pg_operator_oprleft, ObjectIdEqualRegProcedure }, + { 0, Anum_pg_operator_oprright, ObjectIdEqualRegProcedure }, + }; + + fmgr_info(NameEqualRegProcedure, + &opKey[0].sk_func, &opKey[0].sk_nargs); + fmgr_info(ObjectIdEqualRegProcedure, + &opKey[1].sk_func, &opKey[1].sk_nargs); + fmgr_info(ObjectIdEqualRegProcedure, + &opKey[2].sk_func, &opKey[2].sk_nargs); + + operatorObjectId = OperatorGet(operatorName, + leftTypeName, + rightTypeName); + + if (OidIsValid(operatorObjectId) && !definedOK) + elog(WARN, "OperatorDef: operator \"%-.*s\" already defined", + NAMEDATALEN, operatorName); + + if (leftTypeName) + leftTypeId = TypeGet(leftTypeName, &leftDefined); + + if (rightTypeName) + rightTypeId = TypeGet(rightTypeName, &rightDefined); + + if (!((OidIsValid(leftTypeId) && leftDefined) || + (OidIsValid(rightTypeId) && rightDefined))) + elog(WARN, "OperatorDef: no argument types??"); + + for (i = 0; i < Natts_pg_operator; ++i) { + values[i] = (Datum)NULL; + replaces[i] = 'r'; + nulls[i] = ' '; + } + + /* ---------------- + * Look up registered procedures -- find the return type + * of procedureName to place in "result" field. + * Do this before shells are created so we don't + * have to worry about deleting them later.
+ * ---------------- + */ + memset(typeId, 0, 8 * sizeof(Oid)); + if (!leftTypeName) { + typeId[0] = rightTypeId; + nargs = 1; + } + else if (!rightTypeName) { + typeId[0] = leftTypeId; + nargs = 1; + } + else { + typeId[0] = leftTypeId; + typeId[1] = rightTypeId; + nargs = 2; + } + tup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(procedureName), + Int32GetDatum(nargs), + PointerGetDatum(typeId), + 0); + + if (!PointerIsValid(tup)) + func_error("OperatorDef", procedureName, nargs, (int*)typeId); + + values[ Anum_pg_operator_oprcode-1 ] = ObjectIdGetDatum(tup->t_oid); + values[ Anum_pg_operator_oprresult-1 ] = + ObjectIdGetDatum(((Form_pg_proc) + GETSTRUCT(tup))->prorettype); + + /* ---------------- + * find restriction + * ---------------- + */ + if (restrictionName) { /* optional */ + memset(typeId, 0, 8 * sizeof(Oid)); + typeId[0] = OIDOID; /* operator OID */ + typeId[1] = OIDOID; /* relation OID */ + typeId[2] = INT2OID; /* attribute number */ + typeId[3] = 0; /* value - can be any type */ + typeId[4] = INT4OID; /* flags - left or right selectivity */ + tup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(restrictionName), + Int32GetDatum(5), + ObjectIdGetDatum(typeId), + 0); + if (!HeapTupleIsValid(tup)) + func_error("OperatorDef", restrictionName, 5, (int*)typeId); + + values[ Anum_pg_operator_oprrest-1 ] = ObjectIdGetDatum(tup->t_oid); + } else + values[ Anum_pg_operator_oprrest-1 ] = ObjectIdGetDatum(InvalidOid); + + /* ---------------- + * find join - only valid for binary operators + * ---------------- + */ + if (joinName) { /* optional */ + memset(typeId, 0, 8 * sizeof(Oid)); + typeId[0] = OIDOID; /* operator OID */ + typeId[1] = OIDOID; /* relation OID 1 */ + typeId[2] = INT2OID; /* attribute number 1 */ + typeId[3] = OIDOID; /* relation OID 2 */ + typeId[4] = INT2OID; /* attribute number 2 */ + + tup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(joinName), + Int32GetDatum(5), + Int32GetDatum(typeId), + 0); + if (!HeapTupleIsValid(tup)) + func_error("OperatorDef", joinName, 5, (int*)typeId); + + values[Anum_pg_operator_oprjoin-1] = ObjectIdGetDatum(tup->t_oid); + } else + values[Anum_pg_operator_oprjoin-1] = ObjectIdGetDatum(InvalidOid); + + /* ---------------- + * set up values in the operator tuple + * ---------------- + */ + i = 0; + values[i++] = PointerGetDatum(operatorName); + values[i++] = Int32GetDatum(GetUserId()); + values[i++] = UInt16GetDatum(precedence); + values[i++] = leftTypeName ? (rightTypeName ? 'b' : 'r') : 'l'; + values[i++] = Int8GetDatum(isLeftAssociative); + values[i++] = Int8GetDatum(canHash); + values[i++] = ObjectIdGetDatum(leftTypeId); + values[i++] = ObjectIdGetDatum(rightTypeId); + + ++i; /* Skip "prorettype", this was done above */ + + /* + * Set up the other operators. If they do not currently exist, + * set up shells in order to get ObjectId's and call OperatorDef + * again later to fill in the shells. 
+ */ + name[0] = commutatorName; + name[1] = negatorName; + name[2] = leftSortName; + name[3] = rightSortName; + + for (j = 0; j < 4; ++j) { + if (name[j]) { + + /* for the commutator, switch order of arguments */ + if (j == 0) { + other_oid = OperatorGet(name[j], rightTypeName,leftTypeName); + commutatorId = other_oid; + } else { + other_oid = OperatorGet(name[j], leftTypeName,rightTypeName); + if (j == 1) + negatorId = other_oid; + } + + if (OidIsValid(other_oid)) /* already in catalogs */ + values[i++] = ObjectIdGetDatum(other_oid); + else if (strcmp(operatorName, name[j]) != 0) { + /* not in catalogs, different from operator */ + + /* for the commutator, switch order of arguments */ + if (j == 0) { + other_oid = OperatorShellMake(name[j], + rightTypeName, + leftTypeName); + } else { + other_oid = OperatorShellMake(name[j], + leftTypeName, + rightTypeName); + } + + if (!OidIsValid(other_oid)) + elog(WARN, + "OperatorDef: can't create operator '%s'", + name[j]); + values[i++] = ObjectIdGetDatum(other_oid); + + } else /* not in catalogs, same as operator ??? */ + values[i++] = ObjectIdGetDatum(InvalidOid); + + } else /* new operator is optional */ + values[i++] = ObjectIdGetDatum(InvalidOid); + } + + /* last three fields were filled in first */ + + /* + * If we are adding to an operator shell, get its t_ctid and a + * buffer. + */ + pg_operator_desc = heap_openr(OperatorRelationName); + + if (operatorObjectId) { + opKey[0].sk_argument = PointerGetDatum(operatorName); + opKey[1].sk_argument = ObjectIdGetDatum(leftTypeId); + opKey[2].sk_argument = ObjectIdGetDatum(rightTypeId); + + pg_operator_scan = heap_beginscan(pg_operator_desc, + 0, + SelfTimeQual, + 3, + opKey); + + tup = heap_getnext(pg_operator_scan, 0, &buffer); + if (HeapTupleIsValid(tup)) { + tup = heap_modifytuple(tup, + buffer, + pg_operator_desc, + values, + nulls, + replaces); + + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + setheapoverride(true); + (void) heap_replace(pg_operator_desc, &itemPointerData, tup); + setheapoverride(false); + } else + elog(WARN, "OperatorDef: no operator %d", other_oid); + + heap_endscan(pg_operator_scan); + + } else { + tupDesc = pg_operator_desc->rd_att; + tup = heap_formtuple(tupDesc, values, nulls); + + heap_insert(pg_operator_desc, tup); + operatorObjectId = tup->t_oid; + } + + heap_close(pg_operator_desc); + + /* + * It's possible that we're creating a skeleton operator here for + * the commute or negate attributes of a real operator. If we are, + * then we're done. If not, we may need to update the negator and + * commutator for this attribute. The reason for this is that the + * user may want to create two operators (say < and >=). When he + * defines <, if he uses >= as the negator or commutator, he won't + * be able to insert it later, since (for some reason) define operator + * defines it for him. So what he does is to define > without a + * negator or commutator. Then he defines >= with < as the negator + * and commutator. As a side effect, this will update the > tuple + * if it has no commutator or negator defined. + * + * Alstublieft, Tom Vijlbrief. + */ + if (!definedOK) + OperatorUpd(operatorObjectId, commutatorId, negatorId); +} + +/* ---------------------------------------------------------------- + * OperatorUpd + * + * For a given operator, look up its negator and commutator operators. + * If they are defined, but their negator and commutator operators + * (respectively) are not, then use the new operator for neg and comm. 
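In practice, then, the supported way to get a mutually linked pair is to define the first operator with no commutator or negator at all and to let the second definition name the first; OperatorUpd() reaches back and fills in the missing link on the older row. A hypothetical sequence, not part of the patch (the int4 operators used here already exist in the initial contents; only the ordering matters):

    /* Hypothetical calls illustrating the back-filling step. */
    OperatorCreate("<", "int4", "int4", "int4lt", 0, true,
                   (char *) NULL, (char *) NULL,   /* no links yet           */
                   "intltsel", "intltjoinsel", false,
                   (char *) NULL, (char *) NULL);

    OperatorCreate(">=", "int4", "int4", "int4ge", 0, true,
                   (char *) NULL, "<",             /* negator already exists */
                   "intgtsel", "intgtjoinsel", false,
                   (char *) NULL, (char *) NULL);
    /*
     * The second OperatorDef() ends with OperatorUpd(baseId, commId, negId);
     * OperatorUpd() finds the "<" row, sees that its oprnegate is still
     * InvalidOid, and sets it to the new ">=" row, so the two entries end
     * up pointing at each other.
     */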
+ * This solves a problem for users who need to insert two new operators + * which are the negator or commutator of each other. + * ---------------------------------------------------------------- + */ +static void +OperatorUpd(Oid baseId, Oid commId, Oid negId) +{ + register i; + Relation pg_operator_desc; + HeapScanDesc pg_operator_scan; + HeapTuple tup; + Buffer buffer; + ItemPointerData itemPointerData; + char nulls[ Natts_pg_operator ]; + char replaces[ Natts_pg_operator ]; + Datum values[ Natts_pg_operator ]; + + static ScanKeyData opKey[1] = { + { 0, ObjectIdAttributeNumber, ObjectIdEqualRegProcedure }, + }; + + fmgr_info(ObjectIdEqualRegProcedure, + &opKey[0].sk_func, &opKey[0].sk_nargs); + + for (i = 0; i < Natts_pg_operator; ++i) { + values[i] = (Datum)NULL; + replaces[i] = ' '; + nulls[i] = ' '; + } + + pg_operator_desc = heap_openr(OperatorRelationName); + + /* check and update the commutator, if necessary */ + opKey[0].sk_argument = ObjectIdGetDatum(commId); + + pg_operator_scan = heap_beginscan(pg_operator_desc, + 0, + SelfTimeQual, + 1, + opKey); + + tup = heap_getnext(pg_operator_scan, 0, &buffer); + + /* if the commutator and negator are the same operator, do one update */ + if (commId == negId) { + if (HeapTupleIsValid(tup)) { + OperatorTupleForm t; + + t = (OperatorTupleForm) GETSTRUCT(tup); + if (!OidIsValid(t->oprcom) + || !OidIsValid(t->oprnegate)) { + + if (!OidIsValid(t->oprnegate)) { + values[Anum_pg_operator_oprnegate - 1] = + ObjectIdGetDatum(baseId); + replaces[ Anum_pg_operator_oprnegate - 1 ] = 'r'; + } + + if (!OidIsValid(t->oprcom)) { + values[Anum_pg_operator_oprcom - 1] = + ObjectIdGetDatum(baseId); + replaces[ Anum_pg_operator_oprcom - 1 ] = 'r'; + } + + tup = heap_modifytuple(tup, + buffer, + pg_operator_desc, + values, + nulls, + replaces); + + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + + setheapoverride(true); + (void) heap_replace(pg_operator_desc, &itemPointerData, tup); + setheapoverride(false); + + } + } + heap_endscan(pg_operator_scan); + + heap_close(pg_operator_desc); + + /* release the buffer properly */ + if (BufferIsValid(buffer)) + ReleaseBuffer(buffer); + + return; + } + + /* if commutator and negator are different, do two updates */ + if (HeapTupleIsValid(tup) && + !(OidIsValid(((OperatorTupleForm) GETSTRUCT(tup))->oprcom))) { + values[ Anum_pg_operator_oprcom - 1] = ObjectIdGetDatum(baseId); + replaces[ Anum_pg_operator_oprcom - 1] = 'r'; + tup = heap_modifytuple(tup, + buffer, + pg_operator_desc, + values, + nulls, + replaces); + + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + setheapoverride(true); + (void) heap_replace(pg_operator_desc, &itemPointerData, tup); + setheapoverride(false); + + values[ Anum_pg_operator_oprcom - 1 ] = (Datum)NULL; + replaces[ Anum_pg_operator_oprcom - 1 ] = ' '; + + /* release the buffer properly */ + if (BufferIsValid(buffer)) + ReleaseBuffer(buffer); + + } + + /* check and update the negator, if necessary */ + opKey[0].sk_argument = ObjectIdGetDatum(negId); + + pg_operator_scan = heap_beginscan(pg_operator_desc, + 0, + SelfTimeQual, + 1, + opKey); + + tup = heap_getnext(pg_operator_scan, 0, &buffer); + if (HeapTupleIsValid(tup) && + !(OidIsValid(((OperatorTupleForm) GETSTRUCT(tup))->oprnegate))) { + values[Anum_pg_operator_oprnegate-1] = ObjectIdGetDatum(baseId); + replaces[ Anum_pg_operator_oprnegate - 1 ] = 'r'; + tup = heap_modifytuple(tup, + buffer, + pg_operator_desc, + values, + nulls, + replaces); + + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + + setheapoverride(true); + (void) 
heap_replace(pg_operator_desc, &itemPointerData, tup); + setheapoverride(false); + } + + /* release the buffer properly */ + if (BufferIsValid(buffer)) + ReleaseBuffer(buffer); + + heap_endscan(pg_operator_scan); + + heap_close(pg_operator_desc); +} + + +/* ---------------------------------------------------------------- + * OperatorCreate + * + * Algorithm: + * + * Since the commutator, negator, leftsortoperator, and rightsortoperator + * can be defined implicitly through OperatorCreate, must check before + * the main operator is added to see if they already exist. If they + * do not already exist, OperatorDef makes a "shell" for each undefined + * one, and then OperatorCreate must call OperatorDef again to fill in + * each shell. All this is necessary in order to get the right ObjectId's + * filled into the right fields. + * + * The "definedOk" flag indicates that OperatorDef can be called on + * the operator even though it already has an entry in the PG_OPERATOR + * relation. This allows shells to be filled in. The user cannot + * forward declare operators, this is strictly an internal capability. + * + * When the shells are filled in by subsequent calls to OperatorDef, + * all the fields are the same as the definition of the original operator + * except that the target operator name and the original operatorName + * are switched. In the case of commutator and negator, special flags + * are set to indicate their status, telling the executor(?) that + * the operands are to be switched, or the outcome of the procedure + * negated. + * + * ************************* NOTE NOTE NOTE ****************************** + * + * If the execution of this utility is interrupted, the pg_operator + * catalog may be left in an inconsistent state. Similarly, if + * something is removed from the pg_operator, pg_type, or pg_procedure + * catalog while this is executing, the results may be inconsistent. + * ---------------------------------------------------------------- + * + * "X" indicates an optional argument (i.e. one that can be NULL) + * operatorName; -- operator name + * leftTypeName; -- X left type name + * rightTypeName; -- X right type name + * procedureName; -- procedure for operator + * precedence; -- operator precedence + * isLeftAssociative; -- operator is left associative + * commutatorName; -- X commutator operator name + * negatorName; -- X negator operator name + * restrictionName; -- X restriction sel. procedure + * joinName; -- X join sel. procedure name + * canHash; -- operator hashes + * leftSortName; -- X left sort operator + * rightSortName; -- X right sort operator + * + */ +void +OperatorCreate(char *operatorName, + char *leftTypeName, + char *rightTypeName, + char *procedureName, + uint16 precedence, + bool isLeftAssociative, + char *commutatorName, + char *negatorName, + char *restrictionName, + char *joinName, + bool canHash, + char *leftSortName, + char *rightSortName) +{ + Oid commObjectId, negObjectId; + Oid leftSortObjectId, rightSortObjectId; + int definedOK; + + if (!leftTypeName && !rightTypeName) + elog(WARN, "OperatorCreate : at least one of leftarg or rightarg must be defined"); + + /* ---------------- + * get the oid's of the operator's associated operators, if possible. 
+ * ---------------- + */ + if (commutatorName) + commObjectId = OperatorGet(commutatorName, /* commute type order */ + rightTypeName, + leftTypeName); + + if (negatorName) + negObjectId = OperatorGet(negatorName, + leftTypeName, + rightTypeName); + + if (leftSortName) + leftSortObjectId = OperatorGet(leftSortName, + leftTypeName, + rightTypeName); + + if (rightSortName) + rightSortObjectId = OperatorGet(rightSortName, + rightTypeName, + leftTypeName); + + /* ---------------- + * Use OperatorDef() to define the specified operator and + * also create shells for the operator's associated operators + * if they don't already exist. + * + * This operator should not be defined yet. + * ---------------- + */ + definedOK = 0; + + OperatorDef(operatorName, + definedOK, + leftTypeName, + rightTypeName, + procedureName, + precedence, + isLeftAssociative, + commutatorName, + negatorName, + restrictionName, + joinName, + canHash, + leftSortName, + rightSortName); + + /* ---------------- + * Now fill in information in the operator's associated + * operators. + * + * These operators should be defined or have shells defined. + * ---------------- + */ + definedOK = 1; + + if (!OidIsValid(commObjectId) && commutatorName) + OperatorDef(commutatorName, + definedOK, + leftTypeName, /* should eventually */ + rightTypeName, /* commute order */ + procedureName, + precedence, + isLeftAssociative, + operatorName, /* commutator */ + negatorName, + restrictionName, + joinName, + canHash, + rightSortName, + leftSortName); + + if (negatorName && !OidIsValid(negObjectId)) + OperatorDef(negatorName, + definedOK, + leftTypeName, + rightTypeName, + procedureName, + precedence, + isLeftAssociative, + commutatorName, + operatorName, /* negator */ + restrictionName, + joinName, + canHash, + leftSortName, + rightSortName); + + if (leftSortName && !OidIsValid(leftSortObjectId)) + OperatorDef(leftSortName, + definedOK, + leftTypeName, + rightTypeName, + procedureName, + precedence, + isLeftAssociative, + commutatorName, + negatorName, + restrictionName, + joinName, + canHash, + operatorName, /* left sort */ + rightSortName); + + if (rightSortName && !OidIsValid(rightSortObjectId)) + OperatorDef(rightSortName, + definedOK, + leftTypeName, + rightTypeName, + procedureName, + precedence, + isLeftAssociative, + commutatorName, + negatorName, + restrictionName, + joinName, + canHash, + leftSortName, + operatorName); /* right sort */ +} diff --git a/src/backend/catalog/pg_operator.h b/src/backend/catalog/pg_operator.h new file mode 100644 index 00000000000..9f9533b2ffc --- /dev/null +++ b/src/backend/catalog/pg_operator.h @@ -0,0 +1,480 @@ +/*------------------------------------------------------------------------- + * + * pg_operator.h-- + * definition of the system "operator" relation (pg_operator) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_operator.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + * XXX do NOT break up DATA() statements into multiple lines! + * the scripts are not as smart as you might think... 
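Because the DATA() rows below are purely positional, they are hard to read without the CATALOG declaration and Anum_ constants that follow; as a reading aid (not part of the patch), one representative row, the int4 "=" entry at OID 96, decodes as:

    /*
     * DATA(insert OID = 96 ( "=" PGUID 0 b t t 23 23 16 96 518 97 97 int4eq eqsel eqjoinsel ));
     *
     *   oprname    "="          oprowner   PGUID        oprprec    0
     *   oprkind    'b' (binary) oprisleft  t            oprcanhash t
     *   oprleft    23 (int4)    oprright   23 (int4)    oprresult  16 (bool)
     *   oprcom     96 (itself)  oprnegate  518 ("<>")   oprlsortop 97 ("<")
     *   oprrsortop 97 ("<")     oprcode    int4eq       oprrest    eqsel
     *   oprjoin    eqjoinsel
     */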
+ * + *------------------------------------------------------------------------- + */ +#ifndef PG_OPERATOR_H +#define PG_OPERATOR_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_operator definition. cpp turns this into + * typedef struct FormData_pg_operator + * ---------------- + */ +CATALOG(pg_operator) { + NameData oprname; + Oid oprowner; + int2 oprprec; + char oprkind; + bool oprisleft; + bool oprcanhash; + Oid oprleft; + Oid oprright; + Oid oprresult; + Oid oprcom; + Oid oprnegate; + Oid oprlsortop; + Oid oprrsortop; + regproc oprcode; + regproc oprrest; + regproc oprjoin; +} FormData_pg_operator; + +/* ---------------- + * Form_pg_operator corresponds to a pointer to a tuple with + * the format of pg_operator relation. + * ---------------- + */ +typedef FormData_pg_operator *OperatorTupleForm; + +/* ---------------- + * compiler constants for pg_operator + * ---------------- + */ + +#define Natts_pg_operator 16 +#define Anum_pg_operator_oprname 1 +#define Anum_pg_operator_oprowner 2 +#define Anum_pg_operator_oprprec 3 +#define Anum_pg_operator_oprkind 4 +#define Anum_pg_operator_oprisleft 5 +#define Anum_pg_operator_oprcanhash 6 +#define Anum_pg_operator_oprleft 7 +#define Anum_pg_operator_oprright 8 +#define Anum_pg_operator_oprresult 9 +#define Anum_pg_operator_oprcom 10 +#define Anum_pg_operator_oprnegate 11 +#define Anum_pg_operator_oprlsortop 12 +#define Anum_pg_operator_oprrsortop 13 +#define Anum_pg_operator_oprcode 14 +#define Anum_pg_operator_oprrest 15 +#define Anum_pg_operator_oprjoin 16 + +/* ---------------- + * initial contents of pg_operator + * ---------------- + */ + +DATA(insert OID = 85 ( "<>" PGUID 0 b t f 16 16 16 85 91 0 0 boolne neqsel neqjoinsel )); +DATA(insert OID = 91 ( "=" PGUID 0 b t t 16 16 16 91 85 0 0 booleq eqsel eqjoinsel )); +#define BooleanEqualOperator 91 + +DATA(insert OID = 92 ( "=" PGUID 0 b t t 18 18 16 92 630 631 631 chareq eqsel eqjoinsel )); +DATA(insert OID = 93 ( "=" PGUID 0 b t t 19 19 16 93 643 660 660 nameeq eqsel eqjoinsel )); +DATA(insert OID = 94 ( "=" PGUID 0 b t t 21 21 16 94 519 95 95 int2eq eqsel eqjoinsel )); +DATA(insert OID = 95 ( "<" PGUID 0 b t f 21 21 16 520 524 0 0 int2lt intltsel intltjoinsel )); +DATA(insert OID = 96 ( "=" PGUID 0 b t t 23 23 16 96 518 97 97 int4eq eqsel eqjoinsel )); +DATA(insert OID = 97 ( "<" PGUID 0 b t f 23 23 16 521 525 0 0 int4lt intltsel intltjoinsel )); +DATA(insert OID = 98 ( "=" PGUID 0 b t t 25 25 16 98 531 664 664 texteq eqsel eqjoinsel )); +DATA(insert OID = 99 ( "=" PGUID 0 b t t 20 20 16 99 644 645 645 char16eq eqsel eqjoinsel )); +DATA(insert OID = 329 ( "=" PGUID 0 b t t 1000 1000 16 329 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 349 ( "=" PGUID 0 b t t 1001 1001 16 349 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 374 ( "=" PGUID 0 b t t 1002 1002 16 374 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 375 ( "=" PGUID 0 b t t 1003 1003 16 375 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 376 ( "=" PGUID 0 b t t 1004 1004 16 376 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 377 ( "=" PGUID 0 b t t 1005 1005 16 377 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 378 ( "=" PGUID 0 b t t 1006 1006 16 378 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 379 ( "=" PGUID 0 b t t 1007 1007 16 379 0 0 0 array_eq 
eqsel eqjoinsel )); +DATA(insert OID = 380 ( "=" PGUID 0 b t t 1008 1008 16 380 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 381 ( "=" PGUID 0 b t t 1009 1009 16 381 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 382 ( "=" PGUID 0 b t t 1028 1028 16 382 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 383 ( "=" PGUID 0 b t t 1010 1010 16 383 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 384 ( "=" PGUID 0 b t t 1011 1011 16 384 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 385 ( "=" PGUID 0 b t t 1012 1012 16 385 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 386 ( "=" PGUID 0 b t t 1013 1013 16 386 0 0 0 array_eq eqsel eqjoinsel )); +/* +DATA(insert OID = 387 ( "=" PGUID 0 b t t 1014 1014 16 387 0 0 0 array_eq eqsel eqjoinsel )); +*/ +DATA(insert OID = 388 ( "=" PGUID 0 b t t 1015 1015 16 388 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 389 ( "=" PGUID 0 b t t 1016 1016 16 389 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 390 ( "=" PGUID 0 b t t 1017 1017 16 390 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 391 ( "=" PGUID 0 b t t 1018 1018 16 391 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 392 ( "=" PGUID 0 b t t 1019 1019 16 392 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 393 ( "=" PGUID 0 b t t 1020 1020 16 393 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 394 ( "=" PGUID 0 b t t 1021 1021 16 394 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 395 ( "=" PGUID 0 b t t 1022 1022 16 395 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 396 ( "=" PGUID 0 b t t 1023 1023 16 396 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 397 ( "=" PGUID 0 b t t 1024 1024 16 397 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 398 ( "=" PGUID 0 b t t 1025 1025 16 398 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 399 ( "=" PGUID 0 b t t 1026 1026 16 399 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 400 ( "=" PGUID 0 b t t 1027 1027 16 400 0 0 0 array_eq eqsel eqjoinsel )); +DATA(insert OID = 401 ( "=" PGUID 0 b t t 1034 1034 16 401 0 0 0 array_eq eqsel eqjoinsel )); + +DATA(insert OID = 412 ( "=" PGUID 0 b t t 409 409 16 412 415 418 418 char2eq eqsel eqjoinsel )); +DATA(insert OID = 413 ( "=" PGUID 0 b t t 410 410 16 413 416 419 419 char4eq eqsel eqjoinsel )); +DATA(insert OID = 414 ( "=" PGUID 0 b t t 411 411 16 414 417 420 420 char8eq eqsel eqjoinsel )); + +DATA(insert OID = 415 ( "<>" PGUID 0 b t f 409 409 16 415 412 0 0 char2ne neqsel neqjoinsel )); +DATA(insert OID = 416 ( "<>" PGUID 0 b t f 410 410 16 416 413 0 0 char4ne neqsel neqjoinsel )); +DATA(insert OID = 417 ( "<>" PGUID 0 b t f 411 411 16 417 414 0 0 char8ne neqsel neqjoinsel )); +DATA(insert OID = 418 ( "<" PGUID 0 b t f 409 409 16 460 463 0 0 char2lt intltsel intltjoinsel )); +DATA(insert OID = 419 ( "<" PGUID 0 b t f 410 410 16 461 464 0 0 char4lt intltsel intltjoinsel )); +DATA(insert OID = 420 ( "<" PGUID 0 b t f 411 411 16 462 465 0 0 char8lt intltsel intltjoinsel )); + +DATA(insert OID = 457 ( "<=" PGUID 0 b t f 409 409 16 463 460 0 0 char2le intltsel intltjoinsel )); +DATA(insert OID = 458 ( "<=" PGUID 0 b t f 410 410 16 464 461 0 0 char4le intltsel intltjoinsel )); +DATA(insert OID = 459 ( "<=" PGUID 0 b t f 411 411 16 465 462 0 0 char8le intltsel intltjoinsel )); +DATA(insert OID = 460 ( ">" PGUID 0 b t f 409 409 16 418 457 0 0 char2gt intltsel intltjoinsel )); +DATA(insert OID = 461 ( ">" PGUID 0 b t f 410 410 16 419 458 0 0 char4gt intltsel intltjoinsel )); +DATA(insert OID = 462 ( ">" PGUID 0 b t f 
411 411 16 420 459 0 0 char8gt intltsel intltjoinsel )); +DATA(insert OID = 463 ( ">=" PGUID 0 b t f 409 409 16 457 418 0 0 char2ge intltsel intltjoinsel )); +DATA(insert OID = 464 ( ">=" PGUID 0 b t f 410 410 16 458 418 0 0 char4ge intltsel intltjoinsel )); +DATA(insert OID = 465 ( ">=" PGUID 0 b t f 411 411 16 459 420 0 0 char8ge intltsel intltjoinsel )); + +DATA(insert OID = 485 ( "<<" PGUID 0 b t f 604 604 16 0 0 0 0 poly_left intltsel intltjoinsel )); +DATA(insert OID = 486 ( "&<" PGUID 0 b t f 604 604 16 0 0 0 0 poly_overleft intltsel intltjoinsel )); +DATA(insert OID = 487 ( "&>" PGUID 0 b t f 604 604 16 0 0 0 0 poly_overright intltsel intltjoinsel )); +DATA(insert OID = 488 ( ">>" PGUID 0 b t f 604 604 16 0 0 0 0 poly_right intltsel intltjoinsel )); +DATA(insert OID = 489 ( "@" PGUID 0 b t f 604 604 16 0 0 0 0 poly_contained intltsel intltjoinsel )); +DATA(insert OID = 490 ( "~" PGUID 0 b t f 604 604 16 0 0 0 0 poly_contain intltsel intltjoinsel )); +DATA(insert OID = 491 ( "~=" PGUID 0 b t f 604 604 16 0 0 0 0 poly_same intltsel intltjoinsel )); +DATA(insert OID = 492 ( "&&" PGUID 0 b t f 604 604 16 0 0 0 0 poly_overlap intltsel intltjoinsel )); +DATA(insert OID = 493 ( "<<" PGUID 0 b t f 603 603 16 0 0 0 0 box_left intltsel intltjoinsel )); +DATA(insert OID = 494 ( "&<" PGUID 0 b t f 603 603 16 0 0 0 0 box_overleft intltsel intltjoinsel )); +DATA(insert OID = 495 ( "&>" PGUID 0 b t f 603 603 16 0 0 0 0 box_overright intltsel intltjoinsel )); +DATA(insert OID = 496 ( ">>" PGUID 0 b t f 603 603 16 0 0 0 0 box_right intltsel intltjoinsel )); +DATA(insert OID = 497 ( "@" PGUID 0 b t f 603 603 16 0 0 0 0 box_contained intltsel intltjoinsel )); +DATA(insert OID = 498 ( "~" PGUID 0 b t f 603 603 16 0 0 0 0 box_contain intltsel intltjoinsel )); +DATA(insert OID = 499 ( "~=" PGUID 0 b t f 603 603 16 0 0 0 0 box_same intltsel intltjoinsel )); +DATA(insert OID = 500 ( "&&" PGUID 0 b t f 603 603 16 0 0 0 0 box_overlap intltsel intltjoinsel )); +DATA(insert OID = 501 ( ">=" PGUID 0 b t f 603 603 16 0 0 0 0 box_ge areasel areajoinsel )); +DATA(insert OID = 502 ( ">" PGUID 0 b t f 603 603 16 0 0 0 0 box_gt areasel areajoinsel )); +DATA(insert OID = 503 ( "=" PGUID 0 b t t 603 603 16 0 0 0 0 box_eq areasel areajoinsel )); +DATA(insert OID = 504 ( "<" PGUID 0 b t f 603 603 16 0 0 0 0 box_lt areasel areajoinsel )); +DATA(insert OID = 505 ( "<=" PGUID 0 b t f 603 603 16 0 0 0 0 box_le areasel areajoinsel )); +DATA(insert OID = 506 ( "!^" PGUID 0 b t f 600 600 16 0 0 0 0 point_above intltsel intltjoinsel )); +DATA(insert OID = 507 ( "!<" PGUID 0 b t f 600 600 16 0 0 0 0 point_left intltsel intltjoinsel )); +DATA(insert OID = 508 ( "!>" PGUID 0 b t f 600 600 16 0 0 0 0 point_right intltsel intltjoinsel )); +DATA(insert OID = 509 ( "!|" PGUID 0 b t f 600 600 16 0 0 0 0 point_below intltsel intltjoinsel )); +DATA(insert OID = 510 ( "=|=" PGUID 0 b t f 600 600 16 0 0 0 0 point_eq intltsel intltjoinsel )); +DATA(insert OID = 511 ( "===>" PGUID 0 b t f 600 603 16 0 0 0 0 on_pb intltsel intltjoinsel )); +DATA(insert OID = 512 ( "===`" PGUID 0 b t f 600 602 16 0 0 0 0 on_ppath intltsel intltjoinsel )); +DATA(insert OID = 513 ( "@@" PGUID 0 l t f 0 603 600 0 0 0 0 box_center intltsel intltjoinsel )); +DATA(insert OID = 514 ( "*" PGUID 0 b t f 23 23 23 514 0 0 0 int4mul intltsel intltjoinsel )); +DATA(insert OID = 515 ( "!" PGUID 0 r t f 23 0 23 0 0 0 0 int4fac intltsel intltjoinsel )); +DATA(insert OID = 516 ( "!!" 
PGUID 0 l t f 0 23 23 0 0 0 0 int4fac intltsel intltjoinsel )); +DATA(insert OID = 517 ( "<===>" PGUID 0 b t f 600 600 23 0 0 0 0 pointdist intltsel intltjoinsel )); +DATA(insert OID = 518 ( "<>" PGUID 0 b t f 23 23 16 518 96 0 0 int4ne neqsel neqjoinsel )); +DATA(insert OID = 519 ( "<>" PGUID 0 b t f 21 21 16 519 94 0 0 int2ne neqsel neqjoinsel )); +DATA(insert OID = 520 ( ">" PGUID 0 b t f 21 21 16 95 0 0 0 int2gt intgtsel intgtjoinsel )); +DATA(insert OID = 521 ( ">" PGUID 0 b t f 23 23 16 97 0 0 0 int4gt intgtsel intgtjoinsel )); +DATA(insert OID = 522 ( "<=" PGUID 0 b t f 21 21 16 524 520 0 0 int2le intltsel intltjoinsel )); +DATA(insert OID = 523 ( "<=" PGUID 0 b t f 23 23 16 525 521 0 0 int4le intltsel intltjoinsel )); +DATA(insert OID = 524 ( ">=" PGUID 0 b t f 21 21 16 522 95 0 0 int2ge intgtsel intgtjoinsel )); +DATA(insert OID = 525 ( ">=" PGUID 0 b t f 23 23 16 523 97 0 0 int4ge intgtsel intgtjoinsel )); +DATA(insert OID = 526 ( "*" PGUID 0 b t f 21 21 21 526 0 0 0 int2mul intltsel intltjoinsel )); +DATA(insert OID = 527 ( "/" PGUID 0 b t f 21 21 21 0 0 0 0 int2div intltsel intltjoinsel )); +DATA(insert OID = 528 ( "/" PGUID 0 b t f 23 23 23 0 0 0 0 int4div intltsel intltjoinsel )); +DATA(insert OID = 529 ( "%" PGUID 0 b t f 21 21 21 6 0 0 0 int2mod intltsel intltjoinsel )); +DATA(insert OID = 530 ( "%" PGUID 0 b t f 23 23 23 6 0 0 0 int4mod intltsel intltjoinsel )); +DATA(insert OID = 531 ( "<>" PGUID 0 b t f 25 25 16 531 98 0 0 textne neqsel neqjoinsel )); +DATA(insert OID = 532 ( "=" PGUID 0 b t t 21 23 16 533 538 95 97 int24eq eqsel eqjoinsel )); +DATA(insert OID = 533 ( "=" PGUID 0 b t t 23 21 16 532 539 97 95 int42eq eqsel eqjoinsel )); +DATA(insert OID = 534 ( "<" PGUID 0 b t f 21 23 16 537 542 0 0 int24lt intltsel intltjoinsel )); +DATA(insert OID = 535 ( "<" PGUID 0 b t f 23 21 16 536 543 0 0 int42lt intltsel intltjoinsel )); +DATA(insert OID = 536 ( ">" PGUID 0 b t f 21 23 16 535 540 0 0 int24gt intgtsel intgtjoinsel )); +DATA(insert OID = 537 ( ">" PGUID 0 b t f 23 21 16 534 541 0 0 int42gt intgtsel intgtjoinsel )); +DATA(insert OID = 538 ( "<>" PGUID 0 b t f 21 23 16 539 532 0 0 int24ne neqsel neqjoinsel )); +DATA(insert OID = 539 ( "<>" PGUID 0 b t f 23 21 16 538 533 0 0 int42ne neqsel neqjoinsel )); +DATA(insert OID = 540 ( "<=" PGUID 0 b t f 21 23 16 543 536 0 0 int24le intltsel intltjoinsel )); +DATA(insert OID = 541 ( "<=" PGUID 0 b t f 23 21 16 542 537 0 0 int42le intltsel intltjoinsel )); +DATA(insert OID = 542 ( ">=" PGUID 0 b t f 21 23 16 541 534 0 0 int24ge intgtsel intgtjoinsel )); +DATA(insert OID = 543 ( ">=" PGUID 0 b t f 23 21 16 540 535 0 0 int42ge intgtsel intgtjoinsel )); +DATA(insert OID = 544 ( "*" PGUID 0 b t f 21 23 23 545 0 0 0 int24mul intltsel intltjoinsel )); +DATA(insert OID = 545 ( "*" PGUID 0 b t f 23 21 23 544 0 0 0 int42mul intltsel intltjoinsel )); +DATA(insert OID = 546 ( "/" PGUID 0 b t f 21 23 23 0 0 0 0 int24div intltsel intltjoinsel )); +DATA(insert OID = 547 ( "/" PGUID 0 b t f 23 21 23 0 0 0 0 int42div intltsel intltjoinsel )); +DATA(insert OID = 548 ( "%" PGUID 0 b t f 21 23 23 6 0 0 0 int24mod intltsel intltjoinsel )); +DATA(insert OID = 549 ( "%" PGUID 0 b t f 23 21 23 6 0 0 0 int42mod intltsel intltjoinsel )); +DATA(insert OID = 550 ( "+" PGUID 0 b t f 21 21 21 550 0 0 0 int2pl intltsel intltjoinsel )); +DATA(insert OID = 551 ( "+" PGUID 0 b t f 23 23 23 551 0 0 0 int4pl intltsel intltjoinsel )); +DATA(insert OID = 552 ( "+" PGUID 0 b t f 21 23 23 553 0 0 0 int24pl intltsel intltjoinsel )); +DATA(insert OID = 553 ( "+" 
PGUID 0 b t f 23 21 23 552 0 0 0 int42pl intltsel intltjoinsel )); +DATA(insert OID = 554 ( "-" PGUID 0 b t f 21 21 21 0 0 0 0 int2mi intltsel intltjoinsel )); +DATA(insert OID = 555 ( "-" PGUID 0 b t f 23 23 23 0 0 0 0 int4mi intltsel intltjoinsel )); +DATA(insert OID = 556 ( "-" PGUID 0 b t f 21 23 23 0 0 0 0 int24mi intltsel intltjoinsel )); +DATA(insert OID = 557 ( "-" PGUID 0 b t f 23 21 23 0 0 0 0 int42mi intltsel intltjoinsel )); +DATA(insert OID = 558 ( "-" PGUID 0 l t f 0 23 23 0 0 0 0 int4um intltsel intltjoinsel )); +DATA(insert OID = 559 ( "-" PGUID 0 l t f 0 21 21 0 0 0 0 int2um intltsel intltjoinsel )); +DATA(insert OID = 560 ( "=" PGUID 0 b t t 702 702 16 560 561 562 562 abstimeeq eqsel eqjoinsel )); +DATA(insert OID = 561 ( "<>" PGUID 0 b t f 702 702 16 561 560 0 0 abstimene neqsel neqjoinsel )); +DATA(insert OID = 562 ( "<" PGUID 0 b t f 702 702 16 563 565 0 0 abstimelt intltsel intltjoinsel )); +DATA(insert OID = 563 ( ">" PGUID 0 b t f 702 702 16 562 564 0 0 abstimegt intltsel intltjoinsel )); +DATA(insert OID = 564 ( "<=" PGUID 0 b t f 702 702 16 565 563 0 0 abstimele intltsel intltjoinsel )); +DATA(insert OID = 565 ( ">=" PGUID 0 b t f 702 702 16 564 562 0 0 abstimege intltsel intltjoinsel )); +DATA(insert OID = 566 ( "=" PGUID 0 b t t 703 703 16 566 567 568 568 reltimeeq - - )); +DATA(insert OID = 567 ( "<>" PGUID 0 b t f 703 703 16 567 566 0 0 reltimene - - )); +DATA(insert OID = 568 ( "<" PGUID 0 b t f 703 703 16 569 571 0 0 reltimelt - - )); +DATA(insert OID = 569 ( ">" PGUID 0 b t f 703 703 16 568 570 0 0 reltimegt - - )); +DATA(insert OID = 570 ( "<=" PGUID 0 b t f 703 703 16 571 569 0 0 reltimele - - )); +DATA(insert OID = 571 ( ">=" PGUID 0 b t f 703 703 16 570 568 0 0 reltimege - - )); +DATA(insert OID = 572 ( "=" PGUID 0 b t t 704 704 16 572 0 0 0 intervaleq - - )); +DATA(insert OID = 573 ( "<<" PGUID 0 b t f 704 704 16 0 0 0 0 intervalct - - )); +DATA(insert OID = 574 ( "&&" PGUID 0 b t f 704 704 16 0 0 0 0 intervalov - - )); +DATA(insert OID = 575 ( "#=" PGUID 0 b t f 704 703 16 0 576 0 568 intervalleneq - - )); +DATA(insert OID = 576 ( "#<>" PGUID 0 b t f 704 703 16 0 575 0 568 intervallenne - - )); +DATA(insert OID = 577 ( "#<" PGUID 0 b t f 704 703 16 0 580 0 568 intervallenlt - - )); +DATA(insert OID = 578 ( "#>" PGUID 0 b t f 704 703 16 0 579 0 568 intervallengt - - )); +DATA(insert OID = 579 ( "#<=" PGUID 0 b t f 704 703 16 0 578 0 568 intervallenle - - )); +DATA(insert OID = 580 ( "#>=" PGUID 0 b t f 704 703 16 0 577 0 568 intervallenge - - )); +DATA(insert OID = 581 ( "+" PGUID 0 b t f 702 703 702 581 0 0 0 timepl - - )); +DATA(insert OID = 582 ( "-" PGUID 0 b t f 702 703 702 0 0 0 0 timemi - - )); +DATA(insert OID = 583 ( "" PGUID 0 b t f 702 704 16 0 0 562 0 ininterval - - )); +DATA(insert OID = 584 ( "-" PGUID 0 l t f 0 700 700 0 0 0 0 float4um - - )); +DATA(insert OID = 585 ( "-" PGUID 0 l t f 0 701 701 0 0 0 0 float8um - - )); +DATA(insert OID = 586 ( "+" PGUID 0 b t f 700 700 700 586 0 0 0 float4pl - - )); +DATA(insert OID = 587 ( "-" PGUID 0 b t f 700 700 700 0 0 0 0 float4mi - - )); +DATA(insert OID = 588 ( "/" PGUID 0 b t f 700 700 700 0 0 0 0 float4div - - )); +DATA(insert OID = 589 ( "*" PGUID 0 b t f 700 700 700 589 0 0 0 float4mul - - )); +DATA(insert OID = 590 ( "@" PGUID 0 l t f 0 700 700 0 0 0 0 float4abs - - )); +DATA(insert OID = 591 ( "+" PGUID 0 b t f 701 701 701 591 0 0 0 float8pl - - )); +DATA(insert OID = 592 ( "-" PGUID 0 b t f 701 701 701 0 0 0 0 float8mi - - )); +DATA(insert OID = 593 ( "/" PGUID 0 b t f 701 701 701 0 0 0 0 
float8div - - )); +DATA(insert OID = 594 ( "*" PGUID 0 b t f 701 701 701 594 0 0 0 float8mul - - )); +DATA(insert OID = 595 ( "@" PGUID 0 l t f 0 701 701 0 0 0 0 float8abs - - )); +DATA(insert OID = 596 ( "|/" PGUID 0 l t f 0 701 701 0 0 0 0 dsqrt - - )); +DATA(insert OID = 597 ( "||/" PGUID 0 l t f 0 701 701 0 0 0 0 dcbrt - - )); +DATA(insert OID = 598 ( "%" PGUID 0 l t f 0 701 701 0 0 0 0 dtrunc - - )); +DATA(insert OID = 599 ( "%" PGUID 0 r t f 701 0 701 0 0 0 0 dround - - )); +DATA(insert OID = 601 ( ":" PGUID 0 l t f 0 701 701 0 0 0 0 dexp - - )); +DATA(insert OID = 602 ( ";" PGUID 0 l t f 0 701 701 0 0 0 0 dlog1 - - )); +DATA(insert OID = 603 ( "|" PGUID 0 l t f 0 704 702 0 0 0 0 intervalstart - - )); +DATA(insert OID = 606 ( "<#>" PGUID 0 b t f 702 702 704 0 0 0 0 mktinterval - - )); +DATA(insert OID = 607 ( "=" PGUID 0 b t t 26 26 16 607 608 97 97 oideq eqsel eqjoinsel )); +#define OIDEqualOperator 607 /* XXX planner/prep/semanopt.c crock */ +DATA(insert OID = 608 ( "<>" PGUID 0 b t f 26 26 16 608 607 0 0 oidne neqsel neqjoinsel )); +DATA(insert OID = 609 ( "<" PGUID 0 b t f 26 26 16 610 612 0 0 int4lt intltsel intltjoinsel )); +DATA(insert OID = 610 ( ">" PGUID 0 b t f 26 26 16 609 611 0 0 int4gt intgtsel intgtjoinsel )); +DATA(insert OID = 611 ( "<=" PGUID 0 b t f 26 26 16 612 610 0 0 int4le intltsel intltjoinsel )); +DATA(insert OID = 612 ( ">=" PGUID 0 b t f 26 26 16 611 609 0 0 int4ge intgtsel intgtjoinsel )); +DATA(insert OID = 620 ( "=" PGUID 0 b t t 700 700 16 620 621 622 622 float4eq eqsel eqjoinsel )); +DATA(insert OID = 621 ( "<>" PGUID 0 b t f 700 700 16 621 620 0 0 float4ne neqsel neqjoinsel )); +DATA(insert OID = 622 ( "<" PGUID 0 b t f 700 700 16 623 625 0 0 float4lt intltsel intltjoinsel )); +DATA(insert OID = 623 ( ">" PGUID 0 b t f 700 700 16 622 624 0 0 float4gt intgtsel intgtjoinsel )); +DATA(insert OID = 624 ( "<=" PGUID 0 b t f 700 700 16 625 623 0 0 float4le intltsel intltjoinsel )); +DATA(insert OID = 625 ( ">=" PGUID 0 b t f 700 700 16 624 622 0 0 float4ge intgtsel intgtjoinsel )); +DATA(insert OID = 626 ( "!!=" PGUID 0 b t f 23 19 16 0 0 0 0 int4notin "-" "-")); +DATA(insert OID = 627 ( "!!=" PGUID 0 b t f 26 19 16 0 0 0 0 oidnotin "-" "-")); +#define OIDNotInOperator 627 /* XXX planner/prep/semanopt.c crock */ +DATA(insert OID = 630 ( "<>" PGUID 0 b t f 18 18 16 630 92 0 0 charne neqsel neqjoinsel )); + +DATA(insert OID = 631 ( "<" PGUID 0 b t f 18 18 16 633 634 0 0 charlt intltsel intltjoinsel )); +DATA(insert OID = 632 ( "<=" PGUID 0 b t f 18 18 16 634 633 0 0 charle intltsel intltjoinsel )); +DATA(insert OID = 633 ( ">" PGUID 0 b t f 18 18 16 631 632 0 0 chargt intltsel intltjoinsel )); +DATA(insert OID = 634 ( ">=" PGUID 0 b t f 18 18 16 632 631 0 0 charge intltsel intltjoinsel )); + +DATA(insert OID = 635 ( "+" PGUID 0 b t f 18 18 18 0 0 0 0 charpl eqsel eqjoinsel )); +DATA(insert OID = 636 ( "-" PGUID 0 b t f 18 18 18 0 0 0 0 charmi eqsel eqjoinsel )); +DATA(insert OID = 637 ( "*" PGUID 0 b t f 18 18 18 0 0 0 0 charmul eqsel eqjoinsel )); +DATA(insert OID = 638 ( "/" PGUID 0 b t f 18 18 18 0 0 0 0 chardiv eqsel eqjoinsel )); + +DATA(insert OID = 639 ( "~" PGUID 0 b t f 19 25 16 0 640 0 0 nameregexeq eqsel eqjoinsel )); +DATA(insert OID = 640 ( "!~" PGUID 0 b t f 19 25 16 0 639 0 0 nameregexne neqsel neqjoinsel )); +DATA(insert OID = 641 ( "~" PGUID 0 b t f 25 25 16 0 642 0 0 textregexeq eqsel eqjoinsel )); +DATA(insert OID = 642 ( "!~" PGUID 0 b t f 25 25 16 0 641 0 0 textregexne eqsel eqjoinsel )); +DATA(insert OID = 643 ( "<>" PGUID 0 b t f 19 19 
16 643 93 0 0 namene neqsel neqjoinsel )); +DATA(insert OID = 644 ( "<>" PGUID 0 b t f 20 20 16 644 99 0 0 char16ne neqsel neqjoinsel )); +DATA(insert OID = 645 ( "<" PGUID 0 b t f 20 20 16 647 648 0 0 char16lt intltsel intltjoinsel )); +DATA(insert OID = 646 ( "<=" PGUID 0 b t f 20 20 16 648 647 0 0 char16le intltsel intltjoinsel )); +DATA(insert OID = 647 ( ">" PGUID 0 b t f 20 20 16 645 646 0 0 char16gt intltsel intltjoinsel )); +DATA(insert OID = 648 ( ">=" PGUID 0 b t f 20 20 16 646 645 0 0 char16ge intltsel intltjoinsel )); +DATA(insert OID = 649 ( "~" PGUID 0 b t f 20 25 16 0 650 0 0 char16regexeq intltsel intltjoinsel )); +DATA(insert OID = 650 ( "!~" PGUID 0 b t f 20 25 16 650 0 0 0 char16regexne intltsel intltjoinsel )); +DATA(insert OID = 651 ( "~~" PGUID 0 b t f 20 25 16 0 651 0 0 char16like eqsel eqjoinsel )); +DATA(insert OID = 652 ( "!~~" PGUID 0 b t f 20 25 16 651 0 0 0 char16nlike neqsel neqjoinsel )); + +DATA(insert OID = 660 ( "<" PGUID 0 b t f 19 19 16 662 663 0 0 namelt intltsel intltjoinsel )); +DATA(insert OID = 661 ( "<=" PGUID 0 b t f 19 19 16 663 662 0 0 namele intltsel intltjoinsel )); +DATA(insert OID = 662 ( ">" PGUID 0 b t f 19 19 16 660 661 0 0 namegt intltsel intltjoinsel )); +DATA(insert OID = 663 ( ">=" PGUID 0 b t f 19 19 16 661 660 0 0 namege intltsel intltjoinsel )); +DATA(insert OID = 664 ( "<" PGUID 0 b t f 25 25 16 666 667 0 0 text_lt intltsel intltjoinsel )); +DATA(insert OID = 665 ( "<=" PGUID 0 b t f 25 25 16 667 666 0 0 text_le intltsel intltjoinsel )); +DATA(insert OID = 666 ( ">" PGUID 0 b t f 25 25 16 664 665 0 0 text_gt intltsel intltjoinsel )); +DATA(insert OID = 667 ( ">=" PGUID 0 b t f 25 25 16 665 664 0 0 text_ge intltsel intltjoinsel )); + +DATA(insert OID = 670 ( "=" PGUID 0 b t f 701 701 16 670 671 0 0 float8eq eqsel eqjoinsel )); +DATA(insert OID = 671 ( "<>" PGUID 0 b t f 701 701 16 671 670 0 0 float8ne neqsel neqjoinsel )); +DATA(insert OID = 672 ( "<" PGUID 0 b t f 701 701 16 674 675 0 0 float8lt intltsel intltjoinsel )); +DATA(insert OID = 673 ( "<=" PGUID 0 b t f 701 701 16 675 674 0 0 float8le intltsel intltjoinsel )); +DATA(insert OID = 674 ( ">" PGUID 0 b t f 701 701 16 672 673 0 0 float8gt intltsel intltjoinsel )); +DATA(insert OID = 675 ( ">=" PGUID 0 b t f 701 701 16 673 672 0 0 float8ge intltsel intltjoinsel )); + +DATA(insert OID = 676 ( "<" PGUID 0 b t f 911 911 16 680 679 0 0 oidnamelt intltsel intltjoinsel )); +DATA(insert OID = 677 ( "<=" PGUID 0 b t f 911 911 16 679 680 0 0 oidnamele intltsel intltjoinsel )); +DATA(insert OID = 678 ( "=" PGUID 0 b t f 911 911 16 678 681 0 0 oidnameeq intltsel intltjoinsel )); +DATA(insert OID = 679 ( ">=" PGUID 0 b t f 911 911 16 677 676 0 0 oidnamege intltsel intltjoinsel )); +DATA(insert OID = 680 ( ">" PGUID 0 b t f 911 911 16 676 677 0 0 oidnamegt intltsel intltjoinsel )); +DATA(insert OID = 681 ( "<>" PGUID 0 b t f 911 911 16 681 678 0 0 oidnamene intltsel intltjoinsel )); + +DATA(insert OID = 697 ( "~" PGUID 0 b t f 411 25 16 0 698 0 0 char8regexeq eqsel eqjoinsel )); +DATA(insert OID = 698 ( "!~" PGUID 0 b t f 411 25 16 0 697 0 0 char8regexne neqsel neqjoinsel )); + +DATA(insert OID = 830 ( "<" PGUID 0 b t f 810 810 16 834 833 0 0 oidint2lt intltsel intltjoinsel )); +DATA(insert OID = 831 ( "<=" PGUID 0 b t f 810 810 16 833 834 0 0 oidint2le intltsel intltjoinsel )); +DATA(insert OID = 832 ( "=" PGUID 0 b t f 810 810 16 832 835 0 0 oidint2eq intltsel intltjoinsel )); +DATA(insert OID = 833 ( ">=" PGUID 0 b t f 810 810 16 831 830 0 0 oidint2ge intltsel intltjoinsel )); 
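
The DATA() lines above and below are purely positional: each token fills the next column of the CATALOG(pg_operator) declaration near the top of this header. As a reading aid only (not part of the distribution), the sketch below decomposes the float8 equality entry, OID 670, into named fields; the struct, the ExOid typedef and the field names are local stand-ins chosen to match that declaration and should be checked against it.

#include <stdio.h>

typedef unsigned int ExOid;              /* stand-in for the backend's Oid */

/* stand-in mirroring one pg_operator row, columns in declaration order */
struct ex_pg_operator {
    const char *oprname;                 /* operator symbol                 */
    const char *oprowner;                /* owner (PGUID in the DATA lines) */
    int         oprprec;                 /* precedence, 0 throughout        */
    char        oprkind;                 /* 'b' binary, 'l'/'r' unary       */
    char        oprisleft;               /* 't'/'f'                         */
    char        oprcanhash;              /* 't' on hashable '=' operators   */
    ExOid       oprleft, oprright;       /* operand type OIDs               */
    ExOid       oprresult;               /* result type OID                 */
    ExOid       oprcom, oprnegate;       /* commutator / negator operators  */
    ExOid       oprlsortop, oprrsortop;  /* sort operators (0 if none)      */
    const char *oprcode;                 /* implementing pg_proc function   */
    const char *oprrest, *oprjoin;       /* selectivity estimator procs     */
};

/* DATA(insert OID = 670 ( "=" PGUID 0 b t f 701 701 16 670 671 0 0
 *                          float8eq eqsel eqjoinsel ));                    */
static const struct ex_pg_operator float8_eq = {
    "=", "PGUID", 0, 'b', 't', 'f',
    701, 701, 16,                        /* float8 = float8 yields bool     */
    670, 671,                            /* commutator itself, negator <>   */
    0, 0,
    "float8eq", "eqsel", "eqjoinsel"
};

int
main(void)
{
    printf("operator 670: %u %s %u -> %u via %s\n",
           float8_eq.oprleft, float8_eq.oprname, float8_eq.oprright,
           float8_eq.oprresult, float8_eq.oprcode);
    return 0;
}
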
+DATA(insert OID = 834 ( ">" PGUID 0 b t f 810 810 16 830 831 0 0 oidint2gt intltsel intltjoinsel )); +DATA(insert OID = 835 ( "<>" PGUID 0 b t f 810 810 16 835 832 0 0 oidint2ne intltsel intltjoinsel )); + +DATA(insert OID = 839 ( "~" PGUID 0 b t f 409 25 16 0 841 0 0 char2regexeq eqsel eqjoinsel )); +DATA(insert OID = 841 ( "!~" PGUID 0 b t f 409 25 16 0 839 0 0 char2regexne neqsel neqjoinsel )); +DATA(insert OID = 840 ( "~" PGUID 0 b t f 410 25 16 0 842 0 0 char4regexeq eqsel eqjoinsel )); +DATA(insert OID = 842 ( "!~" PGUID 0 b t f 410 25 16 0 840 0 0 char4regexne neqsel neqjoinsel )); + +DATA(insert OID = 930 ( "<" PGUID 0 b t f 910 910 16 934 933 0 0 oidint4lt intltsel intltjoinsel )); +DATA(insert OID = 931 ( "<=" PGUID 0 b t f 910 910 16 933 934 0 0 oidint4le intltsel intltjoinsel )); +DATA(insert OID = 932 ( "=" PGUID 0 b t f 910 910 16 932 935 0 0 oidint4eq intltsel intltjoinsel )); +DATA(insert OID = 933 ( ">=" PGUID 0 b t f 910 910 16 931 930 0 0 oidint4ge intltsel intltjoinsel )); +DATA(insert OID = 934 ( ">" PGUID 0 b t f 910 910 16 930 931 0 0 oidint4gt intltsel intltjoinsel )); +DATA(insert OID = 935 ( "<>" PGUID 0 b t f 910 910 16 935 932 0 0 oidint4ne intltsel intltjoinsel )); + +DATA(insert OID = 965 ( "^" PGUID 0 b t f 701 701 701 0 0 0 0 dpow - - )); +DATA(insert OID = 966 ( "+" PGUID 0 b t f 1034 1033 1034 0 0 0 0 aclinsert intltsel intltjoinsel )); +DATA(insert OID = 967 ( "-" PGUID 0 b t f 1034 1033 1034 0 0 0 0 aclremove intltsel intltjoinsel )); +DATA(insert OID = 968 ( "~" PGUID 0 b t f 1034 1033 16 0 0 0 0 aclcontains intltsel intltjoinsel )); + +DATA(insert OID = 1054 ( "=" PGUID 0 b t t 1042 1042 16 1054 1057 1058 1058 bpchareq eqsel eqjoinsel )); +DATA(insert OID = 1055 ( "~" PGUID 0 b t f 1042 25 16 0 1056 0 0 textregexeq eqsel eqjoinsel )); +DATA(insert OID = 1056 ( "!~" PGUID 0 b t f 1042 25 16 0 1055 0 0 textregexne neqsel neqjoinsel )); +DATA(insert OID = 1057 ( "<>" PGUID 0 b t f 1042 1042 16 1057 1054 0 0 bpcharne neqsel neqjoinsel )); +DATA(insert OID = 1058 ( "<" PGUID 0 b t f 1042 1042 16 1060 1061 0 0 bpcharlt intltsel intltjoinsel )); +DATA(insert OID = 1059 ( "<=" PGUID 0 b t f 1042 1042 16 1061 1060 0 0 bpcharle intltsel intltjoinsel )); +DATA(insert OID = 1060 ( ">" PGUID 0 b t f 1042 1042 16 1058 1059 0 0 bpchargt intltsel intltjoinsel )); +DATA(insert OID = 1061 ( ">=" PGUID 0 b t f 1042 1042 16 1059 1058 0 0 bpcharge intltsel intltjoinsel )); + +DATA(insert OID = 1062 ( "=" PGUID 0 b t t 1043 1043 16 1062 1065 1066 1066 varchareq eqsel eqjoinsel )); +DATA(insert OID = 1063 ( "~" PGUID 0 b t f 1043 25 16 0 1064 0 0 textregexeq eqsel eqjoinsel )); +DATA(insert OID = 1064 ( "!~" PGUID 0 b t f 1043 25 16 0 1063 0 0 textregexne neqsel neqjoinsel )); +DATA(insert OID = 1065 ( "<>" PGUID 0 b t f 1043 1043 16 1065 1062 0 0 varcharne neqsel neqjoinsel )); +DATA(insert OID = 1066 ( "<" PGUID 0 b t f 1043 1043 16 1068 1069 0 0 varcharlt intltsel intltjoinsel )); +DATA(insert OID = 1067 ( "<=" PGUID 0 b t f 1043 1043 16 1069 1068 0 0 varcharle intltsel intltjoinsel )); +DATA(insert OID = 1068 ( ">" PGUID 0 b t f 1043 1043 16 1066 1067 0 0 varchargt intltsel intltjoinsel )); +DATA(insert OID = 1069 ( ">=" PGUID 0 b t f 1043 1043 16 1067 1066 0 0 varcharge intltsel intltjoinsel )); + +DATA(insert OID = 1093 ( "=" PGUID 0 b t t 1082 1082 16 1093 1094 1095 1095 date_eq eqsel eqjoinsel )); +DATA(insert OID = 1094 ( "<>" PGUID 0 b t f 1082 1082 16 1094 1093 0 0 date_ne neqsel neqjoinsel )); +DATA(insert OID = 1095 ( "<" PGUID 0 b t f 1082 1082 16 1097 1098 0 
0 date_lt intltsel intltjoinsel )); +DATA(insert OID = 1096 ( "<=" PGUID 0 b t f 1082 1082 16 1098 1097 0 0 date_le intltsel intltjoinsel )); +DATA(insert OID = 1097 ( ">" PGUID 0 b t f 1082 1082 16 1095 1096 0 0 date_gt intltsel intltjoinsel )); +DATA(insert OID = 1098 ( ">=" PGUID 0 b t f 1082 1082 16 1096 1065 0 0 date_ge intltsel intltjoinsel )); + +DATA(insert OID = 1108 ( "=" PGUID 0 b t t 1083 1083 16 1108 1109 1110 1110 time_eq eqsel eqjoinsel )); +DATA(insert OID = 1109 ( "<>" PGUID 0 b t f 1083 1083 16 1109 1108 0 0 time_ne neqsel neqjoinsel )); +DATA(insert OID = 1110 ( "<" PGUID 0 b t f 1083 1083 16 1112 1113 0 0 time_lt intltsel intltjoinsel )); +DATA(insert OID = 1111 ( "<=" PGUID 0 b t f 1083 1083 16 1113 1112 0 0 time_le intltsel intltjoinsel )); +DATA(insert OID = 1112 ( ">" PGUID 0 b t f 1083 1083 16 1110 1111 0 0 time_gt intltsel intltjoinsel )); +DATA(insert OID = 1113 ( ">=" PGUID 0 b t f 1083 1083 16 1111 1065 0 0 time_ge intltsel intltjoinsel )); + +/* float48 operators */ +DATA(insert OID = 1116 ( "+" PGUID 0 b t f 700 701 701 1116 0 0 0 float48pl - - )); +DATA(insert OID = 1117 ( "-" PGUID 0 b t f 700 701 701 0 0 0 0 float48mi - - )); +DATA(insert OID = 1118 ( "/" PGUID 0 b t f 700 701 701 0 0 0 0 float48div - - )); +DATA(insert OID = 1119 ( "*" PGUID 0 b t f 700 701 701 1119 0 0 0 float48mul - - )); +DATA(insert OID = 1120 ( "=" PGUID 0 b t t 700 701 16 1120 1121 1122 1122 float48eq eqsel eqjoinsel )); +DATA(insert OID = 1121 ( "<>" PGUID 0 b t f 700 701 16 1121 1120 0 0 float48ne neqsel neqjoinsel )); +DATA(insert OID = 1122 ( "<" PGUID 0 b t f 700 701 16 1123 1125 0 0 float48lt intltsel intltjoinsel )); +DATA(insert OID = 1123 ( ">" PGUID 0 b t f 700 701 16 1122 1124 0 0 float48gt intgtsel intgtjoinsel )); +DATA(insert OID = 1124 ( "<=" PGUID 0 b t f 700 701 16 1125 1123 0 0 float48le intltsel intltjoinsel )); +DATA(insert OID = 1125 ( ">=" PGUID 0 b t f 700 701 16 1124 1122 0 0 float48ge intgtsel intgtjoinsel )); + +/* float84 operators */ +DATA(insert OID = 1126 ( "+" PGUID 0 b t f 701 700 701 1126 0 0 0 float84pl - - )); +DATA(insert OID = 1127 ( "-" PGUID 0 b t f 701 700 701 0 0 0 0 float84mi - - )); +DATA(insert OID = 1128 ( "/" PGUID 0 b t f 701 700 701 0 0 0 0 float84div - - )); +DATA(insert OID = 1129 ( "*" PGUID 0 b t f 701 700 701 1129 0 0 0 float84mul - - )); +DATA(insert OID = 1130 ( "=" PGUID 0 b t t 701 700 16 1130 1131 1132 1132 float84eq eqsel eqjoinsel )); +DATA(insert OID = 1131 ( "<>" PGUID 0 b t f 701 700 16 1131 1130 0 0 float84ne neqsel neqjoinsel )); +DATA(insert OID = 1132 ( "<" PGUID 0 b t f 701 700 16 1133 1135 0 0 float84lt intltsel intltjoinsel )); +DATA(insert OID = 1133 ( ">" PGUID 0 b t f 701 700 16 1132 1134 0 0 float84gt intgtsel intgtjoinsel )); +DATA(insert OID = 1134 ( "<=" PGUID 0 b t f 701 700 16 1135 1133 0 0 float84le intltsel intltjoinsel )); +DATA(insert OID = 1135 ( ">=" PGUID 0 b t f 701 700 16 1134 1132 0 0 float84ge intgtsel intgtjoinsel )); + +/* int4 and oid equality */ +DATA(insert OID = 1136 ( "=" PGUID 0 b t t 23 26 16 1137 0 0 0 int4eqoid eqsel eqjoinsel )); +DATA(insert OID = 1137 ( "=" PGUID 0 b t t 26 23 16 1136 0 0 0 oideqint4 eqsel eqjoinsel )); + +/* LIKE hacks by Keith Parks. 
*/ +DATA(insert OID = 1201 ( "~~" PGUID 0 b t f 409 25 16 0 1202 0 0 char2like eqsel eqjoinsel )); +DATA(insert OID = 1202 ( "!~~" PGUID 0 b t f 409 25 16 0 1201 0 0 char2nlike neqsel neqjoinsel )); +DATA(insert OID = 1203 ( "~~" PGUID 0 b t f 410 25 16 0 1204 0 0 char4like eqsel eqjoinsel )); +DATA(insert OID = 1204 ( "!~~" PGUID 0 b t f 410 25 16 0 1203 0 0 char4nlike neqsel neqjoinsel )); +DATA(insert OID = 1205 ( "~~" PGUID 0 b t f 411 25 16 0 1206 0 0 char8like eqsel eqjoinsel )); +DATA(insert OID = 1206 ( "!~~" PGUID 0 b t f 411 25 16 0 1205 0 0 char8nlike neqsel neqjoinsel )); +DATA(insert OID = 1207 ( "~~" PGUID 0 b t f 19 25 16 0 1208 0 0 namelike eqsel eqjoinsel )); +DATA(insert OID = 1208 ( "!~~" PGUID 0 b t f 19 25 16 0 1207 0 0 namenlike neqsel neqjoinsel )); +DATA(insert OID = 1209 ( "~~" PGUID 0 b t f 25 25 16 0 1210 0 0 textlike eqsel eqjoinsel )); +DATA(insert OID = 1210 ( "!~~" PGUID 0 b t f 25 25 16 0 1209 0 0 textnlike neqsel neqjoinsel )); +DATA(insert OID = 1211 ( "~~" PGUID 0 b t f 1042 25 16 0 1212 0 0 textlike eqsel eqjoinsel )); +DATA(insert OID = 1212 ( "!~~" PGUID 0 b t f 1042 25 16 0 1211 0 0 textnlike neqsel neqjoinsel )); +DATA(insert OID = 1213 ( "~~" PGUID 0 b t f 1043 25 16 0 1214 0 0 textlike eqsel eqjoinsel )); +DATA(insert OID = 1214 ( "!~~" PGUID 0 b t f 1043 25 16 0 1213 0 0 textnlike neqsel neqjoinsel )); +DATA(insert OID = 1215 ( "~~" PGUID 0 b t f 20 25 16 0 1216 0 0 char16like eqsel eqjoinsel )); +DATA(insert OID = 1216 ( "!~~" PGUID 0 b t f 20 25 16 0 1215 0 0 char16nlike neqsel neqjoinsel )); + +/* case-insensitive LIKE hacks */ +DATA(insert OID = 1220 ( "~*" PGUID 0 b t f 409 25 16 0 1221 0 0 char2icregexeq eqsel eqjoinsel )); +DATA(insert OID = 1221 ( "!~*" PGUID 0 b t f 409 25 16 0 1220 0 0 char2icregexne neqsel neqjoinsel )); +DATA(insert OID = 1222 ( "~*" PGUID 0 b t f 410 25 16 0 1223 0 0 char4icregexeq eqsel eqjoinsel )); +DATA(insert OID = 1223 ( "!~*" PGUID 0 b t f 410 25 16 0 1222 0 0 char4icregexne neqsel neqjoinsel )); +DATA(insert OID = 1224 ( "~*" PGUID 0 b t f 411 25 16 0 1225 0 0 char8icregexeq eqsel eqjoinsel )); +DATA(insert OID = 1225 ( "!~*" PGUID 0 b t f 411 25 16 0 1224 0 0 char8icregexne neqsel neqjoinsel )); +DATA(insert OID = 1226 ( "~*" PGUID 0 b t f 19 25 16 0 1227 0 0 nameicregexeq eqsel eqjoinsel )); +DATA(insert OID = 1227 ( "!~*" PGUID 0 b t f 19 25 16 0 1226 0 0 nameicregexne neqsel neqjoinsel )); +DATA(insert OID = 1228 ( "~*" PGUID 0 b t f 25 25 16 0 1229 0 0 texticregexeq eqsel eqjoinsel )); +DATA(insert OID = 1229 ( "!~*" PGUID 0 b t f 25 25 16 0 1228 0 0 texticregexne eqsel eqjoinsel )); +DATA(insert OID = 1230 ( "~*" PGUID 0 b t f 20 25 16 0 1231 0 0 char16icregexeq eqsel eqjoinsel )); +DATA(insert OID = 1231 ( "!~*" PGUID 0 b t f 20 25 16 0 1230 0 0 char16icregexne neqsel neqjoinsel )); + + + +/* + * function prototypes + */ +extern void OperatorCreate(char *operatorName, + char *leftTypeName, + char *rightTypeName, + char *procedureName, + uint16 precedence, + bool isLeftAssociative, + char *commutatorName, + char *negatorName, + char *restrictionName, + char *joinName, + bool canHash, + char *leftSortName, + char *rightSortName); + +#endif /* PG_OPERATOR_H */ diff --git a/src/backend/catalog/pg_parg.h b/src/backend/catalog/pg_parg.h new file mode 100644 index 00000000000..aa088278455 --- /dev/null +++ b/src/backend/catalog/pg_parg.h @@ -0,0 +1,116 @@ +/*------------------------------------------------------------------------- + * + * pg_parg.h-- + * definition of the system "parg" relation (pg_parg) + * 
along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_parg.h,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_PARG_H +#define PG_PARG_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_parg definition. cpp turns this into + * typedef struct FormData_pg_parg + * ---------------- + */ +CATALOG(pg_parg) { + Oid parproid; + int2 parnum; + char parbound; + Oid partype; +} FormData_pg_parg; + +/* ---------------- + * Form_pg_parg corresponds to a pointer to a tuple with + * the format of pg_parg relation. + * ---------------- + */ +typedef FormData_pg_parg *Form_pg_parg; + +/* ---------------- + * compiler constants for pg_parg + * ---------------- + */ +#define Natts_pg_parg 4 +#define Anum_pg_parg_parproid 1 +#define Anum_pg_parg_parnum 2 +#define Anum_pg_parg_parbound 3 +#define Anum_pg_parg_partype 4 + +/* ---------------- + * initial contents of pg_parg + * ---------------- + */ + +DATA(insert OID = 0 ( 28 1 - 23 )); +DATA(insert OID = 0 ( 29 1 - 16 )); +DATA(insert OID = 0 ( 30 1 - 23 )); +DATA(insert OID = 0 ( 31 1 - 17 )); +DATA(insert OID = 0 ( 32 1 - 23 )); +DATA(insert OID = 0 ( 33 1 - 18 )); +DATA(insert OID = 0 ( 34 1 - 23 )); +DATA(insert OID = 0 ( 35 1 - 19 )); +DATA(insert OID = 0 ( 36 1 - 23 )); +DATA(insert OID = 0 ( 37 1 - 20 )); +DATA(insert OID = 0 ( 38 1 - 23 )); +DATA(insert OID = 0 ( 39 1 - 21 )); +DATA(insert OID = 0 ( 40 1 - 23 )); +DATA(insert OID = 0 ( 41 1 - 22 )); +DATA(insert OID = 0 ( 42 1 - 23 )); +DATA(insert OID = 0 ( 43 1 - 23 )); +DATA(insert OID = 0 ( 44 1 - 23 )); +DATA(insert OID = 0 ( 45 1 - 24 )); +DATA(insert OID = 0 ( 46 1 - 23 )); +DATA(insert OID = 0 ( 47 1 - 25 )); +DATA(insert OID = 0 ( 50 1 - 23 )); +DATA(insert OID = 0 ( 50 2 - 23 )); +DATA(insert OID = 0 ( 50 3 - 23 )); +DATA(insert OID = 0 ( 51 1 - 23 )); +DATA(insert OID = 0 ( 52 1 - 23 )); +DATA(insert OID = 0 ( 52 2 - 23 )); +DATA(insert OID = 0 ( 52 3 - 23 )); +DATA(insert OID = 0 ( 52 4 - 23 )); +DATA(insert OID = 0 ( 53 1 - 23 )); +DATA(insert OID = 0 ( 54 1 - 23 )); +DATA(insert OID = 0 ( 54 2 - 23 )); +DATA(insert OID = 0 ( 55 1 - 23 )); +DATA(insert OID = 0 ( 55 2 - 23 )); +DATA(insert OID = 0 ( 56 1 - 23 )); +DATA(insert OID = 0 ( 56 2 - 23 )); +DATA(insert OID = 0 ( 57 1 - 23 )); +DATA(insert OID = 0 ( 57 2 - 23 )); +DATA(insert OID = 0 ( 57 3 - 23 )); +DATA(insert OID = 0 ( 60 1 - 16 )); +DATA(insert OID = 0 ( 60 2 - 16 )); +DATA(insert OID = 0 ( 61 1 - 18 )); +DATA(insert OID = 0 ( 61 2 - 18 )); +DATA(insert OID = 0 ( 63 1 - 21 )); +DATA(insert OID = 0 ( 63 2 - 21 )); +DATA(insert OID = 0 ( 64 1 - 21 )); +DATA(insert OID = 0 ( 64 2 - 21 )); +DATA(insert OID = 0 ( 65 1 - 23 )); +DATA(insert OID = 0 ( 65 2 - 23 )); +DATA(insert OID = 0 ( 66 1 - 23 )); +DATA(insert OID = 0 ( 66 2 - 23 )); +DATA(insert OID = 0 ( 67 1 - 25 )); +DATA(insert OID = 0 ( 67 2 - 25 )); + +#endif /* PG_PARG_H */ diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c new file mode 100644 index 00000000000..d8273efcce8 --- /dev/null +++ b/src/backend/catalog/pg_proc.c @@ -0,0 +1,265 
@@ +/*------------------------------------------------------------------------- + * + * pg_proc.c-- + * routines to support manipulation of the pg_proc relation + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/catalog/pg_proc.c,v 1.1.1.1 1996/07/09 06:21:17 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "utils/rel.h" +#include "fmgr.h" +#include "utils/elog.h" +#include "utils/builtins.h" +#include "utils/sets.h" + +#include "nodes/pg_list.h" + +#include "catalog/catname.h" +#include "utils/syscache.h" +#include "catalog/pg_proc.h" +#include "catalog/indexing.h" +#include "tcop/dest.h" +#include "parser/parse_query.h" +#include "tcop/tcopprot.h" +#include "catalog/pg_type.h" +#include "parser/catalog_utils.h" +#include "utils/lsyscache.h" +#include "optimizer/internal.h" +#include "optimizer/planner.h" + +/* ---------------------------------------------------------------- + * ProcedureDefine + * ---------------------------------------------------------------- + */ +Oid +ProcedureCreate(char *procedureName, + bool returnsSet, + char *returnTypeName, + char *languageName, + char *prosrc, + char *probin, + bool canCache, + bool trusted, + int32 byte_pct, + int32 perbyte_cpu, + int32 percall_cpu, + int32 outin_ratio, + List *argList, + CommandDest dest) +{ + register i; + Relation rdesc; + HeapTuple tup; + bool defined; + uint16 parameterCount; + char nulls[ Natts_pg_proc ]; + Datum values[ Natts_pg_proc ]; + Oid languageObjectId; + Oid typeObjectId; + List *x; + QueryTreeList *querytree_list; + List *plan_list; + Oid typev[8]; + Oid relid; + Oid toid; + text *prosrctext; + TupleDesc tupDesc; + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(PointerIsValid(prosrc)); + Assert(PointerIsValid(probin)); + + parameterCount = 0; + memset(typev, 0, 8 * sizeof(Oid)); + foreach (x, argList) { + Value *t = lfirst(x); + + if (parameterCount == 8) + elog(WARN, "Procedures cannot take more than 8 arguments"); + + if (strcmp(strVal(t), "opaque") == 0) { + if (strcmp(languageName, "sql") == 0) { + elog(WARN, "ProcedureDefine: sql functions cannot take type \"opaque\""); + } + else + toid = 0; + } else { + toid = TypeGet(strVal(t), &defined); + + if (!OidIsValid(toid)) { + elog(WARN, "ProcedureCreate: arg type '%s' is not defined", + strVal(t)); + } + + if (!defined) { + elog(NOTICE, "ProcedureCreate: arg type '%s' is only a shell", + strVal(t)); + } + } + + typev[parameterCount++] = toid; + } + + tup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(procedureName), + UInt16GetDatum(parameterCount), + PointerGetDatum(typev), + 0); + + if (HeapTupleIsValid(tup)) + elog(WARN, "ProcedureCreate: procedure %s already exists with same arguments", + procedureName); + + if (!strcmp(languageName, "sql")) { + /* If this call is defining a set, check if the set is already + * defined by looking to see whether this call's function text + * matches a function already in pg_proc. If so just return the + * OID of the existing set. 
+ */ + if (!strcmp(procedureName, GENERICSETNAME)) { + prosrctext = textin(prosrc); + tup = SearchSysCacheTuple(PROSRC, + PointerGetDatum(prosrctext), + 0,0,0); + if (HeapTupleIsValid(tup)) + return tup->t_oid; + } + } + + tup = SearchSysCacheTuple(LANNAME, + PointerGetDatum(languageName), + 0,0,0); + + if (!HeapTupleIsValid(tup)) + elog(WARN, "ProcedureCreate: no such language %s", + languageName); + + languageObjectId = tup->t_oid; + + if (strcmp(returnTypeName, "opaque") == 0) { + if (strcmp(languageName, "sql") == 0) { + elog(WARN, "ProcedureCreate: sql functions cannot return type \"opaque\""); + } + else + typeObjectId = 0; + } + + else { + typeObjectId = TypeGet(returnTypeName, &defined); + + if (!OidIsValid(typeObjectId)) { + elog(NOTICE, "ProcedureCreate: type '%s' is not yet defined", + returnTypeName); +#if 0 + elog(NOTICE, "ProcedureCreate: creating a shell for type '%s'", + returnTypeName); +#endif + typeObjectId = TypeShellMake(returnTypeName); + if (!OidIsValid(typeObjectId)) { + elog(WARN, "ProcedureCreate: could not create type '%s'", + returnTypeName); + } + } + + else if (!defined) { + elog(NOTICE, "ProcedureCreate: return type '%s' is only a shell", + returnTypeName); + } + } + + /* don't allow functions of complex types that have the same name as + existing attributes of the type */ + if (parameterCount == 1 && + (toid = TypeGet(strVal(lfirst(argList)), &defined)) && + defined && + (relid = typeid_get_relid(toid)) != 0 && + get_attnum(relid, procedureName) != InvalidAttrNumber) + elog(WARN, "method %s already an attribute of type %s", + procedureName, strVal(lfirst(argList))); + + + /* + * If this is a postquel procedure, we parse it here in order to + * be sure that it contains no syntax errors. We should store + * the plan in an Inversion file for use later, but for now, we + * just store the procedure's text in the prosrc attribute. + */ + + if (strcmp(languageName, "sql") == 0) { + plan_list = pg_plan(prosrc, typev, parameterCount, + &querytree_list, dest); + + /* typecheck return value */ + pg_checkretval(typeObjectId, querytree_list); + } + + for (i = 0; i < Natts_pg_proc; ++i) { + nulls[i] = ' '; + values[i] = (Datum)NULL; + } + + i = 0; + values[i++] = PointerGetDatum(procedureName); + values[i++] = Int32GetDatum(GetUserId()); + values[i++] = ObjectIdGetDatum(languageObjectId); + + /* XXX isinherited is always false for now */ + + values[i++] = Int8GetDatum((bool) 0); + + /* XXX istrusted is always false for now */ + + values[i++] = Int8GetDatum(trusted); + values[i++] = Int8GetDatum(canCache); + values[i++] = UInt16GetDatum(parameterCount); + values[i++] = Int8GetDatum(returnsSet); + values[i++] = ObjectIdGetDatum(typeObjectId); + + values[i++] = (Datum) typev; + /* + * The following assignments of constants are made. The real values + * will have to be extracted from the arglist someday soon. 
+ */ + values[i++] = Int32GetDatum(byte_pct); /* probyte_pct */ + values[i++] = Int32GetDatum(perbyte_cpu); /* properbyte_cpu */ + values[i++] = Int32GetDatum(percall_cpu); /* propercall_cpu */ + values[i++] = Int32GetDatum(outin_ratio); /* prooutin_ratio */ + + values[i++] = (Datum)fmgr(TextInRegProcedure, prosrc); /* prosrc */ + values[i++] = (Datum)fmgr(TextInRegProcedure, probin); /* probin */ + + rdesc = heap_openr(ProcedureRelationName); + + tupDesc = rdesc->rd_att; + tup = heap_formtuple(tupDesc, + values, + nulls); + + heap_insert(rdesc, tup); + + if (RelationGetRelationTupleForm(rdesc)->relhasindex) + { + Relation idescs[Num_pg_proc_indices]; + + CatalogOpenIndices(Num_pg_proc_indices, Name_pg_proc_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_proc_indices, rdesc, tup); + CatalogCloseIndices(Num_pg_proc_indices, idescs); + } + heap_close(rdesc); + return tup->t_oid; +} + diff --git a/src/backend/catalog/pg_proc.h b/src/backend/catalog/pg_proc.h new file mode 100644 index 00000000000..f2828394192 --- /dev/null +++ b/src/backend/catalog/pg_proc.h @@ -0,0 +1,769 @@ +/*------------------------------------------------------------------------- + * + * pg_proc.h-- + * definition of the system "procedure" relation (pg_proc) + * along with the relation's initial contents. + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_proc.h,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + * NOTES + * The script catalog/genbki.sh reads this file and generates .bki + * information from the DATA() statements. utils/Gen_fmgrtab.sh + * generates fmgr.h and fmgrtab.c the same way. + * + * XXX do NOT break up DATA() statements into multiple lines! + * the scripts are not as smart as you might think... + * XXX (eg. #if 0 #endif won't do what you think) + * + *------------------------------------------------------------------------- + */ +#ifndef PG_PROC_H +#define PG_PROC_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" +#include "nodes/pg_list.h" +#include "tcop/dest.h" + +/* ---------------- + * pg_proc definition. cpp turns this into + * typedef struct FormData_pg_proc + * ---------------- + */ +CATALOG(pg_proc) BOOTSTRAP { + NameData proname; + Oid proowner; + Oid prolang; + bool proisinh; + bool proistrusted; + bool proiscachable; + int2 pronargs; + bool proretset; + Oid prorettype; + oid8 proargtypes; + int4 probyte_pct; + int4 properbyte_cpu; + int4 propercall_cpu; + int4 prooutin_ratio; + text prosrc; /* VARIABLE LENGTH FIELD */ + bytea probin; /* VARIABLE LENGTH FIELD */ +} FormData_pg_proc; + +/* ---------------- + * Form_pg_proc corresponds to a pointer to a tuple with + * the format of pg_proc relation. 
+ * ---------------- + */ +typedef FormData_pg_proc *Form_pg_proc; + +/* ---------------- + * compiler constants for pg_proc + * ---------------- + */ +#define Natts_pg_proc 16 +#define Anum_pg_proc_proname 1 +#define Anum_pg_proc_proowner 2 +#define Anum_pg_proc_prolang 3 +#define Anum_pg_proc_proisinh 4 +#define Anum_pg_proc_proistrusted 5 +#define Anum_pg_proc_proiscachable 6 +#define Anum_pg_proc_pronargs 7 +#define Anum_pg_proc_proretset 8 +#define Anum_pg_proc_prorettype 9 +#define Anum_pg_proc_proargtypes 10 +#define Anum_pg_proc_probyte_pct 11 +#define Anum_pg_proc_properbyte_cpu 12 +#define Anum_pg_proc_propercall_cpu 13 +#define Anum_pg_proc_prooutin_ratio 14 +#define Anum_pg_proc_prosrc 15 +#define Anum_pg_proc_probin 16 + +/* ---------------- + * initial contents of pg_proc + * ---------------- + */ + +/* keep the following ordered by OID so that later changes can be made easier*/ + +/* OIDS 1 - 99 */ +DATA(insert OID = 28 ( boolin PGUID 11 f t f 1 f 16 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 29 ( boolout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 30 ( byteain PGUID 11 f t f 1 f 17 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 31 ( byteaout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 32 ( charin PGUID 11 f t f 1 f 18 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 33 ( charout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 34 ( namein PGUID 11 f t f 1 f 19 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 35 ( nameout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 36 ( char16in PGUID 11 f t f 1 f 19 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 37 ( char16out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 38 ( int2in PGUID 11 f t f 1 f 21 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 39 ( int2out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 40 ( int28in PGUID 11 f t f 1 f 22 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 41 ( int28out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 42 ( int4in PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 43 ( int4out PGUID 11 f t f 1 f 19 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 44 ( regprocin PGUID 11 f t f 1 f 24 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 45 ( regprocout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 46 ( textin PGUID 11 f t f 1 f 25 "0" 100 0 0 100 foo bar )); +#define TextInRegProcedure 46 + +DATA(insert OID = 47 ( textout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 48 ( tidin PGUID 11 f t f 1 f 27 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 49 ( tidout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 50 ( xidin PGUID 11 f t f 1 f 28 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 51 ( xidout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 52 ( cidin PGUID 11 f t f 1 f 29 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 53 ( cidout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 54 ( oid8in PGUID 11 f t f 1 f 30 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 55 ( oid8out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 60 ( booleq PGUID 11 f t f 2 f 16 "16 16" 100 0 0 100 foo bar )); +DATA(insert OID = 61 ( chareq PGUID 11 f t f 2 f 16 "18 18" 100 0 0 100 foo bar )); +#define CharacterEqualRegProcedure 61 + +DATA(insert OID = 62 ( nameeq PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +#define 
NameEqualRegProcedure 62 + +DATA(insert OID = 63 ( int2eq PGUID 11 f t f 2 f 16 "21 21" 100 0 0 100 foo bar )); +#define Integer16EqualRegProcedure 63 + +DATA(insert OID = 64 ( int2lt PGUID 11 f t f 2 f 16 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 65 ( int4eq PGUID 11 f t f 2 f 16 "23 23" 100 0 0 100 foo bar )); +#define Integer32EqualRegProcedure 65 + +DATA(insert OID = 66 ( int4lt PGUID 11 f t f 2 f 16 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 67 ( texteq PGUID 11 f t f 2 f 16 "25 25" 100 0 0 0 foo bar )); +#define TextEqualRegProcedure 67 + +DATA(insert OID = 68 ( xideq PGUID 11 f t f 2 f 16 "28 28" 100 0 0 100 foo bar )); +DATA(insert OID = 69 ( cideq PGUID 11 f t f 2 f 16 "29 29" 100 0 0 100 foo bar )); +DATA(insert OID = 70 ( charne PGUID 11 f t f 2 f 16 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 71 ( charlt PGUID 11 f t f 2 f 16 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 72 ( charle PGUID 11 f t f 2 f 16 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 73 ( chargt PGUID 11 f t f 2 f 16 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 74 ( charge PGUID 11 f t f 2 f 16 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 75 ( charpl PGUID 11 f t f 2 f 18 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 76 ( charmi PGUID 11 f t f 2 f 18 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 77 ( charmul PGUID 11 f t f 2 f 18 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 78 ( chardiv PGUID 11 f t f 2 f 18 "18 18" 100 0 0 100 foo bar )); + +DATA(insert OID = 79 ( nameregexeq PGUID 11 f t f 2 f 16 "19 25" 100 0 0 100 foo bar )); +DATA(insert OID = 80 ( nameregexne PGUID 11 f t f 2 f 16 "19 25" 100 0 0 100 foo bar )); +DATA(insert OID = 81 ( textregexeq PGUID 11 f t f 2 f 16 "25 25" 100 0 1 0 foo bar )); +DATA(insert OID = 82 ( textregexne PGUID 11 f t f 2 f 16 "25 25" 100 0 1 0 foo bar )); +DATA(insert OID = 83 ( textcat PGUID 11 f t f 2 f 25 "25 25" 100 0 1 0 foo bar )); +DATA(insert OID = 84 ( boolne PGUID 11 f t f 2 f 16 "16 16" 100 0 0 100 foo bar )); + +DATA(insert OID = 97 ( rtsel PGUID 11 f t f 7 f 701 "26 26 21 0 23 23 26" 100 0 0 100 foo bar )); +DATA(insert OID = 98 ( rtnpage PGUID 11 f t f 7 f 701 "26 26 21 0 23 23 26" 100 0 0 100 foo bar )); +DATA(insert OID = 99 ( btreesel PGUID 11 f t f 7 f 701 "26 26 21 0 23 23 26" 100 0 0 100 foo bar )); + +/* OIDS 100 - 199 */ + +DATA(insert OID = 100 ( btreenpage PGUID 11 f t f 7 f 701 "26 26 21 0 23 23 26" 100 0 0 100 foo bar )); +DATA(insert OID = 101 ( eqsel PGUID 11 f t f 5 f 701 "26 26 21 0 23" 100 0 0 100 foo bar )); +#define EqualSelectivityProcedure 101 + +DATA(insert OID = 102 ( neqsel PGUID 11 f t f 5 f 701 "26 26 21 0 23" 100 0 0 100 foo bar )); +DATA(insert OID = 103 ( intltsel PGUID 11 f t f 5 f 701 "26 26 21 0 23" 100 0 0 100 foo bar )); +DATA(insert OID = 104 ( intgtsel PGUID 11 f t f 5 f 701 "26 26 21 0 23" 100 0 0 100 foo bar )); +DATA(insert OID = 105 ( eqjoinsel PGUID 11 f t f 5 f 701 "26 26 21 26 21" 100 0 0 100 foo bar )); +DATA(insert OID = 106 ( neqjoinsel PGUID 11 f t f 5 f 701 "26 26 21 26 21" 100 0 0 100 foo bar )); +DATA(insert OID = 107 ( intltjoinsel PGUID 11 f t f 5 f 701 "26 26 21 26 21" 100 0 0 100 foo bar )); +DATA(insert OID = 108 ( intgtjoinsel PGUID 11 f t f 5 f 701 "26 26 21 26 21" 100 0 0 100 foo bar )); + + + +DATA(insert OID = 117 ( point_in PGUID 11 f t f 1 f 600 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 118 ( point_out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 119 ( lseg_in PGUID 11 f t f 1 f 601 "0" 100 0 0 100 foo bar )); 
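
Each pg_proc DATA() line is positional in the same way, filling the sixteen columns of the FormData_pg_proc declaration and the Anum_pg_proc_* constants above in order. A minimal standalone sketch, transcribing the int4eq entry (OID 65) from this group; the struct and the ExOid typedef are local stand-ins so the decomposition can be compiled and inspected outside the backend.

#include <stdio.h>

typedef unsigned int ExOid;          /* stand-in for the backend's Oid */

/* stand-in for one pg_proc row, columns in FormData_pg_proc order */
struct ex_pg_proc {
    const char *proname;             /* 1  function name                   */
    const char *proowner;            /* 2  PGUID                           */
    ExOid       prolang;             /* 3  language OID (11 for built-ins) */
    char        proisinh;            /* 4                                  */
    char        proistrusted;        /* 5                                  */
    char        proiscachable;       /* 6                                  */
    int         pronargs;            /* 7  number of arguments             */
    char        proretset;           /* 8  returns a set?                  */
    ExOid       prorettype;          /* 9  result type OID                 */
    const char *proargtypes;         /* 10 oid8 vector of argument types   */
    int         probyte_pct;         /* 11                                 */
    int         properbyte_cpu;      /* 12                                 */
    int         propercall_cpu;      /* 13                                 */
    int         prooutin_ratio;      /* 14                                 */
    const char *prosrc;              /* 15 "foo" placeholder here          */
    const char *probin;              /* 16 "bar" placeholder here          */
};

/* DATA(insert OID = 65 ( int4eq PGUID 11 f t f 2 f 16 "23 23"
 *                        100 0 0 100 foo bar ));                          */
static const struct ex_pg_proc int4eq_row = {
    "int4eq", "PGUID", 11, 'f', 't', 'f',
    2, 'f', 16, "23 23",
    100, 0, 0, 100,
    "foo", "bar"
};

int
main(void)
{
    printf("%s(%s) -> type %u, %d args, language %u\n",
           int4eq_row.proname, int4eq_row.proargtypes,
           int4eq_row.prorettype, int4eq_row.pronargs, int4eq_row.prolang);
    return 0;
}
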
+DATA(insert OID = 120 ( lseg_out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 121 ( path_in PGUID 11 f t f 1 f 602 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 122 ( path_out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 123 ( box_in PGUID 11 f t f 1 f 603 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 124 ( box_out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 125 ( box_overlap PGUID 11 f t f 2 f 16 "603 603" 100 1 0 100 foo bar )); +DATA(insert OID = 126 ( box_ge PGUID 11 f t f 2 f 16 "603 603" 100 1 0 100 foo bar )); +DATA(insert OID = 127 ( box_gt PGUID 11 f t f 2 f 16 "603 603" 100 1 0 100 foo bar )); +DATA(insert OID = 128 ( box_eq PGUID 11 f t f 2 f 16 "603 603" 100 1 0 100 foo bar )); +DATA(insert OID = 129 ( box_lt PGUID 11 f t f 2 f 16 "603 603" 100 1 0 100 foo bar )); +DATA(insert OID = 130 ( box_le PGUID 11 f t f 2 f 16 "603 603" 100 1 0 100 foo bar )); +DATA(insert OID = 131 ( point_above PGUID 11 f t f 2 f 16 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 132 ( point_left PGUID 11 f t f 2 f 16 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 133 ( point_right PGUID 11 f t f 2 f 16 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 134 ( point_below PGUID 11 f t f 2 f 16 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 135 ( point_eq PGUID 11 f t f 2 f 16 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 136 ( on_pb PGUID 11 f t f 2 f 16 "600 603" 100 0 0 100 foo bar )); +DATA(insert OID = 137 ( on_ppath PGUID 11 f t f 2 f 16 "600 602" 100 0 1 0 foo bar )); +DATA(insert OID = 138 ( box_center PGUID 11 f t f 1 f 600 "603" 100 1 0 100 foo bar )); +DATA(insert OID = 139 ( areasel PGUID 11 f t f 5 f 701 "26 26 21 0 23" 100 0 0 100 foo bar )); +DATA(insert OID = 140 ( areajoinsel PGUID 11 f t f 5 f 701 "26 26 21 0 23" 100 0 0 100 foo bar )); +DATA(insert OID = 141 ( int4mul PGUID 11 f t f 2 f 23 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 142 ( int4fac PGUID 11 f t f 1 f 23 "23" 100 0 0 100 foo bar )); +DATA(insert OID = 143 ( pointdist PGUID 11 f t f 2 f 23 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 144 ( int4ne PGUID 11 f t f 2 f 16 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 145 ( int2ne PGUID 11 f t f 2 f 16 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 146 ( int2gt PGUID 11 f t f 2 f 16 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 147 ( int4gt PGUID 11 f t f 2 f 16 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 148 ( int2le PGUID 11 f t f 2 f 16 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 149 ( int4le PGUID 11 f t f 2 f 16 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 150 ( int4ge PGUID 11 f t f 2 f 16 "23 23" 100 0 0 100 foo bar )); +#define INT4GE_PROC_OID 150 +DATA(insert OID = 151 ( int2ge PGUID 11 f t f 2 f 16 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 152 ( int2mul PGUID 11 f t f 2 f 21 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 153 ( int2div PGUID 11 f t f 2 f 21 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 154 ( int4div PGUID 11 f t f 2 f 23 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 155 ( int2mod PGUID 11 f t f 2 f 21 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 156 ( int4mod PGUID 11 f t f 2 f 23 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 157 ( textne PGUID 11 f t f 2 f 16 "25 25" 100 0 0 0 foo bar )); +DATA(insert OID = 158 ( int24eq PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 159 ( int42eq PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); 
+DATA(insert OID = 160 ( int24lt PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 161 ( int42lt PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 162 ( int24gt PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 163 ( int42gt PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 164 ( int24ne PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 165 ( int42ne PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 166 ( int24le PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 167 ( int42le PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 168 ( int24ge PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 169 ( int42ge PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 170 ( int24mul PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 171 ( int42mul PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 172 ( int24div PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 173 ( int42div PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 174 ( int24mod PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 175 ( int42mod PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 176 ( int2pl PGUID 11 f t f 2 f 21 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 177 ( int4pl PGUID 11 f t f 2 f 23 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 178 ( int24pl PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 179 ( int42pl PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 180 ( int2mi PGUID 11 f t f 2 f 21 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 181 ( int4mi PGUID 11 f t f 2 f 23 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 182 ( int24mi PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 183 ( int42mi PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 184 ( oideq PGUID 11 f t f 2 f 16 "26 26" 100 0 0 100 foo bar )); +#define ObjectIdEqualRegProcedure 184 + +DATA(insert OID = 185 ( oidne PGUID 11 f t f 2 f 16 "26 26" 100 0 0 100 foo bar )); +DATA(insert OID = 186 ( box_same PGUID 11 f t f 2 f 16 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 187 ( box_contain PGUID 11 f t f 2 f 16 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 188 ( box_left PGUID 11 f t f 2 f 16 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 189 ( box_overleft PGUID 11 f t f 2 f 16 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 190 ( box_overright PGUID 11 f t f 2 f 16 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 191 ( box_right PGUID 11 f t f 2 f 16 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 192 ( box_contained PGUID 11 f t f 2 f 16 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 193 ( rt_box_union PGUID 11 f t f 2 f 603 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 194 ( rt_box_inter PGUID 11 f t f 2 f 603 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 195 ( rt_box_size PGUID 11 f t f 2 f 700 "603 700" 100 0 0 100 foo bar )); +DATA(insert OID = 196 ( rt_bigbox_size PGUID 11 f t f 2 f 700 "603 700" 100 0 0 100 foo bar )); +DATA(insert OID = 197 ( rt_poly_union PGUID 11 f t f 2 f 604 "604 604" 100 0 0 100 foo bar )); +DATA(insert OID = 198 ( rt_poly_inter PGUID 11 f t f 2 f 604 "604 604" 100 0 0 100 foo bar )); +DATA(insert OID = 199 ( 
rt_poly_size PGUID 11 f t f 2 f 23 "604 23" 100 0 0 100 foo bar )); + +/* OIDS 200 - 299 */ + +DATA(insert OID = 200 ( float4in PGUID 11 f t f 1 f 700 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 201 ( float4out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 202 ( float4mul PGUID 11 f t f 2 f 700 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 203 ( float4div PGUID 11 f t f 2 f 700 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 204 ( float4pl PGUID 11 f t f 2 f 700 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 205 ( float4mi PGUID 11 f t f 2 f 700 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 206 ( float4um PGUID 11 f t f 1 f 700 "700" 100 0 0 100 foo bar )); +DATA(insert OID = 207 ( float4abs PGUID 11 f t f 1 f 700 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 208 ( float4inc PGUID 11 f t f 1 f 700 "700" 100 0 0 100 foo bar )); +DATA(insert OID = 209 ( float4larger PGUID 11 f t f 2 f 700 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 211 ( float4smaller PGUID 11 f t f 2 f 700 "700 700" 100 0 0 100 foo bar )); + +DATA(insert OID = 212 ( int4um PGUID 11 f t f 1 f 23 "23" 100 0 0 100 foo bar )); +DATA(insert OID = 213 ( int2um PGUID 11 f t f 1 f 21 "21" 100 0 0 100 foo bar )); + +DATA(insert OID = 214 ( float8in PGUID 11 f t f 1 f 701 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 215 ( float8out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 216 ( float8mul PGUID 11 f t f 2 f 701 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 217 ( float8div PGUID 11 f t f 2 f 701 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 218 ( float8pl PGUID 11 f t f 2 f 701 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 219 ( float8mi PGUID 11 f t f 2 f 701 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 220 ( float8um PGUID 11 f t f 1 f 701 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 221 ( float8abs PGUID 11 f t f 1 f 701 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 222 ( float8inc PGUID 11 f t f 1 f 701 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 223 ( float8larger PGUID 11 f t f 2 f 701 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 224 ( float8smaller PGUID 11 f t f 2 f 701 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 228 ( dround PGUID 11 f t f 1 f 701 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 229 ( dtrunc PGUID 11 f t f 1 f 701 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 230 ( dsqrt PGUID 11 f t f 1 f 701 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 231 ( dcbrt PGUID 11 f t f 1 f 701 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 232 ( dpow PGUID 11 f t f 2 f 701 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 233 ( dexp PGUID 11 f t f 1 f 701 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 234 ( dlog1 PGUID 11 f t f 1 f 701 "701" 100 0 0 100 foo bar )); + +DATA(insert OID = 240 ( nabstimein PGUID 11 f t f 1 f 702 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 241 ( nabstimeout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 242 ( reltimein PGUID 11 f t f 1 f 703 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 243 ( reltimeout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 244 ( timepl PGUID 11 f t f 2 f 702 "702 703" 100 0 0 100 foo bar )); +DATA(insert OID = 245 ( timemi PGUID 11 f t f 2 f 702 "702 703" 100 0 0 100 foo bar )); +DATA(insert OID = 246 ( tintervalin PGUID 11 f t f 1 f 704 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 247 ( tintervalout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID 
= 248 ( ininterval PGUID 11 f t f 2 f 16 "702 704" 100 0 0 100 foo bar )); +DATA(insert OID = 249 ( intervalrel PGUID 11 f t f 1 f 703 "704" 100 0 0 100 foo bar )); +DATA(insert OID = 250 ( timenow PGUID 11 f t f 0 f 702 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 251 ( abstimeeq PGUID 11 f t f 2 f 16 "702 702" 100 0 0 100 foo bar )); +DATA(insert OID = 252 ( abstimene PGUID 11 f t f 2 f 16 "702 702" 100 0 0 100 foo bar )); +DATA(insert OID = 253 ( abstimelt PGUID 11 f t f 2 f 16 "702 702" 100 0 0 100 foo bar )); +DATA(insert OID = 254 ( abstimegt PGUID 11 f t f 2 f 16 "702 702" 100 0 0 100 foo bar )); +DATA(insert OID = 255 ( abstimele PGUID 11 f t f 2 f 16 "702 702" 100 0 0 100 foo bar )); +DATA(insert OID = 256 ( abstimege PGUID 11 f t f 2 f 16 "702 702" 100 0 0 100 foo bar )); +DATA(insert OID = 257 ( reltimeeq PGUID 11 f t f 2 f 16 "703 703" 100 0 0 100 foo bar )); +DATA(insert OID = 258 ( reltimene PGUID 11 f t f 2 f 16 "703 703" 100 0 0 100 foo bar )); +DATA(insert OID = 259 ( reltimelt PGUID 11 f t f 2 f 16 "703 703" 100 0 0 100 foo bar )); +DATA(insert OID = 260 ( reltimegt PGUID 11 f t f 2 f 16 "703 703" 100 0 0 100 foo bar )); +DATA(insert OID = 261 ( reltimele PGUID 11 f t f 2 f 16 "703 703" 100 0 0 100 foo bar )); +DATA(insert OID = 262 ( reltimege PGUID 11 f t f 2 f 16 "703 703" 100 0 0 100 foo bar )); +DATA(insert OID = 263 ( intervaleq PGUID 11 f t f 2 f 16 "704 704" 100 0 0 100 foo bar )); +DATA(insert OID = 264 ( intervalct PGUID 11 f t f 2 f 16 "704 704" 100 0 0 100 foo bar )); +DATA(insert OID = 265 ( intervalov PGUID 11 f t f 2 f 16 "704 704" 100 0 0 100 foo bar )); +DATA(insert OID = 266 ( intervalleneq PGUID 11 f t f 2 f 16 "704 703" 100 0 0 100 foo bar )); +DATA(insert OID = 267 ( intervallenne PGUID 11 f t f 2 f 16 "704 703" 100 0 0 100 foo bar )); +DATA(insert OID = 268 ( intervallenlt PGUID 11 f t f 2 f 16 "704 703" 100 0 0 100 foo bar )); +DATA(insert OID = 269 ( intervallengt PGUID 11 f t f 2 f 16 "704 703" 100 0 0 100 foo bar )); +DATA(insert OID = 270 ( intervallenle PGUID 11 f t f 2 f 16 "704 703" 100 0 0 100 foo bar )); +DATA(insert OID = 271 ( intervallenge PGUID 11 f t f 2 f 16 "704 703" 100 0 0 100 foo bar )); +DATA(insert OID = 272 ( intervalstart PGUID 11 f t f 1 f 702 "704" 100 0 0 100 foo bar )); +DATA(insert OID = 273 ( intervalend PGUID 11 f t f 1 f 702 "704" 100 0 0 100 foo bar )); +DATA(insert OID = 274 ( timeofday PGUID 11 f t f 0 f 25 "0" 100 0 0 100 foo bar )); + +DATA(insert OID = 276 ( int2fac PGUID 11 f t f 1 f 21 "21" 100 0 0 100 foo bar )); +DATA(insert OID = 279 ( float48mul PGUID 11 f t f 2 f 701 "700 701" 100 0 0 100 foo bar )); +DATA(insert OID = 280 ( float48div PGUID 11 f t f 2 f 701 "700 701" 100 0 0 100 foo bar )); +DATA(insert OID = 281 ( float48pl PGUID 11 f t f 2 f 701 "700 701" 100 0 0 100 foo bar )); +DATA(insert OID = 282 ( float48mi PGUID 11 f t f 2 f 701 "700 701" 100 0 0 100 foo bar )); +DATA(insert OID = 283 ( float84mul PGUID 11 f t f 2 f 701 "701 700" 100 0 0 100 foo bar )); +DATA(insert OID = 284 ( float84div PGUID 11 f t f 2 f 701 "701 700" 100 0 0 100 foo bar )); +DATA(insert OID = 285 ( float84pl PGUID 11 f t f 2 f 701 "701 700" 100 0 0 100 foo bar )); +DATA(insert OID = 286 ( float84mi PGUID 11 f t f 2 f 701 "701 700" 100 0 0 100 foo bar )); + +DATA(insert OID = 287 ( float4eq PGUID 11 f t f 2 f 16 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 288 ( float4ne PGUID 11 f t f 2 f 16 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 289 ( float4lt PGUID 11 f t f 2 f 16 "700 700" 100 0 0 100 foo bar )); 
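
Entries such as float48pl just above (OID 281, arguments "700 701", returning 701) are the regproc targets named by the cross-type operators declared back in pg_operator, e.g. operator OID 1116 "+" with operand types 700 and 701 and result 701. The small standalone check below illustrates that consistency; both rows are transcribed from this patch, while the ExOid typedef and variable names are stand-ins for illustration only.

#include <stdio.h>
#include <string.h>

typedef unsigned int ExOid;     /* stand-in for the backend's Oid */

/* pg_operator OID 1116: "+" 700 701 -> 701, oprcode float48pl */
static const ExOid  op_left   = 700;
static const ExOid  op_right  = 701;
static const ExOid  op_result = 701;
static const char  *op_code   = "float48pl";

/* pg_proc OID 281: float48pl, 2 args "700 701", returns 701 */
static const char  *proc_name    = "float48pl";
static const ExOid  proc_args[2] = { 700, 701 };
static const ExOid  proc_rettype = 701;

int
main(void)
{
    /* the operator's operand/result types must line up with its procedure */
    int ok = (strcmp(op_code, proc_name) == 0 &&
              op_left   == proc_args[0] &&
              op_right  == proc_args[1] &&
              op_result == proc_rettype);

    printf("operator 1116 \"+\" -> %s: %s\n", op_code,
           ok ? "signature matches pg_proc OID 281" : "MISMATCH");
    return ok ? 0 : 1;
}
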
+DATA(insert OID = 290 ( float4le PGUID 11 f t f 2 f 16 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 291 ( float4gt PGUID 11 f t f 2 f 16 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 292 ( float4ge PGUID 11 f t f 2 f 16 "700 700" 100 0 0 100 foo bar )); + +DATA(insert OID = 293 ( float8eq PGUID 11 f t f 2 f 16 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 294 ( float8ne PGUID 11 f t f 2 f 16 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 295 ( float8lt PGUID 11 f t f 2 f 16 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 296 ( float8le PGUID 11 f t f 2 f 16 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 297 ( float8gt PGUID 11 f t f 2 f 16 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 298 ( float8ge PGUID 11 f t f 2 f 16 "701 701" 100 0 0 100 foo bar )); + +DATA(insert OID = 299 ( float48eq PGUID 11 f t f 2 f 16 "700 701" 100 0 0 100 foo bar )); + +/* OIDS 300 - 399 */ + +DATA(insert OID = 300 ( float48ne PGUID 11 f t f 2 f 16 "700 701" 100 0 0 100 foo bar )); +DATA(insert OID = 301 ( float48lt PGUID 11 f t f 2 f 16 "700 701" 100 0 0 100 foo bar )); +DATA(insert OID = 302 ( float48le PGUID 11 f t f 2 f 16 "700 701" 100 0 0 100 foo bar )); +DATA(insert OID = 303 ( float48gt PGUID 11 f t f 2 f 16 "700 701" 100 0 0 100 foo bar )); +DATA(insert OID = 304 ( float48ge PGUID 11 f t f 2 f 16 "700 701" 100 0 0 100 foo bar )); +DATA(insert OID = 305 ( float84eq PGUID 11 f t f 2 f 16 "701 700" 100 0 0 100 foo bar )); +DATA(insert OID = 306 ( float84ne PGUID 11 f t f 2 f 16 "701 700" 100 0 0 100 foo bar )); +DATA(insert OID = 307 ( float84lt PGUID 11 f t f 2 f 16 "701 700" 100 0 0 100 foo bar )); +DATA(insert OID = 308 ( float84le PGUID 11 f t f 2 f 16 "701 700" 100 0 0 100 foo bar )); +DATA(insert OID = 309 ( float84gt PGUID 11 f t f 2 f 16 "701 700" 100 0 0 100 foo bar )); +DATA(insert OID = 310 ( float84ge PGUID 11 f t f 2 f 16 "701 700" 100 0 0 100 foo bar )); + +DATA(insert OID = 311 ( ftod PGUID 11 f t f 2 f 701 "700" 100 0 0 100 foo bar )); +DATA(insert OID = 312 ( dtof PGUID 11 f t f 2 f 700 "701" 100 0 0 100 foo bar )); +DATA(insert OID = 313 ( i2toi4 PGUID 11 f t f 2 f 23 "21" 100 0 0 100 foo bar )); +DATA(insert OID = 314 ( i4toi2 PGUID 11 f t f 2 f 21 "23" 100 0 0 100 foo bar )); +DATA(insert OID = 315 ( keyfirsteq PGUID 11 f t f 2 f 16 "0 21" 100 0 0 100 foo bar )); + +DATA(insert OID = 320 ( rtinsert PGUID 11 f t f 2 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 321 ( rtdelete PGUID 11 f t f 2 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 322 ( rtgettuple PGUID 11 f t f 2 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 323 ( rtbuild PGUID 11 f t f 9 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 324 ( rtbeginscan PGUID 11 f t f 4 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 325 ( rtendscan PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 326 ( rtmarkpos PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 327 ( rtrestrpos PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 328 ( rtrescan PGUID 11 f t f 3 f 23 "0" 100 0 0 100 foo bar )); + +DATA(insert OID = 330 ( btgettuple PGUID 11 f t f 2 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 331 ( btinsert PGUID 11 f t f 2 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 332 ( btdelete PGUID 11 f t f 2 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 333 ( btbeginscan PGUID 11 f t f 4 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 334 ( btrescan PGUID 11 f t f 3 f 23 "0" 100 0 0 100 foo bar )); 
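
The btree routines registered here (btgettuple, btinsert, btbeginscan, btrescan above, and btendscan through btbuild just below) are the entry points through which the btree access method is invoked, while the bt*cmp procedures a little further down (btint2cmp through bttextcmp) supply its per-type comparisons. By convention those comparison procedures return an int4 that is negative, zero, or positive according to whether the first argument sorts before, equal to, or after the second. The fragment below is a standalone illustration of that contract for two-byte integers, not the backend's btint2cmp; the typedef and function name are stand-ins.

#include <stdio.h>

typedef short int16;    /* stand-in for the backend's int2 */

/*
 * Three-way comparison in the style expected of the bt*cmp support
 * procedures registered below: <0, 0, or >0.
 */
static int
example_int2cmp(int16 a, int16 b)
{
    return (int) a - (int) b;       /* no overflow possible for int16 */
}

int
main(void)
{
    printf("%d %d %d\n",
           example_int2cmp(1, 2),   /* negative */
           example_int2cmp(2, 2),   /* zero     */
           example_int2cmp(3, 2));  /* positive */
    return 0;
}
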
+DATA(insert OID = 335 ( btendscan PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 336 ( btmarkpos PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 337 ( btrestrpos PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 338 ( btbuild PGUID 11 f t f 9 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 339 ( poly_same PGUID 11 f t f 2 f 16 "604 604" 100 0 1 0 foo bar )); +DATA(insert OID = 340 ( poly_contain PGUID 11 f t f 2 f 16 "604 604" 100 0 1 0 foo bar )); +DATA(insert OID = 341 ( poly_left PGUID 11 f t f 2 f 16 "604 604" 100 0 1 0 foo bar )); +DATA(insert OID = 342 ( poly_overleft PGUID 11 f t f 2 f 16 "604 604" 100 0 1 0 foo bar )); +DATA(insert OID = 343 ( poly_overright PGUID 11 f t f 2 f 16 "604 604" 100 0 1 0 foo bar )); +DATA(insert OID = 344 ( poly_right PGUID 11 f t f 2 f 16 "604 604" 100 0 1 0 foo bar )); +DATA(insert OID = 345 ( poly_contained PGUID 11 f t f 2 f 16 "604 604" 100 0 1 0 foo bar )); +DATA(insert OID = 346 ( poly_overlap PGUID 11 f t f 2 f 16 "604 604" 100 0 1 0 foo bar )); +DATA(insert OID = 347 ( poly_in PGUID 11 f t f 1 f 604 "0" 100 0 1 0 foo bar )); +DATA(insert OID = 348 ( poly_out PGUID 11 f t f 1 f 23 "0" 100 0 1 0 foo bar )); + +DATA(insert OID = 350 ( btint2cmp PGUID 11 f t f 2 f 23 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 351 ( btint4cmp PGUID 11 f t f 2 f 23 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 352 ( btint42cmp PGUID 11 f t f 2 f 23 "23 21" 100 0 0 100 foo bar )); +DATA(insert OID = 353 ( btint24cmp PGUID 11 f t f 2 f 23 "21 23" 100 0 0 100 foo bar )); +DATA(insert OID = 354 ( btfloat4cmp PGUID 11 f t f 2 f 23 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 355 ( btfloat8cmp PGUID 11 f t f 2 f 23 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 356 ( btoidcmp PGUID 11 f t f 2 f 23 "26 26" 100 0 0 100 foo bar )); +DATA(insert OID = 357 ( btabstimecmp PGUID 11 f t f 2 f 23 "702 702" 100 0 0 100 foo bar )); +DATA(insert OID = 358 ( btcharcmp PGUID 11 f t f 2 f 23 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 359 ( btnamecmp PGUID 11 f t f 2 f 23 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 360 ( bttextcmp PGUID 11 f t f 2 f 23 "25 25" 100 0 0 100 foo bar )); + +DATA(insert OID = 361 ( lseg_distance PGUID 11 f t f 2 f 701 "601 601" 100 0 0 100 foo bar )); +DATA(insert OID = 362 ( lseg_interpt PGUID 11 f t f 2 f 600 "601 601" 100 0 0 100 foo bar )); +DATA(insert OID = 363 ( dist_ps PGUID 11 f t f 2 f 701 "600 601" 100 0 0 100 foo bar )); +DATA(insert OID = 364 ( dist_pb PGUID 11 f t f 2 f 701 "600 603" 100 0 0 100 foo bar )); +DATA(insert OID = 365 ( dist_sb PGUID 11 f t f 2 f 701 "601 603" 100 0 0 100 foo bar )); +DATA(insert OID = 366 ( close_ps PGUID 11 f t f 2 f 600 "600 601" 100 0 0 100 foo bar )); +DATA(insert OID = 367 ( close_pb PGUID 11 f t f 2 f 600 "600 603" 100 0 0 100 foo bar )); +DATA(insert OID = 368 ( close_sb PGUID 11 f t f 2 f 600 "601 603" 100 0 0 100 foo bar )); +DATA(insert OID = 369 ( on_ps PGUID 11 f t f 2 f 16 "600 601" 100 0 0 100 foo bar )); +DATA(insert OID = 370 ( path_distance PGUID 11 f t f 2 f 701 "602 602" 100 0 1 0 foo bar )); +DATA(insert OID = 371 ( dist_ppth PGUID 11 f t f 2 f 701 "600 602" 100 0 1 0 foo bar )); +DATA(insert OID = 372 ( on_sb PGUID 11 f t f 2 f 16 "601 603" 100 0 0 100 foo bar )); +DATA(insert OID = 373 ( inter_sb PGUID 11 f t f 2 f 16 "601 603" 100 0 0 100 foo bar )); +DATA(insert OID = 374 ( btchar16cmp PGUID 11 f t f 2 f 23 "19 19" 100 0 0 100 foo bar )); + +/* OIDS 400 - 499 */ + +DATA(insert OID = 438 
( hashsel PGUID 11 f t t 7 f 701 "26 26 21 0 23 23 26" 100 0 0 100 foo bar )); +DATA(insert OID = 439 ( hashnpage PGUID 11 f t t 7 f 701 "26 26 21 0 23 23 26" 100 0 0 100 foo bar )); + +DATA(insert OID = 440 ( hashgettuple PGUID 11 f t f 2 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 441 ( hashinsert PGUID 11 f t f 2 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 442 ( hashdelete PGUID 11 f t f 2 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 443 ( hashbeginscan PGUID 11 f t f 4 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 444 ( hashrescan PGUID 11 f t f 3 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 445 ( hashendscan PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 446 ( hashmarkpos PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 447 ( hashrestrpos PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 448 ( hashbuild PGUID 11 f t f 9 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 449 ( hashint2 PGUID 11 f t f 2 f 23 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 450 ( hashint4 PGUID 11 f t f 2 f 23 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 451 ( hashfloat4 PGUID 11 f t f 2 f 23 "700 700" 100 0 0 100 foo bar )); +DATA(insert OID = 452 ( hashfloat8 PGUID 11 f t f 2 f 23 "701 701" 100 0 0 100 foo bar )); +DATA(insert OID = 453 ( hashoid PGUID 11 f t f 2 f 23 "26 26" 100 0 0 100 foo bar )); +DATA(insert OID = 454 ( hashchar PGUID 11 f t f 2 f 23 "18 18" 100 0 0 100 foo bar )); +DATA(insert OID = 455 ( hashname PGUID 11 f t f 2 f 23 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 456 ( hashtext PGUID 11 f t f 2 f 23 "25 25" 100 0 0 100 foo bar )); +DATA(insert OID = 466 ( char2in PGUID 11 f t f 1 f 409 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 467 ( char4in PGUID 11 f t f 1 f 410 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 468 ( char8in PGUID 11 f t f 1 f 411 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 469 ( char2out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 470 ( char4out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 471 ( char8out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 472 ( char2eq PGUID 11 f t f 2 f 16 "409 409" 100 0 0 100 foo bar )); +DATA(insert OID = 473 ( char4eq PGUID 11 f t f 2 f 16 "410 410" 100 0 0 100 foo bar )); +DATA(insert OID = 474 ( char8eq PGUID 11 f t f 2 f 16 "411 411" 100 0 0 100 foo bar )); +DATA(insert OID = 475 ( char2lt PGUID 11 f t f 2 f 16 "409 409" 100 0 0 100 foo bar )); +DATA(insert OID = 476 ( char4lt PGUID 11 f t f 2 f 16 "410 410" 100 0 0 100 foo bar )); +DATA(insert OID = 477 ( char8lt PGUID 11 f t f 2 f 16 "411 411" 100 0 0 100 foo bar )); +DATA(insert OID = 478 ( char2le PGUID 11 f t f 2 f 16 "409 409" 100 0 0 100 foo bar )); +DATA(insert OID = 479 ( char4le PGUID 11 f t f 2 f 16 "410 410" 100 0 0 100 foo bar )); +DATA(insert OID = 480 ( char8le PGUID 11 f t f 2 f 16 "411 411" 100 0 0 100 foo bar )); +DATA(insert OID = 481 ( char2gt PGUID 11 f t f 2 f 16 "409 409" 100 0 0 100 foo bar )); +DATA(insert OID = 482 ( char4gt PGUID 11 f t f 2 f 16 "410 410" 100 0 0 100 foo bar )); +DATA(insert OID = 483 ( char8gt PGUID 11 f t f 2 f 16 "411 411" 100 0 0 100 foo bar )); +DATA(insert OID = 484 ( char2ge PGUID 11 f t f 2 f 16 "409 409" 100 0 0 100 foo bar )); +DATA(insert OID = 490 ( char16eq PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +#define Character16EqualRegProcedure 490 +DATA(insert OID = 492 ( char16lt PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar 
)); +DATA(insert OID = 493 ( char16le PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 494 ( char16gt PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 495 ( char16ge PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 496 ( char16ne PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); + +DATA(insert OID = 499 ( hashchar16 PGUID 11 f t f 2 f 23 "19 19" 100 0 0 100 foo bar )); + +/* OIDS 500 - 599 */ + +/* OIDS 600 - 699 */ + +DATA(insert OID = 650 ( int4notin PGUID 11 f t f 2 f 16 "21 0" 100 0 0 100 foo bar )); +DATA(insert OID = 651 ( oidnotin PGUID 11 f t f 2 f 16 "26 0" 100 0 0 100 foo bar )); +DATA(insert OID = 652 ( int44in PGUID 11 f t f 1 f 22 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 653 ( int44out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 655 ( namelt PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 656 ( namele PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 657 ( namegt PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 658 ( namege PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 659 ( namene PGUID 11 f t f 2 f 16 "19 19" 100 0 0 100 foo bar )); +DATA(insert OID = 682 ( mktinterval PGUID 11 f t f 2 f 704 "702 702" 100 0 0 100 foo bar )); +DATA(insert OID = 683 ( oid8eq PGUID 11 f t f 2 f 16 "30 30" 100 0 0 100 foo bar )); +DATA(insert OID = 684 ( char4ge PGUID 11 f t f 2 f 16 "410 410" 100 0 0 100 foo bar )); +DATA(insert OID = 685 ( char8ge PGUID 11 f t f 2 f 16 "411 411" 100 0 0 100 foo bar )); +DATA(insert OID = 686 ( char2ne PGUID 11 f t f 2 f 16 "409 409" 100 0 0 100 foo bar )); +DATA(insert OID = 687 ( char4ne PGUID 11 f t f 2 f 16 "410 410" 100 0 0 100 foo bar )); +DATA(insert OID = 688 ( char8ne PGUID 11 f t f 2 f 16 "411 411" 100 0 0 100 foo bar )); +DATA(insert OID = 689 ( btchar2cmp PGUID 11 f t f 2 f 23 "409 409" 100 0 0 100 foo bar )); +DATA(insert OID = 690 ( btchar4cmp PGUID 11 f t f 2 f 23 "410 410" 100 0 0 100 foo bar )); +DATA(insert OID = 691 ( btchar8cmp PGUID 11 f t f 2 f 23 "411 411" 100 0 0 100 foo bar )); +DATA(insert OID = 692 ( hashchar2 PGUID 11 f t f 2 f 23 "409 409" 100 0 0 100 foo bar )); +DATA(insert OID = 693 ( hashchar4 PGUID 11 f t f 2 f 23 "410 410" 100 0 0 100 foo bar )); +DATA(insert OID = 694 ( hashchar8 PGUID 11 f t f 2 f 23 "411 411" 100 0 0 100 foo bar )); +DATA(insert OID = 695 ( char8regexeq PGUID 11 f t f 2 f 16 "411 25" 100 0 0 100 foo bar )); +DATA(insert OID = 696 ( char8regexne PGUID 11 f t f 2 f 16 "411 25" 100 0 0 100 foo bar )); +DATA(insert OID = 699 ( char2regexeq PGUID 11 f t f 2 f 16 "409 25" 100 0 0 100 foo bar )); + +/* OIDS 700 - 799 */ +DATA(insert OID = 700 ( char16regexeq PGUID 11 f t f 2 f 16 "19 25" 100 0 0 100 foo bar )); +DATA(insert OID = 701 ( char16regexne PGUID 11 f t f 2 f 16 "19 25" 100 0 0 100 foo bar )); + +DATA(insert OID = 710 ( GetPgUserName PGUID 11 f t f 0 f 19 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 711 ( userfntest PGUID 11 f t f 1 f 23 "23" 100 0 0 100 foo bar )); +DATA(insert OID = 713 ( oidrand PGUID 11 f t f 2 f 16 "26 23" 100 0 0 100 foo bar )); +DATA(insert OID = 715 ( oidsrand PGUID 11 f t f 1 f 16 "23" 100 0 0 100 foo bar )); +DATA(insert OID = 716 ( oideqint4 PGUID 11 f t f 2 f 16 "26 23" 100 0 0 100 foo bar )); +DATA(insert OID = 717 ( int4eqoid PGUID 11 f t f 2 f 16 "23 26" 100 0 0 100 foo bar )); + + +DATA(insert OID = 720 ( byteaGetSize PGUID 11 f t f 1 f 23 "17" 100 0 0 100 foo bar )); 
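+/*
+ * A worked reading of one entry, as an orientation sketch only; the
+ * authoritative column order is the CATALOG(pg_proc) declaration
+ * earlier in this file.  The byteaGetSize entry above breaks down as:
+ *     proname = byteaGetSize, proowner = PGUID, prolang = 11,
+ *     proisinh = f, proistrusted = t, proiscachable = f,
+ *     pronargs = 1, proretset = f, prorettype = 23 (int4),
+ *     proargtypes = "17" (bytea; see pg_type.h for the type oids),
+ *     byte_pct/perbyte_cpu/percall_cpu/outin_ratio = 100 0 0 100
+ *     (the same cost parameters taken by ProcedureCreate below),
+ *     prosrc = foo, probin = bar (placeholder source/binary fields).
+ */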
+DATA(insert OID = 721 ( byteaGetByte PGUID 11 f t f 2 f 23 "17 23" 100 0 0 100 foo bar )); +DATA(insert OID = 722 ( byteaSetByte PGUID 11 f t f 3 f 17 "17 23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 723 ( byteaGetBit PGUID 11 f t f 2 f 23 "17 23" 100 0 0 100 foo bar )); +DATA(insert OID = 724 ( byteaSetBit PGUID 11 f t f 3 f 17 "17 23 23" 100 0 0 100 foo bar )); + +DATA(insert OID = 730 ( pqtest PGUID 11 f t f 1 f 23 "25" 100 0 0 100 foo bar )); + +DATA(insert OID = 740 ( text_lt PGUID 11 f t f 2 f 16 "25 25" 100 0 0 0 foo bar )); +DATA(insert OID = 741 ( text_le PGUID 11 f t f 2 f 16 "25 25" 100 0 0 0 foo bar )); +DATA(insert OID = 742 ( text_gt PGUID 11 f t f 2 f 16 "25 25" 100 0 0 0 foo bar )); +DATA(insert OID = 743 ( text_ge PGUID 11 f t f 2 f 16 "25 25" 100 0 0 0 foo bar )); + +DATA(insert OID = 744 ( array_eq PGUID 11 f t f 2 f 16 "0 0" 100 0 0 100 foo bar)); +DATA(insert OID = 745 ( array_assgn PGUID 11 f t f 8 f 23 "0 23 0 0 0 23 23 0" 100 0 0 100 foo bar)); +DATA(insert OID = 746 ( array_clip PGUID 11 f t f 7 f 23 "0 23 0 0 23 23 0" 100 0 0 100 foo bar)); +DATA(insert OID = 747 ( array_dims PGUID 11 f t f 1 f 25 "0" 100 0 0 100 foo bar)); +DATA(insert OID = 748 ( array_set PGUID 11 f t f 8 f 23 "0 23 0 0 23 23 23 0" 100 0 0 100 foo bar)); +DATA(insert OID = 749 ( array_ref PGUID 11 f t f 7 f 23 "0 23 0 23 23 23 0" 100 0 0 100 foo bar)); +DATA(insert OID = 750 ( array_in PGUID 11 f t f 2 f 23 "0 0" 100 0 0 100 foo bar )); +DATA(insert OID = 751 ( array_out PGUID 11 f t f 2 f 23 "0 0" 100 0 0 100 foo bar )); + +DATA(insert OID = 752 ( filename_in PGUID 11 f t f 2 f 605 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 753 ( filename_out PGUID 11 f t f 2 f 19 "0" 100 0 0 100 foo bar )); + +DATA(insert OID = 760 ( smgrin PGUID 11 f t f 1 f 210 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 761 ( smgrout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 762 ( smgreq PGUID 11 f t f 2 f 16 "210 210" 100 0 0 100 foo bar )); +DATA(insert OID = 763 ( smgrne PGUID 11 f t f 2 f 16 "210 210" 100 0 0 100 foo bar )); + +DATA(insert OID = 764 ( lo_import PGUID 11 f t f 1 f 26 "25" 100 0 0 100 foo bar )); +DATA(insert OID = 765 ( lo_export PGUID 11 f t f 2 f 23 "26 25" 100 0 0 100 foo bar )); + +DATA(insert OID = 766 ( int4inc PGUID 11 f t f 1 f 23 "23" 100 0 0 100 foo bar )); +DATA(insert OID = 767 ( int2inc PGUID 11 f t f 1 f 21 "21" 100 0 0 100 foo bar )); +DATA(insert OID = 768 ( int4larger PGUID 11 f t f 2 f 23 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 769 ( int4smaller PGUID 11 f t f 2 f 23 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 770 ( int2larger PGUID 11 f t f 2 f 23 "21 21" 100 0 0 100 foo bar )); +DATA(insert OID = 771 ( int2smaller PGUID 11 f t f 2 f 23 "21 21" 100 0 0 100 foo bar )); + +/* OIDS 800 - 899 */ +DATA(insert OID = 820 ( oidint2in PGUID 11 f t f 1 f 810 "0" 100 0 0 100 foo bar)); +DATA(insert OID = 821 ( oidint2out PGUID 11 f t f 1 f 19 "0" 100 0 0 100 foo bar)); +DATA(insert OID = 822 ( oidint2lt PGUID 11 f t f 2 f 16 "810 810" 100 0 0 100 foo bar)); +DATA(insert OID = 823 ( oidint2le PGUID 11 f t f 2 f 16 "810 810" 100 0 0 100 foo bar)); +DATA(insert OID = 824 ( oidint2eq PGUID 11 f t f 2 f 16 "810 810" 100 0 0 100 foo bar)); + +#define OidInt2EqRegProcedure 824 + +DATA(insert OID = 825 ( oidint2ge PGUID 11 f t f 2 f 16 "810 810" 100 0 0 100 foo bar)); +DATA(insert OID = 826 ( oidint2gt PGUID 11 f t f 2 f 16 "810 810" 100 0 0 100 foo bar)); +DATA(insert OID = 827 ( oidint2ne PGUID 11 f t f 2 f 16 "810 810" 100 0 0 100 foo bar)); 
+DATA(insert OID = 828 ( oidint2cmp PGUID 11 f t f 2 f 21 "810 810" 100 0 0 100 foo bar)); +DATA(insert OID = 829 ( mkoidint2 PGUID 11 f t f 2 f 810 "26 21" 100 0 0 100 foo bar)); + +DATA(insert OID = 837 ( char2regexne PGUID 11 f t f 2 f 16 "409 25" 100 0 0 100 foo bar )); +DATA(insert OID = 836 ( char4regexeq PGUID 11 f t f 2 f 16 "410 25" 100 0 0 100 foo bar )); +DATA(insert OID = 838 ( char4regexne PGUID 11 f t f 2 f 16 "410 25" 100 0 0 100 foo bar )); + +DATA(insert OID = 850 ( textlike PGUID 11 f t f 2 f 16 "25 25" 100 0 1 0 foo bar )); +DATA(insert OID = 851 ( textnlike PGUID 11 f t f 2 f 16 "25 25" 100 0 1 0 foo bar )); +DATA(insert OID = 852 ( char2like PGUID 11 f t f 2 f 16 "409 25" 100 0 0 100 foo bar )); +DATA(insert OID = 853 ( char2nlike PGUID 11 f t f 2 f 16 "409 25" 100 0 0 100 foo bar )); +DATA(insert OID = 854 ( char4like PGUID 11 f t f 2 f 16 "410 25" 100 0 0 100 foo bar )); +DATA(insert OID = 855 ( char4nlike PGUID 11 f t f 2 f 16 "410 25" 100 0 0 100 foo bar )); +DATA(insert OID = 856 ( char8like PGUID 11 f t f 2 f 16 "411 25" 100 0 0 100 foo bar )); +DATA(insert OID = 857 ( char8nlike PGUID 11 f t f 2 f 16 "411 25" 100 0 0 100 foo bar )); +DATA(insert OID = 858 ( namelike PGUID 11 f t f 2 f 16 "19 25" 100 0 0 100 foo bar )); +DATA(insert OID = 859 ( namenlike PGUID 11 f t f 2 f 16 "19 25" 100 0 0 100 foo bar )); +DATA(insert OID = 860 ( char16like PGUID 11 f t f 2 f 16 "20 25" 100 0 0 100 foo bar )); +DATA(insert OID = 861 ( char16nlike PGUID 11 f t f 2 f 16 "20 25" 100 0 0 100 foo bar )); + +/* OIDS 900 - 999 */ + +DATA(insert OID = 920 ( oidint4in PGUID 11 f t f 1 f 910 "0" 100 0 0 100 foo bar)); +DATA(insert OID = 921 ( oidint4out PGUID 11 f t f 1 f 19 "0" 100 0 0 100 foo bar)); +DATA(insert OID = 922 ( oidint4lt PGUID 11 f t f 2 f 16 "910 910" 100 0 0 100 foo bar)); +DATA(insert OID = 923 ( oidint4le PGUID 11 f t f 2 f 16 "910 910" 100 0 0 100 foo bar)); +DATA(insert OID = 924 ( oidint4eq PGUID 11 f t f 2 f 16 "910 910" 100 0 0 100 foo bar)); + +#define OidInt4EqRegProcedure 924 + +DATA(insert OID = 925 ( oidint4ge PGUID 11 f t f 2 f 16 "910 910" 100 0 0 100 foo bar)); +DATA(insert OID = 926 ( oidint4gt PGUID 11 f t f 2 f 16 "910 910" 100 0 0 100 foo bar)); +DATA(insert OID = 927 ( oidint4ne PGUID 11 f t f 2 f 16 "910 910" 100 0 0 100 foo bar)); +DATA(insert OID = 928 ( oidint4cmp PGUID 11 f t f 2 f 23 "910 910" 100 0 0 100 foo bar)); +DATA(insert OID = 929 ( mkoidint4 PGUID 11 f t f 2 f 910 "26 23" 100 0 0 100 foo bar)); + +DATA(insert OID = 940 ( oidnamein PGUID 11 f t f 1 f 911 "0" 100 0 0 100 foo bar)); +DATA(insert OID = 941 ( oidnameout PGUID 11 f t f 1 f 19 "0" 100 0 0 100 foo bar)); +DATA(insert OID = 942 ( oidnamelt PGUID 11 f t f 2 f 16 "911 911" 100 0 0 100 foo bar)); +DATA(insert OID = 943 ( oidnamele PGUID 11 f t f 2 f 16 "911 911" 100 0 0 100 foo bar)); +DATA(insert OID = 944 ( oidnameeq PGUID 11 f t f 2 f 16 "911 911" 100 0 0 100 foo bar)); + +#define OidNameEqRegProcedure 944 + +DATA(insert OID = 945 ( oidnamege PGUID 11 f t f 2 f 16 "911 911" 100 0 0 100 foo bar)); +DATA(insert OID = 946 ( oidnamegt PGUID 11 f t f 2 f 16 "911 911" 100 0 0 100 foo bar)); +DATA(insert OID = 947 ( oidnamene PGUID 11 f t f 2 f 16 "911 911" 100 0 0 100 foo bar)); +DATA(insert OID = 948 ( oidnamecmp PGUID 11 f t f 2 f 23 "911 911" 100 0 0 100 foo bar)); +DATA(insert OID = 949 ( mkoidname PGUID 11 f t f 2 f 911 "26 19" 100 0 0 100 foo bar)); + +DATA(insert OID = 952 ( lo_open PGUID 11 f t f 2 f 23 "26 23" 100 0 0 100 foo bar )); +DATA(insert OID = 953 ( lo_close PGUID 11 
f t f 1 f 23 "23" 100 0 0 100 foo bar )); +DATA(insert OID = 954 ( LOread PGUID 11 f t f 2 f 17 "23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 955 ( LOwrite PGUID 11 f t f 2 f 23 "23 17" 100 0 0 100 foo bar )); +DATA(insert OID = 956 ( lo_lseek PGUID 11 f t f 3 f 23 "23 23 23" 100 0 0 100 foo bar )); +DATA(insert OID = 957 ( lo_creat PGUID 11 f t f 1 f 26 "23" 100 0 0 100 foo bar )); +DATA(insert OID = 958 ( lo_tell PGUID 11 f t f 1 f 23 "23" 100 0 0 100 foo bar )); +DATA(insert OID = 964 ( lo_unlink PGUID 11 f t f 1 f 23 "23" 100 0 0 100 foo bar )); + +DATA(insert OID = 972 ( RegprocToOid PGUID 11 f t f 1 f 26 "24" 100 0 0 100 foo bar )); + +DATA(insert OID = 973 ( path_inter PGUID 11 f t f 2 f 16 "602 602" 100 0 10 100 foo bar )); +DATA(insert OID = 974 ( box_copy PGUID 11 f t f 1 f 603 "603" 100 0 0 100 foo bar )); +DATA(insert OID = 975 ( box_area PGUID 11 f t f 1 f 701 "603" 100 0 0 100 foo bar )); +DATA(insert OID = 976 ( box_length PGUID 11 f t f 1 f 701 "603" 100 0 0 100 foo bar )); +DATA(insert OID = 977 ( box_height PGUID 11 f t f 1 f 701 "603" 100 0 0 100 foo bar )); +DATA(insert OID = 978 ( box_distance PGUID 11 f t f 2 f 701 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 980 ( box_intersect PGUID 11 f t f 2 f 603 "603 603" 100 0 0 100 foo bar )); +DATA(insert OID = 981 ( box_diagonal PGUID 11 f t f 1 f 601 "603" 100 0 0 100 foo bar )); +DATA(insert OID = 982 ( path_n_lt PGUID 11 f t f 2 f 16 "602 602" 100 0 0 100 foo bar )); +DATA(insert OID = 983 ( path_n_gt PGUID 11 f t f 2 f 16 "602 602" 100 0 0 100 foo bar )); +DATA(insert OID = 984 ( path_n_eq PGUID 11 f t f 2 f 16 "602 602" 100 0 0 100 foo bar )); +DATA(insert OID = 985 ( path_n_le PGUID 11 f t f 2 f 16 "602 602" 100 0 0 100 foo bar )); +DATA(insert OID = 986 ( path_n_ge PGUID 11 f t f 2 f 16 "602 602" 100 0 0 100 foo bar )); +DATA(insert OID = 987 ( path_length PGUID 11 f t f 1 f 701 "602" 100 0 1 0 foo bar )); +DATA(insert OID = 988 ( point_copy PGUID 11 f t f 1 f 600 "600" 100 0 0 100 foo bar )); +DATA(insert OID = 989 ( point_vert PGUID 11 f t f 2 f 16 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 990 ( point_horiz PGUID 11 f t f 2 f 16 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 991 ( point_distance PGUID 11 f t f 2 f 701 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 992 ( point_slope PGUID 11 f t f 2 f 701 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 993 ( lseg_construct PGUID 11 f t f 2 f 601 "600 600" 100 0 0 100 foo bar )); +DATA(insert OID = 994 ( lseg_intersect PGUID 11 f t f 2 f 16 "601 601" 100 0 0 100 foo bar )); +DATA(insert OID = 995 ( lseg_parallel PGUID 11 f t f 2 f 16 "601 601" 100 0 0 100 foo bar )); +DATA(insert OID = 996 ( lseg_perp PGUID 11 f t f 2 f 16 "601 601" 100 0 0 100 foo bar )); +DATA(insert OID = 997 ( lseg_vertical PGUID 11 f t f 1 f 16 "601" 100 0 0 100 foo bar )); +DATA(insert OID = 998 ( lseg_horizontal PGUID 11 f t f 1 f 16 "601" 100 0 0 100 foo bar )); +DATA(insert OID = 999 ( lseg_eq PGUID 11 f t f 2 f 16 "601 601" 100 0 0 100 foo bar )); + +/* OIDS 1000 - 1999 */ + +DATA(insert OID = 1029 ( NullValue PGUID 11 f t f 1 f 16 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1030 ( NonNullValue PGUID 11 f t f 1 f 16 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1031 ( aclitemin PGUID 11 f t f 1 f 1033 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1032 ( aclitemout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1035 ( aclinsert PGUID 11 f t f 2 f 1034 "1034 1033" 100 0 0 100 foo bar )); +DATA(insert OID = 1036 ( aclremove 
PGUID 11 f t f 2 f 1034 "1034 1033" 100 0 0 100 foo bar )); +DATA(insert OID = 1037 ( aclcontains PGUID 11 f t f 2 f 16 "1034 1033" 100 0 0 100 foo bar )); +DATA(insert OID = 1038 ( seteval PGUID 11 f t f 1 f 23 "26" 100 0 0 100 foo bar )); +#define SetEvalRegProcedure 1038 + +DATA(insert OID = 1044 ( bpcharin PGUID 11 f t f 3 f 1042 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1045 ( bpcharout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1046 ( varcharin PGUID 11 f t f 3 f 1043 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1047 ( varcharout PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1048 ( bpchareq PGUID 11 f t f 2 f 16 "1042 1042" 100 0 0 100 foo bar )); +DATA(insert OID = 1049 ( bpcharlt PGUID 11 f t f 2 f 16 "1042 1042" 100 0 0 100 foo bar )); +DATA(insert OID = 1050 ( bpcharle PGUID 11 f t f 2 f 16 "1042 1042" 100 0 0 100 foo bar )); +DATA(insert OID = 1051 ( bpchargt PGUID 11 f t f 2 f 16 "1042 1042" 100 0 0 100 foo bar )); +DATA(insert OID = 1052 ( bpcharge PGUID 11 f t f 2 f 16 "1042 1042" 100 0 0 100 foo bar )); +DATA(insert OID = 1053 ( bpcharne PGUID 11 f t f 2 f 16 "1042 1042" 100 0 0 100 foo bar )); +DATA(insert OID = 1070 ( varchareq PGUID 11 f t f 2 f 16 "1043 1043" 100 0 0 100 foo bar )); +DATA(insert OID = 1071 ( varcharlt PGUID 11 f t f 2 f 16 "1043 1043" 100 0 0 100 foo bar )); +DATA(insert OID = 1072 ( varcharle PGUID 11 f t f 2 f 16 "1043 1043" 100 0 0 100 foo bar )); +DATA(insert OID = 1073 ( varchargt PGUID 11 f t f 2 f 16 "1043 1043" 100 0 0 100 foo bar )); +DATA(insert OID = 1074 ( varcharge PGUID 11 f t f 2 f 16 "1043 1043" 100 0 0 100 foo bar )); +DATA(insert OID = 1075 ( varcharne PGUID 11 f t f 2 f 16 "1043 1043" 100 0 0 100 foo bar )); +DATA(insert OID = 1078 ( bpcharcmp PGUID 11 f t f 2 f 23 "1042 1042" 100 0 0 100 foo bar )); +DATA(insert OID = 1079 ( varcharcmp PGUID 11 f t f 2 f 23 "1043 1043" 100 0 0 100 foo bar )); +DATA(insert OID = 1080 ( hashbpchar PGUID 11 f t f 1 f 23 "1042" 100 0 0 100 foo bar )); +DATA(insert OID = 1081 ( hashvarchar PGUID 11 f t f 1 f 23 "1043" 100 0 0 100 foo bar )); + +DATA(insert OID = 1084 ( date_in PGUID 11 f t f 1 f 1082 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1085 ( date_out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1086 ( date_eq PGUID 11 f t f 2 f 16 "1082 1082" 100 0 0 100 foo bar )); +DATA(insert OID = 1087 ( date_lt PGUID 11 f t f 2 f 16 "1082 1082" 100 0 0 100 foo bar )); +DATA(insert OID = 1088 ( date_le PGUID 11 f t f 2 f 16 "1082 1082" 100 0 0 100 foo bar )); +DATA(insert OID = 1089 ( date_gt PGUID 11 f t f 2 f 16 "1082 1082" 100 0 0 100 foo bar )); +DATA(insert OID = 1090 ( date_ge PGUID 11 f t f 2 f 16 "1082 1082" 100 0 0 100 foo bar )); +DATA(insert OID = 1091 ( date_ne PGUID 11 f t f 2 f 16 "1082 1082" 100 0 0 100 foo bar )); +DATA(insert OID = 1092 ( date_cmp PGUID 11 f t f 2 f 23 "1082 1082" 100 0 0 100 foo bar )); + +DATA(insert OID = 1099 ( time_in PGUID 11 f t f 1 f 1083 "0" 100 0 0 100 foo bar )); + +/* OIDS 1100 - 1199 */ +DATA(insert OID = 1100 ( time_out PGUID 11 f t f 1 f 23 "0" 100 0 0 100 foo bar )); +DATA(insert OID = 1101 ( time_eq PGUID 11 f t f 2 f 16 "1083 1083" 100 0 0 100 foo bar )); +DATA(insert OID = 1102 ( time_lt PGUID 11 f t f 2 f 16 "1083 1083" 100 0 0 100 foo bar )); +DATA(insert OID = 1103 ( time_le PGUID 11 f t f 2 f 16 "1083 1083" 100 0 0 100 foo bar )); +DATA(insert OID = 1104 ( time_gt PGUID 11 f t f 2 f 16 "1083 1083" 100 0 0 100 foo bar )); +DATA(insert OID = 1105 ( time_ge PGUID 11 f t f 2 f 
16 "1083 1083" 100 0 0 100 foo bar )); +DATA(insert OID = 1106 ( time_ne PGUID 11 f t f 2 f 16 "1083 1083" 100 0 0 100 foo bar )); +DATA(insert OID = 1107 ( time_cmp PGUID 11 f t f 2 f 23 "1083 1083" 100 0 0 100 foo bar )); +DATA(insert OID = 1200 ( int42reltime PGUID 11 f t f 1 f 703 "21" 100 0 0 100 foo bar )); + +DATA(insert OID = 1230 ( char2icregexeq PGUID 11 f t f 2 f 16 "409 25" 100 0 0 100 foo bar )); +DATA(insert OID = 1231 ( char2icregexne PGUID 11 f t f 2 f 16 "409 25" 100 0 0 100 foo bar )); +DATA(insert OID = 1232 ( char4icregexeq PGUID 11 f t f 2 f 16 "410 25" 100 0 0 100 foo bar )); +DATA(insert OID = 1233 ( char4icregexne PGUID 11 f t f 2 f 16 "410 25" 100 0 0 100 foo bar )); +DATA(insert OID = 1234 ( char8icregexeq PGUID 11 f t f 2 f 16 "411 25" 100 0 0 100 foo bar )); +DATA(insert OID = 1235 ( char8icregexne PGUID 11 f t f 2 f 16 "411 25" 100 0 0 100 foo bar )); +DATA(insert OID = 1236 ( char16icregexeq PGUID 11 f t f 2 f 16 "20 25" 100 0 0 100 foo bar )); +DATA(insert OID = 1237 ( char16icregexne PGUID 11 f t f 2 f 16 "20 25" 100 0 0 100 foo bar )); +DATA(insert OID = 1238 ( texticregexeq PGUID 11 f t f 2 f 16 "25 25" 100 0 1 0 foo bar )); +DATA(insert OID = 1239 ( texticregexne PGUID 11 f t f 2 f 16 "25 25" 100 0 1 0 foo bar )); +DATA(insert OID = 1240 ( nameicregexeq PGUID 11 f t f 2 f 16 "19 25" 100 0 0 100 foo bar )); +DATA(insert OID = 1241 ( nameicregexne PGUID 11 f t f 2 f 16 "19 25" 100 0 0 100 foo bar )); + + +#include "nodes/pg_list.h" + +/* + * prototypes for functions pg_proc.c + */ +extern Oid ProcedureCreate(char* procedureName, + bool returnsSet, + char *returnTypeName, + char *languageName, + char *prosrc, + char *probin, + bool canCache, + bool trusted, + int32 byte_pct, + int32 perbyte_cpu, + int32 percall_cpu, + int32 outin_ratio, + List *argList, + CommandDest dest); + + +#endif /* PG_PROC_H */ diff --git a/src/backend/catalog/pg_rewrite.h b/src/backend/catalog/pg_rewrite.h new file mode 100644 index 00000000000..9f200746274 --- /dev/null +++ b/src/backend/catalog/pg_rewrite.h @@ -0,0 +1,64 @@ +/*------------------------------------------------------------------------- + * + * pg_rewrite.h-- + * definition of the system "rewrite-rule" relation (pg_rewrite) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_rewrite.h,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_REWRITE_H +#define PG_REWRITE_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_rewrite definition. cpp turns this into + * typedef struct FormData_pg_rewrite + * ---------------- + */ +CATALOG(pg_rewrite) { + NameData rulename; + char ev_type; + Oid ev_class; + int2 ev_attr; + bool is_instead; + text ev_qual; /* VARLENA */ + text action; /* VARLENA */ +} FormData_pg_rewrite; + +/* ---------------- + * Form_pg_rewrite corresponds to a pointer to a tuple with + * the format of pg_rewrite relation. 
+ * ---------------- + */ +typedef FormData_pg_rewrite *Form_pg_rewrite; + +/* ---------------- + * compiler constants for pg_rewrite + * ---------------- + */ +#define Natts_pg_rewrite 7 +#define Anum_pg_rewrite_rulename 1 +#define Anum_pg_rewrite_ev_type 2 +#define Anum_pg_rewrite_ev_class 3 +#define Anum_pg_rewrite_ev_attr 4 +#define Anum_pg_rewrite_is_instead 5 +#define Anum_pg_rewrite_ev_qual 6 +#define Anum_pg_rewrite_action 7 + +#endif /* PG_REWRITE_H */ diff --git a/src/backend/catalog/pg_server.h b/src/backend/catalog/pg_server.h new file mode 100644 index 00000000000..63052381959 --- /dev/null +++ b/src/backend/catalog/pg_server.h @@ -0,0 +1,56 @@ +/*------------------------------------------------------------------------- + * + * pg_server.h-- + * definition of the system "server" relation (pg_server) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_server.h,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_SERVER_H +#define PG_SERVER_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_server definition. cpp turns this into + * typedef struct FormData_pg_server + * ---------------- + */ +CATALOG(pg_server) BOOTSTRAP { + NameData sername; + int2 serpid; + int2 serport; +} FormData_pg_server; + +/* ---------------- + * Form_pg_server corresponds to a pointer to a tuple with + * the format of pg_server relation. + * ---------------- + */ +typedef FormData_pg_server *Form_pg_server; + +/* ---------------- + * compiler constants for pg_server + * ---------------- + */ +#define Natts_pg_server 3 +#define Anum_pg_server_sername 1 +#define Anum_pg_server_serpid 2 +#define Anum_pg_server_serport 3 + +#endif /* PG_SERVER_H */ diff --git a/src/backend/catalog/pg_statistic.h b/src/backend/catalog/pg_statistic.h new file mode 100644 index 00000000000..d8f0c19dffa --- /dev/null +++ b/src/backend/catalog/pg_statistic.h @@ -0,0 +1,60 @@ +/*------------------------------------------------------------------------- + * + * pg_statistic.h-- + * definition of the system "statistic" relation (pg_statistic) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_statistic.h,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_STATISTIC_H +#define PG_STATISTIC_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_statistic definition. 
cpp turns this into + * typedef struct FormData_pg_statistic + * ---------------- + */ +CATALOG(pg_statistic) { + Oid starelid; + int2 staattnum; + Oid staop; + text stalokey; /* VARIABLE LENGTH FIELD */ + text stahikey; /* VARIABLE LENGTH FIELD */ +} FormData_pg_statistic; + +/* ---------------- + * Form_pg_statistic corresponds to a pointer to a tuple with + * the format of pg_statistic relation. + * ---------------- + */ +typedef FormData_pg_statistic *Form_pg_statistic; + +/* ---------------- + * compiler constants for pg_statistic + * ---------------- + */ +#define Natts_pg_statistic 5 +#define Anum_pg_statistic_starelid 1 +#define Anum_pg_statistic_staattnum 2 +#define Anum_pg_statistic_staop 3 +#define Anum_pg_statistic_stalokey 4 +#define Anum_pg_statistic_stahikey 5 + +#endif /* PG_STATISTIC_H */ diff --git a/src/backend/catalog/pg_time.h b/src/backend/catalog/pg_time.h new file mode 100644 index 00000000000..4990f231ba1 --- /dev/null +++ b/src/backend/catalog/pg_time.h @@ -0,0 +1,41 @@ +/*------------------------------------------------------------------------- + * + * pg_time.h-- + * the system commit-time relation "pg_time" is not a "heap" relation. + * it is automatically created by the transam/ code and the + * information here is all bogus and is just here to make the + * relcache code happy. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_time.h,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + * NOTES + * The structures and macros used by the transam/ code + * to access pg_time should some day go here -cim 6/18/90 + * + *------------------------------------------------------------------------- + */ +#ifndef PG_TIME_H +#define PG_TIME_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +CATALOG(pg_time) BOOTSTRAP { + Oid timefoo; +} FormData_pg_time; + +typedef FormData_pg_time *Form_pg_time; + +#define Natts_pg_time 1 +#define Anum_pg_time_timefoo 1 + + +#endif /* PG_TIME_H */ diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c new file mode 100644 index 00000000000..fe9baa05a44 --- /dev/null +++ b/src/backend/catalog/pg_type.c @@ -0,0 +1,595 @@ +/*------------------------------------------------------------------------- + * + * pg_type.c-- + * routines to support manipulation of the pg_type relation + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/catalog/pg_type.c,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "access/heapam.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "access/tupdesc.h" +#include "utils/builtins.h" +#include "utils/rel.h" +#include "utils/palloc.h" +#include "fmgr.h" +#include "utils/elog.h" +#include "parser/catalog_utils.h" + +#include "catalog/catname.h" +#include "utils/syscache.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "catalog/indexing.h" +#include "storage/lmgr.h" + +/* ---------------------------------------------------------------- + * TypeGetWithOpenRelation + * + * preforms a scan on pg_type for a type tuple with the + * given type name. 
+ * ---------------------------------------------------------------- + * pg_type_desc -- reldesc for pg_type + * typeName -- name of type to be fetched + * defined -- has the type been defined? + */ +static Oid +TypeGetWithOpenRelation(Relation pg_type_desc, + char* typeName, + bool *defined) +{ + HeapScanDesc scan; + HeapTuple tup; + + static ScanKeyData typeKey[1] = { + { 0, Anum_pg_type_typname, NameEqualRegProcedure } + }; + + /* ---------------- + * initialize the scan key and begin a scan of pg_type + * ---------------- + */ + fmgr_info(NameEqualRegProcedure, + &typeKey[0].sk_func, &typeKey[0].sk_nargs); + typeKey[0].sk_argument = PointerGetDatum(typeName); + + scan = heap_beginscan(pg_type_desc, + 0, + SelfTimeQual, + 1, + typeKey); + + /* ---------------- + * get the type tuple, if it exists. + * ---------------- + */ + tup = heap_getnext(scan, 0, (Buffer *) 0); + + /* ---------------- + * if no type tuple exists for the given type name, then + * end the scan and return appropriate information. + * ---------------- + */ + if (! HeapTupleIsValid(tup)) { + heap_endscan(scan); + *defined = false; + return InvalidOid; + } + + /* ---------------- + * here, the type tuple does exist so we pull information from + * the typisdefined field of the tuple and return the tuple's + * oid, which is the oid of the type. + * ---------------- + */ + heap_endscan(scan); + *defined = (bool) ((TypeTupleForm) GETSTRUCT(tup))->typisdefined; + + return + tup->t_oid; +} + +/* ---------------------------------------------------------------- + * TypeGet + * + * Finds the ObjectId of a type, even if uncommitted; "defined" + * is only set if the type has actually been defined, i.e., if + * the type tuple is not a shell. + * + * Note: the meat of this function is now in the function + * TypeGetWithOpenRelation(). -cim 6/15/90 + * + * Also called from util/remove.c + * ---------------------------------------------------------------- + */ +Oid +TypeGet(char* typeName, /* name of type to be fetched */ + bool *defined) /* has the type been defined? */ +{ + Relation pg_type_desc; + Oid typeoid; + + /* ---------------- + * open the pg_type relation + * ---------------- + */ + pg_type_desc = heap_openr(TypeRelationName); + + /* ---------------- + * scan the type relation for the information we want + * ---------------- + */ + typeoid = TypeGetWithOpenRelation(pg_type_desc, + typeName, + defined); + + /* ---------------- + * close the type relation and return the type oid. 
+ * ---------------- + */ + heap_close(pg_type_desc); + + return + typeoid; +} + +/* ---------------------------------------------------------------- + * TypeShellMakeWithOpenRelation + * + * ---------------------------------------------------------------- + */ +Oid +TypeShellMakeWithOpenRelation(Relation pg_type_desc, char *typeName) +{ + register int i; + HeapTuple tup; + Datum values[ Natts_pg_type ]; + char nulls[ Natts_pg_type ]; + Oid typoid; + TupleDesc tupDesc; + + /* ---------------- + * initialize our nulls[] and values[] arrays + * ---------------- + */ + for (i = 0; i < Natts_pg_type; ++i) { + nulls[i] = ' '; + values[i] = (Datum)NULL; /* redundant, but safe */ + } + + /* ---------------- + * initialize values[] with the type name and + * ---------------- + */ + i = 0; + values[i++] = (Datum) typeName; /* 1 */ + values[i++] = (Datum) InvalidOid; /* 2 */ + values[i++] = (Datum) (int16) 0; /* 3 */ + values[i++] = (Datum) (int16) 0; /* 4 */ + values[i++] = (Datum) (bool) 0; /* 5 */ + values[i++] = (Datum) (bool) 0; /* 6 */ + values[i++] = (Datum) (bool) 0; /* 7 */ + values[i++] = (Datum) (bool) 0; /* 8 */ + values[i++] = (Datum) InvalidOid; /* 9 */ + values[i++] = (Datum) InvalidOid; /* 10 */ + values[i++] = (Datum) InvalidOid; /* 11 */ + values[i++] = (Datum) InvalidOid; /* 12 */ + values[i++] = (Datum) InvalidOid; /* 13 */ + values[i++] = (Datum) InvalidOid; /* 14 */ + values[i++] = (Datum) 'i'; /* 15 */ + + /* + * ... and fill typdefault with a bogus value + */ + values[i++] = + (Datum)fmgr(TextInRegProcedure, typeName); /* 15 */ + + /* ---------------- + * create a new type tuple with FormHeapTuple + * ---------------- + */ + tupDesc = pg_type_desc->rd_att; + + tup = heap_formtuple(tupDesc, values, nulls); + + /* ---------------- + * insert the tuple in the relation and get the tuple's oid. + * ---------------- + */ + heap_insert(pg_type_desc, tup); + typoid = tup->t_oid; + + if (RelationGetRelationTupleForm(pg_type_desc)->relhasindex) + { + Relation idescs[Num_pg_type_indices]; + + CatalogOpenIndices(Num_pg_type_indices, Name_pg_type_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_type_indices, pg_type_desc, tup); + CatalogCloseIndices(Num_pg_type_indices, idescs); + } + /* ---------------- + * free the tuple and return the type-oid + * ---------------- + */ + pfree(tup); + + return + typoid; +} + +/* ---------------------------------------------------------------- + * TypeShellMake + * + * This procedure inserts a "shell" tuple into the type + * relation. The type tuple inserted has invalid values + * and in particular, the "typisdefined" field is false. + * + * This is used so that a tuple exists in the catalogs. + * The invalid fields should be fixed up sometime after + * this routine is called, and then the "typeisdefined" + * field is set to true. -cim 6/15/90 + * ---------------------------------------------------------------- + */ +Oid +TypeShellMake(char *typeName) +{ + Relation pg_type_desc; + Oid typoid; + + Assert(PointerIsValid(typeName)); + + /* ---------------- + * open pg_type + * ---------------- + */ + pg_type_desc = heap_openr(TypeRelationName); + + /* ---------------- + * insert the shell tuple + * ---------------- + */ + typoid = TypeShellMakeWithOpenRelation(pg_type_desc, typeName); + + /* ---------------- + * close pg_type and return the tuple's oid. 
+ * ---------------- + */ + heap_close(pg_type_desc); + + return + typoid; +} + +/* ---------------------------------------------------------------- + * TypeCreate + * + * This does all the necessary work needed to define a new type. + * ---------------------------------------------------------------- + */ +Oid +TypeCreate(char *typeName, + Oid relationOid, /* only for 'c'atalog typeTypes */ + int16 internalSize, + int16 externalSize, + char typeType, + char typDelim, + char *inputProcedure, + char *outputProcedure, + char *sendProcedure, + char *receiveProcedure, + char *elementTypeName, + char *defaultTypeValue, /* internal rep */ + bool passedByValue, + char alignment) +{ + register i, j; + Relation pg_type_desc; + HeapScanDesc pg_type_scan; + + Oid typeObjectId; + Oid elementObjectId = InvalidOid; + + HeapTuple tup; + char nulls[Natts_pg_type]; + char replaces[Natts_pg_type]; + Datum values[Natts_pg_type]; + + Buffer buffer; + char *procname; + char *procs[4]; + bool defined; + ItemPointerData itemPointerData; + TupleDesc tupDesc; + + Oid argList[8]; + + + static ScanKeyData typeKey[1] = { + { 0, Anum_pg_type_typname, NameEqualRegProcedure } + }; + + fmgr_info(NameEqualRegProcedure, + &typeKey[0].sk_func, &typeKey[0].sk_nargs); + + /* ---------------- + * check that the type is not already defined. + * ---------------- + */ + typeObjectId = TypeGet(typeName, &defined); + if (OidIsValid(typeObjectId) && defined) { + elog(WARN, "TypeCreate: type %s already defined", typeName); + } + + /* ---------------- + * if this type has an associated elementType, then we check that + * it is defined. + * ---------------- + */ + if (elementTypeName) { + elementObjectId = TypeGet(elementTypeName, &defined); + if (!defined) { + elog(WARN, "TypeCreate: type %s is not defined", elementTypeName); + } + } + + /* ---------------- + * XXX comment me + * ---------------- + */ + if (externalSize == 0) { + externalSize = -1; /* variable length */ + } + + /* ---------------- + * initialize arrays needed by FormHeapTuple + * ---------------- + */ + for (i = 0; i < Natts_pg_type; ++i) { + nulls[i] = ' '; + replaces[i] = 'r'; + values[i] = (Datum)NULL; /* redundant, but nice */ + } + + /* + * XXX + * + * Do this so that user-defined types have size -1 instead of zero if + * they are variable-length - this is so that everything else in the + * backend works. + */ + + if (internalSize == 0) + internalSize = -1; + + /* ---------------- + * initialize the values[] information + * ---------------- + */ + i = 0; + values[i++] = PointerGetDatum(typeName); /* 1 */ + values[i++] = (Datum) GetUserId(); /* 2 */ + values[i++] = (Datum) internalSize; /* 3 */ + values[i++] = (Datum) externalSize; /* 4 */ + values[i++] = (Datum) passedByValue; /* 5 */ + values[i++] = (Datum) typeType; /* 6 */ + values[i++] = (Datum) (bool) 1; /* 7 */ + values[i++] = (Datum) typDelim; /* 8 */ + values[i++] = (Datum) (typeType == 'c' ? relationOid : InvalidOid); /* 9 */ + values[i++] = (Datum) elementObjectId; /* 10 */ + + /* + * arguments to type input and output functions must be 0 + */ + memset(argList, 0, 8 * sizeof(Oid)); + + procs[0] = inputProcedure; + procs[1] = outputProcedure; + procs[2] = (receiveProcedure) ? receiveProcedure : inputProcedure; + procs[3] = (sendProcedure) ? 
sendProcedure : outputProcedure; + + for (j = 0; j < 4; ++j) { + procname = procs[j]; + + tup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(procname), + Int32GetDatum(1), + PointerGetDatum(argList), + 0); + + if (!HeapTupleIsValid(tup)) { + /* + * it is possible for the input/output procedure + * to take two arguments, where the second argument + * is the element type (eg array_in/array_out) + */ + if (OidIsValid(elementObjectId)) { + tup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(procname), + Int32GetDatum(2), + PointerGetDatum(argList), + 0); + } + if (!HeapTupleIsValid(tup)) { + func_error("TypeCreate", procname, 1, (int*)argList); + } + } + + values[i++] = (Datum)tup->t_oid; /* 11 - 14 */ + } + + /* ---------------- + * set default alignment + * ---------------- + */ + values[i++] = (Datum)alignment; /* 15 */ + + /* ---------------- + * initialize the default value for this type. + * ---------------- + */ + values[i] = (Datum)fmgr(TextInRegProcedure, /* 16 */ + PointerIsValid(defaultTypeValue) + ? defaultTypeValue : "-"); /* XXX default typdefault */ + + /* ---------------- + * open pg_type and begin a scan for the type name. + * ---------------- + */ + pg_type_desc = heap_openr(TypeRelationName); + + /* ----------------- + * Set a write lock initially so as not upgrade a read to a write + * when the heap_insert() or heap_replace() is called. + * ----------------- + */ + RelationSetLockForWrite(pg_type_desc); + + typeKey[0].sk_argument = PointerGetDatum(typeName); + pg_type_scan = heap_beginscan(pg_type_desc, + 0, + SelfTimeQual, + 1, + typeKey); + + /* ---------------- + * define the type either by adding a tuple to the type + * relation, or by updating the fields of the "shell" tuple + * already there. + * ---------------- + */ + tup = heap_getnext(pg_type_scan, 0, &buffer); + if (HeapTupleIsValid(tup)) { + tup = heap_modifytuple(tup, + buffer, + pg_type_desc, + values, + nulls, + replaces); + + /* XXX may not be necessary */ + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + + setheapoverride(true); + (void) heap_replace(pg_type_desc, &itemPointerData, tup); + setheapoverride(false); + + typeObjectId = tup->t_oid; + } else { + tupDesc = pg_type_desc->rd_att; + + tup = heap_formtuple(tupDesc, + values, + nulls); + + heap_insert(pg_type_desc, tup); + + typeObjectId = tup->t_oid; + } + + /* ---------------- + * finish up + * ---------------- + */ + heap_endscan(pg_type_scan); + + if (RelationGetRelationTupleForm(pg_type_desc)->relhasindex) + { + Relation idescs[Num_pg_type_indices]; + + CatalogOpenIndices(Num_pg_type_indices, Name_pg_type_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_type_indices, pg_type_desc, tup); + CatalogCloseIndices(Num_pg_type_indices, idescs); + } + RelationUnsetLockForWrite(pg_type_desc); + heap_close(pg_type_desc); + + + return + typeObjectId; +} + +/* ---------------------------------------------------------------- + * TypeRename + * + * This renames a type + * ---------------------------------------------------------------- + */ +void +TypeRename(char *oldTypeName, char *newTypeName) +{ + Relation pg_type_desc; + Relation idescs[Num_pg_type_indices]; + Oid type_oid; + HeapTuple tup; + bool defined; + ItemPointerData itemPointerData; + + /* check that that the new type is not already defined */ + type_oid = TypeGet(newTypeName, &defined); + if (OidIsValid(type_oid) && defined) { + elog(WARN, "TypeRename: type %s already defined", newTypeName); + } + + /* get the type tuple from the catalog index scan manager */ + pg_type_desc = 
heap_openr(TypeRelationName); + tup = TypeNameIndexScan(pg_type_desc, oldTypeName); + + /* ---------------- + * change the name of the type + * ---------------- + */ + if (HeapTupleIsValid(tup)) { + + namestrcpy(& (((TypeTupleForm) GETSTRUCT(tup))->typname),newTypeName); + + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + + setheapoverride(true); + heap_replace(pg_type_desc, &itemPointerData, tup); + setheapoverride(false); + + /* update the system catalog indices */ + CatalogOpenIndices(Num_pg_type_indices, Name_pg_type_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_type_indices, pg_type_desc, tup); + CatalogCloseIndices(Num_pg_type_indices, idescs); + + /* all done */ + pfree(tup); + + } else { + elog(WARN, "TypeRename: type %s not defined", oldTypeName); + } + + /* finish up */ + heap_close(pg_type_desc); +} + +/* + * makeArrayTypeName(typeName); + * - given a base type name, make an array of type name out of it + * + * the CALLER is responsible for pfreeing the + */ + +char* +makeArrayTypeName(char* typeName) +{ + char *arr; + + if (!typeName) return NULL; + arr = palloc (strlen(typeName) + 2); + arr[0] = '_'; + strcpy(arr+1, typeName); + + return arr; + +} diff --git a/src/backend/catalog/pg_type.h b/src/backend/catalog/pg_type.h new file mode 100644 index 00000000000..dc3fe94e8aa --- /dev/null +++ b/src/backend/catalog/pg_type.h @@ -0,0 +1,267 @@ +/*------------------------------------------------------------------------- + * + * pg_type.h-- + * definition of the system "type" relation (pg_type) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_type.h,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_TYPE_H +#define PG_TYPE_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" +#include "utils/rel.h" /* for Relation */ + +/* ---------------- + * pg_type definition. cpp turns this into + * typedef struct FormData_pg_type + * ---------------- + */ +CATALOG(pg_type) BOOTSTRAP { + NameData typname; + Oid typowner; + int2 typlen; + int2 typprtlen; + bool typbyval; + char typtype; + bool typisdefined; + char typdelim; + Oid typrelid; + Oid typelem; + regproc typinput; + regproc typoutput; + regproc typreceive; + regproc typsend; + char typalign; /* alignment (c=char, s=short, i=int, d=double) */ + text typdefault; /* VARIABLE LENGTH FIELD */ +} TypeTupleFormData; + +/* ---------------- + * Form_pg_type corresponds to a pointer to a tuple with + * the format of pg_type relation. 
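+ *	(in this release the typedef below is actually spelled
+ *	TypeTupleForm, i.e. a pointer to TypeTupleFormData).
+ *
+ *	As a usage sketch, mirroring the idiom in pg_type.c
+ *	(TypeGetWithOpenRelation), a fetched pg_type tuple is read
+ *	through this pointer:
+ *
+ *		TypeTupleForm typtup = (TypeTupleForm) GETSTRUCT(tup);
+ *		bool isdefined = typtup->typisdefined;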
+ * ---------------- + */ +typedef TypeTupleFormData *TypeTupleForm; + +/* ---------------- + * compiler constants for pg_type + * ---------------- + */ +#define Natts_pg_type 16 +#define Anum_pg_type_typname 1 +#define Anum_pg_type_typowner 2 +#define Anum_pg_type_typlen 3 +#define Anum_pg_type_typprtlen 4 +#define Anum_pg_type_typbyval 5 +#define Anum_pg_type_typtype 6 +#define Anum_pg_type_typisdefined 7 +#define Anum_pg_type_typdelim 8 +#define Anum_pg_type_typrelid 9 +#define Anum_pg_type_typelem 10 +#define Anum_pg_type_typinput 11 +#define Anum_pg_type_typoutput 12 +#define Anum_pg_type_typreceive 13 +#define Anum_pg_type_typsend 14 +#define Anum_pg_type_typalign 15 +#define Anum_pg_type_typdefault 16 + +/* ---------------- + * initial contents of pg_type + * ---------------- + */ + +/* keep the following ordered by OID so that later changes can be made easier*/ + +/* OIDS 1 - 99 */ +DATA(insert OID = 16 ( bool PGUID 1 1 t b t \054 0 0 boolin boolout boolin boolout c _null_ )); + +#define BOOLOID 16 + +DATA(insert OID = 17 ( bytea PGUID -1 -1 f b t \054 0 18 byteain byteaout byteain byteaout i _null_ )); +DATA(insert OID = 18 ( char PGUID 1 1 t b t \054 0 0 charin charout charin charout c _null_ )); + +DATA(insert OID = 19 ( name PGUID NAMEDATALEN NAMEDATALEN f b t \054 0 18 namein nameout namein nameout d _null_ )); +DATA(insert OID = 20 ( char16 PGUID 16 16 f b t \054 0 18 char16in char16out char16in char16out i _null_ )); +/*DATA(insert OID = 20 ( dt PGUID 4 10 t b t \054 0 0 dtin dtout dtin dtout i _null_ )); */ +DATA(insert OID = 21 ( int2 PGUID 2 5 t b t \054 0 0 int2in int2out int2in int2out s _null_ )); + +#define INT2OID 21 + +DATA(insert OID = 22 ( int28 PGUID 16 50 f b t \054 0 21 int28in int28out int28in int28out i _null_ )); + +/* + * XXX -- the implementation of int28's in postgres is a hack, and will + * go away someday. until that happens, there is a case (in the + * catalog cache management code) where we need to step gingerly + * over piles of int28's on the sidewalk. in order to do so, we + * need the OID of the int28 tuple from pg_type. 
+ */ + +#define INT28OID 22 + + +DATA(insert OID = 23 ( int4 PGUID 4 10 t b t \054 0 0 int4in int4out int4in int4out i _null_ )); + +#define INT4OID 23 + +DATA(insert OID = 24 ( regproc PGUID 4 16 t b t \054 0 0 regprocin regprocout regprocin regprocout i _null_ )); +DATA(insert OID = 25 ( text PGUID -1 -1 f b t \054 0 18 textin textout textin textout i _null_ )); +DATA(insert OID = 26 ( oid PGUID 4 10 t b t \054 0 0 int4in int4out int4in int4out i _null_ )); + +#define OIDOID 26 + +DATA(insert OID = 27 ( tid PGUID 6 19 f b t \054 0 0 tidin tidout tidin tidout i _null_ )); +DATA(insert OID = 28 ( xid PGUID 4 12 t b t \054 0 0 xidin xidout xidin xidout i _null_ )); +DATA(insert OID = 29 ( cid PGUID 2 3 t b t \054 0 0 cidin cidout cidin cidout s _null_ )); +DATA(insert OID = 30 ( oid8 PGUID 32 89 f b t \054 0 26 oid8in oid8out oid8in oid8out i _null_ )); +DATA(insert OID = 32 ( SET PGUID -1 -1 f r t \054 0 -1 textin textout textin textout i _null_ )); + +DATA(insert OID = 71 ( pg_type PGUID 1 1 t b t \054 71 0 foo bar foo bar c _null_)); +DATA(insert OID = 75 ( pg_attribute PGUID 1 1 t b t \054 75 0 foo bar foo bar c _null_)); +DATA(insert OID = 76 ( pg_demon PGUID 1 1 t b t \054 76 0 foo bar foo bar c _null_)); +DATA(insert OID = 80 ( pg_magic PGUID 1 1 t b t \054 80 0 foo bar foo bar c _null_)); +DATA(insert OID = 81 ( pg_proc PGUID 1 1 t b t \054 81 0 foo bar foo bar c _null_)); +DATA(insert OID = 82 ( pg_server PGUID 1 1 t b t \054 82 0 foo bar foo bar c _null_)); +DATA(insert OID = 83 ( pg_class PGUID 1 1 t b t \054 83 0 foo bar foo bar c _null_)); +DATA(insert OID = 86 ( pg_user PGUID 1 1 t b t \054 86 0 foo bar foo bar c _null_)); +DATA(insert OID = 87 ( pg_group PGUID 1 1 t b t \054 87 0 foo bar foo bar c _null_)); +DATA(insert OID = 88 ( pg_database PGUID 1 1 t b t \054 88 0 foo bar foo bar c _null_)); +DATA(insert OID = 89 ( pg_defaults PGUID 1 1 t b t \054 89 0 foo bar foo bar c _null_)); +DATA(insert OID = 90 ( pg_variable PGUID 1 1 t b t \054 90 0 foo bar foo bar c _null_)); +DATA(insert OID = 99 ( pg_log PGUID 1 1 t b t \054 99 0 foo bar foo bar c _null_)); + +/* OIDS 100 - 199 */ + +DATA(insert OID = 100 ( pg_time PGUID 1 1 t b t \054 100 0 foo bar foo bar c _null_)); +DATA(insert OID = 101 ( pg_time PGUID 1 1 t b t \054 101 0 foo bar foo bar c _null_)); + +/* OIDS 200 - 299 */ + +DATA(insert OID = 210 ( smgr PGUID 2 12 t b t \054 0 -1 smgrin smgrout smgrin smgrout s _null_ )); + +/* OIDS 300 - 399 */ + +/* OIDS 400 - 499 */ +DATA(insert OID = 409 ( char2 PGUID 2 2 t b t \054 0 18 char2in char2out char2in char2out s _null_ )); +DATA(insert OID = 410 ( char4 PGUID 4 4 t b t \054 0 18 char4in char4out char4in char4out i _null_ )); +DATA(insert OID = 411 ( char8 PGUID 8 8 f b t \054 0 18 char8in char8out char8in char8out i _null_ )); + +/* OIDS 500 - 599 */ + +/* OIDS 600 - 699 */ +DATA(insert OID = 600 ( point PGUID 16 24 f b t \054 0 701 point_in point_out point_in point_out d _null_ )); +DATA(insert OID = 601 ( lseg PGUID 32 48 f b t \054 0 600 lseg_in lseg_out lseg_in lseg_out d _null_ )); +DATA(insert OID = 602 ( path PGUID -1 -1 f b t \054 0 600 path_in path_out path_in path_out d _null_ )); +DATA(insert OID = 603 ( box PGUID 32 100 f b t \073 0 600 box_in box_out box_in box_out d _null_ )); +DATA(insert OID = 604 ( polygon PGUID -1 -1 f b t \054 0 -1 poly_in poly_out poly_in poly_out d _null_ )); +DATA(insert OID = 605 ( filename PGUID 256 -1 f b t \054 0 18 filename_in filename_out filename_in filename_out i _null_ )); + +/* OIDS 700 - 799 */ + +#define FLOAT4OID 700 + 
+DATA(insert OID = 700 ( float4 PGUID 4 12 f b t \054 0 0 float4in float4out float4in float4out i _null_ )); + + +#define FLOAT8OID 701 + +DATA(insert OID = 701 ( float8 PGUID 8 24 f b t \054 0 0 float8in float8out float8in float8out d _null_ )); +DATA(insert OID = 702 ( abstime PGUID 4 20 t b t \054 0 0 nabstimein nabstimeout nabstimein nabstimeout i _null_ )); +DATA(insert OID = 703 ( reltime PGUID 4 20 t b t \054 0 0 reltimein reltimeout reltimein reltimeout i _null_ )); +DATA(insert OID = 704 ( tinterval PGUID 12 47 f b t \054 0 0 tintervalin tintervalout tintervalin tintervalout i _null_ )); +DATA(insert OID = 705 ( unknown PGUID -1 -1 f b t \054 0 18 textin textout textin textout i _null_ )); + +#define UNKNOWNOID 705 + +/* OIDS 800 - 899 */ +DATA(insert OID = 810 ( oidint2 PGUID 6 20 f b t \054 0 0 oidint2in oidint2out oidint2in oidint2out i _null_ )); + +/* OIDS 900 - 999 */ +DATA(insert OID = 910 ( oidint4 PGUID 8 20 f b t \054 0 0 oidint4in oidint4out oidint4in oidint4out i _null_ )); +DATA(insert OID = 911 ( oidname PGUID OIDNAMELEN OIDNAMELEN f b t \054 0 0 oidnamein oidnameout oidnamein oidnameout i _null_ )); + +/* OIDS 1000 - 1099 */ +DATA(insert OID = 1000 ( _bool PGUID -1 -1 f b t \054 0 16 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1001 ( _bytea PGUID -1 -1 f b t \054 0 17 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1002 ( _char PGUID -1 -1 f b t \054 0 18 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1003 ( _name PGUID -1 -1 f b t \054 0 19 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1004 ( _char16 PGUID -1 -1 f b t \054 0 19 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1005 ( _int2 PGUID -1 -1 f b t \054 0 21 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1006 ( _int28 PGUID -1 -1 f b t \054 0 22 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1007 ( _int4 PGUID -1 -1 f b t \054 0 23 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1008 ( _regproc PGUID -1 -1 f b t \054 0 24 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1009 ( _text PGUID -1 -1 f b t \054 0 25 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1028 ( _oid PGUID -1 -1 f b t \054 0 26 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1010 ( _tid PGUID -1 -1 f b t \054 0 27 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1011 ( _xid PGUID -1 -1 f b t \054 0 28 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1012 ( _cid PGUID -1 -1 f b t \054 0 29 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1013 ( _oid8 PGUID -1 -1 f b t \054 0 30 array_in array_out array_in array_out i _null_ )); +/*DATA(insert OID = 1014 ( _lock PGUID -1 -1 f b t \054 0 31 array_in array_out array_in array_out i _null_ ));*/ +DATA(insert OID = 1015 ( _stub PGUID -1 -1 f b t \054 0 33 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1016 ( _ref PGUID -1 -1 f b t \054 0 591 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1017 ( _point PGUID -1 -1 f b t \054 0 600 array_in array_out array_in array_out d _null_ )); +DATA(insert OID = 1018 ( _lseg PGUID -1 -1 f b t \054 0 601 array_in array_out array_in array_out d _null_ )); +DATA(insert OID = 1019 ( _path PGUID -1 -1 f b t \054 0 602 array_in array_out array_in array_out d _null_ )); +DATA(insert OID = 1020 ( _box PGUID -1 -1 f 
b t \073 0 603 array_in array_out array_in array_out d _null_ )); +DATA(insert OID = 1021 ( _float4 PGUID -1 -1 f b t \054 0 700 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1022 ( _float8 PGUID -1 -1 f b t \054 0 701 array_in array_out array_in array_out d _null_ )); +DATA(insert OID = 1023 ( _abstime PGUID -1 -1 f b t \054 0 702 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1024 ( _reltime PGUID -1 -1 f b t \054 0 703 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1025 ( _tinterval PGUID -1 -1 f b t \054 0 704 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1026 ( _filename PGUID -1 -1 f b t \054 0 605 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1027 ( _polygon PGUID -1 -1 f b t \054 0 604 array_in array_out array_in array_out d _null_ )); +/* Note: the size of an aclitem needs to match sizeof(AclItem) in acl.h */ +DATA(insert OID = 1033 ( aclitem PGUID 8 -1 f b t \054 0 0 aclitemin aclitemout aclitemin aclitemout i _null_ )); +DATA(insert OID = 1034 ( _aclitem PGUID -1 -1 f b t \054 0 1033 array_in array_out array_in array_out i _null_ )); + +DATA(insert OID = 1039 ( _char2 PGUID -1 -1 f b t \054 0 409 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1040 ( _char4 PGUID -1 -1 f b t \054 0 410 array_in array_out array_in array_out i _null_ )); +DATA(insert OID = 1041 ( _char8 PGUID -1 -1 f b t \054 0 411 array_in array_out array_in array_out i _null_ )); + +#define BPCHAROID 1042 +DATA(insert OID = 1042 ( bpchar PGUID -1 -1 f b t \054 0 18 bpcharin bpcharout bpcharin bpcharout i _null_ )); +#define VARCHAROID 1043 +DATA(insert OID = 1043 ( varchar PGUID -1 -1 f b t \054 0 18 varcharin varcharout varcharin varcharout i _null_ )); + +DATA(insert OID = 1082 ( date PGUID 4 10 t b t \054 0 0 date_in date_out date_in date_out i _null_ )); +DATA(insert OID = 1083 ( time PGUID 8 16 f b t \054 0 0 time_in time_out time_in time_out i _null_ )); +/* + * prototypes for functions in pg_type.c + */ +extern Oid TypeGet(char *typeName, bool *defined); +extern Oid TypeShellMakeWithOpenRelation(Relation pg_type_desc, + char *typeName); +extern Oid TypeShellMake(char *typeName); +extern Oid TypeCreate(char *typeName, + Oid relationOid, + int16 internalSize, + int16 externalSize, + char typeType, + char typDelim, + char *inputProcedure, + char *outputProcedure, + char *sendProcedure, + char *receiveProcedure, + char *elementTypeName, + char *defaultTypeValue, + bool passedByValue, char alignment); +extern void TypeRename(char *oldTypeName, char *newTypeName); +extern char *makeArrayTypeName(char *typeName); + + +#endif /* PG_TYPE_H */ diff --git a/src/backend/catalog/pg_user.h b/src/backend/catalog/pg_user.h new file mode 100644 index 00000000000..25fd02cc0c7 --- /dev/null +++ b/src/backend/catalog/pg_user.h @@ -0,0 +1,99 @@ +/*------------------------------------------------------------------------- + * + * pg_user.h-- + * definition of the system "user" relation (pg_user) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_user.h,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. 
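+ *
+ *	in essence the DATA() wrapper is stripped, so the postgres entry
+ *	below, DATA(insert OID = 0 ( postgres PGUID t t t t ));, ends up
+ *	in the generated .bki file as (roughly; genbki.sh is the
+ *	authority on the exact form):
+ *
+ *		insert OID = 0 ( postgres PGUID t t t t )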
+ * + *------------------------------------------------------------------------- + */ +#ifndef PG_USER_H +#define PG_USER_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +/* ---------------- + * pg_user definition. cpp turns this into + * typedef struct FormData_pg_user + * ---------------- + */ +CATALOG(pg_user) BOOTSTRAP { + NameData usename; + int4 usesysid; + bool usecreatedb; + bool usetrace; + bool usesuper; + bool usecatupd; +} FormData_pg_user; + +/* ---------------- + * Form_pg_user corresponds to a pointer to a tuple with + * the format of pg_user relation. + * ---------------- + */ +typedef FormData_pg_user *Form_pg_user; + +/* ---------------- + * compiler constants for pg_user + * ---------------- + */ +#define Natts_pg_user 6 +#define Anum_pg_user_usename 1 +#define Anum_pg_user_usesysid 2 +#define Anum_pg_user_usecreatedb 3 +#define Anum_pg_user_usetrace 4 +#define Anum_pg_user_usesuper 5 +#define Anum_pg_user_usecatupd 6 + +/* ---------------- + * initial contents of pg_user + * ---------------- + */ +DATA(insert OID = 0 ( postgres PGUID t t t t )); + +BKI_BEGIN +#ifdef ALLOW_PG_GROUP +BKI_END + +DATA(insert OID = 0 ( mike 799 t t t t )); +DATA(insert OID = 0 ( mao 1806 t t t t )); +DATA(insert OID = 0 ( hellers 1089 t t t t )); +DATA(insert OID = 0 ( joey 5209 t t t t )); +DATA(insert OID = 0 ( jolly 5443 t t t t )); +DATA(insert OID = 0 ( sunita 6559 t t t t )); +DATA(insert OID = 0 ( paxson 3029 t t t t )); +DATA(insert OID = 0 ( marc 2435 t t t t )); +DATA(insert OID = 0 ( jiangwu 6124 t t t t )); +DATA(insert OID = 0 ( aoki 2360 t t t t )); +DATA(insert OID = 0 ( avi 31080 t t t t )); +DATA(insert OID = 0 ( kristin 1123 t t t t )); +DATA(insert OID = 0 ( andrew 5229 t t t t )); +DATA(insert OID = 0 ( nobuko 5493 t t t t )); +DATA(insert OID = 0 ( hartzell 6676 t t t t )); +DATA(insert OID = 0 ( devine 6724 t t t t )); +DATA(insert OID = 0 ( boris 6396 t t t t )); +DATA(insert OID = 0 ( sklower 354 t t t t )); +DATA(insert OID = 0 ( marcel 31113 t t t t )); +DATA(insert OID = 0 ( ginger 3692 t t t t )); +DATA(insert OID = 0 ( woodruff 31026 t t t t )); +DATA(insert OID = 0 ( searcher 8261 t t t t )); + +BKI_BEGIN +#endif /* ALLOW_PG_GROUP */ +BKI_END + +#endif /* PG_USER_H */ diff --git a/src/backend/catalog/pg_variable.h b/src/backend/catalog/pg_variable.h new file mode 100644 index 00000000000..d38a1185740 --- /dev/null +++ b/src/backend/catalog/pg_variable.h @@ -0,0 +1,40 @@ +/*------------------------------------------------------------------------- + * + * pg_variable.h-- + * the system variable relation "pg_variable" is not a "heap" relation. + * it is automatically created by the transam/ code and the + * information here is all bogus and is just here to make the + * relcache code happy. 
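The CATALOG() struct, the Form_pg_user pointer typedef, and the Natts_/Anum_ constants in pg_user.h above are the pieces backend code uses once it has a pg_user HeapTuple in hand. The sketch below is purely illustrative (the helper names and the exact include list are assumptions, not part of this patch); GETSTRUCT() and heap_getattr() are the same accessors that the command and async code later in this distribution rely on.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/htup.h"
    #include "access/tupdesc.h"
    #include "storage/buf.h"
    #include "catalog/pg_user.h"

    /* Illustrative helper (not part of this patch): read the user id by
     * overlaying the generated FormData_pg_user struct on the tuple body. */
    static int4
    example_get_usesysid(HeapTuple userTuple)
    {
        Form_pg_user userForm = (Form_pg_user) GETSTRUCT(userTuple);

        return userForm->usesysid;
    }

    /* Equivalent field-by-field access, using the Anum_ constants above. */
    static int4
    example_get_usesysid_by_attnum(HeapTuple userTuple, Buffer buf, TupleDesc tupdesc)
    {
        bool  isnull;
        Datum d = (Datum) heap_getattr(userTuple, buf, Anum_pg_user_usesysid,
                                       tupdesc, &isnull);

        return DatumGetInt32(d);
    }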
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_variable.h,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + * NOTES + * The structures and macros used by the transam/ code + * to access pg_variable should someday go here -cim 6/18/90 + * + *------------------------------------------------------------------------- + */ +#ifndef PG_VARIABLE_H +#define PG_VARIABLE_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" + +CATALOG(pg_variable) BOOTSTRAP { + Oid varfoo; +} FormData_pg_variable; + +typedef FormData_pg_variable *Form_pg_variable; + +#define Natts_pg_variable 1 +#define Anum_pg_variable_varfoo 1 + +#endif /* PG_VARIABLE_H */ diff --git a/src/backend/catalog/pg_version.h b/src/backend/catalog/pg_version.h new file mode 100644 index 00000000000..fea795bd492 --- /dev/null +++ b/src/backend/catalog/pg_version.h @@ -0,0 +1,58 @@ +/*------------------------------------------------------------------------- + * + * pg_version.h-- + * definition of the system "version" relation (pg_version) + * along with the relation's initial contents. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_version.h,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ + * + * NOTES + * the genbki.sh script reads this file and generates .bki + * information from the DATA() statements. + * + *------------------------------------------------------------------------- + */ +#ifndef PG_VERSION_H +#define PG_VERSION_H + +/* ---------------- + * postgres.h contains the system type definintions and the + * CATALOG(), BOOTSTRAP and DATA() sugar words so this file + * can be read by both genbki.sh and the C compiler. + * ---------------- + */ +#include "postgres.h" +#include "utils/nabstime.h" + +/* ---------------- + * pg_version definition. cpp turns this into + * typedef struct FormData_pg_version + * ---------------- + */ +CATALOG(pg_version) { + Oid verrelid; + Oid verbaseid; + int4 vertime; /* really should be some abstime */ +} FormData_pg_version; + +/* ---------------- + * Form_pg_version corresponds to a pointer to a tuple with + * the format of pg_version relation. + * ---------------- + */ +typedef FormData_pg_version *VersionTupleForm; + +/* ---------------- + * compiler constants for pg_version + * ---------------- + */ +#define Natts_pg_version 3 +#define Anum_pg_version_verrelid 1 +#define Anum_pg_version_verbaseid 2 +#define Anum_pg_version_vertime 3 + + +#endif /* PG_VERSION_H */ diff --git a/src/backend/catalog/unused_oids b/src/backend/catalog/unused_oids new file mode 100644 index 00000000000..9608204f495 --- /dev/null +++ b/src/backend/catalog/unused_oids @@ -0,0 +1,41 @@ +#!/bin/sh +# unused_oids +# +# $Header: /cvsroot/pgsql/src/backend/catalog/Attic/unused_oids,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ +# +# finds blocks of oids that have not already been claimed by +# post_hackers for internal purposes. primarily useful for +# finding valid oids for new internal function oids. the numbers +# printed are inclusive ranges of valid (unused) oids. +# +# before using a large empty block, make sure you aren't about +# to take over what was intended as expansion space for something +# else. also, before using a number, do a "grepsrc" to make sure +# that someone isn't using a literal numeric constant somewhere.. 
+# +# non-berkeley post_hackers should probably not try to use oids +# less than the highest one that comes with the distributed source. +# +# run this script in src/backend/catalog. +# +egrep '^DATA' pg_*.h | \ + sed -e 's/^.*OID[^=]*=[^0-9]*//' -e 's/[^0-9].*$//' | \ + sort -n | \ + uniq | \ + awk ' +BEGIN { + last = 0; +} +/^[0-9]/ { + if ($1 > last + 1) { + if ($1 > last + 2) { + print last + 1, "-", $1 - 1; + } else { + print last + 1; + } + } + last = $1; +} +END { + print last + 1, "-"; +}' diff --git a/src/backend/commands/Makefile.inc b/src/backend/commands/Makefile.inc new file mode 100644 index 00000000000..d05052dfccd --- /dev/null +++ b/src/backend/commands/Makefile.inc @@ -0,0 +1,25 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the commands module +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/commands/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:18 scrappy Exp $ +# +#------------------------------------------------------------------------- + +VPATH:=$(VPATH):$(CURDIR)/commands + + +SRCS_COMMANDS= async.c creatinh.c command.c copy.c defind.c define.c \ + purge.c remove.c rename.c vacuum.c version.c view.c cluster.c \ + recipe.c explain.c + +HEADERS+= async.h command.h copy.h creatinh.h defrem.h purge.h \ + rename.h vacuum.h version.h view.h cluster.h \ + recipe.h + + diff --git a/src/backend/commands/_deadcode/version.c b/src/backend/commands/_deadcode/version.c new file mode 100644 index 00000000000..6dd311cee7e --- /dev/null +++ b/src/backend/commands/_deadcode/version.c @@ -0,0 +1,336 @@ +/*------------------------------------------------------------------------- + * + * version.c-- + * This file contains all the rules that govern all version semantics. + * + * Copyright (c) 1994, Regents of the University of California + * + * The version stuff has not been tested under postgres95 and probably doesn't + * work! - jolly 8/19/95 + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/_deadcode/Attic/version.c,v 1.1.1.1 1996/07/09 06:21:23 scrappy Exp $ + * + * NOTES + * At the point the version is defined, 2 physical relations are created + * _added and _deleted. + * + * In addition, 4 rules are defined which govern the semantics of versions + * w.r.t retrieves, appends, replaces and deletes. + * + *------------------------------------------------------------------------- + */ +#include + +#include "postgres.h" + +#include "utils/rel.h" +#include "access/heapam.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "nodes/pg_list.h" +#include "commands/version.h" +#include "access/xact.h" /* for GetCurrentXactStartTime */ +#include "tcop/tcopprot.h" + +#define MAX_QUERY_LEN 1024 + +char rule_buf[MAX_QUERY_LEN]; +static char attr_list[MAX_QUERY_LEN]; + +static void setAttrList(char *bname); + +/* + * problem: the version system assumes that the rules it declares will + * be fired in the order of declaration, it also assumes + * goh's silly instead semantics. Unfortunately, it is a pain + * to make the version system work with the new semantics. + * However the whole problem can be solved, and some nice + * functionality can be achieved if we get multiple action rules + * to work. So thats what I did -- glass + * + * Well, at least they've been working for about 20 minutes. 
+ * + * So any comments in this code about 1 rule per transction are false...:) + * + */ + +/* + * This is needed because the rule system only allows + * *1* rule to be defined per transaction. + * + * NOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOOO + * OOOOOOOOOOOOOOOOOOO!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + * + * DONT DO THAT!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + * + * If you commit the current Xact all the palloced memory GOES AWAY + * and could be re-palloced in the new Xact and the whole hell breaks + * loose and poor people like me spend 2 hours of their live chassing + * a strange memory bug instead of watching the "Get Smart" marathon + * in NICK ! + * DO NOT COMMIT THE XACT, just increase the Cid counter! + * _sp. + */ +static void +eval_as_new_xact(char *query) +{ + /* WARNING! do not uncomment the following lines WARNING! + * CommitTransactionCommand(); + * StartTransactionCommand(); + */ + CommandCounterIncrement(); + pg_eval(query, (char **) NULL, (Oid *) NULL, 0); +} + +/* + * Define a version. + */ +void +DefineVersion(char *name, char *fromRelname, char *date) +{ + char *bname; + static char saved_basename[512]; + static char saved_snapshot[512]; + + if (date == NULL) { + /* no time ranges */ + bname = fromRelname; + (void) strcpy(saved_basename, (char *) bname); + *saved_snapshot = (char)NULL; + } else { + /* version is a snapshot */ + bname = fromRelname; + (void) strcpy(saved_basename, (char *) bname); + sprintf(saved_snapshot, "['%s']", date); + } + + + /* + * Calls the routine ``GetAttrList'' get the list of attributes + * from the base relation. + * Code is put here so that we only need to look up the attribute once for + * both appends and replaces. + */ + setAttrList(bname); + + VersionCreate (name, saved_basename); + VersionAppend (name, saved_basename); + VersionDelete (name, saved_basename,saved_snapshot); + VersionReplace (name, saved_basename,saved_snapshot); + VersionRetrieve (name, saved_basename, saved_snapshot); +} + + +/* + * Creates the deltas. + */ +void +VersionCreate(char *vname, char *bname) +{ + static char query_buf [MAX_QUERY_LEN]; + + /* + * Creating the dummy version relation for triggering rules. + */ + sprintf(query_buf, "SELECT * INTO TABLE %s from %s where 1 =2", + vname, bname); + + pg_eval (query_buf, (char **) NULL, (Oid *) NULL, 0); + + /* + * Creating the ``v_added'' relation + */ + sprintf (query_buf, "SELECT * INTO TABLE %s_added from %s where 1 = 2", + vname, bname); + eval_as_new_xact (query_buf); + + /* + * Creating the ``v_deleted'' relation. 
+ */ + sprintf (query_buf, "CREATE TABLE %s_del (DOID oid)", vname); + eval_as_new_xact (query_buf); +} + + +/* + * Given the relation name, does a catalog lookup for that relation and + * sets the global variable 'attr_list' with the list of attributes (names) + * for that relation. + */ +static void +setAttrList(char *bname) +{ + Relation rdesc; + int i = 0; + int maxattrs = 0; + char *attrname; + char temp_buf[512]; + int notfirst = 0; + + rdesc = heap_openr(bname); + if (rdesc == NULL ) { + elog(WARN,"Unable to expand all -- amopenr failed "); + return; + } + maxattrs = RelationGetNumberOfAttributes(rdesc); + + attr_list[0] = '\0'; + + for ( i = maxattrs-1 ; i > -1 ; --i ) { + attrname = (rdesc->rd_att->attrs[i]->attname).data; + + if (notfirst == 1) { + sprintf(temp_buf, ", %s = new.%s", attrname, attrname); + } else { + sprintf(temp_buf, "%s = new.%s", attrname, attrname); + notfirst = 1; + } + strcat(attr_list, temp_buf); + } + + heap_close(rdesc); + + return; +} + +/* + * This routine defines the rule governing the append semantics of + * versions. All tuples appended to a version gets appended to the + * _added relation. + */ +void +VersionAppend(char *vname, char *bname) +{ + sprintf(rule_buf, + "define rewrite rule %s_append is on INSERT to %s do instead append %s_added(%s)", + vname, vname, vname, attr_list); + + eval_as_new_xact(rule_buf); +} + + +/* + * This routine defines the rule governing the retrieval semantics of + * versions. To retrieve tuples from a version , we need to: + * + * 1. Retrieve all tuples in the _added relation. + * 2. Retrieve all tuples in the base relation which are not in + * the _del relation. + */ +void +VersionRetrieve(char *vname, char *bname, char *snapshot) +{ + + sprintf(rule_buf, + "define rewrite rule %s_retrieve is on SELECT to %s do instead\n\ +SELECT %s_1.oid, %s_1.* from _%s in %s%s, %s_1 in (%s_added | _%s) \ +where _%s.oid !!= '%s_del.DOID'", + vname, vname, vname, vname, bname, + bname, snapshot, + vname, vname, bname, bname, vname); + + eval_as_new_xact(rule_buf); + + /* printf("%s\n",rule_buf); */ + +} + +/* + * This routine defines the rules that govern the delete semantics of + * versions. Two things happens when we delete a tuple from a version: + * + * 1. If the tuple to be deleted was added to the version *after* + * the version was created, then we simply delete the tuple + * from the _added relation. + * 2. If the tuple to be deleted is actually in the base relation, + * then we have to mark that tuple as being deleted by adding + * it to the _del relation. + */ +void +VersionDelete(char *vname, char *bname, char *snapshot) +{ + + sprintf(rule_buf, + "define rewrite rule %s_delete1 is on delete to %s do instead\n \ +[delete %s_added where current.oid = %s_added.oid\n \ + append %s_del(DOID = current.oid) from _%s in %s%s \ + where current.oid = _%s.oid] \n", + vname,vname,vname,vname,vname, +bname,bname,snapshot,bname); + + eval_as_new_xact(rule_buf); +#ifdef OLD_REWRITE + sprintf(rule_buf, + "define rewrite rule %s_delete2 is on delete to %s do instead \n \ + append %s_del(DOID = current.oid) from _%s in %s%s \ + where current.oid = _%s.oid \n", + vname,vname,vname,bname,bname,snapshot,bname); + + eval_as_new_xact(rule_buf); +#endif /* OLD_REWRITE */ +} + +/* + * This routine defines the rules that govern the update semantics + * of versions. To update a tuple in a version: + * + * 1. If the tuple is in _added, we simply ``replace'' + * the tuple (as per postgres style). + * 2. 
if the tuple is in the base relation, then two things have to + * happen: + * 2.1 The tuple is marked ``deleted'' from the base relation by + * adding the tuple to the _del relation. + * 2.2 A copy of the tuple is appended to the _added relation + */ +void +VersionReplace(char *vname, char *bname, char *snapshot) +{ + sprintf(rule_buf, + "define rewrite rule %s_replace1 is on replace to %s do instead \n\ +[replace %s_added(%s) where current.oid = %s_added.oid \n\ + append %s_del(DOID = current.oid) from _%s in %s%s \ + where current.oid = _%s.oid\n\ + append %s_added(%s) from _%s in %s%s \ + where current.oid !!= '%s_added.oid' and current.oid = _%s.oid]\n", + vname,vname,vname,attr_list,vname, + vname,bname,bname,snapshot,bname, +vname,attr_list,bname,bname,snapshot,vname,bname); + + eval_as_new_xact(rule_buf); + +/* printf("%s\n",rule_buf); */ +#ifdef OLD_REWRITE + sprintf(rule_buf, + "define rewrite rule %s_replace2 is on replace to %s do \n\ + append %s_del(DOID = current.oid) from _%s in %s%s \ + where current.oid = _%s.oid\n", + vname,vname,vname,bname,bname,snapshot,bname); + + eval_as_new_xact(rule_buf); + + sprintf(rule_buf, + "define rewrite rule %s_replace3 is on replace to %s do instead\n\ + append %s_added(%s) from _%s in %s%s \ + where current.oid !!= '%s_added.oid' and current.oid = \ + _%s.oid\n", + vname,vname, vname,attr_list,bname,bname,snapshot,vname,bname); + + eval_as_new_xact(rule_buf); +#endif /* OLD_REWRITE */ +/* printf("%s\n",rule_buf); */ + +} + diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c new file mode 100644 index 00000000000..2d3064fa472 --- /dev/null +++ b/src/backend/commands/async.c @@ -0,0 +1,605 @@ +/*------------------------------------------------------------------------- + * + * async.c-- + * Asynchronous notification + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.1.1.1 1996/07/09 06:21:19 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* New Async Notification Model: + * 1. Multiple backends on same machine. Multiple backends listening on + * one relation. + * + * 2. One of the backend does a 'notify '. For all backends that + * are listening to this relation (all notifications take place at the + * end of commit), + * 2.a If the process is the same as the backend process that issued + * notification (we are notifying something that we are listening), + * signal the corresponding frontend over the comm channel using the + * out-of-band channel. + * 2.b For all other listening processes, we send kill(2) to wake up + * the listening backend. + * 3. Upon receiving a kill(2) signal from another backend process notifying + * that one of the relation that we are listening is being notified, + * we can be in either of two following states: + * 3.a We are sleeping, wake up and signal our frontend. + * 3.b We are in middle of another transaction, wait until the end of + * of the current transaction and signal our frontend. + * 4. Each frontend receives this notification and prcesses accordingly. + * + * -- jw, 12/28/93 + * + */ +/* + * The following is the old model which does not work. + */ +/* + * Model is: + * 1. Multiple backends on same machine. + * + * 2. Query on one backend sends stuff over an asynchronous portal by + * appending to a relation, and then doing an async. notification + * (which takes place after commit) to all listeners on this relation. + * + * 3. 
Async. notification results in all backends listening on relation + * to be woken up, by a process signal kill(2), with name of relation + * passed in shared memory. + * + * 4. Each backend notifies its respective frontend over the comm + * channel using the out-of-band channel. + * + * 5. Each frontend receives this notification and processes accordingly. + * + * #4,#5 are changing soon with pending rewrite of portal/protocol. + * + */ + +#include +#include +#include +#include "postgres.h" + +#include "access/attnum.h" +#include "access/heapam.h" +#include "access/htup.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "utils/builtins.h" +#include "utils/tqual.h" +#include "access/xact.h" + +#include "commands/async.h" +#include "commands/copy.h" +#include "storage/buf.h" +#include "storage/itemptr.h" +#include "miscadmin.h" +#include "utils/portal.h" +#include "utils/excid.h" +#include "utils/elog.h" +#include "utils/mcxt.h" +#include "utils/palloc.h" +#include "utils/rel.h" + +#include "nodes/pg_list.h" +#include "tcop/dest.h" +#include "commands/command.h" + +#include "catalog/catname.h" +#include "utils/syscache.h" +#include "catalog/pg_attribute.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_class.h" +#include "catalog/pg_type.h" +#include "catalog/pg_listener.h" + +#include "executor/execdefs.h" +/* #include "executor/execdesc.h"*/ + +#include "storage/bufmgr.h" +#include "lib/dllist.h" +#include "libpq/libpq.h" + + +static int notifyFrontEndPending = 0; +static int notifyIssued = 0; +static Dllist *pendingNotifies = NULL; + + +static int AsyncExistsPendingNotify(char *); +static void ClearPendingNotify(void); + +/* + *-------------------------------------------------------------- + * Async_NotifyHandler -- + * + * This is the signal handler for SIGUSR2. When the backend + * is signaled, the backend can be in two states. + * 1. If the backend is in the middle of another transaction, + * we set the flag, notifyFrontEndPending, and wait until + * the end of the transaction to notify the front end. + * 2. If the backend is not in the middle of another transaction, + * we notify the front end immediately. + * + * -- jw, 12/28/93 + * Results: + * none + * + * Side effects: + * none + */ +void +#if defined(PORTNAME_linux) +Async_NotifyHandler(int i) +#else +Async_NotifyHandler() +#endif +{ + extern TransactionState CurrentTransactionState; + + if ((CurrentTransactionState->state == TRANS_DEFAULT) && + (CurrentTransactionState->blockState == TRANS_DEFAULT)) { + + elog(DEBUG, "Waking up sleeping backend process"); + Async_NotifyFrontEnd(); + + }else { + elog(DEBUG, "Process is in the middle of another transaction, state = %d, block state = %d", + CurrentTransactionState->state, + CurrentTransactionState->blockState); + notifyFrontEndPending = 1; + } +} + +/* + *-------------------------------------------------------------- + * Async_Notify -- + * + * Adds the relation to the list of pending notifies. + * All notification happens at end of commit. + * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + * + * All notification of backend processes happens here, + * then each backend notifies its corresponding front end at + * the end of commit. + * + * This correspond to 'notify ' command + * -- jw, 12/28/93 + * + * Results: + * XXX + * + * Side effects: + * All tuples for relname in pg_listener are updated. 
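Async_NotifyHandler above is the heart of the timing rule its comment spells out: a SIGUSR2 that arrives mid-transaction only records that a notification is pending, and the end-of-transaction path delivers it. The following is a stripped-down, self-contained sketch of that defer-until-safe idiom; the names are generic stand-ins and are not code from this patch.

    #include <signal.h>

    /* Generic sketch of the pattern used by Async_NotifyHandler(): defer the
     * real work while a transaction is in progress, service it at commit. */
    static volatile sig_atomic_t notify_pending = 0;
    static volatile sig_atomic_t in_transaction = 0;  /* stands in for CurrentTransactionState */

    static void
    deliver_notification(void)
    {
        /* ...tell the frontend, as Async_NotifyFrontEnd() does... */
    }

    static void
    notify_handler(int signo)
    {
        if (in_transaction)
            notify_pending = 1;         /* not safe to act now; remember it */
        else
            deliver_notification();     /* backend is idle; act immediately */
    }

    static void
    transaction_commit_hook(void)
    {
        in_transaction = 0;
        if (notify_pending) {
            notify_pending = 0;
            deliver_notification();     /* the deferred case, cf. Async_NotifyAtCommit() */
        }
    }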
+ * + *-------------------------------------------------------------- + */ +void +Async_Notify(char *relname) +{ + + HeapTuple lTuple, rTuple; + Relation lRel; + HeapScanDesc sRel; + TupleDesc tdesc; + ScanKeyData key; + Buffer b; + Datum d, value[3]; + bool isnull; + char repl[3], nulls[3]; + + char *notifyName; + + elog(DEBUG,"Async_Notify: %s",relname); + + if (!pendingNotifies) + pendingNotifies = DLNewList(); + + notifyName = pstrdup(relname); + DLAddHead(pendingNotifies, DLNewElem(notifyName)); + + ScanKeyEntryInitialize(&key, 0, + Anum_pg_listener_relname, + NameEqualRegProcedure, + PointerGetDatum(notifyName)); + + lRel = heap_openr(ListenerRelationName); + tdesc = RelationGetTupleDescriptor(lRel); + sRel = heap_beginscan(lRel, 0, NowTimeQual, 1, &key); + + nulls[0] = nulls[1] = nulls[2] = ' '; + repl[0] = repl[1] = repl[2] = ' '; + repl[Anum_pg_listener_notify - 1] = 'r'; + value[0] = value[1] = value[2] = (Datum) 0; + value[Anum_pg_listener_notify - 1] = Int32GetDatum(1); + + while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0, &b))) { + d = (Datum) heap_getattr(lTuple, b, Anum_pg_listener_notify, + tdesc, &isnull); + if (!DatumGetInt32(d)) { + rTuple = heap_modifytuple(lTuple, b, lRel, value, nulls, repl); + (void) heap_replace(lRel, &lTuple->t_ctid, rTuple); + } + ReleaseBuffer(b); + } + heap_endscan(sRel); + heap_close(lRel); + notifyIssued = 1; +} + +/* + *-------------------------------------------------------------- + * Async_NotifyAtCommit -- + * + * Signal our corresponding frontend process on relations that + * were notified. Signal all other backend process that + * are listening also. + * + * -- jw, 12/28/93 + * + * Results: + * XXX + * + * Side effects: + * Tuples in pg_listener that has our listenerpid are updated so + * that the notification is 0. We do not want to notify frontend + * more than once. 
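Async_Notify() above keeps the names of relations notified in the current transaction on the pendingNotifies Dllist, and Async_NotifyAtCommit() later consults that list (via AsyncExistsPendingNotify(), further below) to decide which pg_listener rows matter. The following isolates just that bookkeeping; the function names are illustrative, while the Dllist and palloc calls are the ones used in the surrounding code.

    #include <string.h>
    #include "lib/dllist.h"
    #include "utils/palloc.h"

    static Dllist *pending = NULL;

    /* remember a relation name at notify time */
    static void
    remember_pending(char *relname)
    {
        if (!pending)
            pending = DLNewList();
        DLAddHead(pending, DLNewElem(pstrdup(relname)));
    }

    /* at commit time: was this relation notified in the current transaction? */
    static int
    is_pending(char *relname)
    {
        Dlelem *p;

        for (p = DLGetHead(pending); p != NULL; p = DLGetSucc(p))
            if (!strcmp(DLE_VAL(p), relname))
                return 1;
        return 0;
    }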
+ * + * -- jw, 12/28/93 + * + *-------------------------------------------------------------- + */ +void +Async_NotifyAtCommit() +{ + HeapTuple lTuple; + Relation lRel; + HeapScanDesc sRel; + TupleDesc tdesc; + ScanKeyData key; + Datum d; + int ourpid; + bool isnull; + Buffer b; + extern TransactionState CurrentTransactionState; + + if (!pendingNotifies) + pendingNotifies = DLNewList(); + + if ((CurrentTransactionState->state == TRANS_DEFAULT) && + (CurrentTransactionState->blockState == TRANS_DEFAULT)) { + + if (notifyIssued) { /* 'notify ' issued by us */ + notifyIssued = 0; + StartTransactionCommand(); + elog(DEBUG, "Async_NotifyAtCommit."); + ScanKeyEntryInitialize(&key, 0, + Anum_pg_listener_notify, + Integer32EqualRegProcedure, + Int32GetDatum(1)); + lRel = heap_openr(ListenerRelationName); + sRel = heap_beginscan(lRel, 0, NowTimeQual, 1, &key); + tdesc = RelationGetTupleDescriptor(lRel); + ourpid = getpid(); + + while (HeapTupleIsValid(lTuple = heap_getnext(sRel,0, &b))) { + d = (Datum) heap_getattr(lTuple, b, Anum_pg_listener_relname, + tdesc, &isnull); + + if (AsyncExistsPendingNotify((char *) DatumGetPointer(d))) { + d = (Datum) heap_getattr(lTuple, b, Anum_pg_listener_pid, + tdesc, &isnull); + + if (ourpid == DatumGetInt32(d)) { + elog(DEBUG, "Notifying self, setting notifyFronEndPending to 1"); + notifyFrontEndPending = 1; + } else { + elog(DEBUG, "Notifying others"); +#ifndef WIN32 + if (kill(DatumGetInt32(d), SIGUSR2) < 0) { + if (errno == ESRCH) { + heap_delete(lRel, &lTuple->t_ctid); + } + } +#endif /* WIN32 */ + } + } + ReleaseBuffer(b); + } + CommitTransactionCommand(); + ClearPendingNotify(); + } + + if (notifyFrontEndPending) { /* we need to notify the frontend of + all pending notifies. */ + notifyFrontEndPending = 1; + Async_NotifyFrontEnd(); + } + } +} + +/* + *-------------------------------------------------------------- + * Async_NotifyAtAbort -- + * + * Gets rid of pending notifies. List elements are automatically + * freed through memory context. + * + * + * Results: + * XXX + * + * Side effects: + * XXX + * + *-------------------------------------------------------------- + */ +void +Async_NotifyAtAbort() +{ + extern TransactionState CurrentTransactionState; + + if (notifyIssued) { + ClearPendingNotify(); + } + notifyIssued = 0; + if (pendingNotifies) + DLFreeList(pendingNotifies); + pendingNotifies = DLNewList(); + + if ((CurrentTransactionState->state == TRANS_DEFAULT) && + (CurrentTransactionState->blockState == TRANS_DEFAULT)) { + if (notifyFrontEndPending) { /* don't forget to notify front end */ + Async_NotifyFrontEnd(); + } + } +} + +/* + *-------------------------------------------------------------- + * Async_Listen -- + * + * Register a backend (identified by its Unix PID) as listening + * on the specified relation. + * + * This corresponds to the 'listen ' command in SQL + * + * One listener per relation, pg_listener relation is keyed + * on (relname,pid) to provide multiple listeners in future. + * + * Results: + * pg_listeners is updated. 
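Together with Async_Notify() above, Async_Listen() is the backend-side API that async.h (later in this patch) exports for the 'listen'/'notify' commands. A minimal usage sketch follows, with a made-up relation name; note that the actual signalling of other backends still happens in Async_NotifyAtCommit().

    #include <unistd.h>
    #include "commands/async.h"

    /* Illustrative only: "quotes" is a hypothetical relation name. */
    static void
    example_listen_then_notify(void)
    {
        /* 'listen quotes': record (relname, our pid) in pg_listener */
        Async_Listen("quotes", getpid());

        /* 'notify quotes': mark pg_listener rows and remember the name;
         * listeners are signalled at end of commit by Async_NotifyAtCommit() */
        Async_Notify("quotes");
    }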
+ * + * Side effects: + * XXX + * + *-------------------------------------------------------------- + */ +void +Async_Listen(char *relname, int pid) +{ + Datum values[Natts_pg_listener]; + char nulls[Natts_pg_listener]; + TupleDesc tdesc; + HeapScanDesc s; + HeapTuple htup,tup; + Relation lDesc; + Buffer b; + Datum d; + int i; + bool isnull; + int alreadyListener = 0; + int ourPid = getpid(); + char *relnamei; + TupleDesc tupDesc; + + elog(DEBUG,"Async_Listen: %s",relname); + for (i = 0 ; i < Natts_pg_listener; i++) { + nulls[i] = ' '; + values[i] = PointerGetDatum(NULL); + } + + i = 0; + values[i++] = (Datum) relname; + values[i++] = (Datum) pid; + values[i++] = (Datum) 0; /* no notifies pending */ + + lDesc = heap_openr(ListenerRelationName); + + /* is someone already listening. One listener per relation */ + tdesc = RelationGetTupleDescriptor(lDesc); + s = heap_beginscan(lDesc,0,NowTimeQual,0,(ScanKey)NULL); + while (HeapTupleIsValid(htup = heap_getnext(s,0,&b))) { + d = (Datum) heap_getattr(htup,b,Anum_pg_listener_relname,tdesc, + &isnull); + relnamei = DatumGetPointer(d); + if (!strncmp(relnamei,relname, NAMEDATALEN)) { + d = (Datum) heap_getattr(htup,b,Anum_pg_listener_pid,tdesc,&isnull); + pid = DatumGetInt32(d); + if (pid == ourPid) { + alreadyListener = 1; + } + } + ReleaseBuffer(b); + } + heap_endscan(s); + + if (alreadyListener) { + elog(NOTICE, "Async_Listen: We are already listening on %s", + relname); + return; + } + + tupDesc = lDesc->rd_att; + tup = heap_formtuple(tupDesc, + values, + nulls); + heap_insert(lDesc, tup); + + pfree(tup); + /* if (alreadyListener) { + elog(NOTICE,"Async_Listen: already one listener on %s (possibly dead)",relname); + }*/ + heap_close(lDesc); + + /* + * now that we are listening, we should make a note to ourselves + * to unlisten prior to dying. + */ + relnamei = malloc(NAMEDATALEN); /* persists to process exit */ + memset (relnamei, 0, NAMEDATALEN); + strncpy(relnamei, relname, NAMEDATALEN); + on_exitpg(Async_UnlistenOnExit, (caddr_t) relnamei); +} + +/* + *-------------------------------------------------------------- + * Async_Unlisten -- + * + * Remove the backend from the list of listening backends + * for the specified relation. + * + * This would correspond to the 'unlisten ' + * command, but there isn't one yet. + * + * Results: + * pg_listeners is updated. + * + * Side effects: + * XXX + * + *-------------------------------------------------------------- + */ +void +Async_Unlisten(char *relname, int pid) +{ + Relation lDesc; + HeapTuple lTuple; + + lTuple = SearchSysCacheTuple(LISTENREL, PointerGetDatum(relname), + Int32GetDatum(pid), + 0,0); + lDesc = heap_openr(ListenerRelationName); + if (lTuple != NULL) { + heap_delete(lDesc,&lTuple->t_ctid); + } + heap_close(lDesc); +} + +void +Async_UnlistenOnExit(int code, /* from exitpg */ + char *relname) +{ + Async_Unlisten((char *) relname, getpid()); +} + +/* + * -------------------------------------------------------------- + * Async_NotifyFrontEnd -- + * + * Perform an asynchronous notification to front end over + * portal comm channel. The name of the relation which contains the + * data is sent to the front end. + * + * We remove the notification flag from the pg_listener tuple + * associated with our process. + * + * Results: + * XXX + * + * Side effects: + * + * We make use of the out-of-band channel to transmit the + * notification to the front end. The actual data transfer takes + * place at the front end's request. 
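The message this routine pushes to the frontend is built from the pq_putnchar/pq_putint/pq_putstr calls in the body below: a one-byte 'A' tag, the notifying backend's pid as a 4-byte integer, then the relation name as a string. The frontend-side reader sketched here is purely illustrative: it assumes a plain file descriptor and big-endian integer encoding, neither of which is established by this patch, and it ignores the out-of-band delivery discussed above.

    #include <unistd.h>

    /* Hypothetical frontend-side reader for the 'A' message; not libpq code. */
    static int
    example_read_notify(int fd, int *notifying_pid, char *relname, int maxlen)
    {
        char          tag;
        unsigned char b[4];
        int           i;

        if (read(fd, &tag, 1) != 1 || tag != 'A')
            return -1;                      /* not an async-notify message */
        if (read(fd, b, 4) != 4)            /* pid from pq_putint(ourpid, 4) */
            return -1;
        *notifying_pid = (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
        for (i = 0; i < maxlen - 1; i++) {  /* name from pq_putstr(); truncated if too long */
            if (read(fd, &relname[i], 1) != 1 || relname[i] == '\0')
                break;
        }
        relname[i] = '\0';
        return 0;
    }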
+ * + * -------------------------------------------------------------- + */ +GlobalMemory notifyContext = NULL; + +void +Async_NotifyFrontEnd() +{ + extern CommandDest whereToSendOutput; + HeapTuple lTuple, rTuple; + Relation lRel; + HeapScanDesc sRel; + TupleDesc tdesc; + ScanKeyData key[2]; + Datum d, value[3]; + char repl[3], nulls[3]; + Buffer b; + int ourpid; + bool isnull; + + notifyFrontEndPending = 0; + + elog(DEBUG, "Async_NotifyFrontEnd: notifying front end."); + + StartTransactionCommand(); + ourpid = getpid(); + ScanKeyEntryInitialize(&key[0], 0, + Anum_pg_listener_notify, + Integer32EqualRegProcedure, + Int32GetDatum(1)); + ScanKeyEntryInitialize(&key[1], 0, + Anum_pg_listener_pid, + Integer32EqualRegProcedure, + Int32GetDatum(ourpid)); + lRel = heap_openr(ListenerRelationName); + tdesc = RelationGetTupleDescriptor(lRel); + sRel = heap_beginscan(lRel, 0, NowTimeQual, 2, key); + + nulls[0] = nulls[1] = nulls[2] = ' '; + repl[0] = repl[1] = repl[2] = ' '; + repl[Anum_pg_listener_notify - 1] = 'r'; + value[0] = value[1] = value[2] = (Datum) 0; + value[Anum_pg_listener_notify - 1] = Int32GetDatum(0); + + while (HeapTupleIsValid(lTuple = heap_getnext(sRel, 0,&b))) { + d = (Datum) heap_getattr(lTuple, b, Anum_pg_listener_relname, + tdesc, &isnull); + rTuple = heap_modifytuple(lTuple, b, lRel, value, nulls, repl); + (void) heap_replace(lRel, &lTuple->t_ctid, rTuple); + + /* notifying the front end */ + + if (whereToSendOutput == Remote) { + pq_putnchar("A", 1); + pq_putint(ourpid, 4); + pq_putstr(DatumGetName(d)->data); + pq_flush(); + } else { + elog(NOTICE, "Async_NotifyFrontEnd: no asynchronous notification to frontend on interactive sessions"); + } + ReleaseBuffer(b); + } + CommitTransactionCommand(); +} + +static int +AsyncExistsPendingNotify(char *relname) +{ + Dlelem* p; + for (p = DLGetHead(pendingNotifies); + p != NULL; + p = DLGetSucc(p)) { + if (!strcmp(DLE_VAL(p), relname)) + return 1; + } + + return 0; +} + +static void +ClearPendingNotify() +{ + Dlelem* p; + while ( (p = DLRemHead(pendingNotifies)) != NULL) + free(DLE_VAL(p)); +} + diff --git a/src/backend/commands/async.h b/src/backend/commands/async.h new file mode 100644 index 00000000000..65e4bd69d55 --- /dev/null +++ b/src/backend/commands/async.h @@ -0,0 +1,33 @@ +/*------------------------------------------------------------------------- + * + * async.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: async.h,v 1.1.1.1 1996/07/09 06:21:19 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef ASYNC_H +#define ASYNC_H + +#include "nodes/memnodes.h" + +#if defined(PORTNAME_linux) +extern void Async_NotifyHandler(int); +#else +extern void Async_NotifyHandler(void); +#endif +extern void Async_Notify(char *relname); +extern void Async_NotifyAtCommit(void); +extern void Async_NotifyAtAbort(void); +extern void Async_Listen(char *relname, int pid); +extern void Async_Unlisten(char *relname, int pid); +extern void Async_UnlistenOnExit(int code, char *relname); + +extern GlobalMemory notifyContext; +extern void Async_NotifyFrontEnd(void); + +#endif /* ASYNC_H */ diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c new file mode 100644 index 00000000000..8400832d6ac --- /dev/null +++ b/src/backend/commands/cluster.c @@ -0,0 +1,370 @@ +/*------------------------------------------------------------------------- + * + * cluster.c-- + * Paul Brown's implementation of cluster index. 
+ * + * I am going to use the rename function as a model for this in the + * parser and executor, and the vacuum code as an example in this + * file. As I go - in contrast to the rest of postgres - there will + * be BUCKETS of comments. This is to allow reviewers to understand + * my (probably bogus) assumptions about the way this works. + * [pbrown '94] + * + * Copyright (c) 1994-5, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/cluster.c,v 1.1.1.1 1996/07/09 06:21:19 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include + +#include "postgres.h" + +#include "nodes/pg_list.h" + +#include "access/attnum.h" +#include "access/heapam.h" +#include "access/genam.h" +#include "access/htup.h" +#include "access/itup.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "access/xact.h" +#include "utils/tqual.h" + +#include "catalog/catname.h" +#include "utils/syscache.h" +#include "catalog/index.h" +#include "catalog/indexing.h" +#include "catalog/pg_type.h" + +#include "commands/copy.h" +#include "commands/cluster.h" +#include "commands/rename.h" + +#include "storage/buf.h" +#include "storage/bufmgr.h" +#include "storage/itemptr.h" + +#include "miscadmin.h" +#include "tcop/dest.h" +#include "commands/command.h" + +#include "utils/builtins.h" +#include "utils/excid.h" +#include "utils/elog.h" +#include "utils/mcxt.h" +#include "utils/palloc.h" +#include "utils/rel.h" + +#include "catalog/pg_attribute.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_class.h" + +#include "optimizer/internal.h" + +#ifndef NO_SECURITY +#include "utils/acl.h" +#include "utils/syscache.h" +#endif /* !NO_SECURITY */ + +/* + * cluster + * + * Check that the relation is a relation in the appropriate user + * ACL. I will use the same security that limits users on the + * renamerel() function. + * + * Check that the index specified is appropriate for the task + * ( ie it's an index over this relation ). This is trickier. + * + * Create a list of all the other indicies on this relation. Because + * the cluster will wreck all the tids, I'll need to destroy bogus + * indicies. The user will have to re-create them. Not nice, but + * I'm not a nice guy. The alternative is to try some kind of post + * destroy re-build. This may be possible. I'll check out what the + * index create functiond want in the way of paramaters. On the other + * hand, re-creating n indicies may blow out the space. + * + * Create new (temporary) relations for the base heap and the new + * index. + * + * Exclusively lock the relations. + * + * Create new clustered index and base heap relation. + * + */ +void +cluster(char oldrelname[], char oldindexname[]) +{ + Oid OIDOldHeap, OIDOldIndex, OIDNewHeap; + + Relation OldHeap, OldIndex; + Relation NewHeap; + + char *NewIndexName; + char *szNewHeapName; + + /* + * + * I'm going to force all checking back into the commands.c function. + * + * Get the list if indicies for this relation. If the index we want + * is among them, do not add it to the 'kill' list, as it will be + * handled by the 'clean up' code which commits this transaction. + * + * I'm not using the SysCache, because this will happen but + * once, and the slow way is the sure way in this case. + * + */ + /* + * Like vacuum, cluster spans transactions, so I'm going to handle it in + * the same way. 
+ */ + + /* matches the StartTransaction in PostgresMain() */ + + OldHeap = heap_openr(oldrelname); + if (!RelationIsValid(OldHeap)) { + elog(WARN, "cluster: unknown relation: \"%-.*s\"", + NAMEDATALEN, oldrelname); + } + OIDOldHeap = OldHeap->rd_id; /* Get OID for the index scan */ + + OldIndex=index_openr(oldindexname);/* Open old index relation */ + if (!RelationIsValid(OldIndex)) { + elog(WARN, "cluster: unknown index: \"%-.*s\"", + NAMEDATALEN, oldindexname); + } + OIDOldIndex = OldIndex->rd_id; /* OID for the index scan */ + + heap_close(OldHeap); + index_close(OldIndex); + + /* + * I need to build the copies of the heap and the index. The Commit() + * between here is *very* bogus. If someone is appending stuff, they will + * get the lock after being blocked and add rows which won't be present in + * the new table. Bleagh! I'd be best to try and ensure that no-one's + * in the tables for the entire duration of this process with a pg_vlock. + */ + NewHeap = copy_heap(OIDOldHeap); + OIDNewHeap = NewHeap->rd_id; + szNewHeapName = pstrdup(NewHeap->rd_rel->relname.data); + + /* Need to do this to make the new heap visible. */ + CommandCounterIncrement(); + + rebuildheap(OIDNewHeap, OIDOldHeap, OIDOldIndex); + + /* Need to do this to make the new heap visible. */ + CommandCounterIncrement(); + + /* can't be found in the SysCache. */ + copy_index(OIDOldIndex, OIDNewHeap); /* No contention with the old */ + + /* + * make this really happen. Flush all the buffers. + */ + CommitTransactionCommand(); + StartTransactionCommand(); + + /* + * Questionable bit here. Because the renamerel destroys all trace of the + * pre-existing relation, I'm going to Destroy old, and then rename new + * to old. If this fails, it fails, and you lose your old. Tough - say + * I. Have good backups! + */ + + /* + Here lies the bogosity. The RelationNameGetRelation returns a bad + list of TupleDescriptors. Damn. Can't work out why this is. + */ + + heap_destroy(oldrelname); /* AAAAAAAAGH!! */ + + CommandCounterIncrement(); + + /* + * The Commit flushes all palloced memory, so I have to grab the + * New stuff again. This is annoying, but oh heck! + */ +/* + renamerel(szNewHeapName.data, oldrelname); + TypeRename(&szNewHeapName, &szOldRelName); + + sprintf(NewIndexName.data, "temp_%x", OIDOldIndex); + renamerel(NewIndexName.data, szOldIndexName.data); +*/ + NewIndexName = palloc(NAMEDATALEN+1); /* XXX */ + sprintf(NewIndexName, "temp_%x", OIDOldIndex); + renamerel(NewIndexName, oldindexname); +} + +Relation +copy_heap(Oid OIDOldHeap) +{ + char NewName[NAMEDATALEN]; + TupleDesc OldHeapDesc, tupdesc; + Oid OIDNewHeap; + Relation NewHeap, OldHeap; + + /* + * Create a new heap relation with a temporary name, which has the + * same tuple description as the old one. + */ + sprintf(NewName,"temp_%x", OIDOldHeap); + + OldHeap= heap_open(OIDOldHeap); + OldHeapDesc= RelationGetTupleDescriptor(OldHeap); + + /* + * Need to make a copy of the tuple descriptor, heap_create modifies + * it. 
+ */ + + tupdesc = CreateTupleDescCopy(OldHeapDesc); + + OIDNewHeap=heap_create(NewName, + NULL, + OldHeap->rd_rel->relarch, + OldHeap->rd_rel->relsmgr, + tupdesc); + + if (!OidIsValid(OIDNewHeap)) + elog(WARN,"clusterheap: cannot create temporary heap relation\n"); + + NewHeap=heap_open(OIDNewHeap); + + heap_close(NewHeap); + heap_close(OldHeap); + + return NewHeap; +} + +void +copy_index(Oid OIDOldIndex, Oid OIDNewHeap) +{ + Relation OldIndex, NewHeap; + HeapTuple Old_pg_index_Tuple, Old_pg_index_relation_Tuple, pg_proc_Tuple; + IndexTupleForm Old_pg_index_Form; + Form_pg_class Old_pg_index_relation_Form; + Form_pg_proc pg_proc_Form; + char *NewIndexName; + AttrNumber *attnumP; + int natts; + FuncIndexInfo * finfo; + + NewHeap = heap_open(OIDNewHeap); + OldIndex = index_open(OIDOldIndex); + + /* + * OK. Create a new (temporary) index for the one that's already + * here. To do this I get the info from pg_index, re-build the + * FunctInfo if I have to, and add a new index with a temporary + * name. + */ + Old_pg_index_Tuple = + SearchSysCacheTuple(INDEXRELID, + ObjectIdGetDatum(OldIndex->rd_id), + 0,0,0); + + Assert(Old_pg_index_Tuple); + Old_pg_index_Form = (IndexTupleForm)GETSTRUCT(Old_pg_index_Tuple); + + Old_pg_index_relation_Tuple = + SearchSysCacheTuple(RELOID, + ObjectIdGetDatum(OldIndex->rd_id), + 0,0,0); + + Assert(Old_pg_index_relation_Tuple); + Old_pg_index_relation_Form = + (Form_pg_class)GETSTRUCT(Old_pg_index_relation_Tuple); + + NewIndexName = palloc(NAMEDATALEN+1); /* XXX */ + sprintf(NewIndexName, "temp_%x", OIDOldIndex); /* Set the name. */ + + /* + * Ugly as it is, the only way I have of working out the number of + * attribues is to count them. Mostly there'll be just one but + * I've got to be sure. + */ + for (attnumP = &(Old_pg_index_Form->indkey[0]), natts = 0; + *attnumP != InvalidAttrNumber; + attnumP++, natts++); + + /* + * If this is a functional index, I need to rebuild the functional + * component to pass it to the defining procedure. + */ + if (Old_pg_index_Form->indproc != InvalidOid) { + FIgetnArgs(finfo) = natts; + FIgetProcOid(finfo) = Old_pg_index_Form->indproc; + + pg_proc_Tuple = + SearchSysCacheTuple(PROOID, + ObjectIdGetDatum(Old_pg_index_Form->indproc), + 0,0,0); + + Assert(pg_proc_Tuple); + pg_proc_Form = (Form_pg_proc)GETSTRUCT(pg_proc_Tuple); + namecpy(&(finfo->funcName), &(pg_proc_Form->proname)); + } else { + finfo = (FuncIndexInfo *) NULL; + natts = 1; + } + + index_create((NewHeap->rd_rel->relname).data, + NewIndexName, + finfo, + Old_pg_index_relation_Form->relam, + natts, + Old_pg_index_Form->indkey, + Old_pg_index_Form->indclass, + (uint16)0, (Datum) NULL, NULL); + + heap_close(OldIndex); + heap_close(NewHeap); +} + + +void +rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex) +{ + Relation LocalNewHeap, LocalOldHeap, LocalOldIndex; + IndexScanDesc ScanDesc; + RetrieveIndexResult ScanResult; + ItemPointer HeapTid; + HeapTuple LocalHeapTuple; + Buffer LocalBuffer; + Oid OIDNewHeapInsert; + + /* + * Open the relations I need. Scan through the OldHeap on the OldIndex and + * insert each tuple into the NewHeap. 
+ */ + LocalNewHeap=(Relation)heap_open(OIDNewHeap); + LocalOldHeap=(Relation)heap_open(OIDOldHeap); + LocalOldIndex=(Relation)index_open(OIDOldIndex); + + ScanDesc=index_beginscan(LocalOldIndex, false, 0, (ScanKey) NULL); + + while ((ScanResult = + index_getnext(ScanDesc, ForwardScanDirection)) != NULL) { + + HeapTid = &ScanResult->heap_iptr; + LocalHeapTuple = heap_fetch(LocalOldHeap, 0, HeapTid, &LocalBuffer); + OIDNewHeapInsert = + heap_insert(LocalNewHeap, LocalHeapTuple); + pfree(ScanResult); + ReleaseBuffer(LocalBuffer); + } + + index_close(LocalOldIndex); + heap_close(LocalOldHeap); + heap_close(LocalNewHeap); +} + diff --git a/src/backend/commands/cluster.h b/src/backend/commands/cluster.h new file mode 100644 index 00000000000..2194e13f9a8 --- /dev/null +++ b/src/backend/commands/cluster.h @@ -0,0 +1,30 @@ +/*------------------------------------------------------------------------- + * + * cluster.h-- + * header file for postgres cluster command stuff + * + * Copyright (c) 1994-5, Regents of the University of California + * + * $Id: cluster.h,v 1.1.1.1 1996/07/09 06:21:19 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef CLUSTER_H +#define CLUSTER_H + +/* + * defines for contant stuff + */ +#define _TEMP_RELATION_KEY_ "clXXXXXXXX" +#define _SIZE_OF_TEMP_RELATION_KEY_ 11 + + +/* + * functions + */ +extern void cluster(char oldrelname[], char oldindexname[]); +extern Relation copy_heap(Oid OIDOldHeap); +extern void copy_index(Oid OIDOldIndex, Oid OIDNewHeap); +extern void rebuildheap(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex); + +#endif /* CLUSTER_H */ diff --git a/src/backend/commands/command.c b/src/backend/commands/command.c new file mode 100644 index 00000000000..4283b594d59 --- /dev/null +++ b/src/backend/commands/command.c @@ -0,0 +1,511 @@ +/*------------------------------------------------------------------------- + * + * command.c-- + * random postgres portal and utility support code + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.1.1.1 1996/07/09 06:21:19 scrappy Exp $ + * + * NOTES + * The PortalExecutorHeapMemory crap needs to be eliminated + * by designing a better executor / portal processing memory + * interface. + * + * The PerformAddAttribute() code, like most of the relation + * manipulating code in the commands/ directory, should go + * someplace closer to the lib/catalog code. 
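cluster.h above reduces all of the machinery in cluster.c to one entry point plus its three helpers. A minimal usage sketch of that entry point follows, with hypothetical relation and index names; as the comments in cluster.c warn, every other index on the clustered relation is wrecked by the rewrite and must be re-created afterwards.

    #include "commands/cluster.h"

    /* Illustrative only: "emp" and "emp_salary_index" are made-up names. */
    static void
    example_cluster_call(void)
    {
        /* re-order the "emp" heap via "emp_salary_index", following the
         * plan laid out in the big comment at the top of cluster.c above */
        cluster("emp", "emp_salary_index");
    }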
+ * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "access/attnum.h" +#include "access/heapam.h" +#include "access/htup.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "utils/builtins.h" +#include "utils/tqual.h" + +#include "commands/copy.h" + +#include "storage/buf.h" +#include "storage/itemptr.h" + +#include "miscadmin.h" + +#include "utils/portal.h" +#include "utils/excid.h" +#include "utils/elog.h" +#include "utils/mcxt.h" +#include "utils/palloc.h" +#include "utils/rel.h" + +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "tcop/dest.h" +#include "commands/command.h" + +#include "catalog/catalog.h" +#include "catalog/catname.h" +#include "utils/syscache.h" +#include "catalog/pg_attribute.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_class.h" +#include "catalog/pg_type.h" +#include "catalog/indexing.h" + +#include "executor/executor.h" +#include "executor/execdefs.h" +#include "executor/execdesc.h" + +#include "optimizer/internal.h" +#include "optimizer/prep.h" /* for find_all_inheritors */ + + +#ifndef NO_SECURITY +#include "miscadmin.h" +#include "utils/acl.h" +#include "utils/syscache.h" +#endif /* !NO_SECURITY */ + +/* ---------------- + * PortalExecutorHeapMemory stuff + * + * This is where the XXXSuperDuperHacky code was. -cim 3/15/90 + * ---------------- + */ +MemoryContext PortalExecutorHeapMemory = NULL; + +/* -------------------------------- + * PortalCleanup + * -------------------------------- + */ +void +PortalCleanup(Portal portal) +{ + MemoryContext context; + + /* ---------------- + * sanity checks + * ---------------- + */ + AssertArg(PortalIsValid(portal)); + AssertArg(portal->cleanup == PortalCleanup); + + /* ---------------- + * set proper portal-executor context before calling ExecMain. + * ---------------- + */ + context = MemoryContextSwitchTo((MemoryContext) PortalGetHeapMemory(portal)); + PortalExecutorHeapMemory = (MemoryContext) + PortalGetHeapMemory(portal); + + /* ---------------- + * tell the executor to shutdown the query + * ---------------- + */ + ExecutorEnd(PortalGetQueryDesc(portal), PortalGetState(portal)); + + /* ---------------- + * switch back to previous context + * ---------------- + */ + (void) MemoryContextSwitchTo(context); + PortalExecutorHeapMemory = (MemoryContext) NULL; +} + +/* -------------------------------- + * PerformPortalFetch + * -------------------------------- + */ +void +PerformPortalFetch(char *name, + bool forward, + int count, + char *tag, + CommandDest dest) +{ + Portal portal; + int feature; + QueryDesc *queryDesc; + MemoryContext context; + + /* ---------------- + * sanity checks + * ---------------- + */ + if (name == NULL) { + elog(NOTICE, "PerformPortalFetch: blank portal unsupported"); + return; + } + + /* ---------------- + * get the portal from the portal name + * ---------------- + */ + portal = GetPortalByName(name); + if (! PortalIsValid(portal)) { + elog(NOTICE, "PerformPortalFetch: portal \"%-.*s\" not found", + NAMEDATALEN, name); + return; + } + + /* ---------------- + * switch into the portal context + * ---------------- + */ + context= MemoryContextSwitchTo((MemoryContext)PortalGetHeapMemory(portal)); + + AssertState(context == + (MemoryContext)PortalGetHeapMemory(GetPortalByName(NULL))); + + /* ---------------- + * setup "feature" to tell the executor what direction and + * how many tuples to fetch. 
+ * ---------------- + */ + if (forward) + feature = EXEC_FOR; + else + feature = EXEC_BACK; + + /* ---------------- + * tell the destination to prepare to recieve some tuples + * ---------------- + */ + queryDesc = PortalGetQueryDesc(portal); + BeginCommand(name, + queryDesc->operation, + portal->attinfo,/* QueryDescGetTypeInfo(queryDesc), */ + false, /* portal fetches don't end up in relations */ + false, /* this is a portal fetch, not a "retrieve portal" */ + tag, + dest); + + /* ---------------- + * execute the portal fetch operation + * ---------------- + */ + PortalExecutorHeapMemory = (MemoryContext) + PortalGetHeapMemory(portal); + + ExecutorRun(queryDesc, PortalGetState(portal), feature, count); + + /* ---------------- + * Note: the "end-of-command" tag is returned by higher-level + * utility code + * + * Return blank portal for now. + * Otherwise, this named portal will be cleaned. + * Note: portals will only be supported within a BEGIN...END + * block in the near future. Later, someone will fix it to + * do what is possible across transaction boundries. + * ---------------- + */ + (void) MemoryContextSwitchTo( + (MemoryContext)PortalGetHeapMemory(GetPortalByName(NULL))); +} + +/* -------------------------------- + * PerformPortalClose + * -------------------------------- + */ +void +PerformPortalClose(char *name, CommandDest dest) +{ + Portal portal; + + /* ---------------- + * sanity checks + * ---------------- + */ + if (name == NULL) { + elog(NOTICE, "PerformPortalClose: blank portal unsupported"); + return; + } + + /* ---------------- + * get the portal from the portal name + * ---------------- + */ + portal = GetPortalByName(name); + if (! PortalIsValid(portal)) { + elog(NOTICE, "PerformPortalClose: portal \"%-.*s\" not found", + NAMEDATALEN, name); + return; + } + + /* ---------------- + * Note: PortalCleanup is called as a side-effect + * ---------------- + */ + PortalDestroy(&portal); +} + +/* ---------------- + * PerformAddAttribute + * + * adds an additional attribute to a relation + * + * Adds attribute field(s) to a relation. Each new attribute + * is given attnums in sequential order and is added to the + * ATTRIBUTE relation. If the AMI fails, defunct tuples will + * remain in the ATTRIBUTE relation for later vacuuming. + * Later, there may be some reserved attribute names??? + * + * (If needed, can instead use elog to handle exceptions.) + * + * Note: + * Initial idea of ordering the tuple attributes so that all + * the variable length domains occured last was scratched. Doing + * so would not speed access too much (in general) and would create + * many complications in formtuple, amgetattr, and addattribute. + * + * scan attribute catalog for name conflict (within rel) + * scan type catalog for absence of data type (if not arg) + * create attnum magically??? 
+ * create attribute tuple + * insert attribute in attribute catalog + * modify reldesc + * create new relation tuple + * insert new relation in relation catalog + * delete original relation from relation catalog + * ---------------- + */ +void +PerformAddAttribute(char *relationName, + char *userName, + bool inherits, + ColumnDef *colDef) +{ + Relation relrdesc, attrdesc; + HeapScanDesc attsdesc; + HeapTuple reltup; + HeapTuple attributeTuple; + AttributeTupleForm attribute; + FormData_pg_attribute attributeD; + int i; + int minattnum, maxatts; + HeapTuple tup; + ScanKeyData key[2]; + ItemPointerData oldTID; + Relation idescs[Num_pg_attr_indices]; + Relation ridescs[Num_pg_class_indices]; + bool hasindex; + + /* + * permissions checking. this would normally be done in utility.c, + * but this particular routine is recursive. + * + * normally, only the owner of a class can change its schema. + */ + if (IsSystemRelationName(relationName)) + elog(WARN, "PerformAddAttribute: class \"%-.*s\" is a system catalog", + NAMEDATALEN, relationName); +#ifndef NO_SECURITY + if (!pg_ownercheck(userName, relationName, RELNAME)) + elog(WARN, "PerformAddAttribute: you do not own class \"%s\"", + relationName); +#endif + + /* + * if the first element in the 'schema' list is a "*" then we are + * supposed to add this attribute to all classes that inherit from + * 'relationName' (as well as to 'relationName'). + * + * any permissions or problems with duplicate attributes will cause + * the whole transaction to abort, which is what we want -- all or + * nothing. + */ + if (colDef != NULL) { + if (inherits) { + Oid myrelid, childrelid; + List *child, *children; + + relrdesc = heap_openr(relationName); + if (!RelationIsValid(relrdesc)) { + elog(WARN, "PerformAddAttribute: unknown relation: \"%-.*s\"", + NAMEDATALEN, relationName); + } + myrelid = relrdesc->rd_id; + heap_close(relrdesc); + + /* this routine is actually in the planner */ + children = find_all_inheritors(lconsi(myrelid,NIL), NIL); + + /* + * find_all_inheritors does the recursive search of the + * inheritance hierarchy, so all we have to do is process + * all of the relids in the list that it returns. + */ + foreach (child, children) { + childrelid = lfirsti(child); + if (childrelid == myrelid) + continue; + relrdesc = heap_open(childrelid); + if (!RelationIsValid(relrdesc)) { + elog(WARN, "PerformAddAttribute: can't find catalog entry for inheriting class with oid %d", + childrelid); + } + PerformAddAttribute((relrdesc->rd_rel->relname).data, + userName, false, colDef); + heap_close(relrdesc); + } + } + } + + relrdesc = heap_openr(RelationRelationName); + reltup = ClassNameIndexScan(relrdesc, relationName); + + if (!PointerIsValid(reltup)) { + heap_close(relrdesc); + elog(WARN, "PerformAddAttribute: relation \"%s\" not found", + relationName); + } + /* + * XXX is the following check sufficient? 
+ */ + if (((Form_pg_class) GETSTRUCT(reltup))->relkind == RELKIND_INDEX) { + elog(WARN, "PerformAddAttribute: index relation \"%s\" not changed", + relationName); + return; + } + + minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts; + maxatts = minattnum + 1; + if (maxatts > MaxHeapAttributeNumber) { + pfree(reltup); /* XXX temp */ + heap_close(relrdesc); /* XXX temp */ + elog(WARN, "PerformAddAttribute: relations limited to %d attributes", + MaxHeapAttributeNumber); + return; + } + + attrdesc = heap_openr(AttributeRelationName); + + Assert(attrdesc); + Assert(RelationGetRelationTupleForm(attrdesc)); + + /* + * Open all (if any) pg_attribute indices + */ + hasindex = RelationGetRelationTupleForm(attrdesc)->relhasindex; + if (hasindex) + CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs); + + ScanKeyEntryInitialize(&key[0], + (bits16) NULL, + (AttrNumber) Anum_pg_attribute_attrelid, + (RegProcedure)ObjectIdEqualRegProcedure, + (Datum) reltup->t_oid); + + ScanKeyEntryInitialize(&key[1], + (bits16) NULL, + (AttrNumber) Anum_pg_attribute_attname, + (RegProcedure)NameEqualRegProcedure, + (Datum) NULL); + + attributeD.attrelid = reltup->t_oid; + attributeD.attdefrel = InvalidOid; /* XXX temporary */ + attributeD.attnvals = 0; /* XXX temporary */ + attributeD.atttyparg = InvalidOid; /* XXX temporary */ + attributeD.attbound = 0; /* XXX temporary */ + attributeD.attcanindex = 0; /* XXX need this info */ + attributeD.attproc = InvalidOid; /* XXX tempoirary */ + attributeD.attcacheoff = -1; + + attributeTuple = heap_addheader(Natts_pg_attribute, + sizeof attributeD, + (char *)&attributeD); + + attribute = (AttributeTupleForm)GETSTRUCT(attributeTuple); + + i = 1 + minattnum; + + { + HeapTuple typeTuple; + TypeTupleForm form; + char *p; + int attnelems; + + /* + * XXX use syscache here as an optimization + */ + key[1].sk_argument = (Datum)colDef->colname; + attsdesc = heap_beginscan(attrdesc, 0, NowTimeQual, 2, key); + + + tup = heap_getnext(attsdesc, 0, (Buffer *) NULL); + if (HeapTupleIsValid(tup)) { + pfree(reltup); /* XXX temp */ + heap_endscan(attsdesc); /* XXX temp */ + heap_close(attrdesc); /* XXX temp */ + heap_close(relrdesc); /* XXX temp */ + elog(WARN, "PerformAddAttribute: attribute \"%s\" already exists in class \"%s\"", + key[1].sk_argument, + relationName); + return; + } + heap_endscan(attsdesc); + + /* + * check to see if it is an array attribute. 
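+ *
+ * For example, a column declared with two sets of array bounds (say
+ * "int4[10][10]") arrives here with a two-element arrayBounds list,
+ * so attnelems becomes 2 and the type lookup below goes through the
+ * array type name built by makeArrayTypeName() instead of the plain
+ * element type. A scalar column leaves attnelems at 0.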
+ */ + + p = colDef->typename->name; + + if (colDef->typename->arrayBounds) + { + attnelems = length(colDef->typename->arrayBounds); + p = makeArrayTypeName(colDef->typename->name); + } + else + attnelems = 0; + + typeTuple = SearchSysCacheTuple(TYPNAME, + PointerGetDatum(p), + 0,0,0); + form = (TypeTupleForm)GETSTRUCT(typeTuple); + + if (!HeapTupleIsValid(typeTuple)) { + elog(WARN, "Add: type \"%s\" nonexistant", p); + } + namestrcpy(&(attribute->attname), (char*) key[1].sk_argument); + attribute->atttypid = typeTuple->t_oid; + attribute->attlen = form->typlen; + attribute->attnum = i; + attribute->attbyval = form->typbyval; + attribute->attnelems = attnelems; + attribute->attcacheoff = -1; + attribute->attisset = (bool) (form->typtype == 'c'); + attribute->attalign = form->typalign; + + heap_insert(attrdesc, attributeTuple); + if (hasindex) + CatalogIndexInsert(idescs, + Num_pg_attr_indices, + attrdesc, + attributeTuple); + } + + if (hasindex) + CatalogCloseIndices(Num_pg_attr_indices, idescs); + heap_close(attrdesc); + + ((Form_pg_class) GETSTRUCT(reltup))->relnatts = maxatts; + oldTID = reltup->t_ctid; + (void) heap_replace(relrdesc, &oldTID, reltup); + + /* keep catalog indices current */ + CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, ridescs); + CatalogIndexInsert(ridescs, Num_pg_class_indices, relrdesc, reltup); + CatalogCloseIndices(Num_pg_class_indices, ridescs); + + pfree(reltup); + heap_close(relrdesc); +} diff --git a/src/backend/commands/command.h b/src/backend/commands/command.h new file mode 100644 index 00000000000..266c6b4be14 --- /dev/null +++ b/src/backend/commands/command.h @@ -0,0 +1,56 @@ +/*------------------------------------------------------------------------- + * + * command.h-- + * prototypes for command.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: command.h,v 1.1.1.1 1996/07/09 06:21:19 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef COMMAND_H +#define COMMAND_H + +#include "utils/portal.h" +#include "tcop/dest.h" + +extern MemoryContext PortalExecutorHeapMemory; + +/* + * PortalCleanup -- + * Cleans up the query state of the portal. + * + * Exceptions: + * BadArg if portal invalid. + */ +extern void PortalCleanup(Portal portal); + + +/* + * PerformPortalFetch -- + * Performs the POSTQUEL function FETCH. Fetches count (or all if 0) + * tuples in portal with name in the forward direction iff goForward. + * + * Exceptions: + * BadArg if forward invalid. + * "WARN" if portal not found. + */ +extern void PerformPortalFetch(char *name, bool forward, int count, + char *tag, CommandDest dest); + +/* + * PerformPortalClose -- + * Performs the POSTQUEL function CLOSE. + */ +extern void PerformPortalClose(char *name, CommandDest dest); + +/* + * PerformAddAttribute -- + * Performs the POSTQUEL function ADD. 
+ */
+extern void PerformAddAttribute(char *relationName, char *userName,
+ bool inh, ColumnDef *colDef);
+
+#endif /* COMMAND_H */
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
new file mode 100644
index 00000000000..7e10818abfc
--- /dev/null
+++ b/src/backend/commands/copy.c
@@ -0,0 +1,782 @@
+/*-------------------------------------------------------------------------
+ *
+ * copy.c--
+ *
+ * Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * $Header: /cvsroot/pgsql/src/backend/commands/copy.c,v 1.1.1.1 1996/07/09 06:21:19 scrappy Exp $
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdio.h>
+#include <sys/types.h> /* for mode_t */
+#include <sys/stat.h> /* for umask(2) prototype */
+
+#include "postgres.h"
+#include "miscadmin.h"
+#include "utils/builtins.h"
+#include "utils/syscache.h"
+#include "catalog/pg_type.h"
+#include "catalog/pg_index.h"
+#include "catalog/index.h"
+
+#include "access/heapam.h"
+#include "access/htup.h"
+#include "access/itup.h"
+#include "access/relscan.h"
+#include "access/funcindex.h"
+#include "access/tupdesc.h"
+#include "nodes/execnodes.h"
+#include "nodes/plannodes.h"
+#include "nodes/pg_list.h"
+#include "executor/tuptable.h"
+#include "executor/executor.h"
+#include "utils/rel.h"
+#include "utils/elog.h"
+#include "utils/memutils.h"
+#include "utils/palloc.h"
+#include "fmgr.h"
+#include "machine.h"
+
+/*
+ * New copy code.
+ *
+ * This code "knows" the following about tuples:
+ *
+ */
+
+static bool reading_from_input = false;
+
+/* non-export function prototypes */
+static void CopyTo(Relation rel, bool binary, FILE *fp, char *delim);
+static void CopyFrom(Relation rel, bool binary, FILE *fp, char *delim);
+static Oid GetOutputFunction(Oid type);
+static Oid GetTypeElement(Oid type);
+static Oid GetInputFunction(Oid type);
+static Oid IsTypeByVal(Oid type);
+static void GetIndexRelations(Oid main_relation_oid,
+ int *n_indices,
+ Relation **index_rels);
+static char *CopyReadAttribute(int attno, FILE *fp, bool *isnull, char *delim);
+static void CopyAttributeOut(FILE *fp, char *string, char *delim);
+static int CountTuples(Relation relation);
+
+extern FILE *Pfout, *Pfin;
+
+void
+DoCopy(char *relname, bool binary, bool from, bool pipe, char *filename,
+ char *delim)
+{
+ FILE *fp;
+ Relation rel;
+ reading_from_input = pipe;
+
+ rel = heap_openr(relname);
+ if (rel == NULL) elog(WARN, "Copy: class %s does not exist.", relname);
+
+ if (from) {
+ if (pipe && IsUnderPostmaster) ReceiveCopyBegin();
+ if (IsUnderPostmaster) {
+ fp = pipe ? Pfin : fopen(filename, "r");
+ }else {
+ fp = pipe ? stdin : fopen(filename, "r");
+ }
+ if (fp == NULL) {
+ elog(WARN, "COPY: file %s could not be opened for reading", filename);
+ }
+ CopyFrom(rel, binary, fp, delim);
+ }else {
+
+ mode_t oumask = umask((mode_t) 0);
+
+ if (pipe && IsUnderPostmaster) SendCopyBegin();
+ if (IsUnderPostmaster) {
+ fp = pipe ? Pfout : fopen(filename, "w");
+
+ }else {
+ fp = pipe ?
stdout : fopen(filename, "w"); + } + (void) umask(oumask); + if (fp == NULL) { + elog(WARN, "COPY: file %s could not be open for writing", filename); + } + CopyTo(rel, binary, fp, delim); + } + if (!pipe) { + fclose(fp); + }else if (!from && !binary) { + fputs(".\n", fp); + if (IsUnderPostmaster) fflush(Pfout); + } +} + +static void +CopyTo(Relation rel, bool binary, FILE *fp, char *delim) +{ + HeapTuple tuple; + HeapScanDesc scandesc; + + int32 attr_count, i; + AttributeTupleForm *attr; + func_ptr *out_functions; + int dummy; + Oid out_func_oid; + Oid *elements; + Datum value; + bool isnull = (bool) true; + char *nulls; + char *string; + int32 ntuples; + TupleDesc tupDesc; + + scandesc = heap_beginscan(rel, 0, NULL, 0, NULL); + + attr_count = rel->rd_att->natts; + attr = rel->rd_att->attrs; + tupDesc = rel->rd_att; + + if (!binary) { + out_functions = (func_ptr *) + palloc(attr_count * sizeof(func_ptr)); + elements = (Oid *) palloc(attr_count * sizeof(Oid)); + for (i = 0; i < attr_count; i++) { + out_func_oid = (Oid) GetOutputFunction(attr[i]->atttypid); + fmgr_info(out_func_oid, &out_functions[i], &dummy); + elements[i] = GetTypeElement(attr[i]->atttypid); + } + }else { + nulls = (char *) palloc(attr_count); + for (i = 0; i < attr_count; i++) nulls[i] = ' '; + + /* XXX expensive */ + + ntuples = CountTuples(rel); + fwrite(&ntuples, sizeof(int32), 1, fp); + } + + for (tuple = heap_getnext(scandesc, 0, NULL); + tuple != NULL; + tuple = heap_getnext(scandesc, 0, NULL)) { + + for (i = 0; i < attr_count; i++) { + value = (Datum) + heap_getattr(tuple, InvalidBuffer, i+1, tupDesc, &isnull); + if (!binary) { + if (!isnull) { + string = (char *) (out_functions[i]) (value, elements[i]); + CopyAttributeOut(fp, string, delim); + pfree(string); + } + if (i == attr_count - 1) { + fputc('\n', fp); + }else { + /* when copying out, only use the first char of the delim + string */ + fputc(delim[0], fp); + } + }else { + /* + * only interesting thing heap_getattr tells us in this case + * is if we have a null attribute or not. 
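+ *
+ * What is actually written per tuple in the binary case is: an
+ * int32 data length (t_len - t_hoff), an int32 count of null
+ * attributes, one int32 attribute index for each null, and then
+ * the raw tuple data itself.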
+ */ + if (isnull) nulls[i] = 'n'; + } + } + + if (binary) { + int32 null_ct = 0, length; + + for (i = 0; i < attr_count; i++) { + if (nulls[i] == 'n') null_ct++; + } + + length = tuple->t_len - tuple->t_hoff; + fwrite(&length, sizeof(int32), 1, fp); + fwrite(&null_ct, sizeof(int32), 1, fp); + if (null_ct > 0) { + for (i = 0; i < attr_count; i++) { + if (nulls[i] == 'n') { + fwrite(&i, sizeof(int32), 1, fp); + nulls[i] = ' '; + } + } + } + fwrite((char *) tuple + tuple->t_hoff, length, 1, fp); + } + } + + heap_endscan(scandesc); + if (binary) { + pfree(nulls); + }else { + pfree(out_functions); + pfree(elements); + } + + heap_close(rel); +} + +static void +CopyFrom(Relation rel, bool binary, FILE *fp, char *delim) +{ + HeapTuple tuple; + IndexTuple ituple; + AttrNumber attr_count; + AttributeTupleForm *attr; + func_ptr *in_functions; + int i, dummy; + Oid in_func_oid; + Datum *values; + char *nulls, *index_nulls; + bool *byval; + bool isnull; + bool has_index; + int done = 0; + char *string, *ptr; + Relation *index_rels; + int32 len, null_ct, null_id; + int32 ntuples, tuples_read = 0; + bool reading_to_eof = true; + Oid *elements; + FuncIndexInfo *finfo, **finfoP; + TupleDesc *itupdescArr; + HeapTuple pgIndexTup; + IndexTupleForm *pgIndexP; + int *indexNatts; + char *predString; + Node **indexPred; + TupleDesc rtupdesc; + ExprContext *econtext; + TupleTable tupleTable; + TupleTableSlot *slot; + int natts; + AttrNumber *attnumP; + Datum idatum; + int n_indices; + InsertIndexResult indexRes; + TupleDesc tupDesc; + + tupDesc = RelationGetTupleDescriptor(rel); + attr = tupDesc->attrs; + attr_count = tupDesc->natts; + + has_index = false; + + /* + * This may be a scalar or a functional index. We initialize all + * kinds of arrays here to avoid doing extra work at every tuple + * copy. 
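+ *
+ * Concretely, for each index we cache its tuple descriptor
+ * (itupdescArr), its pg_index entry (pgIndexP), the number of key
+ * attributes (indexNatts), the function info for a functional index
+ * (finfoP, NULL otherwise), and the partial-index predicate
+ * (indexPred, NULL otherwise).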
+ */ + + if (rel->rd_rel->relhasindex) { + GetIndexRelations(rel->rd_id, &n_indices, &index_rels); + if (n_indices > 0) { + has_index = true; + itupdescArr = + (TupleDesc *)palloc(n_indices * sizeof(TupleDesc)); + pgIndexP = + (IndexTupleForm *)palloc(n_indices * sizeof(IndexTupleForm)); + indexNatts = (int *) palloc(n_indices * sizeof(int)); + finfo = (FuncIndexInfo *) palloc(n_indices * sizeof(FuncIndexInfo)); + finfoP = (FuncIndexInfo **) palloc(n_indices * sizeof(FuncIndexInfo *)); + indexPred = (Node **) palloc(n_indices * sizeof(Node*)); + econtext = NULL; + for (i = 0; i < n_indices; i++) { + itupdescArr[i] = RelationGetTupleDescriptor(index_rels[i]); + pgIndexTup = + SearchSysCacheTuple(INDEXRELID, + ObjectIdGetDatum(index_rels[i]->rd_id), + 0,0,0); + Assert(pgIndexTup); + pgIndexP[i] = (IndexTupleForm)GETSTRUCT(pgIndexTup); + for (attnumP = &(pgIndexP[i]->indkey[0]), natts = 0; + *attnumP != InvalidAttrNumber; + attnumP++, natts++); + if (pgIndexP[i]->indproc != InvalidOid) { + FIgetnArgs(&finfo[i]) = natts; + natts = 1; + FIgetProcOid(&finfo[i]) = pgIndexP[i]->indproc; + *(FIgetname(&finfo[i])) = '\0'; + finfoP[i] = &finfo[i]; + } else + finfoP[i] = (FuncIndexInfo *) NULL; + indexNatts[i] = natts; + if (VARSIZE(&pgIndexP[i]->indpred) != 0) { + predString = fmgr(F_TEXTOUT, &pgIndexP[i]->indpred); + indexPred[i] = stringToNode(predString); + pfree(predString); + /* make dummy ExprContext for use by ExecQual */ + if (econtext == NULL) { +#ifndef OMIT_PARTIAL_INDEX + tupleTable = ExecCreateTupleTable(1); + slot = ExecAllocTableSlot(tupleTable); + econtext = makeNode(ExprContext); + econtext->ecxt_scantuple = slot; + rtupdesc = RelationGetTupleDescriptor(rel); + slot->ttc_tupleDescriptor = rtupdesc; + /* + * There's no buffer associated with heap tuples here, + * so I set the slot's buffer to NULL. Currently, it + * appears that the only way a buffer could be needed + * would be if the partial index predicate referred to + * the "lock" system attribute. If it did, then + * heap_getattr would call HeapTupleGetRuleLock, which + * uses the buffer's descriptor to get the relation id. + * Rather than try to fix this, I'll just disallow + * partial indexes on "lock", which wouldn't be useful + * anyway. 
--Nels, Nov '92 + */ + /* SetSlotBuffer(slot, (Buffer) NULL); */ + /* SetSlotShouldFree(slot, false); */ + slot->ttc_buffer = (Buffer)NULL; + slot->ttc_shouldFree = false; +#endif /* OMIT_PARTIAL_INDEX */ + } + } else { + indexPred[i] = NULL; + } + } + } + } + + if (!binary) + { + in_functions = (func_ptr *) palloc(attr_count * sizeof(func_ptr)); + elements = (Oid *) palloc(attr_count * sizeof(Oid)); + for (i = 0; i < attr_count; i++) + { + in_func_oid = (Oid) GetInputFunction(attr[i]->atttypid); + fmgr_info(in_func_oid, &in_functions[i], &dummy); + elements[i] = GetTypeElement(attr[i]->atttypid); + } + } + else + { + fread(&ntuples, sizeof(int32), 1, fp); + if (ntuples != 0) reading_to_eof = false; + } + + values = (Datum *) palloc(sizeof(Datum) * attr_count); + nulls = (char *) palloc(attr_count); + index_nulls = (char *) palloc(attr_count); + byval = (bool *) palloc(attr_count * sizeof(bool)); + + for (i = 0; i < attr_count; i++) { + nulls[i] = ' '; + index_nulls[i] = ' '; + byval[i] = (bool) IsTypeByVal(attr[i]->atttypid); + } + + while (!done) { + if (!binary) { + for (i = 0; i < attr_count && !done; i++) { + string = CopyReadAttribute(i, fp, &isnull, delim); + if (isnull) { + values[i] = PointerGetDatum(NULL); + nulls[i] = 'n'; + }else if (string == NULL) { + done = 1; + }else { + values[i] = + (Datum)(in_functions[i])(string, + elements[i], + attr[i]->attlen); + /* + * Sanity check - by reference attributes cannot return + * NULL + */ + if (!PointerIsValid(values[i]) && + !(rel->rd_att->attrs[i]->attbyval)) { + elog(WARN, "copy from: Bad file format"); + } + } + } + }else { /* binary */ + fread(&len, sizeof(int32), 1, fp); + if (feof(fp)) { + done = 1; + }else { + fread(&null_ct, sizeof(int32), 1, fp); + if (null_ct > 0) { + for (i = 0; i < null_ct; i++) { + fread(&null_id, sizeof(int32), 1, fp); + nulls[null_id] = 'n'; + } + } + + string = (char *) palloc(len); + fread(string, len, 1, fp); + + ptr = string; + + for (i = 0; i < attr_count; i++) { + if (byval[i] && nulls[i] != 'n') { + + switch(attr[i]->attlen) { + case sizeof(char): + values[i] = (Datum) *(unsigned char *) ptr; + ptr += sizeof(char); + break; + case sizeof(short): + ptr = (char *) SHORTALIGN(ptr); + values[i] = (Datum) *(unsigned short *) ptr; + ptr += sizeof(short); + break; + case sizeof(int32): + ptr = (char *) INTALIGN(ptr); + values[i] = (Datum) *(uint32 *) ptr; + ptr += sizeof(int32); + break; + default: + elog(WARN, "COPY BINARY: impossible size!"); + break; + } + }else if (nulls[i] != 'n') { + switch (attr[i]->attlen) { + case -1: + if (attr[i]->attalign == 'd') + ptr = (char *)DOUBLEALIGN(ptr); + else + ptr = (char *)INTALIGN(ptr); + values[i] = (Datum) ptr; + ptr += * (uint32 *) ptr; + break; + case sizeof(char): + values[i] = (Datum)ptr; + ptr += attr[i]->attlen; + break; + case sizeof(short): + ptr = (char*)SHORTALIGN(ptr); + values[i] = (Datum)ptr; + ptr += attr[i]->attlen; + break; + case sizeof(int32): + ptr = (char*)INTALIGN(ptr); + values[i] = (Datum)ptr; + ptr += attr[i]->attlen; + break; + default: + if (attr[i]->attalign == 'd') + ptr = (char *)DOUBLEALIGN(ptr); + else + ptr = (char *)LONGALIGN(ptr); + values[i] = (Datum) ptr; + ptr += attr[i]->attlen; + } + } + } + } + } + if (done) continue; + + tupDesc = CreateTupleDesc(attr_count, attr); + tuple = heap_formtuple(tupDesc, values, nulls); + heap_insert(rel, tuple); + + if (has_index) { + for (i = 0; i < n_indices; i++) { + if (indexPred[i] != NULL) { +#ifndef OMIT_PARTIAL_INDEX + /* if tuple doesn't satisfy predicate, + * don't update index + 
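+ * (e.g. for an index built with a predicate such as "salary > 5000",
+ * a tuple whose salary is 3000 simply gets no index entry)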
*/ + slot->val = tuple; + /*SetSlotContents(slot, tuple); */ + if (ExecQual((List*)indexPred[i], econtext) == false) + continue; +#endif /* OMIT_PARTIAL_INDEX */ + } + FormIndexDatum(indexNatts[i], + (AttrNumber *)&(pgIndexP[i]->indkey[0]), + tuple, + tupDesc, + InvalidBuffer, + &idatum, + index_nulls, + finfoP[i]); + ituple = index_formtuple(itupdescArr[i], &idatum, index_nulls); + ituple->t_tid = tuple->t_ctid; + indexRes = index_insert(index_rels[i], ituple); + if (indexRes) pfree(indexRes); + pfree(ituple); + } + } + + if (binary) pfree(string); + + for (i = 0; i < attr_count; i++) { + if (!byval[i] && nulls[i] != 'n') { + if (!binary) pfree((void*)values[i]); + }else if (nulls[i] == 'n') { + nulls[i] = ' '; + } + } + + pfree(tuple); + tuples_read++; + + if (!reading_to_eof && ntuples == tuples_read) done = true; + } + pfree(values); + if (!binary) pfree(in_functions); + pfree(nulls); + pfree(byval); + heap_close(rel); +} + +static Oid +GetOutputFunction(Oid type) +{ + HeapTuple typeTuple; + + typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type), + 0,0,0); + + if (HeapTupleIsValid(typeTuple)) + return((int) ((TypeTupleForm) GETSTRUCT(typeTuple))->typoutput); + + elog(WARN, "GetOutputFunction: Cache lookup of type %d failed", type); + return(InvalidOid); +} + +static Oid +GetTypeElement(Oid type) +{ + HeapTuple typeTuple; + + typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type), + 0,0,0); + + + if (HeapTupleIsValid(typeTuple)) + return((int) ((TypeTupleForm) GETSTRUCT(typeTuple))->typelem); + + elog(WARN, "GetOutputFunction: Cache lookup of type %d failed", type); + return(InvalidOid); +} + +static Oid +GetInputFunction(Oid type) +{ + HeapTuple typeTuple; + + typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type), + 0,0,0); + + if (HeapTupleIsValid(typeTuple)) + return((int) ((TypeTupleForm) GETSTRUCT(typeTuple))->typinput); + + elog(WARN, "GetInputFunction: Cache lookup of type %d failed", type); + return(InvalidOid); +} + +static Oid +IsTypeByVal(Oid type) +{ + HeapTuple typeTuple; + + typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type), + 0,0,0); + + if (HeapTupleIsValid(typeTuple)) + return((int) ((TypeTupleForm) GETSTRUCT(typeTuple))->typbyval); + + elog(WARN, "GetInputFunction: Cache lookup of type %d failed", type); + + return(InvalidOid); +} + +/* + * Given the OID of a relation, return an array of index relation descriptors + * and the number of index relations. These relation descriptors are open + * using heap_open(). + * + * Space for the array itself is palloc'ed. 
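+ *
+ * For example, a heap relation with two secondary indices comes back
+ * with *n_indices set to 2 and *index_rels pointing at a palloc'ed
+ * array holding the two opened index relation descriptors.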
+ */ + +typedef struct rel_list { + Oid index_rel_oid; + struct rel_list *next; +} RelationList; + +static void +GetIndexRelations(Oid main_relation_oid, + int *n_indices, + Relation **index_rels) +{ + RelationList *head, *scan; + Relation pg_index_rel; + HeapScanDesc scandesc; + Oid index_relation_oid; + HeapTuple tuple; + TupleDesc tupDesc; + int i; + bool isnull; + + pg_index_rel = heap_openr(IndexRelationName); + scandesc = heap_beginscan(pg_index_rel, 0, NULL, 0, NULL); + tupDesc = RelationGetTupleDescriptor(pg_index_rel); + + *n_indices = 0; + + head = (RelationList *) palloc(sizeof(RelationList)); + scan = head; + head->next = NULL; + + for (tuple = heap_getnext(scandesc, 0, NULL); + tuple != NULL; + tuple = heap_getnext(scandesc, 0, NULL)) { + + index_relation_oid = + (Oid) DatumGetInt32(heap_getattr(tuple, InvalidBuffer, 2, + tupDesc, &isnull)); + if (index_relation_oid == main_relation_oid) { + scan->index_rel_oid = + (Oid) DatumGetInt32(heap_getattr(tuple, InvalidBuffer, + Anum_pg_index_indexrelid, + tupDesc, &isnull)); + (*n_indices)++; + scan->next = (RelationList *) palloc(sizeof(RelationList)); + scan = scan->next; + } + } + + heap_endscan(scandesc); + heap_close(pg_index_rel); + + *index_rels = (Relation *) palloc(*n_indices * sizeof(Relation)); + + for (i = 0, scan = head; i < *n_indices; i++, scan = scan->next) { + (*index_rels)[i] = index_open(scan->index_rel_oid); + } + + for (i = 0, scan = head; i < *n_indices + 1; i++) { + scan = head->next; + pfree(head); + head = scan; + } +} + +#define EXT_ATTLEN 5*8192 + +/* + returns 1 is c is in s +*/ +static bool +inString(char c, char* s) +{ + int i; + + if (s) { + i = 0; + while (s[i] != '\0') { + if (s[i] == c) + return 1; + i++; + } + } + return 0; +} + +/* + * Reads input from fp until eof is seen. If we are reading from standard + * input, AND we see a dot on a line by itself (a dot followed immediately + * by a newline), we exit as if we saw eof. This is so that copy pipelines + * can be used as standard input. + */ + +static char * +CopyReadAttribute(int attno, FILE *fp, bool *isnull, char *delim) +{ + static char attribute[EXT_ATTLEN]; + char c; + int done = 0; + int i = 0; + + if (feof(fp)) { + *isnull = (bool) false; + return(NULL); + } + + while (!done) { + c = getc(fp); + + if (feof(fp)) { + *isnull = (bool) false; + return(NULL); + }else if (reading_from_input && attno == 0 && i == 0 && c == '.') { + attribute[0] = c; + c = getc(fp); + if (c == '\n') { + *isnull = (bool) false; + return(NULL); + }else if (inString(c,delim)) { + attribute[1] = 0; + *isnull = (bool) false; + return(&attribute[0]); + }else { + attribute[1] = c; + i = 2; + } + }else if (c == '\\') { + c = getc(fp); + }else if (inString(c,delim) || c == '\n') { + done = 1; + } + if (!done) attribute[i++] = c; + if (i == EXT_ATTLEN - 1) + elog(WARN, "CopyReadAttribute - attribute length too long"); + } + attribute[i] = '\0'; + if (i == 0) { + *isnull = (bool) true; + return(NULL); + }else { + *isnull = (bool) false; + return(&attribute[0]); + } +} + +static void +CopyAttributeOut(FILE *fp, char *string, char *delim) +{ + int i; + int len = strlen(string); + + for (i = 0; i < len; i++) { + if (string[i] == delim[0] || string[i] == '\n' || string[i] == '\\') { + fputc('\\', fp); + } + fputc(string[i], fp); + } +} + +/* + * Returns the number of tuples in a relation. Unfortunately, currently + * must do a scan of the entire relation to determine this. + * + * relation is expected to be an open relation descriptor. 
+ */
+static int
+CountTuples(Relation relation)
+{
+ HeapScanDesc scandesc;
+ HeapTuple tuple;
+
+ int i;
+
+ scandesc = heap_beginscan(relation, 0, NULL, 0, NULL);
+
+ for (tuple = heap_getnext(scandesc, 0, NULL), i = 0;
+ tuple != NULL;
+ tuple = heap_getnext(scandesc, 0, NULL), i++)
+ ;
+ heap_endscan(scandesc);
+ return(i);
+}
diff --git a/src/backend/commands/copy.h b/src/backend/commands/copy.h
new file mode 100644
index 00000000000..ccd29555626
--- /dev/null
+++ b/src/backend/commands/copy.h
@@ -0,0 +1,21 @@
+/*-------------------------------------------------------------------------
+ *
+ * copy.h--
+ * Definitions for using the POSTGRES copy command.
+ *
+ *
+ * Copyright (c) 1994, Regents of the University of California
+ *
+ * $Id: copy.h,v 1.1.1.1 1996/07/09 06:21:19 scrappy Exp $
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef COPY_H
+#define COPY_H
+
+#include "postgres.h"
+
+void DoCopy(char *relname, bool binary, bool from, bool pipe, char *filename,
+ char *delim);
+
+#endif /* COPY_H */
diff --git a/src/backend/commands/creatinh.c b/src/backend/commands/creatinh.c
new file mode 100644
index 00000000000..a0e3a9f682b
--- /dev/null
+++ b/src/backend/commands/creatinh.c
@@ -0,0 +1,564 @@
+/*-------------------------------------------------------------------------
+ *
+ * creatinh.c--
+ * POSTGRES create/destroy relation with inheritance utility code.
+ *
+ * Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.1.1.1 1996/07/09 06:21:19 scrappy Exp $
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdio.h> /* for sprintf() */
+#include <string.h>
+#include "postgres.h"
+
+#include "tcop/tcopdebug.h"
+
+#include "utils/builtins.h"
+#include "utils/elog.h"
+#include "utils/palloc.h"
+
+#include "nodes/pg_list.h"
+#include "nodes/primnodes.h"
+#include "nodes/plannodes.h"
+#include "nodes/parsenodes.h"
+#include "nodes/execnodes.h"
+
+#include "utils/syscache.h"
+#include "utils/relcache.h"
+#include "catalog/catname.h"
+#include "catalog/pg_type.h"
+#include "catalog/pg_inherits.h"
+#include "catalog/pg_ipl.h"
+#include "parser/catalog_utils.h"
+
+#include "commands/creatinh.h"
+
+#include "access/tupdesc.h"
+#include "access/heapam.h"
+#include "access/xact.h"
+
+/* ----------------
+ * local stuff
+ * ----------------
+ */
+
+static int checkAttrExists(char *attributeName,
+ char *attributeType, List *schema);
+static List *MergeAttributes(List *schema, List *supers);
+static void StoreCatalogInheritance(Oid relationId, List *supers);
+
+/* ----------------------------------------------------------------
+ * DefineRelation --
+ * Creates a new relation.
+ * ----------------------------------------------------------------
+ */
+void
+DefineRelation(CreateStmt *stmt)
+{
+ char *relname = stmt->relname;
+ List *schema = stmt->tableElts;
+ int numberOfAttributes;
+ Oid relationId;
+ char archChar;
+ List *inheritList = NULL;
+ char *archiveName = NULL;
+ TupleDesc descriptor;
+ int heaploc, archloc;
+
+ char* typename = NULL; /* the typename of this relation. not used for now */
+
+ if ( strlen(relname) > NAMEDATALEN)
+ elog(WARN, "the relation name %s is > %d characters long", relname,
+ NAMEDATALEN);
+
+ /* ----------------
+ * Handle parameters
+ * XXX parameter handling missing below.
+ * ---------------- + */ + inheritList = stmt->inhRelnames; + + /* ---------------- + * determine archive mode + * XXX use symbolic constants... + * ---------------- + */ + archChar = 'n'; + + switch (stmt->archiveType) { + case ARCH_NONE: + archChar = 'n'; + break; + case ARCH_LIGHT: + archChar = 'l'; + break; + case ARCH_HEAVY: + archChar = 'h'; + break; + default: + elog(WARN, "Botched archive mode %d, ignoring", + stmt->archiveType); + break; + } + + if (stmt->location == -1) + heaploc = 0; + else + heaploc = stmt->location; + + /* + * For now, any user-defined relation defaults to the magnetic + * disk storgage manager. --mao 2 july 91 + */ + if (stmt->archiveLoc == -1) { + archloc = 0; + } else { + if (archChar == 'n') { + elog(WARN, "Set archive location, but not mode, for %s", + relname); + } + archloc = stmt->archiveLoc; + } + + /* ---------------- + * generate relation schema, including inherited attributes. + * ---------------- + */ + schema = MergeAttributes(schema, inheritList); + + numberOfAttributes = length(schema); + if (numberOfAttributes <= 0) { + elog(WARN, "DefineRelation: %s", + "please inherit from a relation or define an attribute"); + } + + /* ---------------- + * create a relation descriptor from the relation schema + * and create the relation. + * ---------------- + */ + descriptor = BuildDescForRelation(schema, relname); + relationId = heap_create(relname, + typename, + archChar, + heaploc, + descriptor); + + StoreCatalogInheritance(relationId, inheritList); + + /* ---------------- + * create an archive relation if necessary + * ---------------- + */ + if (archChar != 'n') { + /* + * Need to create an archive relation for this heap relation. + * We cobble up the command by hand, and increment the command + * counter ourselves. + */ + + CommandCounterIncrement(); + archiveName = MakeArchiveName(relationId); + + relationId = heap_create(archiveName, + typename, + 'n', /* archive isn't archived */ + archloc, + descriptor); + + pfree(archiveName); + } +} + +/* + * RemoveRelation -- + * Deletes a new relation. + * + * Exceptions: + * BadArg if name is invalid. + * + * Note: + * If the relation has indices defined on it, then the index relations + * themselves will be destroyed, too. + */ +void +RemoveRelation(char *name) +{ + AssertArg(name); + heap_destroy(name); +} + + +/* + * MergeAttributes -- + * Returns new schema given initial schema and supers. + * + * + * 'schema' is the column/attribute definition for the table. (It's a list + * of ColumnDef's.) It is destructively changed. + * 'inheritList' is the list of inherited relations (a list of Value(str)'s). + * + * Notes: + * The order in which the attributes are inherited is very important. + * Intuitively, the inherited attributes should come first. If a table + * inherits from multiple parents, the order of those attributes are + * according to the order of the parents specified in CREATE TABLE. 
+ * + * Here's an example: + * + * create table person (name text, age int4, location point); + * create table emp (salary int4, manager char16) inherits(person); + * create table student (gpa float8) inherits (person); + * create table stud_emp (percent int4) inherits (emp, student); + * + * the order of the attributes of stud_emp is as follow: + * + * + * person {1:name, 2:age, 3:location} + * / \ + * {6:gpa} student emp {4:salary, 5:manager} + * \ / + * stud_emp {7:percent} + */ +static List * +MergeAttributes(List *schema, List *supers) +{ + List *entry; + List *inhSchema = NIL; + + /* + * Validates that there are no duplications. + * Validity checking of types occurs later. + */ + foreach (entry, schema) { + List *rest; + ColumnDef *coldef = lfirst(entry); + + foreach (rest, lnext(entry)) { + /* + * check for duplicated relation names + */ + ColumnDef *restdef = lfirst(rest); + + if (!strcmp(coldef->colname, restdef->colname)) { + elog(WARN, "attribute \"%s\" duplicated", + coldef->colname); + } + } + } + foreach (entry, supers) { + List *rest; + + foreach (rest, lnext(entry)) { + if (!strcmp(strVal(lfirst(entry)), strVal(lfirst(rest)))) { + elog(WARN, "relation \"%s\" duplicated", + strVal(lfirst(entry))); + } + } + } + + /* + * merge the inherited attributes into the schema + */ + foreach (entry, supers) { + char *name = strVal(lfirst(entry)); + Relation relation; + List *partialResult = NIL; + AttrNumber attrno; + TupleDesc tupleDesc; + + relation = heap_openr(name); + if (relation==NULL) { + elog(WARN, + "MergeAttr: Can't inherit from non-existent superclass '%s'", + name); + } + tupleDesc = RelationGetTupleDescriptor(relation); + + for (attrno = relation->rd_rel->relnatts - 1; attrno >= 0; attrno--) { + AttributeTupleForm attribute = tupleDesc->attrs[attrno]; + char *attributeName; + char *attributeType; + HeapTuple tuple; + ColumnDef *def; + TypeName *typename; + + /* + * form name and type + */ + attributeName = (attribute->attname).data; + tuple = + SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(attribute->atttypid), + 0,0,0); + AssertState(HeapTupleIsValid(tuple)); + attributeType = + (((TypeTupleForm)GETSTRUCT(tuple))->typname).data; + /* + * check validity + * + */ + if (checkAttrExists(attributeName, attributeType, inhSchema) || + checkAttrExists(attributeName, attributeType, schema)) { + /* + * this entry already exists + */ + continue; + } + + /* + * add an entry to the schema + */ + def = makeNode(ColumnDef); + typename = makeNode(TypeName); + def->colname = pstrdup(attributeName); + typename->name = pstrdup(attributeType); + def->typename = typename; + partialResult = lcons(def, partialResult); + } + + /* + * iteration cleanup and result collection + */ + heap_close(relation); + + /* + * wants the inherited schema to appear in the order they are + * specified in CREATE TABLE + */ + inhSchema = nconc(inhSchema, partialResult); + } + + /* + * put the inherited schema before our the schema for this table + */ + schema = nconc(inhSchema, schema); + + return (schema); +} + +/* + * StoreCatalogInheritance -- + * Updates the system catalogs with proper inheritance information. + */ +static void +StoreCatalogInheritance(Oid relationId, List *supers) +{ + Relation relation; + TupleDesc desc; + int16 seqNumber; + List *entry; + List *idList; + HeapTuple tuple; + + /* ---------------- + * sanity checks + * ---------------- + */ + AssertArg(OidIsValid(relationId)); + + if (supers==NIL) + return; + + /* ---------------- + * Catalog INHERITS information. 
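+ *
+ * For instance, given the earlier example
+ * create table stud_emp (percent int4) inherits (emp, student)
+ * two pg_inherits tuples are stored for stud_emp, with
+ * (inhrel, inhparent, inhseqno) = (stud_emp, emp, 1) and
+ * (stud_emp, student, 2), the relations being recorded by OID.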
+ * ---------------- + */ + relation = heap_openr( InheritsRelationName ); + desc = RelationGetTupleDescriptor(relation); + + seqNumber = 1; + idList = NIL; + foreach (entry, supers) { + Datum datum[ Natts_pg_inherits ]; + char nullarr[ Natts_pg_inherits ]; + + tuple = SearchSysCacheTuple(RELNAME, + PointerGetDatum(strVal(lfirst(entry))), + 0,0,0); + AssertArg(HeapTupleIsValid(tuple)); + + /* + * build idList for use below + */ + idList = lappendi(idList, tuple->t_oid); + + datum[0] = ObjectIdGetDatum(relationId); /* inhrel */ + datum[1] = ObjectIdGetDatum(tuple->t_oid); /* inhparent */ + datum[2] = Int16GetDatum(seqNumber); /* inhseqno */ + + nullarr[0] = ' '; + nullarr[1] = ' '; + nullarr[2] = ' '; + + tuple = heap_formtuple(desc,datum, nullarr); + + (void) heap_insert(relation, tuple); + pfree(tuple); + + seqNumber += 1; + } + + heap_close(relation); + + /* ---------------- + * Catalog IPL information. + * + * Algorithm: + * 0. list superclasses (by Oid) in order given (see idList). + * 1. append after each relationId, its superclasses, recursively. + * 3. remove all but last of duplicates. + * 4. store result. + * ---------------- + */ + + /* ---------------- + * 1. + * ---------------- + */ + foreach (entry, idList) { + HeapTuple tuple; + Oid id; + int16 number; + List *next; + List *current; + + id = (Oid)lfirsti(entry); + current = entry; + next = lnext(entry); + + for (number = 1; ; number += 1) { + tuple = SearchSysCacheTuple(INHRELID, + ObjectIdGetDatum(id), + Int16GetDatum(number), + 0,0); + + if (! HeapTupleIsValid(tuple)) + break; + + lnext(current) = + lconsi(((InheritsTupleForm) + GETSTRUCT(tuple))->inhparent, + NIL); + + current = lnext(current); + } + lnext(current) = next; + } + + /* ---------------- + * 2. + * ---------------- + */ + foreach (entry, idList) { + Oid name; + List *rest; + bool found = false; + + again: + name = lfirsti(entry); + foreach (rest, lnext(entry)) { + if (name == lfirsti(rest)) { + found = true; + break; + } + } + if (found) { + /* + * entry list must be of length >= 2 or else no match + * + * so, remove this entry. + */ + lfirst(entry) = lfirst(lnext(entry)); + lnext(entry) = lnext(lnext(entry)); + + found = false; + goto again; + } + } + + /* ---------------- + * 3. + * ---------------- + */ + relation = heap_openr( InheritancePrecidenceListRelationName ); + desc = RelationGetTupleDescriptor(relation); + + seqNumber = 1; + + foreach (entry, idList) { + Datum datum[ Natts_pg_ipl ]; + char nullarr[ Natts_pg_ipl ]; + + datum[0] = ObjectIdGetDatum(relationId); /* iplrel */ + datum[1] = ObjectIdGetDatum(lfirsti(entry)); + /*iplinherits*/ + datum[2] = Int16GetDatum(seqNumber); /* iplseqno */ + + nullarr[0] = ' '; + nullarr[1] = ' '; + nullarr[2] = ' '; + + tuple = heap_formtuple( desc, datum, nullarr); + + (void) heap_insert(relation, tuple); + pfree(tuple); + + seqNumber += 1; + } + + heap_close(relation); +} + +/* + * returns 1 if attribute already exists in schema, 0 otherwise. + */ +static int +checkAttrExists(char *attributeName, char *attributeType, List *schema) +{ + List *s; + + foreach (s, schema) { + ColumnDef *def = lfirst(s); + + if (!strcmp(attributeName, def->colname)) { + /* + * attribute exists. Make sure the types are the same. 
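+ * (for instance, a class that redeclares an inherited column
+ * "name" as int4 when its parent declared it as text ends up in
+ * the elog(WARN) below)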
+ */ + if (strcmp(attributeType, def->typename->name) != 0) { + elog(WARN, "%s and %s conflict for %s", + attributeType, def->typename->name, attributeName); + } + return 1; + } + } + return 0; +} + +/* + * MakeArchiveName + * make an archive rel name out of a regular rel name + * +* the CALLER is responsible for freeing the memory allocated + */ + +char* +MakeArchiveName(Oid relationId) +{ + char *arch; + + /* + * Archive relations are named a,XXXXX where XXXXX == the OID + * of the relation they archive. Create a string containing + * this name and find the reldesc for the archive relation. + */ + arch = palloc(NAMEDATALEN); + sprintf(arch, "a,%d",relationId); + + return arch; +} + diff --git a/src/backend/commands/creatinh.h b/src/backend/commands/creatinh.h new file mode 100644 index 00000000000..a86fd4ed82b --- /dev/null +++ b/src/backend/commands/creatinh.h @@ -0,0 +1,20 @@ +/*------------------------------------------------------------------------- + * + * creatinh.h-- + * prototypes for creatinh.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: creatinh.h,v 1.1.1.1 1996/07/09 06:21:20 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef CREATINH_H +#define CREATINH_H + +extern void DefineRelation(CreateStmt *stmt); +extern void RemoveRelation(char *name); +extern char* MakeArchiveName(Oid relid); + +#endif /* CREATINH_H */ diff --git a/src/backend/commands/defind.c b/src/backend/commands/defind.c new file mode 100644 index 00000000000..da797e23cbb --- /dev/null +++ b/src/backend/commands/defind.c @@ -0,0 +1,505 @@ +/*------------------------------------------------------------------------- + * + * defind.c-- + * POSTGRES define, extend and remove index code. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/Attic/defind.c,v 1.1.1.1 1996/07/09 06:21:20 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "access/attnum.h" +#include "access/genam.h" +#include "access/heapam.h" +#include "access/htup.h" +#include "access/funcindex.h" +#include "utils/builtins.h" +#include "utils/syscache.h" +#include "catalog/index.h" +#include "catalog/pg_index.h" +#include "catalog/pg_proc.h" +#include "nodes/pg_list.h" +#include "nodes/plannodes.h" +#include "nodes/primnodes.h" +#include "nodes/relation.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/relcache.h" +#include "utils/lsyscache.h" + +#include "commands/defrem.h" +#include "parser/parsetree.h" /* for getrelid() */ + +#include "optimizer/prep.h" +#include "optimizer/clauses.h" +#include "storage/lmgr.h" + +#define IsFuncIndex(ATTR_LIST) (((IndexElem*)lfirst(ATTR_LIST))->args!=NULL) + +/* non-export function prototypes */ +static void CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid); +static void CheckPredExpr(Node *predicate, List *rangeTable, + Oid baseRelOid); +static void +CheckPredClause(Expr *predicate, List *rangeTable, Oid baseRelOid); +static void FuncIndexArgs(IndexElem *funcIndex, AttrNumber *attNumP, + Oid *argTypes, Oid *opOidP, Oid relId); +static void NormIndexAttrs(List *attList, AttrNumber *attNumP, + Oid *opOidP, Oid relId); + +/* + * DefineIndex -- + * Creates a new index. + * + * 'attributeList' is a list of IndexElem specifying either a functional + * index or a list of attributes to index on. 
+ * 'parameterList' is a list of ParamString specified in the with clause. + * 'predicate' is the qual specified in the where clause. + * 'rangetable' is for the predicate + * + * Exceptions: + * XXX + */ +void +DefineIndex(char *heapRelationName, + char *indexRelationName, + char *accessMethodName, + List *attributeList, + List *parameterList, + Expr *predicate, + List *rangetable) +{ + Oid *classObjectId; + Oid accessMethodId; + Oid relationId; + int numberOfAttributes; + AttrNumber *attributeNumberA; + HeapTuple tuple; + uint16 parameterCount = 0; + Datum *parameterA = NULL; + FuncIndexInfo fInfo; + List *cnfPred = NULL; + + + /* + * Handle attributes + */ + numberOfAttributes = length(attributeList); + if (numberOfAttributes <= 0) { + elog(WARN, "DefineIndex: must specify at least one attribute"); + } + + /* + * compute heap relation id + */ + tuple = SearchSysCacheTuple(RELNAME, + PointerGetDatum(heapRelationName), + 0,0,0); + if (!HeapTupleIsValid(tuple)) { + elog(WARN, "DefineIndex: %s relation not found", + heapRelationName); + } + relationId = tuple->t_oid; + + /* + * compute access method id + */ + tuple = SearchSysCacheTuple(AMNAME, PointerGetDatum(accessMethodName), + 0,0,0); + if (!HeapTupleIsValid(tuple)) { + elog(WARN, "DefineIndex: %s access method not found", + accessMethodName); + } + accessMethodId = tuple->t_oid; + + + /* + * Handle parameters + * [param list is now different (NOT USED, really) - ay 10/94] + */ + + + /* + * Convert the partial-index predicate from parsetree form to plan + * form, so it can be readily evaluated during index creation. + * Note: "predicate" comes in as a list containing (1) the predicate + * itself (a where_clause), and (2) a corresponding range table. + * + * [(1) is 'predicate' and (2) is 'rangetable' now. - ay 10/94] + */ + if (predicate != NULL && rangetable != NIL) { + cnfPred = cnfify((Expr*)copyObject(predicate), true); + fix_opids(cnfPred); + CheckPredicate(cnfPred, rangetable, relationId); + } + + if (IsFuncIndex(attributeList)) { + IndexElem *funcIndex= lfirst(attributeList); + int nargs; + + nargs = length(funcIndex->args); + if (nargs > INDEX_MAX_KEYS) { + elog(WARN, + "Too many args to function, limit of %d", + INDEX_MAX_KEYS); + } + + FIsetnArgs(&fInfo,nargs); + + strcpy(FIgetname(&fInfo), funcIndex->name); + + attributeNumberA = + (AttrNumber *)palloc(nargs * sizeof attributeNumberA[0]); + + classObjectId = (Oid *)palloc(sizeof classObjectId[0]); + + + FuncIndexArgs(funcIndex, attributeNumberA, + &(FIgetArg(&fInfo, 0)), + classObjectId, relationId); + + index_create(heapRelationName, + indexRelationName, + &fInfo, accessMethodId, + numberOfAttributes, attributeNumberA, + classObjectId, parameterCount, parameterA, (Node*)cnfPred); + }else { + attributeNumberA = + (AttrNumber *)palloc(numberOfAttributes * + sizeof attributeNumberA[0]); + + classObjectId = + (Oid *)palloc(numberOfAttributes * sizeof classObjectId[0]); + + NormIndexAttrs(attributeList, attributeNumberA, + classObjectId, relationId); + + index_create(heapRelationName, indexRelationName, NULL, + accessMethodId, numberOfAttributes, attributeNumberA, + classObjectId, parameterCount, parameterA, (Node*)cnfPred); + } +} + + +/* + * ExtendIndex -- + * Extends a partial index. 
+ *
+ * Exceptions:
+ * XXX
+ */
+void
+ExtendIndex(char *indexRelationName, Expr *predicate, List *rangetable)
+{
+ Oid *classObjectId;
+ Oid accessMethodId;
+ Oid indexId, relationId;
+ Oid indproc;
+ int numberOfAttributes;
+ AttrNumber *attributeNumberA;
+ HeapTuple tuple;
+ FuncIndexInfo fInfo;
+ FuncIndexInfo *funcInfo = NULL;
+ IndexTupleForm index;
+ Node *oldPred = NULL;
+ List *cnfPred = NULL;
+ PredInfo *predInfo;
+ Relation heapRelation;
+ Relation indexRelation;
+ int i;
+
+ /*
+ * compute index relation id and access method id
+ */
+ tuple = SearchSysCacheTuple(RELNAME, PointerGetDatum(indexRelationName),
+ 0,0,0);
+ if (!HeapTupleIsValid(tuple)) {
+ elog(WARN, "ExtendIndex: %s index not found",
+ indexRelationName);
+ }
+ indexId = tuple->t_oid;
+ accessMethodId = ((Form_pg_class) GETSTRUCT(tuple))->relam;
+
+ /*
+ * find pg_index tuple
+ */
+ tuple = SearchSysCacheTuple(INDEXRELID,
+ ObjectIdGetDatum(indexId),
+ 0,0,0);
+ if (!HeapTupleIsValid(tuple)) {
+ elog(WARN, "ExtendIndex: %s is not an index",
+ indexRelationName);
+ }
+
+ /*
+ * Extract info from the pg_index tuple
+ */
+ index = (IndexTupleForm)GETSTRUCT(tuple);
+ Assert(index->indexrelid == indexId);
+ relationId = index->indrelid;
+ indproc = index->indproc;
+
+ for (i=0; i<INDEX_MAX_KEYS; i++)
+ if (index->indkey[i] == 0) break;
+ numberOfAttributes = i;
+
+ if (VARSIZE(&index->indpred) != 0) {
+ char *predString;
+
+ predString = fmgr(F_TEXTOUT, &index->indpred);
+ oldPred = stringToNode(predString);
+ pfree(predString);
+ }
+ if (oldPred == NULL)
+ elog(WARN, "ExtendIndex: %s is not a partial index",
+ indexRelationName);
+
+ /*
+ * Convert the extension predicate from parsetree form to plan
+ * form, so it can be readily evaluated during index creation.
+ * Note: "predicate" comes in as a list containing (1) the predicate
+ * itself (a where_clause), and (2) a corresponding range table.
+ */
+ if (rangetable != NIL) {
+ cnfPred = cnfify((Expr*)copyObject(predicate), true);
+ fix_opids(cnfPred);
+ CheckPredicate(cnfPred, rangetable, relationId);
+ }
+
+ /* make predInfo list to pass to index_build */
+ predInfo = (PredInfo*)palloc(sizeof(PredInfo));
+ predInfo->pred = (Node*)cnfPred;
+ predInfo->oldPred = oldPred;
+
+ attributeNumberA =
+ (AttrNumber *)palloc(numberOfAttributes*
+ sizeof attributeNumberA[0]);
+ classObjectId =
+ (Oid *)palloc(numberOfAttributes * sizeof classObjectId[0]);
+
+
+ for (i=0; i<numberOfAttributes; i++) {
+ attributeNumberA[i] = index->indkey[i];
+ classObjectId[i] = index->indclass[i];
+ }
+
+ if (indproc != InvalidOid) {
+ funcInfo = &fInfo;
+/* FIgetnArgs(funcInfo) = numberOfAttributes; */
+ FIsetnArgs(funcInfo,numberOfAttributes);
+
+ tuple = SearchSysCacheTuple(PROOID,
+ ObjectIdGetDatum(indproc),
+ 0,0,0);
+ if (!HeapTupleIsValid(tuple))
+ elog(WARN, "ExtendIndex: index procedure not found");
+
+ namecpy(&(funcInfo->funcName),
+ &(((Form_pg_proc) GETSTRUCT(tuple))->proname));
+
+ FIsetProcOid(funcInfo,tuple->t_oid);
+ }
+
+ heapRelation = heap_open(relationId);
+ indexRelation = index_open(indexId);
+
+ RelationSetLockForWrite(heapRelation);
+
+ InitIndexStrategy(numberOfAttributes, indexRelation, accessMethodId);
+
+ index_build(heapRelation, indexRelation, numberOfAttributes,
+ attributeNumberA, 0, NULL, funcInfo, predInfo);
+}
+
+
+/*
+ * CheckPredicate
+ * Checks that the given list of partial-index predicates refer
+ * (via the given range table) only to the given base relation oid,
+ * and that they're in a form the planner can handle, i.e.,
+ * boolean combinations of "ATTR OP CONST" (yes, for now, the ATTR
+ * has to be on the left).
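+ *
+ * So, for example, a predicate like
+ * salary > 5000 and dept = 'shoe'
+ * is accepted, whereas one written as "5000 < salary", or one that
+ * compares two attributes, is rejected here.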
+ */ + +static void +CheckPredicate(List *predList, List *rangeTable, Oid baseRelOid) +{ + List *item; + + foreach (item, predList) { + CheckPredExpr(lfirst(item), rangeTable, baseRelOid); + } +} + +static void +CheckPredExpr(Node *predicate, List *rangeTable, Oid baseRelOid) +{ + List *clauses = NIL, *clause; + + if (is_opclause(predicate)) { + CheckPredClause((Expr*)predicate, rangeTable, baseRelOid); + return; + } else if (or_clause(predicate)) + clauses = ((Expr*)predicate)->args; + else if (and_clause(predicate)) + clauses = ((Expr*)predicate)->args; + else + elog(WARN, "Unsupported partial-index predicate expression type"); + + foreach (clause, clauses) { + CheckPredExpr(lfirst(clause), rangeTable, baseRelOid); + } +} + +static void +CheckPredClause(Expr *predicate, List *rangeTable, Oid baseRelOid) +{ + Var *pred_var; + Const *pred_const; + + pred_var = (Var *)get_leftop(predicate); + pred_const = (Const *)get_rightop(predicate); + + if (!IsA(predicate->oper,Oper) || + !IsA(pred_var,Var) || + !IsA(pred_const,Const)) { + elog(WARN, "Unsupported partial-index predicate clause type"); + } + + if (getrelid(pred_var->varno, rangeTable) != baseRelOid) + elog(WARN, + "Partial-index predicates may refer only to the base relation"); +} + + +static void +FuncIndexArgs(IndexElem *funcIndex, + AttrNumber *attNumP, + Oid *argTypes, + Oid *opOidP, + Oid relId) +{ + List *rest; + HeapTuple tuple; + AttributeTupleForm att; + + tuple = SearchSysCacheTuple(CLANAME, + PointerGetDatum(funcIndex->class), + 0,0,0); + + if (!HeapTupleIsValid(tuple)) + { + elog(WARN, "DefineIndex: %s class not found", + funcIndex->class); + } + *opOidP = tuple->t_oid; + + memset(argTypes, 0, 8 * sizeof(Oid)); + + /* + * process the function arguments + */ + for (rest=funcIndex->args; rest != NIL; rest = lnext(rest)) { + char *arg; + + arg = strVal(lfirst(rest)); + + tuple = SearchSysCacheTuple(ATTNAME, + ObjectIdGetDatum(relId), + PointerGetDatum(arg),0,0); + + if (!HeapTupleIsValid(tuple)) { + elog(WARN, + "DefineIndex: attribute \"%s\" not found", + arg); + } + att = (AttributeTupleForm)GETSTRUCT(tuple); + *attNumP++ = att->attnum; + *argTypes++ = att->atttypid; + } +} + +static void +NormIndexAttrs(List *attList, /* list of IndexElem's */ + AttrNumber *attNumP, + Oid *opOidP, + Oid relId) +{ + List *rest; + HeapTuple tuple; + + /* + * process attributeList + */ + + for (rest=attList; rest != NIL; rest = lnext(rest)) { + IndexElem *attribute; + + attribute = lfirst(rest); + + if (attribute->class == NULL) { + elog(WARN, + "DefineIndex: default index class unsupported"); + } + + if (attribute->name == NULL) + elog(WARN, "missing attribute for define index"); + + tuple = SearchSysCacheTuple(ATTNAME, + ObjectIdGetDatum(relId), + PointerGetDatum(attribute->name), + 0,0); + if (!HeapTupleIsValid(tuple)) { + elog(WARN, + "DefineIndex: attribute \"%s\" not found", + attribute->name); + } + *attNumP++ = ((AttributeTupleForm)GETSTRUCT(tuple))->attnum; + + tuple = SearchSysCacheTuple(CLANAME, + PointerGetDatum(attribute->class), + 0,0,0); + + if (!HeapTupleIsValid(tuple)) { + elog(WARN, "DefineIndex: %s class not found", + attribute->class); + } + *opOidP++ = tuple->t_oid; + } +} + +/* + * RemoveIndex -- + * Deletes an index. + * + * Exceptions: + * BadArg if name is invalid. + * "WARN" if index nonexistant. + * ... 
+ */
+void
+RemoveIndex(char *name)
+{
+ HeapTuple tuple;
+
+ tuple = SearchSysCacheTuple(RELNAME,
+ PointerGetDatum(name),
+ 0,0,0);
+
+ if (!HeapTupleIsValid(tuple)) {
+ elog(WARN, "index \"%s\" nonexistent", name);
+ }
+
+ if (((Form_pg_class)GETSTRUCT(tuple))->relkind != RELKIND_INDEX) {
+ elog(WARN, "relation \"%s\" is of type \"%c\"",
+ name,
+ ((Form_pg_class)GETSTRUCT(tuple))->relkind);
+ }
+
+ index_destroy(tuple->t_oid);
+}
diff --git a/src/backend/commands/define.c b/src/backend/commands/define.c
new file mode 100644
index 00000000000..4ba38c793c7
--- /dev/null
+++ b/src/backend/commands/define.c
@@ -0,0 +1,564 @@
+/*-------------------------------------------------------------------------
+ *
+ * define.c--
+ * POSTGRES "define" utility code.
+ *
+ * Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.1.1.1 1996/07/09 06:21:20 scrappy Exp $
+ *
+ * DESCRIPTION
+ * The "DefineFoo" routines take the parse tree and pick out the
+ * appropriate arguments/flags, passing the results to the
+ * corresponding "FooDefine" routines (in src/catalog) that do
+ * the actual catalog-munging.
+ *
+ * NOTES
+ * These things must be defined and committed in the following order:
+ * "define function":
+ * input/output, recv/send procedures
+ * "define type":
+ * type
+ * "define operator":
+ * operators
+ *
+ * Most of the parse-tree manipulation routines are defined in
+ * commands/manip.c.
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <ctype.h>
+#include <math.h>
+#include <string.h>
+
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/htup.h"
+#include "utils/tqual.h"
+#include "catalog/catname.h"
+#include "catalog/pg_aggregate.h"
+#include "catalog/pg_proc.h"
+#include "catalog/pg_type.h"
+#include "utils/syscache.h"
+#include "nodes/pg_list.h"
+#include "nodes/parsenodes.h"
+#include "fmgr.h" /* for fmgr */
+
+#include "utils/builtins.h" /* prototype for textin() */
+
+#include "utils/elog.h"
+#include "utils/palloc.h"
+#include "commands/defrem.h"
+#include "optimizer/xfunc.h"
+#include "tcop/dest.h"
+
+static char *defGetString(DefElem *def);
+static int defGetTypeLength(DefElem *def);
+
+#define DEFAULT_TYPDELIM ','
+
+/*
+ * DefineFunction --
+ * Registers a new function.
+ *
+ */
+void
+DefineFunction(ProcedureStmt *stmt, CommandDest dest)
+{
+ List *parameters = stmt->withClause;
+ char *proname = stmt->funcname;
+ char* probin_str;
+ char* prosrc_str;
+ char *prorettype;
+ char *languageName;
+ bool canCache;
+ bool trusted = TRUE;
+ List *argList;
+ int32 byte_pct, perbyte_cpu, percall_cpu, outin_ratio;
+ bool returnsSet;
+ int i;
+
+ /* ----------------
+ * figure out the language and convert it to lowercase.
+ * ----------------
+ */
+ languageName = stmt->language;
+ for (i = 0; i < NAMEDATALEN && languageName[i]; ++i) {
+ languageName[i] = tolower(languageName[i]);
+ }
+
+ /* ----------------
+ * handle "returntype = X". The function could return a singleton
+ * value or a set of values. Figure out which.
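+ * (e.g. a plain "returns int4" leaves returnsSet false, while a
+ * setof return type, which the parser hands over as a TypeName
+ * node, sets returnsSet true)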
+ * ---------------- + */ + if (nodeTag(stmt->returnType)==T_TypeName) { + TypeName *setType = (TypeName *)stmt->returnType; + /* a set of values */ + prorettype = setType->name, + returnsSet = true; + }else { + /* singleton */ + prorettype = strVal(stmt->returnType); + returnsSet = false; + } + + /* Next attributes are only defined for C functions */ + if ( strcmp(languageName, "c") == 0 || + strcmp(languageName, "internal") == 0 ) { + List *pl; + + /* the defaults */ + canCache = FALSE; + byte_pct = BYTE_PCT; + perbyte_cpu = PERBYTE_CPU; + percall_cpu = PERCALL_CPU; + outin_ratio = OUTIN_RATIO; + + foreach(pl, parameters) { + int count; + char *ptr; + ParamString *param = (ParamString*)lfirst(pl); + + if (!strcasecmp(param->name, "isacachable")) { + /* ---------------- + * handle "[ iscachable ]": figure out if Postquel functions + * are cacheable automagically? + * ---------------- + */ + canCache = TRUE; + }else if (!strcasecmp(param->name, "trusted")) { + /* + * we don't have untrusted functions any more. The 4.2 + * implementation is lousy anyway so I took it out. + * -ay 10/94 + */ + elog(WARN, "untrusted function has been decommissioned."); + }else if (!strcasecmp(param->name, "byte_pct")) { + /* + ** handle expensive function parameters + */ + byte_pct = atoi(param->val); + }else if (!strcasecmp(param->name, "perbyte_cpu")) { + if (!sscanf(param->val, "%d", &perbyte_cpu)) { + for (count = 0, ptr = param->val; *ptr != '\0'; ptr++) { + if (*ptr == '!') { + count++; + } + } + perbyte_cpu = (int) pow(10.0, (double) count); + } + }else if (!strcasecmp(param->name, "percall_cpu")) { + if (!sscanf(param->val, "%d", &percall_cpu)) { + for (count = 0, ptr = param->val; *ptr != '\0'; ptr++) { + if (*ptr == '!') { + count++; + } + } + percall_cpu = (int) pow(10.0, (double) count); + } + }else if (!strcasecmp(param->name, "outin_ratio")) { + outin_ratio = atoi(param->val); + } + } + } else if (!strcmp(languageName, "sql")) { + canCache = false; + trusted = true; + + /* query optimizer groks sql, these are meaningless */ + perbyte_cpu = percall_cpu = 0; + byte_pct = outin_ratio = 100; + } else { + elog(WARN, "DefineFunction: language '%s' is not supported", + languageName); + } + + /* ---------------- + * handle "[ arg is (...) ]" + * XXX fix optional arg handling below + * ---------------- + */ + argList = stmt->defArgs; + + if ( strcmp(languageName, "c") == 0 || + strcmp(languageName, "internal") == 0 ) { + prosrc_str = "-"; + probin_str = stmt->as; + } else { + prosrc_str = stmt->as; + probin_str = "-"; + } + + /* C is stored uppercase in pg_language */ + if (!strcmp(languageName, "c")) { + languageName[0] = 'C'; + } + + /* ---------------- + * now have ProcedureDefine do all the work.. + * ---------------- + */ + ProcedureCreate(proname, + returnsSet, + prorettype, + languageName, + prosrc_str, /* converted to text later */ + probin_str, /* converted to text later */ + canCache, + trusted, + byte_pct, + perbyte_cpu, + percall_cpu, + outin_ratio, + argList, + dest); + +} + +/* -------------------------------- + * DefineOperator-- + * + * this function extracts all the information from the + * parameter list generated by the parser and then has + * OperatorCreate() do all the actual work. 
+ * + * 'parameters' is a list of DefElem + * -------------------------------- + */ +void +DefineOperator(char *oprName, + List *parameters) +{ + uint16 precedence=0; /* operator precedence */ + bool canHash=false; /* operator hashes */ + bool isLeftAssociative=true; /* operator is left associative */ + char *functionName=NULL; /* function for operator */ + char *typeName1=NULL; /* first type name */ + char *typeName2=NULL; /* second type name */ + char *commutatorName=NULL; /* optional commutator operator name */ + char *negatorName=NULL; /* optional negator operator name */ + char *restrictionName=NULL; /* optional restrict. sel. procedure */ + char *joinName=NULL; /* optional join sel. procedure name */ + char *sortName1=NULL; /* optional first sort operator */ + char *sortName2=NULL; /* optional second sort operator */ + List *pl; + + /* + * loop over the definition list and extract the information we need. + */ + foreach (pl, parameters) { + DefElem *defel = (DefElem *)lfirst(pl); + + if (!strcasecmp(defel->defname, "leftarg")) { + /* see gram.y, must be setof */ + if (nodeTag(defel->arg)==T_TypeName) + elog(WARN, "setof type not implemented for leftarg"); + + if (nodeTag(defel->arg)==T_String) { + typeName1 = defGetString(defel); + }else { + elog(WARN, "type for leftarg is malformed."); + } + } else if (!strcasecmp(defel->defname, "rightarg")) { + /* see gram.y, must be setof */ + if (nodeTag(defel->arg)==T_TypeName) + elog(WARN, "setof type not implemented for rightarg"); + + if (nodeTag(defel->arg)==T_String) { + typeName2 = defGetString(defel); + }else { + elog(WARN, "type for rightarg is malformed."); + } + } else if (!strcasecmp(defel->defname, "procedure")) { + functionName = defGetString(defel); + } else if (!strcasecmp(defel->defname, "precedence")) { + /* NOT IMPLEMENTED (never worked in v4.2) */ + elog(NOTICE, "CREATE OPERATOR: precedence not implemented"); + } else if (!strcasecmp(defel->defname, "associativity")) { + /* NOT IMPLEMENTED (never worked in v4.2) */ + elog(NOTICE, "CREATE OPERATOR: associativity not implemented"); + } else if (!strcasecmp(defel->defname, "commutator")) { + commutatorName = defGetString(defel); + } else if (!strcasecmp(defel->defname, "negator")) { + negatorName = defGetString(defel); + } else if (!strcasecmp(defel->defname, "restrict")) { + restrictionName = defGetString(defel); + } else if (!strcasecmp(defel->defname, "join")) { + joinName = defGetString(defel); + } else if (!strcasecmp(defel->defname, "hashes")) { + canHash = TRUE; + } else if (!strcasecmp(defel->defname, "sort1")) { + /* ---------------- + * XXX ( ... [ , sort1 = oprname ] [ , sort2 = oprname ] ... ) + * XXX is undocumented in the reference manual source as of + * 89/8/22. + * ---------------- + */ + sortName1 = defGetString(defel); + } else if (!strcasecmp(defel->defname, "sort2")) { + sortName2 = defGetString(defel); + } else { + elog(NOTICE, "DefineOperator: attribute \"%s\" not recognized", + defel->defname); + } + } + + /* + * make sure we have our required definitions + */ + if (functionName==NULL) { + elog(WARN, "Define: \"procedure\" unspecified"); + } + + /* ---------------- + * now have OperatorCreate do all the work.. 
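Most branches of the keyword loop above go through defGetString (defined near the end of this file), which insists the definition element's value really is a string before handing it back. A self-contained sketch of that accessor over a simplified element type (all Sketch* names are illustrative stand-ins, not the real DefElem):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for a definition element: a name plus a tagged value. */
typedef enum { SK_STRING, SK_INTEGER } SketchTag;

typedef struct {
    const char *defname;
    SketchTag   tag;
    const char *strval;
    int         intval;
} SketchDefElem;

/* defGetString-style accessor: refuse anything but a string value. */
static const char *
sketch_get_string(const SketchDefElem *def)
{
    if (def->tag != SK_STRING) {
        fprintf(stderr, "Define: \"%s\" = what?\n", def->defname);
        exit(1);
    }
    return def->strval;
}

int
main(void)
{
    SketchDefElem leftarg = { "leftarg", SK_STRING, "int4", 0 };

    printf("%s\n", sketch_get_string(&leftarg));    /* prints "int4" */
    return 0;
}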
+ * ---------------- + */ + OperatorCreate(oprName, /* operator name */ + typeName1, /* first type name */ + typeName2, /* second type name */ + functionName, /* function for operator */ + precedence, /* operator precedence */ + isLeftAssociative, /* operator is left associative */ + commutatorName, /* optional commutator operator name */ + negatorName, /* optional negator operator name */ + restrictionName, /* optional restrict. sel. procedure */ + joinName, /* optional join sel. procedure name */ + canHash, /* operator hashes */ + sortName1, /* optional first sort operator */ + sortName2); /* optional second sort operator */ + +} + +/* ------------------- + * DefineAggregate + * ------------------ + */ +void +DefineAggregate(char *aggName, List *parameters) + +{ + char *stepfunc1Name = NULL; + char *stepfunc2Name = NULL; + char *finalfuncName = NULL; + char *baseType = NULL; + char *stepfunc1Type = NULL; + char *stepfunc2Type = NULL; + char *init1 = NULL; + char *init2 = NULL; + List *pl; + + foreach (pl, parameters) { + DefElem *defel = (DefElem *)lfirst(pl); + + /* + * sfunc1 + */ + if (!strcasecmp(defel->defname, "sfunc1")) { + stepfunc1Name = defGetString(defel); + } else if (!strcasecmp(defel->defname, "basetype")) { + baseType = defGetString(defel); + } else if (!strcasecmp(defel->defname, "stype1")) { + stepfunc1Type = defGetString(defel); + + /* + * sfunc2 + */ + } else if (!strcasecmp(defel->defname, "sfunc2")) { + stepfunc2Name = defGetString(defel); + } else if (!strcasecmp(defel->defname, "stype2")) { + stepfunc2Type = defGetString(defel); + /* + * final + */ + } else if (!strcasecmp(defel->defname, "finalfunc")) { + finalfuncName = defGetString(defel); + /* + * initial conditions + */ + } else if (!strcasecmp(defel->defname, "initcond1")) { + init1 = defGetString(defel); + } else if (!strcasecmp(defel->defname, "initcond2")) { + init2 = defGetString(defel); + } else { + elog(NOTICE, "DefineAggregate: attribute \"%s\" not recognized", + defel->defname); + } + } + + /* + * make sure we have our required definitions + */ + if (baseType==NULL) + elog(WARN, "Define: \"basetype\" unspecified"); + if (stepfunc1Name!=NULL) { + if (stepfunc1Type==NULL) + elog(WARN, "Define: \"stype1\" unspecified"); + } + if (stepfunc2Name!=NULL) { + if (stepfunc2Type==NULL) + elog(WARN, "Define: \"stype2\" unspecified"); + } + + /* + * Most of the argument-checking is done inside of AggregateCreate + */ + AggregateCreate(aggName, /* aggregate name */ + stepfunc1Name, /* first step function name */ + stepfunc2Name, /* second step function name */ + finalfuncName, /* final function name */ + baseType, /* type of object being aggregated */ + stepfunc1Type, /* return type of first function */ + stepfunc2Type, /* return type of second function */ + init1, /* first initial condition */ + init2); /* second initial condition */ + + /* XXX free palloc'd memory */ +} + +/* + * DefineType -- + * Registers a new type. + * + */ +void +DefineType(char *typeName, List *parameters) +{ + int16 internalLength= 0; /* int2 */ + int16 externalLength= 0; /* int2 */ + char *elemName = NULL; + char *inputName = NULL; + char *outputName = NULL; + char *sendName = NULL; + char *receiveName = NULL; + char *defaultValue = NULL; /* Datum */ + bool byValue = false; + char delimiter = DEFAULT_TYPDELIM; + char *shadow_type; + List *pl; + char alignment = 'i'; /* default alignment */ + + /* + * Type names can only be 15 characters long, so that the shadow type + * can be created using the 16th character as necessary. 
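DefineAggregate below wires an aggregate out of up to two step functions and an optional final function: sfunc1 sees each input value and its own state, sfunc2 sees only its own state, and finalfunc combines the two states. As a rough illustration only (a toy model, not how the executor actually drives aggregates), an avg-style aggregate fits this shape with a running sum, a tuple count, and a final division:

#include <stdio.h>

/* Toy model: state 1 is a running sum (sfunc1), state 2 a tuple count (sfunc2). */
static double sfunc1_sum(double state, double input) { return state + input; }
static int    sfunc2_count(int state)                { return state + 1; }
static double finalfunc_avg(double sum, int count)   { return count ? sum / count : 0.0; }

int
main(void)
{
    double vals[] = { 2.0, 4.0, 9.0 };
    double sum = 0.0;            /* plays the role of initcond1 */
    int    count = 0;            /* plays the role of initcond2 */
    int    i;

    for (i = 0; i < 3; i++) {
        sum = sfunc1_sum(sum, vals[i]);   /* step function 1 sees the input value */
        count = sfunc2_count(count);      /* step function 2 sees only its state */
    }
    printf("avg = %g\n", finalfunc_avg(sum, count));   /* prints: avg = 5 */
    return 0;
}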
+ */ + if (strlen(typeName) >= (NAMEDATALEN - 1)) { + elog(WARN, "DefineType: type names must be %d characters or less", + NAMEDATALEN - 1); + } + + foreach(pl, parameters) { + DefElem *defel = (DefElem*)lfirst(pl); + + if (!strcasecmp(defel->defname, "internallength")) { + internalLength = defGetTypeLength(defel); + }else if (!strcasecmp(defel->defname, "externallength")) { + externalLength = defGetTypeLength(defel); + }else if (!strcasecmp(defel->defname, "input")) { + inputName = defGetString(defel); + }else if (!strcasecmp(defel->defname, "output")) { + outputName = defGetString(defel); + }else if (!strcasecmp(defel->defname, "send")) { + sendName = defGetString(defel); + }else if (!strcasecmp(defel->defname, "delimiter")) { + char *p = defGetString(defel); + delimiter = p[0]; + }else if (!strcasecmp(defel->defname, "receive")) { + receiveName = defGetString(defel); + }else if (!strcasecmp(defel->defname, "element")) { + elemName = defGetString(defel); + }else if (!strcasecmp(defel->defname, "default")) { + defaultValue = defGetString(defel); + }else if (!strcasecmp(defel->defname, "passedbyvalue")) { + byValue = true; + }else if (!strcasecmp(defel->defname, "alignment")) { + char *a = defGetString(defel); + if (!strcasecmp(a, "double")) { + alignment = 'd'; + } else if (!strcasecmp(a, "int")) { + alignment = 'i'; + } else { + elog(WARN, "DefineType: \"%s\" alignment not recognized", + a); + } + }else { + elog(NOTICE, "DefineType: attribute \"%s\" not recognized", + defel->defname); + } + } + + /* + * make sure we have our required definitions + */ + if (inputName==NULL) + elog(WARN, "Define: \"input\" unspecified"); + if (outputName==NULL) + elog(WARN, "Define: \"output\" unspecified"); + + /* ---------------- + * now have TypeCreate do all the real work. + * ---------------- + */ + (void) TypeCreate(typeName, /* type name */ + InvalidOid, /* relation oid (n/a here) */ + internalLength, /* internal size */ + externalLength, /* external size */ + 'b', /* type-type (base type) */ + delimiter, /* array element delimiter */ + inputName, /* input procedure */ + outputName, /* output procedure */ + sendName, /* send procedure */ + receiveName, /* receive procedure */ + elemName, /* element type name */ + defaultValue, /* default type value */ + byValue, /* passed by value */ + alignment); + + /* ---------------- + * When we create a true type (as opposed to a complex type) + * we need to have an shadow array entry for it in pg_type as well. 
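In the parameter loop above, internallength and externallength go through defGetTypeLength (defined at the end of this file), which takes either an integer or the keyword "variable", the latter meaning a variable-length size encoded as -1. A standalone sketch of that convention (the helper name is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <strings.h>    /* strcasecmp */

/* Map a type-length setting to its internal form: a byte count, or "variable" => -1. */
static int
parse_type_length(const char *val)
{
    if (strcasecmp(val, "variable") == 0)
        return -1;              /* variable-length type */
    return atoi(val);           /* fixed length in bytes */
}

int
main(void)
{
    printf("%d %d\n", parse_type_length("16"), parse_type_length("variable"));
    /* prints: 16 -1 */
    return 0;
}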
+ * ---------------- + */ + shadow_type = makeArrayTypeName(typeName); + + (void) TypeCreate(shadow_type, /* type name */ + InvalidOid, /* relation oid (n/a here) */ + -1, /* internal size */ + -1, /* external size */ + 'b', /* type-type (base type) */ + DEFAULT_TYPDELIM, /* array element delimiter */ + "array_in", /* input procedure */ + "array_out", /* output procedure */ + "array_out", /* send procedure */ + "array_in", /* receive procedure */ + typeName, /* element type name */ + defaultValue, /* default type value */ + false, /* never passed by value */ + alignment); + + pfree(shadow_type); +} + +static char * +defGetString(DefElem *def) +{ + if (nodeTag(def->arg)!=T_String) + elog(WARN, "Define: \"%s\" = what?", def->defname); + return (strVal(def->arg)); +} + +static int +defGetTypeLength(DefElem *def) +{ + if (nodeTag(def->arg)==T_Integer) + return (intVal(def->arg)); + else if (nodeTag(def->arg)==T_String && + !strcasecmp(strVal(def->arg),"variable")) + return -1; /* variable length */ + + elog(WARN, "Define: \"%s\" = what?", def->defname); + return -1; +} diff --git a/src/backend/commands/defrem.h b/src/backend/commands/defrem.h new file mode 100644 index 00000000000..3658dc50085 --- /dev/null +++ b/src/backend/commands/defrem.h @@ -0,0 +1,53 @@ +/*------------------------------------------------------------------------- + * + * defrem.h-- + * POSTGRES define and remove utility definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: defrem.h,v 1.1.1.1 1996/07/09 06:21:20 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef DEFREM_H +#define DEFREM_H + +#include "postgres.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "nodes/parsenodes.h" +#include "tcop/dest.h" + +/* + * prototypes in defind.c + */ +extern void DefineIndex(char *heapRelationName, + char *indexRelationName, + char *accessMethodName, + List *attributeList, + List *parameterList, Expr *predicate, + List *rangetable); +extern void ExtendIndex(char *indexRelationName, + Expr *predicate, + List *rangetable); +extern void RemoveIndex(char *name); + +/* + * prototypes in define.c + */ +extern void DefineFunction(ProcedureStmt *nameargsexe, CommandDest dest); +extern void DefineOperator(char *name, List *parameters); +extern void DefineAggregate(char *name, List *parameters); +extern void DefineType(char *name, List *parameters); + +/* + * prototypes in remove.c + */ +extern void RemoveFunction(char *functionName, int nargs, List *argNameList); +extern void RemoveOperator(char *operatorName, + char *typeName1, char *typeName2); +extern void RemoveType(char *typeName); +extern void RemoveAggregate(char *aggName); + +#endif /* DEFREM_H */ diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c new file mode 100644 index 00000000000..a37f0f9cf4b --- /dev/null +++ b/src/backend/commands/explain.c @@ -0,0 +1,219 @@ +/*------------------------------------------------------------------------- + * + * explain.c-- + * Explain the query execution plan + * + * Copyright (c) 1994-5, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.1.1.1 1996/07/09 06:21:21 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "parser/catalog_utils.h" +#include "parser/parse_query.h" /* for MakeTimeRange() */ +#include "nodes/plannodes.h" +#include 
"tcop/tcopprot.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "lib/stringinfo.h" +#include "commands/explain.h" +#include "optimizer/planner.h" +#include "access/xact.h" + +typedef struct ExplainState { + /* options */ + int printCost; /* print cost */ + int printNodes; /* do nodeToString() instead */ + /* other states */ + List *rtable; /* range table */ +} ExplainState; + +static char *Explain_PlanToString(Plan *plan, ExplainState *es); + +/* + * ExplainQuery - + * print out the execution plan for a given query + * + */ +void +ExplainQuery(Query *query, List *options, CommandDest dest) +{ + char *s; + Plan *plan; + ExplainState *es; + int len; + + if (IsAbortedTransactionBlockState()) { + char *tag = "*ABORT STATE*"; + EndCommand(tag, dest); + + elog(NOTICE, "(transaction aborted): %s", + "queries ignored until END"); + + return; + } + + /* plan the queries (XXX we've ignored rewrite!!) */ + plan = planner(query); + + /* pg_plan could have failed */ + if (plan == NULL) + return; + + es = (ExplainState*)malloc(sizeof(ExplainState)); + memset(es, 0, sizeof(ExplainState)); + + /* parse options */ + while (options) { + char *ostr = strVal(lfirst(options)); + if (!strcasecmp(ostr, "cost")) + es->printCost = 1; + else if (!strcasecmp(ostr, "full_plan")) + es->printNodes = 1; + + options = lnext(options); + } + es->rtable = query->rtable; + + if (es->printNodes) { + s = nodeToString(plan); + } else { + s = Explain_PlanToString(plan, es); + } + + /* output the plan */ + len = strlen(s); + elog(NOTICE, "QUERY PLAN:\n\n%.*s", ELOG_MAXLEN-64, s); + len -= ELOG_MAXLEN-64; + while (len > 0) { + s += ELOG_MAXLEN-64; + elog(NOTICE, "%.*s", ELOG_MAXLEN-64, s); + len -= ELOG_MAXLEN-64; + } + free(es); +} + +/***************************************************************************** + * + *****************************************************************************/ + +/* + * explain_outNode - + * converts a Node into ascii string and append it to 'str' + */ +static void +explain_outNode(StringInfo str, Plan *plan, int indent, ExplainState *es) +{ + char *pname; + char buf[1000]; + int i; + + if (plan==NULL) { + appendStringInfo(str, "\n"); + return; + } + + switch(nodeTag(plan)) { + case T_Result: + pname = "Result"; + break; + case T_Append: + pname = "Append"; + break; + case T_NestLoop: + pname = "Nested Loop"; + break; + case T_MergeJoin: + pname = "Merge Join"; + break; + case T_HashJoin: + pname = "Hash Join"; + break; + case T_SeqScan: + pname = "Seq Scan"; + break; + case T_IndexScan: + pname = "Index Scan"; + break; + case T_Temp: + pname = "Temp Scan"; + break; + case T_Sort: + pname = "Sort"; + break; + case T_Agg: + pname = "Aggregate"; + break; + case T_Unique: + pname = "Unique"; + break; + case T_Hash: + pname = "Hash"; + break; + case T_Tee: + pname = "Tee"; + break; + default: + break; + } + + for(i=0; i < indent; i++) + appendStringInfo(str, " "); + + appendStringInfo(str, pname); + switch(nodeTag(plan)) { + case T_SeqScan: + case T_IndexScan: + if (((Scan*)plan)->scanrelid > 0) { + RangeTblEntry *rte = nth(((Scan*)plan)->scanrelid-1, es->rtable); + sprintf(buf, " on %.*s", NAMEDATALEN, rte->refname); + appendStringInfo(str, buf); + } + break; + default: + break; + } + if (es->printCost) { + sprintf(buf, " (cost=%.2f size=%d width=%d)", + plan->cost, plan->plan_size, plan->plan_width); + appendStringInfo(str, buf); + } + appendStringInfo(str, "\n"); + + /* lefttree */ + if (outerPlan(plan)) { + for(i=0; i < indent; i++) + appendStringInfo(str, " "); + 
appendStringInfo(str, " -> "); + explain_outNode(str, outerPlan(plan), indent+1, es); + } + + /* righttree */ + if (innerPlan(plan)) { + for(i=0; i < indent; i++) + appendStringInfo(str, " "); + appendStringInfo(str, " -> "); + explain_outNode(str, innerPlan(plan), indent+1, es); + } + return; +} + +static char * +Explain_PlanToString(Plan *plan, ExplainState *es) +{ + StringInfo str; + char *s; + + if (plan==NULL) + return ""; + Assert(plan!=NULL); + str = makeStringInfo(); + explain_outNode(str, plan, 0, es); + s = str->data; + pfree(str); + + return s; +} diff --git a/src/backend/commands/explain.h b/src/backend/commands/explain.h new file mode 100644 index 00000000000..e0848bb7711 --- /dev/null +++ b/src/backend/commands/explain.h @@ -0,0 +1,17 @@ +/*------------------------------------------------------------------------- + * + * explain.h-- + * prototypes for explain.c + * + * Copyright (c) 1994-5, Regents of the University of California + * + * $Id: explain.h,v 1.1.1.1 1996/07/09 06:21:21 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef EXPLAIN_H +#define EXPLAIN_H + +extern void ExplainQuery(Query *query, List *options, CommandDest dest); + +#endif /* EXPLAIN_H*/ diff --git a/src/backend/commands/purge.c b/src/backend/commands/purge.c new file mode 100644 index 00000000000..b8b8317ab96 --- /dev/null +++ b/src/backend/commands/purge.c @@ -0,0 +1,168 @@ +/*------------------------------------------------------------------------- + * + * purge.c-- + * the POSTGRES purge command. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/Attic/purge.c,v 1.1.1.1 1996/07/09 06:21:21 scrappy Exp $ + * + * Note: + * XXX There are many instances of int32 instead of ...Time. These + * should be changed once it is decided the signed'ness will be. + * + *------------------------------------------------------------------------- + */ +#include "c.h" + +#include "access/heapam.h" +#include "access/xact.h" +#include "utils/tqual.h" /* for NowTimeQual */ +#include "catalog/catname.h" +#include "catalog/indexing.h" +#include "fmgr.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/nabstime.h" + +#include "catalog/pg_class.h" +#include "commands/purge.h" +#include "utils/builtins.h" /* for isreltime() */ + +static char cmdname[] = "RelationPurge"; + +#define RELATIVE 01 +#define ABSOLUTE 02 + +int32 +RelationPurge(char *relationName, + char *absoluteTimeString, + char *relativeTimeString) +{ + register i; + AbsoluteTime absoluteTime = INVALID_ABSTIME; + RelativeTime relativeTime = INVALID_RELTIME; + bits8 dateTag; + Relation relation; + HeapScanDesc scan; + static ScanKeyData key[1] = { + { 0, Anum_pg_class_relname, F_NAMEEQ } + }; + Buffer buffer; + HeapTuple newTuple, oldTuple; + AbsoluteTime currentTime; + char *values[Natts_pg_class]; + char nulls[Natts_pg_class]; + char replace[Natts_pg_class]; + Relation idescs[Num_pg_class_indices]; + + /* + * XXX for some reason getmyrelids (in inval.c) barfs when + * you heap_replace tuples from these classes. i thought + * setheapoverride would fix it but it didn't. for now, + * just disallow purge on these classes. 
+ */ + if (strcmp(RelationRelationName, relationName) == 0 || + strcmp(AttributeRelationName, relationName) == 0 || + strcmp(AccessMethodRelationName, relationName) == 0 || + strcmp(AccessMethodOperatorRelationName, relationName) == 0) { + elog(WARN, "%s: cannot purge catalog \"%s\"", + cmdname, relationName); + } + + if (PointerIsValid(absoluteTimeString)) { + absoluteTime = (int32) nabstimein(absoluteTimeString); + absoluteTimeString[0] = '\0'; + if (absoluteTime == INVALID_ABSTIME) { + elog(NOTICE, "%s: bad absolute time string \"%s\"", + cmdname, absoluteTimeString); + elog(WARN, "purge not executed"); + } + } + +#ifdef PURGEDEBUG + elog(DEBUG, "%s: absolute time `%s' is %d.", + cmdname, absoluteTimeString, absoluteTime); +#endif /* defined(PURGEDEBUG) */ + + if (PointerIsValid(relativeTimeString)) { + if (isreltime(relativeTimeString, NULL, NULL, NULL) != 1) { + elog(WARN, "%s: bad relative time string \"%s\"", + cmdname, relativeTimeString); + } + relativeTime = reltimein(relativeTimeString); + +#ifdef PURGEDEBUG + elog(DEBUG, "%s: relative time `%s' is %d.", + cmdname, relativeTimeString, relativeTime); +#endif /* defined(PURGEDEBUG) */ + } + + /* + * Find the RELATION relation tuple for the given relation. + */ + relation = heap_openr(RelationRelationName); + key[0].sk_argument = PointerGetDatum(relationName); + fmgr_info(key[0].sk_procedure, &key[0].sk_func, &key[0].sk_nargs); + + scan = heap_beginscan(relation, 0, NowTimeQual, 1, key); + oldTuple = heap_getnext(scan, 0, &buffer); + if (!HeapTupleIsValid(oldTuple)) { + heap_endscan(scan); + heap_close(relation); + elog(WARN, "%s: no such relation: %s", cmdname, relationName); + return(0); + } + + /* + * Dig around in the tuple. + */ + currentTime = GetCurrentTransactionStartTime(); + if (!RelativeTimeIsValid(relativeTime)) { + dateTag = ABSOLUTE; + if (!AbsoluteTimeIsValid(absoluteTime)) + absoluteTime = currentTime; + } else if (!AbsoluteTimeIsValid(absoluteTime)) + dateTag = RELATIVE; + else + dateTag = ABSOLUTE | RELATIVE; + + for (i = 0; i < Natts_pg_class; ++i) { + nulls[i] = heap_attisnull(oldTuple, i+1) ? 'n' : ' '; + values[i] = NULL; + replace[i] = ' '; + } + if (dateTag & ABSOLUTE) { + values[Anum_pg_class_relexpires-1] = + (char *) UInt32GetDatum(absoluteTime); + replace[Anum_pg_class_relexpires-1] = 'r'; + } + if (dateTag & RELATIVE) { + values[Anum_pg_class_relpreserved-1] = + (char *) UInt32GetDatum(relativeTime); + replace[Anum_pg_class_relpreserved-1] = 'r'; + } + + /* + * Change the RELATION relation tuple for the given relation. + */ + newTuple = heap_modifytuple(oldTuple, buffer, relation, (Datum*)values, + nulls, replace); + + /* XXX How do you detect an insertion error?? 
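The values/nulls/replace arrays built above drive heap_modifytuple: values holds new column data, nulls marks NULLs, and only the columns whose replace flag is 'r' are overwritten, everything else being copied from the old tuple. A toy model of that convention over plain strings (the function and variable names are illustrative):

#include <stdio.h>

#define NCOLS 3

/*
 * Toy model of the heap_modifytuple convention: copy the old row, overwriting
 * only the columns whose replace[] flag is 'r'.
 */
static void
modify_row(const char *oldrow[NCOLS], const char *values[NCOLS],
           const char replace[NCOLS], const char *newrow[NCOLS])
{
    int i;

    for (i = 0; i < NCOLS; i++)
        newrow[i] = (replace[i] == 'r') ? values[i] : oldrow[i];
}

int
main(void)
{
    const char *oldrow[NCOLS] = { "relname", "old expiration", "old preserved" };
    const char *values[NCOLS] = { NULL, "new expiration", NULL };
    const char  replace[NCOLS] = { ' ', 'r', ' ' };   /* only column 1 changes */
    const char *newrow[NCOLS];
    int i;

    modify_row(oldrow, values, replace, newrow);
    for (i = 0; i < NCOLS; i++)
        printf("%s\n", newrow[i]);
    return 0;
}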
*/ + (void) heap_replace(relation, &newTuple->t_ctid, newTuple); + + /* keep the system catalog indices current */ + CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_class_indices, relation, newTuple); + CatalogCloseIndices(Num_pg_class_indices, idescs); + + pfree(newTuple); + + heap_endscan(scan); + heap_close(relation); + return(1); +} + diff --git a/src/backend/commands/purge.h b/src/backend/commands/purge.h new file mode 100644 index 00000000000..20174182880 --- /dev/null +++ b/src/backend/commands/purge.h @@ -0,0 +1,20 @@ +/*------------------------------------------------------------------------- + * + * purge.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: purge.h,v 1.1.1.1 1996/07/09 06:21:21 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PURGE_H +#define PURGE_H + +extern int32 RelationPurge(char *relationName, + char *absoluteTimeString, + char *relativeTimeString); + +#endif /* PURGE_H */ diff --git a/src/backend/commands/recipe.c b/src/backend/commands/recipe.c new file mode 100644 index 00000000000..97d0df6d379 --- /dev/null +++ b/src/backend/commands/recipe.c @@ -0,0 +1,1181 @@ +/*------------------------------------------------------------------------- + * + * recipe.c-- + * routines for handling execution of Tioga recipes + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/Attic/recipe.c,v 1.1.1.1 1996/07/09 06:21:21 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + + +#include "include/postgres.h" +#include "nodes/parsenodes.h" +#include "nodes/plannodes.h" +#include "nodes/execnodes.h" +#include "nodes/pg_list.h" +#include "nodes/makefuncs.h" +#include "catalog/pg_type.h" +#include "commands/recipe.h" +#include "libpq/libpq-be.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/geo-decls.h" +#include "utils/relcache.h" /* for RelationNameGetRelation*/ +#include "parser/parse_query.h" +#include "rewrite/rewriteHandler.h" +#include "rewrite/rewriteManip.h" +#include "tcop/pquery.h" +#include "tcop/dest.h" +#include "optimizer/planner.h" +#include "executor/executor.h" + +/* from tcop/postgres.c */ +extern CommandDest whereToSendOutput; + +#ifndef TIOGA + +void beginRecipe(RecipeStmt *stmt) { + elog(NOTICE,"You must compile with TIOGA defined in order to use recipes\n"); +} +#else + +#include "tioga/tgRecipe.h" + +#define DEBUG_RECIPE 1 + +/* structure to keep track of the tee node plans */ +typedef struct _teePlanInfo { + char* tpi_relName; + Query* tpi_parsetree; + Plan* tpi_plan; +} TeePlanInfo; + +typedef struct _teeInfo { + int num; + TeePlanInfo *val; +} TeeInfo; + +QueryTreeList *appendQlist(QueryTreeList *q1, QueryTreeList *q2); +void OffsetVarAttno(Node* node, int varno, int offset); + +static void appendTeeQuery(TeeInfo *teeInfo, + QueryTreeList *q, + char* teeNodeName); + +static Plan* replaceTeeScans(Plan* plan, + Query* parsetree, + TeeInfo *teeInfo); +static void replaceSeqScan(Plan* plan, + Plan* parent, + int rt_ind, + Plan* tplan); + +static void tg_rewriteQuery(TgRecipe* r, TgNode *n, + QueryTreeList *q, + QueryTreeList *inputQlist); +static Node *tg_replaceNumberedParam(Node* expression, + int pnum, + int rt_ind, + char *teeRelName); +static Node *tg_rewriteParamsInExpr(Node *expression, + QueryTreeList *inputQlist); +static 
QueryTreeList *tg_parseSubQuery(TgRecipe* r, + TgNode* n, + TeeInfo* teeInfo); +static QueryTreeList* tg_parseTeeNode(TgRecipe *r, + TgNode *n, + int i, + QueryTreeList *qList, + TeeInfo* teeInfo); + + +/* + The Tioga recipe rewrite algorithm: + + To parse a Tioga recipe, we start from an eye node and go backwards through + its input nodes. To rewrite a Tioga node, we do the following: + + 1) parse the node we're at in the standard way (calling parser() ) + 2) rewrite its input nodes recursively using Tioga rewrite + 3) now, with the rewritten input parse trees and the original parse tree + of the node, we rewrite the the node. + To do the rewrite, we use the target lists, range tables, and + qualifications of the input parse trees +*/ + +/* + * beginRecipe: + * this is the main function to recipe execution + * this function is invoked for EXECUTE RECIPE ... statements + * + * takes in a RecipeStmt structure from the parser + * and returns a list of cursor names + */ + +void +beginRecipe(RecipeStmt* stmt) +{ + TgRecipe* r; + int i; + QueryTreeList *qList; + char portalName[1024]; + + Plan *plan; + TupleDesc attinfo; + QueryDesc *queryDesc; + Query *parsetree; + + int numTees; + TeeInfo* teeInfo; + + /* retrieveRecipe() reads the recipe from the database + and returns a TgRecipe* structure we can work with */ + + r = retrieveRecipe(stmt->recipeName); + + if (r == NULL) return; + + /* find the number of tees in the recipe */ + numTees = r->tees->num; + + if (numTees > 0) { + /* allocate a teePlan structure */ + teeInfo = (TeeInfo*)malloc(sizeof(TeeInfo)); + teeInfo->num = numTees; + teeInfo->val = (TeePlanInfo*)malloc(numTees * sizeof(TeePlanInfo)); + for (i=0;ival[i].tpi_relName = r->tees->val[i]->nodeName; + teeInfo->val[i].tpi_parsetree = NULL; + teeInfo->val[i].tpi_plan = NULL; + } + } else + teeInfo = NULL; + + /* + * for each viewer in the recipe, go backwards from each viewer input + * and generate a plan. Attach the plan to cursors. 
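The rewrite algorithm described above works from an eye node backwards: each node's inputs are rewritten recursively before the node itself is rewritten against their parse trees. A minimal sketch of that input-first traversal order over a simplified node type (SketchNode and the node names are illustrative only):

#include <stdio.h>

/* Simplified stand-in for a recipe node: a name and its input nodes. */
typedef struct SketchNode {
    const char *name;
    int nin;
    struct SketchNode **in;
} SketchNode;

/* Rewrite the inputs first, then the node itself (here we only print the order). */
static void
rewrite_node(const SketchNode *n)
{
    int i;

    for (i = 0; i < n->nin; i++)
        rewrite_node(n->in[i]);
    printf("rewrite %s\n", n->name);
}

int
main(void)
{
    SketchNode source = { "source", 0, NULL };
    SketchNode *filter_in[] = { &source };
    SketchNode filter = { "filter", 1, filter_in };
    SketchNode *eye_in[] = { &filter };
    SketchNode eye = { "eye", 1, eye_in };

    rewrite_node(&eye);   /* prints: rewrite source, rewrite filter, rewrite eye */
    return 0;
}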
+ **/ + for (i=0;ieyes->num;i++) { + TgNodePtr e; + + e = r->eyes->val[i]; + if (e->inNodes->num > 1) { + elog(NOTICE, + "beginRecipe: Currently eyes cannot have more than one input"); + } + if (e->inNodes->num == 0) { + /* no input to this eye, skip it */ + continue; + } + +#ifdef DEBUG_RECIPE + elog(NOTICE,"beginRecipe: eyes[%d] = %s\n", i, e->nodeName); +#endif /* DEBUG_RECIPE */ + + qList = tg_parseSubQuery(r,e->inNodes->val[0], teeInfo); + + if (qList == NULL) { + /* eye is directly connected to a tee node */ + /* XXX TODO: handle this case */ + } + + /* now, plan the queries */ + /* should really do everything pg_plan() does, but for now, + we skip the rule rewrite and time qual stuff */ + + /* ---------------------------------------------------------- + * 1) plan the main query, everything from an eye node back to + a Tee + * ---------------------------------------------------------- */ + parsetree = qList->qtrees[0]; + + /* before we plan, we want to see all the changes + we did, during the rewrite phase, such as + creating the tee tables, + setheapoverride() allows us to see the changes */ + setheapoverride(true); + plan = planner(parsetree); + + /* ---------------------------------------------------------- + * 2) plan the tee queries, (subgraphs rooted from a Tee) + by the time the eye is processed, all tees that contribute + to that eye will have been included in the teeInfo list + * ---------------------------------------------------------- */ + if (teeInfo) { + int t; + Plan* tplan; + Tee* newplan; + + for (t=0; tnum;t++) { + if (teeInfo->val[t].tpi_plan == NULL) { + /* plan it in the usual fashion */ + tplan = planner(teeInfo->val[t].tpi_parsetree); + + /* now add a tee node to the root of the plan */ +elog(NOTICE, "adding tee plan node to the root of the %s\n", + teeInfo->val[t].tpi_relName); + newplan = (Tee*)makeNode(Tee); + newplan->plan.targetlist = tplan->targetlist; + newplan->plan.qual = NULL; /* tplan->qual; */ + newplan->plan.lefttree = tplan; + newplan->plan.righttree = NULL; + newplan->leftParent = NULL; + newplan->rightParent = NULL; + /* the range table of the tee is the range table + of the tplan */ + newplan->rtentries = teeInfo->val[t].tpi_parsetree->rtable; + strcpy(newplan->teeTableName, + teeInfo->val[t].tpi_relName); + teeInfo->val[t].tpi_plan = (Plan*)newplan; + } + } + + /* ---------------------------------------------------------- + * 3) replace the tee table scans in the main plan with + actual tee plannodes + * ---------------------------------------------------------- */ + + plan = replaceTeeScans(plan, parsetree, teeInfo); + + } /* if (teeInfo) */ + + setheapoverride(false); + + /* define a portal for this viewer input */ + /* for now, eyes can only have one input */ + sprintf(portalName, "%s%d",e->nodeName,0); + + queryDesc = CreateQueryDesc(parsetree, + plan, + whereToSendOutput); + /* ---------------- + * call ExecStart to prepare the plan for execution + * ---------------- + */ + attinfo = ExecutorStart(queryDesc,NULL); + + ProcessPortal(portalName, + parsetree, + plan, + attinfo, + whereToSendOutput); +elog(NOTICE, "beginRecipe: cursor named %s is now available", portalName); + } + +} + + + +/* + * tg_rewriteQuery - + * r - the recipe being rewritten + * n - the node that we're current at + * q - a QueryTree List containing the parse tree of the node + * inputQlist - the parsetrees of its input nodes, + * the size of inputQlist must be the same as the + * number of input nodes. 
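The first step of tg_rewriteQuery (just below) appends each input query's range table to the original one; Var nodes copied from an input tree then point at the wrong slots until they are shifted by the original table's length, which is what OffsetVarNodes and OffsetVarAttno take care of. A self-contained sketch of that renumbering over a simplified Var (the SketchVar type is mine, not the real Var node):

#include <stdio.h>

/* Simplified stand-in for a Var node: which range-table entry it points at. */
typedef struct {
    int varno;     /* 1-based index into the query's range table */
    int varattno;  /* column number within that relation */
} SketchVar;

/*
 * After appending a child's range table to a parent table of length rt_length,
 * every Var taken from the child must be shifted by rt_length.
 */
static void
offset_var_nodes(SketchVar *vars, int nvars, int rt_length)
{
    int i;

    for (i = 0; i < nvars; i++)
        vars[i].varno += rt_length;
}

int
main(void)
{
    /* the child query referenced its own range-table entries 1 and 2 */
    SketchVar child_vars[] = { { 1, 3 }, { 2, 1 } };
    int parent_rt_length = 2;   /* the parent already had two entries */

    offset_var_nodes(child_vars, 2, parent_rt_length);
    printf("%d %d\n", child_vars[0].varno, child_vars[1].varno);  /* prints: 3 4 */
    return 0;
}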
Some elements in the inpuQlist + * may be null if the inputs to those nodes are unconnected + * + * this is the main routine for rewriting the recipe queries + * the original query tree 'q' is modified + */ + +static void +tg_rewriteQuery(TgRecipe* r, + TgNode *n, + QueryTreeList *q, + QueryTreeList *inputQlist) +{ + Query* orig; + Query* inputQ; + int i; + List *rtable; + List *input_rtable; + int rt_length; + + /* orig is the original parse tree of the node */ + orig = q->qtrees[0]; + + + /*------------------------------------------------------------------- + step 1: + + form a combined range table from all the range tables in the original + query as well as the input nodes + + form a combined qualification from the qual in the original plus + the quals of the input nodes + ------------------------------------------------------------------- + */ + + /* start with the original range table */ + rtable = orig->rtable; + rt_length = length(rtable); + + for (i=0;iinNodes->num;i++) { + if (n->inNodes->val[i] != NULL && + n->inNodes->val[i]->nodeType != TG_TEE_NODE) { + inputQ = inputQlist->qtrees[i]; + input_rtable = inputQ->rtable; + + /* need to offset the var nodes in the qual and targetlist + because they are indexed off the original rtable */ + OffsetVarNodes((Node*)inputQ->qual, rt_length); + OffsetVarNodes((Node*)inputQ->targetList, rt_length); + + /* append the range tables from the children nodes */ + rtable = nconc (rtable, input_rtable); + + /* append the qualifications of the child node into the + original qual list */ + AddQual(orig, inputQ->qual); + } + } + orig->rtable = rtable; + + /* step 2: + rewrite the target list of the original parse tree + if there are any references to params, replace them with + the appropriate target list entry of the children node + */ + if (orig->targetList != NIL) { + List *tl; + TargetEntry *tle; + + foreach (tl, orig->targetList) { + tle = lfirst(tl); + if (tle->resdom != NULL) { + tle->expr = tg_rewriteParamsInExpr(tle->expr, inputQlist); + } + } + } + + /* step 3: + rewrite the qual of the original parse tree + if there are any references to params, replace them with + the appropriate target list entry of the children node + */ + if (orig->qual) { + if (nodeTag(orig->qual) == T_List) { + elog(WARN, "tg_rewriteQuery: Whoa! why is my qual a List???"); + } + orig->qual = tg_rewriteParamsInExpr(orig->qual, inputQlist); + } + + /* at this point, we're done with the rewrite, the querytreelist q + has been modified */ + +} + + +/* tg_replaceNumberedParam: + + this procedure replaces the specified numbered param with a + reference to a range table + + this procedure recursively calls itself + + it returns a (possibly modified) Node*. + +*/ +static Node* +tg_replaceNumberedParam(Node *expression, + int pnum, /* the number of the parameter */ + int rt_ind, /* the range table index */ + char *teeRelName) /* the relname of the tee table */ +{ + TargetEntry *param_tle; + Param* p; + Var *newVar,*oldVar; + + if (expression == NULL) return NULL; + + switch (nodeTag(expression)) { + case T_Param: + { + /* the node is a parameter, + substitute the entry from the target list of the child that + corresponds to the parameter number*/ + p = (Param*)expression; + + /* we only deal with the case of numbered parameters */ + if (p->paramkind == PARAM_NUM && p->paramid == pnum) { + + if (p->param_tlist) { + /* we have a parameter with an attribute like $N.foo + so replace it with a new var node */ + + /* param tlist can only have one entry in them! 
*/ + param_tle = (TargetEntry*)(lfirst(p->param_tlist)); + oldVar = (Var*)param_tle->expr; + oldVar->varno = rt_ind; + oldVar->varnoold = rt_ind; + return (Node*)oldVar; + } else { + /* we have $N without the .foo */ + bool defined; + bool isRel; + /* TODO here, we need to check to see whether the type of the + tee is a complex type (relation) or a simple type */ + /* if it is a simple type, then we need to get the "result" + attribute from the tee relation */ + + isRel = (typeid_get_relid(p->paramtype) != 0); + if (isRel) { + newVar = makeVar(rt_ind, + 0, /* the whole tuple */ + TypeGet(teeRelName,&defined), + rt_ind, + 0); + return (Node*)newVar; + } else + newVar = makeVar(rt_ind, + 1, /* just the first field, which is 'result' */ + TypeGet(teeRelName,&defined), + rt_ind, + 0); + return (Node*)newVar; + + } + } + else { + elog(NOTICE, "tg_replaceNumberedParam: unexpected paramkind value of %d", p->paramkind); + } + } + break; + case T_Expr: + { + /* the node is an expression, we need to recursively + call ourselves until we find parameter nodes */ + List *l; + Expr *expr = (Expr*)expression; + List *newArgs; + + /* we have to make a new args lists because Params + can be replaced by Var nodes in tg_replaceNumberedParam()*/ + newArgs = NIL; + + /* we only care about argument to expressions, + it doesn't matter when the opType is */ + /* recursively rewrite the arguments of this expression */ + foreach (l, expr->args) { + newArgs = lappend(newArgs, + tg_replaceNumberedParam(lfirst(l), + pnum, + rt_ind, + teeRelName)); + } + /* change the arguments of the expression */ + expr->args = newArgs; + } + break; + default: + { + /* ignore other expr types */ + } + } + + return expression; +} + + + + + +/* tg_rewriteParamsInExpr: + + rewrite the params in expressions by using the targetlist entries + from the input parsetrees + + this procedure recursively calls itself + + it returns a (possibly modified) Node*. + +*/ +static Node* +tg_rewriteParamsInExpr(Node *expression, QueryTreeList *inputQlist) +{ + List *tl; + TargetEntry *param_tle, *tle; + Param* p; + int childno; + char *resname; + + if (expression == NULL) return NULL; + + switch (nodeTag(expression)) { + case T_Param: + { + /* the node is a parameter, + substitute the entry from the target list of the child that + corresponds to the parameter number*/ + p = (Param*)expression; + + /* we only deal with the case of numbered parameters */ + if (p->paramkind == PARAM_NUM) { + /* paramid's start from 1*/ + childno = p->paramid - 1; + + if (p->param_tlist) { + /* we have a parameter with an attribute like $N.foo + so match the resname "foo" against the target list + of the (N-1)th inputQlist */ + + /* param tlist can only have one entry in them! 
*/ + param_tle = (TargetEntry*)(lfirst(p->param_tlist)); + resname = param_tle->resdom->resname; + + if (inputQlist->qtrees[childno]) { + foreach (tl, inputQlist->qtrees[childno]->targetList) { + tle = lfirst(tl); + if (strcmp(resname, tle->resdom->resname) == 0) { + return tle->expr; + } + } + } + else { + elog(WARN,"tg_rewriteParamsInExpr:can't substitute for parameter %d when that input is unconnected", p->paramid); + } + + } else { + /* we have $N without the .foo */ + /* use the first resdom in the targetlist of the */ + /* appropriate child query */ + tl = inputQlist->qtrees[childno]->targetList; + tle = lfirst(tl); + return tle->expr; + } + } + else { + elog(NOTICE, "tg_rewriteParamsInExpr: unexpected paramkind value of %d", p->paramkind); + } + } + break; + case T_Expr: + { + /* the node is an expression, we need to recursively + call ourselves until we find parameter nodes */ + List *l; + Expr *expr = (Expr*)expression; + List *newArgs; + + /* we have to make a new args lists because Params + can be replaced by Var nodes in tg_rewriteParamsInExpr()*/ + newArgs = NIL; + + /* we only care about argument to expressions, + it doesn't matter when the opType is */ + /* recursively rewrite the arguments of this expression */ + foreach (l, expr->args) { + newArgs = lappend(newArgs, + tg_rewriteParamsInExpr(lfirst(l), inputQlist)); + } + /* change the arguments of the expression */ + expr->args = newArgs; + } + break; + default: + { + /* ignore other expr types */ + } + } + + return expression; +} + + + +/* + getParamTypes: + given an element, finds its parameter types. + the typev array argument is set to the parameter types. + the parameterCount is returned + + this code is very similar to ProcedureDefine() in pg_proc.c +*/ +static int +getParamTypes (TgElement *elem, Oid typev[]) +{ + /* this code is similar to ProcedureDefine() */ + int16 parameterCount; + bool defined; + Oid toid; + char *t; + int i,j; + + parameterCount = 0; + for (i=0;i<8;i++) { + typev[i] = 0; + } + for (j=0;jinTypes->num;j++) { + if (parameterCount == 8) { + elog(WARN, + "getParamTypes: Ingredients cannot take > 8 arguments"); + } + t = elem->inTypes->val[j]; + if (strcmp(t,"opaque") == 0) { + elog(WARN, + "getParamTypes: Ingredient functions cannot take type 'opaque'"); + } else { + toid = TypeGet(elem->inTypes->val[j], &defined); + if (!OidIsValid(toid)) { + elog(WARN, "getParamTypes: arg type '%s' is not defined",t); + } + if (!defined) { + elog(NOTICE, "getParamTypes: arg type '%s' is only a shell",t); + } + } + typev[parameterCount++] = toid; + } + + return parameterCount; +} + + +/* + * tg_parseTeeNode + * + * handles the parsing of the tee node + * + * + */ + +static QueryTreeList* +tg_parseTeeNode(TgRecipe *r, + TgNode *n, /* the tee node */ + int i, /* which input this node is to its parent */ + QueryTreeList *qList, + TeeInfo* teeInfo) + +{ + QueryTreeList *q; + char* tt; + int rt_ind; + Query* orig; + + /* the input Node is a tee node, so we need to do the following: + * we need to parse the child of the tee node, + we add that to our query tree list + * we need the name of the tee node table + the tee node table is the table into which the tee node + may materialize results. 
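tg_parseTeeNode (below) adds the tee table to the query's range table only when RangeTablePosn reports it absent (position 0), then uses its 1-based position to rewrite the $i parameters; connecting two inputs to the same tee therefore reuses the existing entry. A standalone sketch of that look-up-or-append convention (SketchRangeTable and the table names are illustrative):

#include <stdio.h>
#include <string.h>

#define MAX_RTE 16

/* Toy range table: just relation names, indexed 1-based as in the planner. */
typedef struct {
    int  len;
    char names[MAX_RTE][32];
} SketchRangeTable;

/* Return the 1-based position of name in the table, or 0 if absent. */
static int
range_table_posn(const SketchRangeTable *rt, const char *name)
{
    int i;

    for (i = 0; i < rt->len; i++)
        if (strcmp(rt->names[i], name) == 0)
            return i + 1;
    return 0;
}

/* Look the tee table up; append it only if it is not already referenced. */
static int
add_tee_entry(SketchRangeTable *rt, const char *name)
{
    int rt_ind = range_table_posn(rt, name);

    if (rt_ind == 0) {
        strcpy(rt->names[rt->len], name);
        rt_ind = ++rt->len;          /* new entry sits at the end, 1-based */
    }
    return rt_ind;
}

int
main(void)
{
    SketchRangeTable rt = { 1, { "emp" } };

    printf("%d\n", add_tee_entry(&rt, "tee_node7"));   /* prints 2: appended */
    printf("%d\n", add_tee_entry(&rt, "tee_node7"));   /* prints 2: reused   */
    return 0;
}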
Call it TT + * we add a range table to our existing query with TT in it + * we need to replace the parameter $i with TT + (otherwise the optimizer won't know to use the table + on expression containining $i) + After that rewrite, the optimizer will generate + sequential scans of TT + + Later, in the glue phase, we replace all instances of TT + sequential scans with the actual Tee node + */ + q = tg_parseSubQuery(r,n, teeInfo); + + /* tt is the name of the tee node table */ + tt = n->nodeName; + + if (q) + appendTeeQuery(teeInfo,q,tt); + + orig = qList->qtrees[0]; + rt_ind = RangeTablePosn(orig->rtable,tt); + /* check to see that this table is not part of + the range table already. This usually only + happens if multiple inputs are connected to the + same Tee. */ + if (rt_ind == 0) { + orig->rtable = lappend(orig->rtable, + makeRangeTableEntry(tt, + FALSE, + NULL, + tt)); + rt_ind = length(orig->rtable); + } + + orig->qual = tg_replaceNumberedParam(orig->qual, + i+1, /* params start at 1*/ + rt_ind, + tt); + return qList; +} + + +/* + * tg_parseSubQuery: + * go backwards from a node and parse the query + * + * the result parse tree is passed back + * + * could return NULL if trying to parse a teeNode + * that's already been processed by another parent + * + */ + +static QueryTreeList* +tg_parseSubQuery(TgRecipe* r, TgNode* n, TeeInfo* teeInfo) +{ + TgElement *elem; + char* funcName; + Oid typev[8]; /* eight arguments maximum */ + int i; + int parameterCount; + + QueryTreeList *qList; /* the parse tree of the nodeElement */ + QueryTreeList *inputQlist; /* the list of parse trees for the + inputs to this node */ + QueryTreeList *q; + Oid relid; + TgNode* child; + Relation rel; + unsigned int len; + TupleDesc tupdesc; + + qList = NULL; + + if (n->nodeType == TG_INGRED_NODE) { + /* parse each ingredient node in turn */ + + elem = n->nodeElem; + switch (elem->srcLang) { + case TG_SQL: + { + /* for SQL ingredients, the SQL query is contained in the + 'src' field */ + +#ifdef DEBUG_RECIPE +elog(NOTICE,"calling parser with %s",elem->src); +#endif /* DEBUG_RECIPE */ + + parameterCount = getParamTypes(elem,typev); + + qList = parser(elem->src,typev,parameterCount); + + if (qList->len > 1) { + elog(NOTICE, + "tg_parseSubQuery: parser produced > 1 query tree"); + } + } + break; + case TG_C: + { + /* C ingredients are registered functions in postgres */ + /* we create a new query string by using the function name + (found in the 'src' field) and adding parameters to it + so if the function was FOOBAR and took in two arguments, + we would create a string + select FOOBAR($1,$2) + */ + char newquery[1000]; + + funcName = elem->src; + parameterCount = getParamTypes(elem,typev); + + if (parameterCount > 0) { + int i; + sprintf(newquery,"select %s($1",funcName); + for (i=1;ilen > 1) { + elog(NOTICE, + "tg_parseSubQuery: parser produced > 1 query tree"); + } + } + break; + case TG_RECIPE_GRAPH: + elog(NOTICE,"tg_parseSubQuery: can't parse recipe graph ingredients yet!"); + break; + case TG_COMPILED: + elog(NOTICE,"tg_parseSubQuery: can't parse compiled ingredients yet!"); + break; + default: + elog(NOTICE,"tg_parseSubQuery: unknown srcLang: %d",elem->srcLang); + } + + /* parse each of the subrecipes that are input to this node*/ + + if (n->inNodes->num > 0) { + inputQlist = malloc(sizeof(QueryTreeList)); + inputQlist->len = n->inNodes->num + 1 ; + inputQlist->qtrees = (Query**)malloc(inputQlist->len * sizeof(Query*)); + for (i=0;iinNodes->num;i++) { + + inputQlist->qtrees[i] = NULL; + if (n->inNodes->val[i]) { 
+ if (n->inNodes->val[i]->nodeType == TG_TEE_NODE) { + qList = tg_parseTeeNode(r,n->inNodes->val[i], + i,qList,teeInfo); + } + else + { /* input node is not a Tee */ + q = tg_parseSubQuery(r,n->inNodes->val[i], + teeInfo); + Assert (q->len == 1); + inputQlist->qtrees[i] = q->qtrees[0]; + } + } + } + + /* now, we have all the query trees from our input nodes */ + /* transform the original parse tree appropriately */ + tg_rewriteQuery(r,n,qList,inputQlist); + } + } + else if (n->nodeType == TG_EYE_NODE) { + /* if we hit an eye, we need to stop and make what we have + into a subrecipe query block*/ + elog(NOTICE,"tg_parseSubQuery: can't handle eye nodes yet"); + } + else if (n->nodeType == TG_TEE_NODE) { + /* if we hit a tee, check to see if the parsing has been done + for this tee already by the other parent */ + + rel = RelationNameGetRelation(n->nodeName); + if (RelationIsValid(rel)) { + /* this tee has already been visited, + no need to do any further processing */ + return NULL; + } else { + /* we need to process the child of the tee first, */ + child = n->inNodes->val[0]; + + if (child->nodeType == TG_TEE_NODE) { + /* nested Tee nodes */ + qList = tg_parseTeeNode(r,child,0,qList,teeInfo); + return qList; + } + + Assert (child != NULL); + + /* parse the input node */ + q = tg_parseSubQuery(r,child, teeInfo); + Assert (q->len == 1); + + /* add the parsed query to the main list of queries */ + qList = appendQlist(qList,q); + + /* need to create the tee table here */ + /* the tee table created is used both for materializing the values + at the tee node, and for parsing and optimization. + The optimization needs to have a real table before it will + consider scans on it */ + + /* first, find the type of the tuples being produced by the + tee. The type is the same as the output type of + the child node. + + NOTE: we are assuming that the child node only has a single + output here! */ + getParamTypes(child->nodeElem,typev); + + /* the output type is either a complex type, + (and is thus a relation) or is a simple type */ + + rel = RelationNameGetRelation(child->nodeElem->outTypes->val[0]); + + if (RelationIsValid(rel)) { + /* for complex types, create new relation with the same + tuple descriptor as the output table type*/ + len = length(q->qtrees[0]->targetList); + tupdesc = rel->rd_att; + + relid = heap_create(child->nodeElem->outTypes->val[0], + NULL, /* XXX */ + 'n', + DEFAULT_SMGR, + tupdesc); + } + else { + /* we have to create a relation with one attribute of + the simple base type. 
That attribute will have + an attr name of "result" */ + /*NOTE: ignore array types for the time being */ + + len = 1; + tupdesc = CreateTemplateTupleDesc(len); + + if ( !TupleDescInitEntry(tupdesc,1, + "result", + NULL, + 0, false)) { + elog(NOTICE,"tg_parseSubQuery: unexpected result from TupleDescInitEntry"); + } else { + relid = heap_create(child->nodeElem->outTypes->val[0], + NULL, /* XXX */ + 'n', + DEFAULT_SMGR, + tupdesc); + } + } + } + } + else if (n->nodeType == TG_RECIPE_NODE) { + elog(NOTICE,"tg_parseSubQuery: can't handle embedded recipes yet!"); + } else + elog (NOTICE, "unknown nodeType: %d", n->nodeType); + + return qList; +} + +/* + * OffsetVarAttno - + * recursively find all the var nodes with the specified varno + * and offset their varattno with the offset + * + * code is similar to OffsetVarNodes in rewriteManip.c + */ + +void +OffsetVarAttno(Node* node, int varno, int offset) +{ + if (node == NULL) return; + switch (nodeTag(node)) { + case T_TargetEntry: + { + TargetEntry *tle = (TargetEntry *)node; + OffsetVarAttno(tle->expr, varno, offset); + } + break; + case T_Expr: + { + Expr *expr = (Expr*)node; + OffsetVarAttno((Node*)expr->args, varno, offset); + } + break; + case T_Var: + { + Var *var = (Var*)node; + if (var->varno == varno) + var->varattno += offset; + } + break; + case T_List: + { + List *l; + + foreach(l, (List*)node) { + OffsetVarAttno(lfirst(l), varno, offset); + } + } + break; + default: + /* ignore the others */ + break; + } +} + +/* + * appendQlist + * add the contents of a QueryTreeList q2 to the end of the QueryTreeList + * q1 + * + * returns a new querytree list + */ + +QueryTreeList* +appendQlist(QueryTreeList *q1, QueryTreeList *q2) +{ + QueryTreeList* newq; + int i,j; + int newlen; + + if (q1 == NULL) + return q2; + + if (q2 == NULL) + return q1; + + newlen = q1->len + q2->len; + newq = (QueryTreeList*)malloc(sizeof(QueryTreeList)); + newq->len = newlen; + newq->qtrees = (Query**)malloc(newlen * sizeof(Query*)); + for (i=0;ilen;i++) + newq->qtrees[i] = q1->qtrees[i]; + for (j=0;jlen;j++) { + newq->qtrees[i + j] = q2->qtrees[j]; + } + return newq; +} + +/* + * appendTeeQuery + * + * modify the query field of the teeInfo list of the particular tee node + */ +static void +appendTeeQuery(TeeInfo *teeInfo, QueryTreeList *q, char* teeNodeName) +{ + int i; + + Assert(teeInfo); + + for (i=0;inum;i++) { + if ( strcmp(teeInfo->val[i].tpi_relName, teeNodeName) == 0) { + + Assert(q->len == 1); + teeInfo->val[i].tpi_parsetree = q->qtrees[0]; + return; + } + } + elog(NOTICE, "appendTeeQuery: teeNodeName '%s' not found in teeInfo"); +} + + + +/* + * replaceSeqScan + * replaces sequential scans of a specified relation with the tee plan + * the relation is specified by its index in the range table, rt_ind + * + * returns the modified plan + * the offset_attno is the offset that needs to be added to the parent's + * qual or targetlist because the child plan has been replaced with a tee node + */ +static void +replaceSeqScan(Plan* plan, Plan* parent, + int rt_ind, Plan* tplan) +{ + Scan* snode; + Tee* teePlan; + Result* newPlan; + + if (plan == NULL) { + return; + } + + if (plan->type == T_SeqScan) { + snode = (Scan*)plan; + if (snode->scanrelid == rt_ind) { + /* found the sequential scan that should be replaced + with the tplan. */ + /* we replace the plan, but we also need to modify its parent*/ + + /* replace the sequential scan with a Result node + the reason we use a result node is so that we get the proper + projection behavior. 
The Result node is simply (ab)used as + a projection node */ + + newPlan = makeNode(Result); + newPlan->plan.cost = 0.0; + newPlan->plan.state = (EState*)NULL; + newPlan->plan.targetlist = plan->targetlist; + newPlan->plan.lefttree = tplan; + newPlan->plan.righttree = NULL; + newPlan->resconstantqual = NULL; + newPlan->resstate = NULL; + + /* change all the varno's to 1*/ + ChangeVarNodes((Node*)newPlan->plan.targetlist, + snode->scanrelid, 1); + + if (parent) { + teePlan = (Tee*)tplan; + + if (parent->lefttree == plan) + parent->lefttree = (Plan*)newPlan; + else + parent->righttree = (Plan*)newPlan; + + + if (teePlan->leftParent == NULL) + teePlan->leftParent = (Plan*)newPlan; + else + teePlan->rightParent = (Plan*)newPlan; + +/* comment for now to test out executor-stuff + if (parent->state) { + ExecInitNode((Plan*)newPlan, parent->state, (Plan*)newPlan); + } +*/ + } + } + + } else { + if (plan->lefttree) { + replaceSeqScan(plan->lefttree, plan, rt_ind, tplan); + } + if (plan->righttree) { + replaceSeqScan(plan->righttree, plan, rt_ind, tplan); + } + } +} + +/* + * replaceTeeScans + * places the sequential scans of the Tee table with + * a connection to the actual tee plan node + */ +static Plan* +replaceTeeScans(Plan* plan, Query* parsetree, TeeInfo *teeInfo) +{ + + int i; + List* rtable; + RangeTblEntry *rte; + char prefix[5]; + int rt_ind; + Plan* tplan; + + rtable = parsetree->rtable; + if (rtable == NULL) + return plan; + + /* look through the range table for the tee relation entry, + that will give use the varno we need to detect which + sequential scans need to be replaced with tee nodes*/ + + rt_ind = 0; + while (rtable != NIL) { + rte = lfirst(rtable); + rtable = lnext(rtable); + rt_ind++; /* range table references in varno fields start w/ 1 */ + + /* look for the "tee_" prefix in the refname, + also check to see that the relname and the refname are the same + this should eliminate any user-specified table and leave + us with the tee table entries only*/ + if ((strlen(rte->refname) < 4) || + (strcmp (rte->relname, rte->refname) != 0)) + continue; + strncpy(prefix,rte->refname,4); + prefix[4] = '\0'; + if (strcmp(prefix,"tee_") == 0) { + /* okay, we found a tee node entry in the range table */ + + /* find the appropriate plan in the teeInfo list */ + tplan = NULL; + for (i=0;inum;i++) { + if (strcmp(teeInfo->val[i].tpi_relName, + rte->refname) == 0) { + tplan = teeInfo->val[i].tpi_plan; + } + } + if (tplan == NULL) { + elog(NOTICE, "replaceTeeScans didn't find the corresponding tee plan"); } + + /* replace the sequential scan node with that var number + with the tee plan node */ + replaceSeqScan(plan, NULL, rt_ind, tplan); + } + } + + return plan; +} + + + +#endif /* TIOGA */ diff --git a/src/backend/commands/recipe.h b/src/backend/commands/recipe.h new file mode 100644 index 00000000000..62fcc314a34 --- /dev/null +++ b/src/backend/commands/recipe.h @@ -0,0 +1,17 @@ +/*------------------------------------------------------------------------- + * + * recipe.h-- + * recipe handling routines + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: recipe.h,v 1.1.1.1 1996/07/09 06:21:21 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef RECIPE_H +#define RECIPE_H + +extern void beginRecipe(RecipeStmt* stmt); + +#endif /* RECIPE_H */ diff --git a/src/backend/commands/remove.c b/src/backend/commands/remove.c new file mode 100644 index 00000000000..95830c6cc08 --- /dev/null +++ 
b/src/backend/commands/remove.c @@ -0,0 +1,435 @@ +/*------------------------------------------------------------------------- + * + * remove.c-- + * POSTGRES remove (function | type | operator ) utilty code. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/Attic/remove.c,v 1.1.1.1 1996/07/09 06:21:22 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "c.h" + +#include "access/attnum.h" +#include "access/heapam.h" +#include "access/htup.h" +#include "access/skey.h" +#include "utils/builtins.h" +#include "utils/tqual.h" /* for NowTimeQual */ +#include "catalog/catname.h" +#include "commands/defrem.h" +#include "utils/elog.h" + +#include "miscadmin.h" + +#include "catalog/pg_aggregate.h" +#include "catalog/pg_language.h" +#include "catalog/pg_operator.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "utils/syscache.h" +#include "parser/catalog_utils.h" +#include "storage/bufmgr.h" +#include "fmgr.h" + +/* + * RemoveOperator -- + * Deletes an operator. + * + * Exceptions: + * BadArg if name is invalid. + * BadArg if type1 is invalid. + * "WARN" if operator nonexistant. + * ... + */ +void +RemoveOperator(char *operatorName, /* operator name */ + char *typeName1, /* first type name */ + char *typeName2) /* optional second type name */ +{ + Relation relation; + HeapScanDesc scan; + HeapTuple tup; + Oid typeId1 = InvalidOid; + Oid typeId2 = InvalidOid; + bool defined; + ItemPointerData itemPointerData; + Buffer buffer; + ScanKeyData operatorKey[3]; + char *userName; + + if (typeName1) { + typeId1 = TypeGet(typeName1, &defined); + if (!OidIsValid(typeId1)) { + elog(WARN, "RemoveOperator: type '%s' does not exist", typeName1); + return; + } + } + + if (typeName2) { + typeId2 = TypeGet(typeName2, &defined); + if (!OidIsValid(typeId2)) { + elog(WARN, "RemoveOperator: type '%s' does not exist", typeName2); + return; + } + } + + ScanKeyEntryInitialize(&operatorKey[0], 0x0, + Anum_pg_operator_oprname, + NameEqualRegProcedure, + PointerGetDatum(operatorName)); + + ScanKeyEntryInitialize(&operatorKey[1], 0x0, + Anum_pg_operator_oprleft, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(typeId1)); + + ScanKeyEntryInitialize(&operatorKey[2], 0x0, + Anum_pg_operator_oprright, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(typeId2)); + + relation = heap_openr(OperatorRelationName); + scan = heap_beginscan(relation, 0, NowTimeQual, 3, operatorKey); + tup = heap_getnext(scan, 0, &buffer); + if (HeapTupleIsValid(tup)) { +#ifndef NO_SECURITY + userName = GetPgUserName(); + if (!pg_ownercheck(userName, + (char *) ObjectIdGetDatum(tup->t_oid), + OPROID)) + elog(WARN, "RemoveOperator: operator '%s': permission denied", + operatorName); +#endif + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + heap_delete(relation, &itemPointerData); + } else { + if (OidIsValid(typeId1) && OidIsValid(typeId2)) { + elog(WARN, "RemoveOperator: binary operator '%s' taking '%s' and '%s' does not exist", + operatorName, + typeName1, + typeName2); + } else if (OidIsValid(typeId1)) { + elog(WARN, "RemoveOperator: right unary operator '%s' taking '%s' does not exist", + operatorName, + typeName1); + } else { + elog(WARN, "RemoveOperator: left unary operator '%s' taking '%s' does not exist", + operatorName, + typeName2); + } + } + heap_endscan(scan); + heap_close(relation); +} + +#ifdef NOTYET +/* + * this stuff is to support removing all reference to a 
type + * don't use it - pma 2/1/94 + */ +/* + * SingleOpOperatorRemove + * Removes all operators that have operands or a result of type 'typeOid'. + */ +static void +SingleOpOperatorRemove(Oid typeOid) +{ + Relation rdesc; + ScanKeyData key[3]; + HeapScanDesc sdesc; + HeapTuple tup; + ItemPointerData itemPointerData; + Buffer buffer; + static attnums[3] = { 7, 8, 9 }; /* left, right, return */ + register i; + + ScanKeyEntryInitialize(&key[0], + 0, 0, ObjectIdEqualRegProcedure, (Datum)typeOid); + rdesc = heap_openr(OperatorRelationName); + for (i = 0; i < 3; ++i) { + key[0].sk_attno = attnums[i]; + sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 1, key); + while (PointerIsValid(tup = heap_getnext(sdesc, 0, &buffer))) { + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + /* XXX LOCK not being passed */ + heap_delete(rdesc, &itemPointerData); + } + heap_endscan(sdesc); + } + heap_close(rdesc); +} + +/* + * AttributeAndRelationRemove + * Removes all entries in the attribute and relation relations + * that contain entries of type 'typeOid'. + * Currently nothing calls this code, it is untested. + */ +static void +AttributeAndRelationRemove(Oid typeOid) +{ + struct oidlist { + Oid reloid; + struct oidlist *next; + }; + struct oidlist *oidptr, *optr; + Relation rdesc; + ScanKeyData key[1]; + HeapScanDesc sdesc; + HeapTuple tup; + ItemPointerData itemPointerData; + Buffer buffer; + + /* + * Get the oid's of the relations to be removed by scanning the + * entire attribute relation. + * We don't need to remove the attributes here, + * because amdestroy will remove all attributes of the relation. + * XXX should check for duplicate relations + */ + + ScanKeyEntryInitialize(&key[0], + 0, 3, ObjectIdEqualRegProcedure, (Datum)typeOid); + + oidptr = (struct oidlist *) palloc(sizeof(*oidptr)); + oidptr->next = NULL; + optr = oidptr; + rdesc = heap_openr(AttributeRelationName); + sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 1, key); + while (PointerIsValid(tup = heap_getnext(sdesc, 0, &buffer))) { + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + optr->reloid = ((AttributeTupleForm)GETSTRUCT(tup))->attrelid; + optr->next = (struct oidlist *) palloc(sizeof(*oidptr)); + optr = optr->next; + } + optr->next = NULL; + heap_endscan(sdesc); + heap_close(rdesc); + + + ScanKeyEntryInitialize(&key[0], 0, + ObjectIdAttributeNumber, + ObjectIdEqualRegProcedure, (Datum)0); + optr = oidptr; + rdesc = heap_openr(RelationRelationName); + while (PointerIsValid((char *) optr->next)) { + key[0].sk_argument = (Datum) (optr++)->reloid; + sdesc = heap_beginscan(rdesc, 0, NowTimeQual, 1, key); + tup = heap_getnext(sdesc, 0, &buffer); + if (PointerIsValid(tup)) { + char *name; + + name = (((Form_pg_class)GETSTRUCT(tup))->relname).data; + heap_destroy(name); + } + } + heap_endscan(sdesc); + heap_close(rdesc); +} +#endif /* NOTYET */ + +/* + * TypeRemove + * Removes the type 'typeName' and all attributes and relations that + * use it. 
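+ * (In this version the routine below deletes the pg_type entry for the type itself and for its corresponding "array of" type, which it looks up via makeArrayTypeName(); the attribute/relation cleanup described above is still only in the #ifdef NOTYET code.)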
+ */ +void +RemoveType(char *typeName) /* type name to be removed */ +{ + Relation relation; + HeapScanDesc scan; + HeapTuple tup; + Oid typeOid; + ItemPointerData itemPointerData; + static ScanKeyData typeKey[1] = { + { 0, Anum_pg_type_typname, NameEqualRegProcedure } + }; + char *shadow_type; + char *userName; + +#ifndef NO_SECURITY + userName = GetPgUserName(); + if (!pg_ownercheck(userName, typeName, TYPNAME)) + elog(WARN, "RemoveType: type '%s': permission denied", + typeName); +#endif + + relation = heap_openr(TypeRelationName); + fmgr_info(typeKey[0].sk_procedure, &typeKey[0].sk_func, + &typeKey[0].sk_nargs); + + /* Delete the primary type */ + + typeKey[0].sk_argument = PointerGetDatum(typeName); + + scan = heap_beginscan(relation, 0, NowTimeQual, 1, typeKey); + tup = heap_getnext(scan, 0, (Buffer *) 0); + if (!HeapTupleIsValid(tup)) { + heap_endscan(scan); + heap_close(relation); + elog(WARN, "RemoveType: type '%s' does not exist", + typeName); + } + typeOid = tup->t_oid; + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + heap_delete(relation, &itemPointerData); + heap_endscan(scan); + + /* Now, Delete the "array of" that type */ + shadow_type = makeArrayTypeName(typeName); + typeKey[0].sk_argument = NameGetDatum(shadow_type); + + scan = heap_beginscan(relation, 0, NowTimeQual, + 1, (ScanKey) typeKey); + tup = heap_getnext(scan, 0, (Buffer *) 0); + + if (!HeapTupleIsValid(tup)) + { + elog(WARN, "RemoveType: type '%s': array stub not found", + typeName); + } + typeOid = tup->t_oid; + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + heap_delete(relation, &itemPointerData); + heap_endscan(scan); + + heap_close(relation); +} + +/* + * RemoveFunction -- + * Deletes a function. + * + * Exceptions: + * BadArg if name is invalid. + * "WARN" if function nonexistant. + * ... 
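+ * (The target function is identified by name plus its argument type OIDs: argList below is a zero-filled vector of eight OIDs, "opaque" arguments map to OID 0, and candidate pg_proc tuples are matched on pronargs and oid8eq().)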
+ */ +void +RemoveFunction(char *functionName, /* function name to be removed */ + int nargs, + List *argNameList /* list of TypeNames */) +{ + Relation relation; + HeapScanDesc scan; + HeapTuple tup; + Buffer buffer = InvalidBuffer; + bool bufferUsed = FALSE; + Oid argList[8]; + Form_pg_proc the_proc; + ItemPointerData itemPointerData; + static ScanKeyData key[3] = { + { 0, Anum_pg_proc_proname, NameEqualRegProcedure } + }; + char *userName; + char *typename; + int i; + + memset(argList, 0, 8 * sizeof(Oid)); + for (i=0; i<nargs; i++) { +/* typename = ((TypeName*)(lfirst(argNameList)))->name; */ + typename = strVal(lfirst(argNameList)); + argNameList = lnext(argNameList); + + if (strcmp(typename, "opaque") == 0) + argList[i] = 0; + else { + tup = SearchSysCacheTuple(TYPNAME, PointerGetDatum(typename), + 0,0,0); + + if (!HeapTupleIsValid(tup)) { + elog(WARN, "RemoveFunction: type '%s' not found",typename); + } + argList[i] = tup->t_oid; + } + } + + tup = SearchSysCacheTuple(PRONAME, PointerGetDatum(functionName), + Int32GetDatum(nargs), + PointerGetDatum(argList),0); + if (!HeapTupleIsValid(tup)) + func_error("RemoveFunction", functionName, nargs, (int*)argList); + +#ifndef NO_SECURITY + userName = GetPgUserName(); + if (!pg_func_ownercheck(userName, functionName, nargs, argList)) { + elog(WARN, "RemoveFunction: function '%s': permission denied", + functionName); + } +#endif + + key[0].sk_argument = PointerGetDatum(functionName); + + fmgr_info(key[0].sk_procedure, &key[0].sk_func, &key[0].sk_nargs); + + relation = heap_openr(ProcedureRelationName); + scan = heap_beginscan(relation, 0, NowTimeQual, 1, key); + + do { /* hope this is ok because it's indexed */ + if (bufferUsed) { + ReleaseBuffer(buffer); + bufferUsed = FALSE; + } + tup = heap_getnext(scan, 0, (Buffer *) &buffer); + if (!HeapTupleIsValid(tup)) + break; + bufferUsed = TRUE; + the_proc = (Form_pg_proc) GETSTRUCT(tup); + } while ( (namestrcmp(&(the_proc->proname), functionName) == 0) && + (the_proc->pronargs != nargs || + !oid8eq(&(the_proc->proargtypes[0]), &argList[0]))); + + + if (!HeapTupleIsValid(tup) || namestrcmp(&(the_proc->proname), + functionName) != 0) + { + heap_endscan(scan); + heap_close(relation); + func_error("RemoveFunction", functionName,nargs, (int*)argList); + } + + /* ok, function has been found */ + + if (the_proc->prolang == INTERNALlanguageId) + elog(WARN, "RemoveFunction: function \"%-.*s\" is built-in", + NAMEDATALEN, functionName); + + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + heap_delete(relation, &itemPointerData); + heap_endscan(scan); + heap_close(relation); +} + +void +RemoveAggregate(char *aggName) +{ + Relation relation; + HeapScanDesc scan; + HeapTuple tup; + ItemPointerData itemPointerData; + static ScanKeyData key[3] = { + { 0, Anum_pg_aggregate_aggname, NameEqualRegProcedure } + }; + + key[0].sk_argument = PointerGetDatum(aggName); + + fmgr_info(key[0].sk_procedure, &key[0].sk_func, &key[0].sk_nargs); + relation = heap_openr(AggregateRelationName); + scan = heap_beginscan(relation, 0, NowTimeQual, 1, key); + tup = heap_getnext(scan, 0, (Buffer *) 0); + if (!HeapTupleIsValid(tup)) { + heap_endscan(scan); + heap_close(relation); + elog(WARN, "RemoveAggregate: aggregate '%s' does not exist", + aggName); + } + ItemPointerCopy(&tup->t_ctid, &itemPointerData); + heap_delete(relation, &itemPointerData); + heap_endscan(scan); + heap_close(relation); +} diff --git a/src/backend/commands/rename.c b/src/backend/commands/rename.c new file mode 100644 index 00000000000..83dc8944eac --- /dev/null +++ b/src/backend/commands/rename.c @@ -0,0 +1,275 @@ 
+/*------------------------------------------------------------------------- + * + * rename.c-- + * renameatt() and renamerel() reside here. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/Attic/rename.c,v 1.1.1.1 1996/07/09 06:21:22 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include + +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "access/attnum.h" +#include "access/heapam.h" +#include "access/htup.h" +#include "access/relscan.h" +#include "access/skey.h" +#include "utils/builtins.h" +#include "utils/tqual.h" + +#include "catalog/catname.h" +#include "utils/syscache.h" +#include "catalog/indexing.h" +#include "catalog/catalog.h" + +#include "commands/copy.h" + +#include "executor/execdefs.h" /* for EXEC_{FOR,BACK,FDEBUG,BDEBUG} */ + +#include "storage/buf.h" +#include "storage/itemptr.h" + +#include "miscadmin.h" +#include "utils/portal.h" +#include "tcop/dest.h" +#include "commands/command.h" + +#include "utils/excid.h" +#include "utils/elog.h" +#include "utils/mcxt.h" +#include "utils/palloc.h" +#include "utils/rel.h" + +#include "catalog/pg_attribute.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_class.h" + +#include "optimizer/internal.h" +#include "optimizer/prep.h" /* for find_all_inheritors */ + +#ifndef NO_SECURITY +#include "utils/acl.h" +#include "utils/syscache.h" +#endif /* !NO_SECURITY */ + +/* + * renameatt - changes the name of a attribute in a relation + * + * Attname attribute is changed in attribute catalog. + * No record of the previous attname is kept (correct?). + * + * get proper reldesc from relation catalog (if not arg) + * scan attribute catalog + * for name conflict (within rel) + * for original attribute (if not arg) + * modify attname in attribute tuple + * insert modified attribute in attribute catalog + * delete original attribute from attribute catalog + * + * XXX Renaming an indexed attribute must (eventually) also change + * the attribute name in the associated indexes. + */ +void +renameatt(char *relname, + char *oldattname, + char *newattname, + char *userName, + int recurse) +{ + Relation relrdesc, attrdesc; + HeapTuple reltup, oldatttup, newatttup; + ItemPointerData oldTID; + Relation idescs[Num_pg_attr_indices]; + + /* + * permissions checking. this would normally be done in utility.c, + * but this particular routine is recursive. + * + * normally, only the owner of a class can change its schema. + */ + if (IsSystemRelationName(relname)) + elog(WARN, "renameatt: class \"%-.*s\" is a system catalog", + NAMEDATALEN, relname); +#ifndef NO_SECURITY + if (!IsBootstrapProcessingMode() && + !pg_ownercheck(userName, relname, RELNAME)) + elog(WARN, "renameatt: you do not own class \"%-.*s\"", + NAMEDATALEN, relname); +#endif + + /* + * if the 'recurse' flag is set then we are supposed to rename this + * attribute in all classes that inherit from 'relname' (as well as + * in 'relname'). + * + * any permissions or problems with duplicate attributes will cause + * the whole transaction to abort, which is what we want -- all or + * nothing. 
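+ * (For example, when 'recurse' is set the code below walks every relid returned by find_all_inheritors() and calls renameatt() on each child with recurse = 0, so the inheritance hierarchy is expanded only once, at the top level.)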
+ */ + if (recurse) { + Oid myrelid, childrelid; + List *child, *children; + + relrdesc = heap_openr(relname); + if (!RelationIsValid(relrdesc)) { + elog(WARN, "renameatt: unknown relation: \"%-.*s\"", + NAMEDATALEN, relname); + } + myrelid = relrdesc->rd_id; + heap_close(relrdesc); + + /* this routine is actually in the planner */ + children = find_all_inheritors(lconsi(myrelid, NIL), NIL); + + + /* + * find_all_inheritors does the recursive search of the + * inheritance hierarchy, so all we have to do is process + * all of the relids in the list that it returns. + */ + foreach (child, children) { + char *childname; + + childrelid = lfirsti(child); + if (childrelid == myrelid) + continue; + relrdesc = heap_open(childrelid); + if (!RelationIsValid(relrdesc)) { + elog(WARN, "renameatt: can't find catalog entry for inheriting class with oid %d", + childrelid); + } + childname = (relrdesc->rd_rel->relname).data; + heap_close(relrdesc); + renameatt(childname, oldattname, newattname, + userName, 0); /* no more recursion! */ + } + } + + relrdesc = heap_openr(RelationRelationName); + reltup = ClassNameIndexScan(relrdesc, relname); + if (!PointerIsValid(reltup)) { + heap_close(relrdesc); + elog(WARN, "renameatt: relation \"%-.*s\" nonexistent", + NAMEDATALEN, relname); + return; + } + heap_close(relrdesc); + + attrdesc = heap_openr(AttributeRelationName); + oldatttup = AttributeNameIndexScan(attrdesc, reltup->t_oid, oldattname); + if (!PointerIsValid(oldatttup)) { + heap_close(attrdesc); + elog(WARN, "renameatt: attribute \"%-.*s\" nonexistent", + NAMEDATALEN, oldattname); + } + if (((AttributeTupleForm ) GETSTRUCT(oldatttup))->attnum < 0) { + elog(WARN, "renameatt: system attribute \"%-.*s\" not renamed", + NAMEDATALEN, oldattname); + } + + newatttup = AttributeNameIndexScan(attrdesc, reltup->t_oid, newattname); + if (PointerIsValid(newatttup)) { + pfree(oldatttup); + heap_close(attrdesc); + elog(WARN, "renameatt: attribute \"%-.*s\" exists", + NAMEDATALEN, newattname); + } + + namestrcpy(&(((AttributeTupleForm)(GETSTRUCT(oldatttup)))->attname), + newattname); + oldTID = oldatttup->t_ctid; + + /* insert "fixed" tuple */ + (void) heap_replace(attrdesc, &oldTID, oldatttup); + + /* keep system catalog indices current */ + CatalogOpenIndices(Num_pg_attr_indices, Name_pg_attr_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_attr_indices, attrdesc, oldatttup); + CatalogCloseIndices(Num_pg_attr_indices, idescs); + + heap_close(attrdesc); + pfree(oldatttup); +} + +/* + * renamerel - change the name of a relation + * + * Relname attribute is changed in relation catalog. + * No record of the previous relname is kept (correct?). + * + * scan relation catalog + * for name conflict + * for original relation (if not arg) + * modify relname in relation tuple + * insert modified relation in relation catalog + * delete original relation from relation catalog + * + * XXX Will currently lose track of a relation if it is unable to + * properly replace the new relation tuple. 
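+ * (Note that the on-disk file is renamed with rename(2) before the pg_class tuple is replaced, so a failure of the file rename aborts the command before the catalog is changed.)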
+ */ +void +renamerel(char oldrelname[], char newrelname[]) +{ + Relation relrdesc; /* for RELATION relation */ + HeapTuple oldreltup, newreltup; + ItemPointerData oldTID; + char oldpath[MAXPGPATH], newpath[MAXPGPATH]; + Relation idescs[Num_pg_class_indices]; + + if (IsSystemRelationName(oldrelname)) { + elog(WARN, "renamerel: system relation \"%-.*s\" not renamed", + NAMEDATALEN, oldrelname); + return; + } + if (IsSystemRelationName(newrelname)) { + elog(WARN, "renamerel: Illegal class name: \"%-.*s\" -- pg_ is reserved for system catalogs", + NAMEDATALEN, newrelname); + return; + } + + relrdesc = heap_openr(RelationRelationName); + oldreltup = ClassNameIndexScan(relrdesc, oldrelname); + + if (!PointerIsValid(oldreltup)) { + heap_close(relrdesc); + elog(WARN, "renamerel: relation \"%-.*s\" does not exist", + NAMEDATALEN, oldrelname); + } + + newreltup = ClassNameIndexScan(relrdesc, newrelname); + if (PointerIsValid(newreltup)) { + pfree(oldreltup); + heap_close(relrdesc); + elog(WARN, "renamerel: relation \"%-.*s\" exists", + NAMEDATALEN, newrelname); + } + + /* rename the directory first, so if this fails the rename's not done */ + (void) strcpy(oldpath, relpath(oldrelname)); + (void) strcpy(newpath, relpath(newrelname)); + if (rename(oldpath, newpath) < 0) + elog(WARN, "renamerel: unable to rename file: %m"); + + memmove((char *) (((Form_pg_class) GETSTRUCT(oldreltup))->relname.data), + newrelname, + NAMEDATALEN); + oldTID = oldreltup->t_ctid; + + /* insert fixed rel tuple */ + (void) heap_replace(relrdesc, &oldTID, oldreltup); + + /* keep the system catalog indices current */ + CatalogOpenIndices(Num_pg_class_indices, Name_pg_class_indices, idescs); + CatalogIndexInsert(idescs, Num_pg_class_indices, relrdesc, oldreltup); + CatalogCloseIndices(Num_pg_class_indices, idescs); + + pfree(oldreltup); + heap_close(relrdesc); +} diff --git a/src/backend/commands/rename.h b/src/backend/commands/rename.h new file mode 100644 index 00000000000..c3889e12f89 --- /dev/null +++ b/src/backend/commands/rename.h @@ -0,0 +1,24 @@ +/*------------------------------------------------------------------------- + * + * rename.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: rename.h,v 1.1.1.1 1996/07/09 06:21:22 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef RENAME_H +#define RENAME_H + +extern void renameatt(char *relname, + char *oldattname, + char *newattname, + char *userName, int recurse); + +extern void renamerel(char *oldrelname, + char *newrelname); + +#endif /* RENAME_H */ diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c new file mode 100644 index 00000000000..7e1514cd2a3 --- /dev/null +++ b/src/backend/commands/vacuum.c @@ -0,0 +1,853 @@ +/*------------------------------------------------------------------------- + * + * vacuum.c-- + * the postgres vacuum cleaner + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.1.1.1 1996/07/09 06:21:22 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include + +#include "postgres.h" +#include "utils/portal.h" + +#include "access/genam.h" +#include "access/heapam.h" +#include "access/xact.h" +#include "storage/bufmgr.h" +#include "access/transam.h" +#include "utils/tqual.h" +#include "access/htup.h" + +#include "catalog/pg_index.h" +#include "catalog/catname.h" +#include 
"catalog/pg_class.h" +#include "catalog/pg_proc.h" + +#include "storage/fd.h" /* for O_ */ +#include "storage/itemid.h" +#include "storage/bufmgr.h" +#include "storage/bufpage.h" +#include "storage/smgr.h" + +#include "utils/elog.h" +#include "utils/mcxt.h" +#include "utils/palloc.h" + +#include "commands/vacuum.h" + +bool VacuumRunning = false; + +/* non-export function prototypes */ +static void _vc_init(char *vacrel); +static void _vc_shutdown(char *vacrel); +static void _vc_vacuum(char *vacrel); +static VRelList _vc_getrels(Portal p, char *vacrel); +static void _vc_vacone(Portal p, VRelList curvrl); +static void _vc_vacheap(Portal p, VRelList curvrl, Relation onerel); +static void _vc_vacindices(VRelList curvrl, Relation onerel); +static void _vc_vaconeind(VRelList curvrl, Relation indrel); +static void _vc_updstats(Oid relid, int npages, int ntuples, bool hasindex); +static void _vc_setpagelock(Relation rel, BlockNumber blkno); +static bool _vc_ontidlist(ItemPointer itemptr, VTidList tidlist); +static void _vc_reaptid(Portal p, VRelList curvrl, BlockNumber blkno, + OffsetNumber offnum); +static void _vc_free(Portal p, VRelList vrl); +static Relation _vc_getarchrel(Relation heaprel); +static void _vc_archive(Relation archrel, HeapTuple htup); +static bool _vc_isarchrel(char *rname); + +void +vacuum(char *vacrel) +{ + /* initialize vacuum cleaner */ + _vc_init(vacrel); + + /* vacuum the database */ + _vc_vacuum(vacrel); + + /* clean up */ + _vc_shutdown(vacrel); +} + +/* + * _vc_init(), _vc_shutdown() -- start up and shut down the vacuum cleaner. + * + * We run exactly one vacuum cleaner at a time. We use the file system + * to guarantee an exclusive lock on vacuuming, since a single vacuum + * cleaner instantiation crosses transaction boundaries, and we'd lose + * postgres-style locks at the end of every transaction. + * + * The strangeness with committing and starting transactions in the + * init and shutdown routines is due to the fact that the vacuum cleaner + * is invoked via a sql command, and so is already executing inside + * a transaction. We need to leave ourselves in a predictable state + * on entry and exit to the vacuum cleaner. We commit the transaction + * started in PostgresMain() inside _vc_init(), and start one in + * _vc_shutdown() to match the commit waiting for us back in + * PostgresMain(). + */ +static void +_vc_init(char *vacrel) +{ + int fd; + + if ((fd = open("pg_vlock", O_CREAT|O_EXCL, 0600)) < 0) + elog(WARN, "can't create lock file -- another vacuum cleaner running?"); + + close(fd); + + /* + * By here, exclusive open on the lock file succeeded. If we abort + * for any reason during vacuuming, we need to remove the lock file. + * This global variable is checked in the transaction manager on xact + * abort, and the routine vc_abort() is called if necessary. + */ + + VacuumRunning = true; + + /* matches the StartTransaction in PostgresMain() */ + CommitTransactionCommand(); +} + +static void +_vc_shutdown(char *vacrel) +{ + /* on entry, not in a transaction */ + if (unlink("pg_vlock") < 0) + elog(WARN, "vacuum: can't destroy lock file!"); + + /* okay, we're done */ + VacuumRunning = false; + + /* matches the CommitTransaction in PostgresMain() */ + StartTransactionCommand(); +} + +void +vc_abort() +{ + /* on abort, remove the vacuum cleaner lock file */ + (void) unlink("pg_vlock"); + + VacuumRunning = false; +} + +/* + * _vc_vacuum() -- vacuum the database. 
+ * + * This routine builds a list of relations to vacuum, and then calls + * code that vacuums them one at a time. We are careful to vacuum each + * relation in a separate transaction in order to avoid holding too many + * locks at one time. + */ +static void +_vc_vacuum(char *vacrel) +{ + VRelList vrl, cur; + char *pname; + Portal p; + + /* + * Create a portal for safe memory across transctions. We need to + * palloc the name space for it because our hash function expects + * the name to be on a longword boundary. CreatePortal copies the + * name to safe storage for us. + */ + + pname = (char *) palloc(strlen(VACPNAME) + 1); + strcpy(pname, VACPNAME); + p = CreatePortal(pname); + pfree(pname); + + /* get list of relations */ + vrl = _vc_getrels(p, vacrel); + + /* vacuum each heap relation */ + for (cur = vrl; cur != (VRelList) NULL; cur = cur->vrl_next) + _vc_vacone(p, cur); + + _vc_free(p, vrl); + + PortalDestroy(&p); +} + +static VRelList +_vc_getrels(Portal p, char *vacrel) +{ + Relation pgclass; + TupleDesc pgcdesc; + HeapScanDesc pgcscan; + HeapTuple pgctup; + Buffer buf; + PortalVariableMemory portalmem; + MemoryContext old; + VRelList vrl, cur; + Datum d; + char *rname; + int16 smgrno; + bool n; + ScanKeyData pgckey; + + StartTransactionCommand(); + + if (vacrel) { + ScanKeyEntryInitialize(&pgckey, 0x0, Anum_pg_class_relname, + NameEqualRegProcedure, + PointerGetDatum(vacrel)); + } else { + ScanKeyEntryInitialize(&pgckey, 0x0, Anum_pg_class_relkind, + CharacterEqualRegProcedure, CharGetDatum('r')); + } + + portalmem = PortalGetVariableMemory(p); + vrl = (VRelList) NULL; + + pgclass = heap_openr(RelationRelationName); + pgcdesc = RelationGetTupleDescriptor(pgclass); + + pgcscan = heap_beginscan(pgclass, false, NowTimeQual, 1, &pgckey); + + while (HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &buf))) { + + /* + * We have to be careful not to vacuum the archive (since it + * already contains vacuumed tuples), and not to vacuum + * relations on write-once storage managers like the Sony + * jukebox at Berkeley. + */ + + d = (Datum) heap_getattr(pgctup, buf, Anum_pg_class_relname, + pgcdesc, &n); + rname = (char*)d; + + /* skip archive relations */ + if (_vc_isarchrel(rname)) { + ReleaseBuffer(buf); + continue; + } + + d = (Datum) heap_getattr(pgctup, buf, Anum_pg_class_relsmgr, + pgcdesc, &n); + smgrno = DatumGetInt16(d); + + /* skip write-once storage managers */ + if (smgriswo(smgrno)) { + ReleaseBuffer(buf); + continue; + } + + /* get a relation list entry for this guy */ + old = MemoryContextSwitchTo((MemoryContext)portalmem); + if (vrl == (VRelList) NULL) { + vrl = cur = (VRelList) palloc(sizeof(VRelListData)); + } else { + cur->vrl_next = (VRelList) palloc(sizeof(VRelListData)); + cur = cur->vrl_next; + } + (void) MemoryContextSwitchTo(old); + + cur->vrl_relid = pgctup->t_oid; + cur->vrl_attlist = (VAttList) NULL; + cur->vrl_tidlist = (VTidList) NULL; + cur->vrl_npages = cur->vrl_ntups = 0; + cur->vrl_hasindex = false; + cur->vrl_next = (VRelList) NULL; + + /* wei hates it if you forget to do this */ + ReleaseBuffer(buf); + } + + heap_close(pgclass); + heap_endscan(pgcscan); + + CommitTransactionCommand(); + + return (vrl); +} + +/* + * _vc_vacone() -- vacuum one heap relation + * + * This routine vacuums a single heap, cleans out its indices, and + * updates its statistics npages and ntuples statistics. + * + * Doing one heap at a time incurs extra overhead, since we need to + * check that the heap exists again just before we vacuum it. 
The + * reason that we do this is so that vacuuming can be spread across + * many small transactions. Otherwise, two-phase locking would require + * us to lock the entire database during one pass of the vacuum cleaner. + */ +static void +_vc_vacone(Portal p, VRelList curvrl) +{ + Relation pgclass; + TupleDesc pgcdesc; + HeapTuple pgctup; + Buffer pgcbuf; + HeapScanDesc pgcscan; + Relation onerel; + ScanKeyData pgckey; + + StartTransactionCommand(); + + ScanKeyEntryInitialize(&pgckey, 0x0, ObjectIdAttributeNumber, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(curvrl->vrl_relid)); + + pgclass = heap_openr(RelationRelationName); + pgcdesc = RelationGetTupleDescriptor(pgclass); + pgcscan = heap_beginscan(pgclass, false, NowTimeQual, 1, &pgckey); + + /* + * Race condition -- if the pg_class tuple has gone away since the + * last time we saw it, we don't need to vacuum it. + */ + + if (!HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &pgcbuf))) { + heap_endscan(pgcscan); + heap_close(pgclass); + CommitTransactionCommand(); + return; + } + + /* now open the class and vacuum it */ + onerel = heap_open(curvrl->vrl_relid); + + /* we require the relation to be locked until the indices are cleaned */ + RelationSetLockForWrite(onerel); + + /* vacuum it */ + _vc_vacheap(p, curvrl, onerel); + + /* if we vacuumed any heap tuples, vacuum the indices too */ + if (curvrl->vrl_tidlist != (VTidList) NULL) + _vc_vacindices(curvrl, onerel); + else + curvrl->vrl_hasindex = onerel->rd_rel->relhasindex; + + /* all done with this class */ + heap_close(onerel); + heap_endscan(pgcscan); + heap_close(pgclass); + + /* update statistics in pg_class */ + _vc_updstats(curvrl->vrl_relid, curvrl->vrl_npages, curvrl->vrl_ntups, + curvrl->vrl_hasindex); + + CommitTransactionCommand(); +} + +/* + * _vc_vacheap() -- vacuum an open heap relation + * + * This routine sets commit times, vacuums dead tuples, cleans up + * wasted space on the page, and maintains statistics on the number + * of live tuples in a heap. In addition, it records the tids of + * all tuples removed from the heap for any reason. These tids are + * used in a scan of indices on the relation to get rid of dead + * index tuples. + */ +static void +_vc_vacheap(Portal p, VRelList curvrl, Relation onerel) +{ + int nblocks, blkno; + ItemId itemid; + HeapTuple htup; + Buffer buf; + Page page; + OffsetNumber offnum, maxoff; + Relation archrel; + bool isarchived; + int nvac; + int ntups; + bool pgchanged, tupgone; + AbsoluteTime purgetime, expiretime; + RelativeTime preservetime; + + nvac = 0; + ntups = 0; + nblocks = RelationGetNumberOfBlocks(onerel); + + { + char *relname; + relname = (RelationGetRelationName(onerel))->data; + + if ( (strlen(relname) > 4) && + relname[0] == 'X' && + relname[1] == 'i' && + relname[2] == 'n' && + (relname[3] == 'v' || relname[3] == 'x')) + return; + } + + + /* if the relation has an archive, open it */ + if (onerel->rd_rel->relarch != 'n') { + isarchived = true; + archrel = _vc_getarchrel(onerel); + } else + isarchived = false; + + /* don't vacuum large objects for now. 
+ something breaks when we do*/ + { + char *relname; + relname = (RelationGetRelationName(onerel))->data; + + if ( (strlen(relname) > 4) && + relname[0] == 'X' && + relname[1] == 'i' && + relname[2] == 'n' && + (relname[3] == 'v' || relname[3] == 'x')) + return; + } + + /* calculate the purge time: tuples that expired before this time + will be archived or deleted */ + purgetime = GetCurrentTransactionStartTime(); + expiretime = (AbsoluteTime)onerel->rd_rel->relexpires; + preservetime = (RelativeTime)onerel->rd_rel->relpreserved; + + if (RelativeTimeIsValid(preservetime) && (preservetime)) { + purgetime -= preservetime; + if (AbsoluteTimeIsBackwardCompatiblyValid(expiretime) && + expiretime > purgetime) + purgetime = expiretime; + } + + else if (AbsoluteTimeIsBackwardCompatiblyValid(expiretime)) + purgetime = expiretime; + + for (blkno = 0; blkno < nblocks; blkno++) { + buf = ReadBuffer(onerel, blkno); + page = BufferGetPage(buf); + + if (PageIsEmpty(page)) { + ReleaseBuffer(buf); + continue; + } + + pgchanged = false; + maxoff = PageGetMaxOffsetNumber(page); + for (offnum = FirstOffsetNumber; + offnum <= maxoff; + offnum = OffsetNumberNext(offnum)) { + itemid = PageGetItemId(page, offnum); + + if (!ItemIdIsUsed(itemid)) + continue; + + htup = (HeapTuple) PageGetItem(page, itemid); + tupgone = false; + + if (!AbsoluteTimeIsBackwardCompatiblyValid(htup->t_tmin) && + TransactionIdIsValid((TransactionId)htup->t_xmin)) { + + if (TransactionIdDidAbort(htup->t_xmin)) { + _vc_reaptid(p, curvrl, blkno, offnum); + pgchanged = true; + tupgone = true; + } else if (TransactionIdDidCommit(htup->t_xmin)) { + htup->t_tmin = TransactionIdGetCommitTime(htup->t_xmin); + pgchanged = true; + } + } + + if (TransactionIdIsValid((TransactionId)htup->t_xmax)) { + if (TransactionIdDidAbort(htup->t_xmax)) { + StoreInvalidTransactionId(&(htup->t_xmax)); + pgchanged = true; + } else if (TransactionIdDidCommit(htup->t_xmax)) { + if (!AbsoluteTimeIsBackwardCompatiblyReal(htup->t_tmax)) { + + htup->t_tmax = TransactionIdGetCommitTime(htup->t_xmax); + pgchanged = true; + } + + /* + * Reap the dead tuple if its expiration time is + * before purgetime. + */ + + if (!tupgone && htup->t_tmax < purgetime) { + _vc_reaptid(p, curvrl, blkno, offnum); + tupgone = true; + pgchanged = true; + } + } + } + + if (tupgone) { + ItemId lpp = &(((PageHeader) page)->pd_linp[offnum - 1]); + + /* write the tuple to the archive, if necessary */ + if (isarchived) + _vc_archive(archrel, htup); + + /* mark it unused */ + lpp->lp_flags &= ~LP_USED; + + ++nvac; + } else { + ntups++; + } + } + + if (pgchanged) { + PageRepairFragmentation(page); + WriteBuffer(buf); + } else { + ReleaseBuffer(buf); + } + } + + if (isarchived) + heap_close(archrel); + + /* save stats in the rel list for use later */ + curvrl->vrl_ntups = ntups; + curvrl->vrl_npages = nblocks; +} + +/* + * _vc_vacindices() -- vacuum all the indices for a particular heap relation. + * + * On entry, curvrl points at the relation currently being vacuumed. + * We already have a write lock on the relation, so we don't need to + * worry about anyone building an index on it while we're doing the + * vacuuming. The tid list for curvrl is sorted in reverse tid order: + * that is, tids on higher page numbers are before those on lower page + * numbers, and tids high on the page are before those low on the page. + * We use this ordering to cut down the search cost when we look at an + * index entry. + * + * We're executing inside the transaction that vacuumed the heap. 
+ */ +static void +_vc_vacindices(VRelList curvrl, Relation onerel) +{ + Relation pgindex; + TupleDesc pgidesc; + HeapTuple pgitup; + HeapScanDesc pgiscan; + Buffer buf; + Relation indrel; + Oid indoid; + Datum d; + bool n; + int nindices; + ScanKeyData pgikey; + + /* see if we can dodge doing any work at all */ + if (!(onerel->rd_rel->relhasindex)) + return; + + nindices = 0; + + /* prepare a heap scan on the pg_index relation */ + pgindex = heap_openr(IndexRelationName); + pgidesc = RelationGetTupleDescriptor(pgindex); + + ScanKeyEntryInitialize(&pgikey, 0x0, Anum_pg_index_indrelid, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(curvrl->vrl_relid)); + + pgiscan = heap_beginscan(pgindex, false, NowTimeQual, 1, &pgikey); + + /* vacuum all the indices */ + while (HeapTupleIsValid(pgitup = heap_getnext(pgiscan, 0, &buf))) { + d = (Datum) heap_getattr(pgitup, buf, Anum_pg_index_indexrelid, + pgidesc, &n); + indoid = DatumGetObjectId(d); + indrel = index_open(indoid); + _vc_vaconeind(curvrl, indrel); + heap_close(indrel); + nindices++; + } + + heap_endscan(pgiscan); + heap_close(pgindex); + + if (nindices > 0) + curvrl->vrl_hasindex = true; + else + curvrl->vrl_hasindex = false; +} + +/* + * _vc_vaconeind() -- vacuum one index relation. + * + * Curvrl is the VRelList entry for the heap we're currently vacuuming. + * It's locked. The vrl_tidlist entry in curvrl is the list of deleted + * heap tids, sorted in reverse (page, offset) order. Onerel is an + * index relation on the vacuumed heap. We don't set locks on the index + * relation here, since the indexed access methods support locking at + * different granularities. We let them handle it. + * + * Finally, we arrange to update the index relation's statistics in + * pg_class. + */ +static void +_vc_vaconeind(VRelList curvrl, Relation indrel) +{ + RetrieveIndexResult res; + IndexScanDesc iscan; + ItemPointer heapptr; + int nvac; + int nitups; + int nipages; + + /* walk through the entire index */ + iscan = index_beginscan(indrel, false, 0, (ScanKey) NULL); + nvac = 0; + nitups = 0; + + while ((res = index_getnext(iscan, ForwardScanDirection)) + != (RetrieveIndexResult) NULL) { + heapptr = &res->heap_iptr; + + if (_vc_ontidlist(heapptr, curvrl->vrl_tidlist)) { +#if 0 + elog(DEBUG, "<%x,%x> -> <%x,%x>", + ItemPointerGetBlockNumber(&(res->index_iptr)), + ItemPointerGetOffsetNumber(&(res->index_iptr)), + ItemPointerGetBlockNumber(&(res->heap_iptr)), + ItemPointerGetOffsetNumber(&(res->heap_iptr))); +#endif + ++nvac; + index_delete(indrel, &res->index_iptr); + } else { + nitups++; + } + + /* be tidy */ + pfree(res); + } + + index_endscan(iscan); + + /* now update statistics in pg_class */ + nipages = RelationGetNumberOfBlocks(indrel); + _vc_updstats(indrel->rd_id, nipages, nitups, false); +} + +/* + * _vc_updstats() -- update pg_class statistics for one relation + * + * This routine works for both index and heap relation entries in + * pg_class. We violate no-overwrite semantics here by storing new + * values for ntuples, npages, and hasindex directly in the pg_class + * tuple that's already on the page. The reason for this is that if + * we updated these tuples in the usual way, then every tuple in pg_class + * would be replaced every day. This would make planning and executing + * historical queries very expensive. 
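+ * (Concretely, the code below locks the page holding the pg_class tuple, overwrites reltuples, relpages and relhasindex in place, and flushes the buffer with WriteNoReleaseBuffer() rather than forming a new tuple version.)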
+ */ +static void +_vc_updstats(Oid relid, int npages, int ntuples, bool hasindex) +{ + Relation rd; + HeapScanDesc sdesc; + HeapTuple tup; + Buffer buf; + Form_pg_class pgcform; + ScanKeyData skey; + + /* + * update number of tuples and number of pages in pg_class + */ + ScanKeyEntryInitialize(&skey, 0x0, ObjectIdAttributeNumber, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(relid)); + + rd = heap_openr(RelationRelationName); + sdesc = heap_beginscan(rd, false, NowTimeQual, 1, &skey); + + if (!HeapTupleIsValid(tup = heap_getnext(sdesc, 0, &buf))) + elog(WARN, "pg_class entry for relid %d vanished during vacuuming", + relid); + + /* overwrite the existing statistics in the tuple */ + _vc_setpagelock(rd, BufferGetBlockNumber(buf)); + pgcform = (Form_pg_class) GETSTRUCT(tup); + pgcform->reltuples = ntuples; + pgcform->relpages = npages; + pgcform->relhasindex = hasindex; + + /* XXX -- after write, should invalidate relcache in other backends */ + WriteNoReleaseBuffer(buf); + + /* that's all, folks */ + heap_endscan(sdesc); + heap_close(rd); + +} + +static void _vc_setpagelock(Relation rel, BlockNumber blkno) +{ + ItemPointerData itm; + + ItemPointerSet(&itm, blkno, 1); + + RelationSetLockForWritePage(rel, &itm); +} + +/* + * _vc_ontidlist() -- is a particular tid on the supplied tid list? + * + * Tidlist is sorted in reverse (page, offset) order. + */ +static bool +_vc_ontidlist(ItemPointer itemptr, VTidList tidlist) +{ + BlockNumber ibkno; + OffsetNumber ioffno; + ItemPointer check; + BlockNumber ckbkno; + OffsetNumber ckoffno; + + ibkno = ItemPointerGetBlockNumber(itemptr); + ioffno = ItemPointerGetOffsetNumber(itemptr); + + while (tidlist != (VTidList) NULL) { + check = &(tidlist->vtl_tid); + ckbkno = ItemPointerGetBlockNumber(check); + ckoffno = ItemPointerGetOffsetNumber(check); + + /* see if we've looked far enough down the list */ + if ((ckbkno < ibkno) || (ckbkno == ibkno && ckoffno < ioffno)) + return (false); + + /* see if we have a match */ + if (ckbkno == ibkno && ckoffno == ioffno) + return (true); + + /* check next */ + tidlist = tidlist->vtl_next; + } + + /* ran off the end of the list without finding a match */ + return (false); +} + +/* + * _vc_reaptid() -- save a tid on the list of reaped tids for the current + * entry on the vacuum relation list. + * + * As a side effect of the way that the vacuuming loop for a given + * relation works, the tids of vacuumed tuples wind up in reverse + * order in the list -- highest tid on a page is first, and higher + * pages come before lower pages. This is important later when we + * vacuum the indices, as it gives us a way of stopping the search + * for a tid if we notice we've passed the page it would be on. 
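+ * For example, reaping tids in heap-scan order (2,8), (5,1), (5,3) leaves the list as (5,3), (5,1), (2,8); a later _vc_ontidlist() probe for (4,2) can stop as soon as it reaches (2,8), because block 2 is already below block 4.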
+ */ +static void +_vc_reaptid(Portal p, + VRelList curvrl, + BlockNumber blkno, + OffsetNumber offnum) +{ + PortalVariableMemory pmem; + MemoryContext old; + VTidList newvtl; + + /* allocate a VTidListData entry in the portal memory context */ + pmem = PortalGetVariableMemory(p); + old = MemoryContextSwitchTo((MemoryContext) pmem); + newvtl = (VTidList) palloc(sizeof(VTidListData)); + MemoryContextSwitchTo(old); + + /* fill it in */ + ItemPointerSet(&(newvtl->vtl_tid), blkno, offnum); + newvtl->vtl_next = curvrl->vrl_tidlist; + curvrl->vrl_tidlist = newvtl; +} + +static void +_vc_free(Portal p, VRelList vrl) +{ + VRelList p_vrl; + VAttList p_val, val; + VTidList p_vtl, vtl; + MemoryContext old; + PortalVariableMemory pmem; + + pmem = PortalGetVariableMemory(p); + old = MemoryContextSwitchTo((MemoryContext)pmem); + + while (vrl != (VRelList) NULL) { + + /* free attribute list */ + val = vrl->vrl_attlist; + while (val != (VAttList) NULL) { + p_val = val; + val = val->val_next; + pfree(p_val); + } + + /* free tid list */ + vtl = vrl->vrl_tidlist; + while (vtl != (VTidList) NULL) { + p_vtl = vtl; + vtl = vtl->vtl_next; + pfree(p_vtl); + } + + /* free rel list entry */ + p_vrl = vrl; + vrl = vrl->vrl_next; + pfree(p_vrl); + } + + (void) MemoryContextSwitchTo(old); +} + +/* + * _vc_getarchrel() -- open the archive relation for a heap relation + * + * The archive relation is named 'a,XXXXX' for the heap relation + * whose relid is XXXXX. + */ + +#define ARCHIVE_PREFIX "a," + +static Relation +_vc_getarchrel(Relation heaprel) +{ + Relation archrel; + char *archrelname; + + archrelname = palloc(sizeof(ARCHIVE_PREFIX) + NAMEDATALEN); /* bogus */ + sprintf(archrelname, "%s%d", ARCHIVE_PREFIX, heaprel->rd_id); + + archrel = heap_openr(archrelname); + + pfree(archrelname); + return (archrel); +} + +/* + * _vc_archive() -- write a tuple to an archive relation + * + * In the future, this will invoke the archived accessd method. For + * now, archive relations are on mag disk. 
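+ * (The archive relation is located purely by name: _vc_getarchrel() above builds "a,<heap relid>" from ARCHIVE_PREFIX and opens it with heap_openr().)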
+ */ +static void +_vc_archive(Relation archrel, HeapTuple htup) +{ + doinsert(archrel, htup); +} + +static bool +_vc_isarchrel(char *rname) +{ + if (strncmp(ARCHIVE_PREFIX, rname,strlen(ARCHIVE_PREFIX)) == 0) + return (true); + + return (false); +} diff --git a/src/backend/commands/vacuum.h b/src/backend/commands/vacuum.h new file mode 100644 index 00000000000..f5994d7d6d5 --- /dev/null +++ b/src/backend/commands/vacuum.h @@ -0,0 +1,48 @@ +/*------------------------------------------------------------------------- + * + * vacuum.h-- + * header file for postgres vacuum cleaner + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: vacuum.h,v 1.1.1.1 1996/07/09 06:21:23 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef VACUUM_H +#define VACUUM_H + +typedef struct VAttListData { + int val_dummy; + struct VAttListData *val_next; +} VAttListData; + +typedef VAttListData *VAttList; + +typedef struct VTidListData { + ItemPointerData vtl_tid; + struct VTidListData *vtl_next; +} VTidListData; + +typedef VTidListData *VTidList; + +typedef struct VRelListData { + Oid vrl_relid; + VAttList vrl_attlist; + VTidList vrl_tidlist; + int vrl_ntups; + int vrl_npages; + bool vrl_hasindex; + struct VRelListData *vrl_next; +} VRelListData; + +typedef VRelListData *VRelList; + +extern bool VacuumRunning; + +extern void vc_abort(void); +extern void vacuum(char *vacrel); + + +#endif /* VACUUM_H */ diff --git a/src/backend/commands/version.h b/src/backend/commands/version.h new file mode 100644 index 00000000000..20d49d2c0c7 --- /dev/null +++ b/src/backend/commands/version.h @@ -0,0 +1,26 @@ +/*------------------------------------------------------------------------- + * + * version.h-- + * Header file for versions. 
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: version.h,v 1.1.1.1 1996/07/09 06:21:23 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef VERSION_H +#define VERSION_H + +#include "postgres.h" +#include "nodes/pg_list.h" + +extern void DefineVersion(char *name, char *fromRelname, char *date); +extern void VersionCreate(char *vname, char *bname); +extern void VersionAppend(char *vname, char *bname); +extern void VersionRetrieve(char *vname, char *bname, char *snapshot); +extern void VersionDelete(char *vname, char *bname, char *snapshot); +extern void VersionReplace(char *vname, char *bname, char *snapshot); + +#endif /* VERSION_H */ diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c new file mode 100644 index 00000000000..f6023ca08de --- /dev/null +++ b/src/backend/commands/view.c @@ -0,0 +1,325 @@ +/*------------------------------------------------------------------------- + * + * view.c-- + * use rewrite rules to construct views + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/commands/view.c,v 1.1.1.1 1996/07/09 06:21:22 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include /* for sprintf() */ +#include "postgres.h" +#include "access/heapam.h" +#include "access/xact.h" +#include "utils/builtins.h" +#include "utils/syscache.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "nodes/relation.h" +#include "nodes/primnodes.h" +#include "nodes/parsenodes.h" +#include "parser/catalog_utils.h" +#include "parser/parse_query.h" +#include "rewrite/rewriteDefine.h" +#include "rewrite/rewriteHandler.h" +#include "rewrite/rewriteManip.h" +#include "rewrite/rewriteRemove.h" +#include "commands/creatinh.h" + +/*--------------------------------------------------------------------- + * DefineVirtualRelation + * + * Create the "view" relation. + * `DefineRelation' does all the work, we just provide the correct + * arguments! + * + * If the relation already exists, then 'DefineRelation' will abort + * the xact... + *--------------------------------------------------------------------- + */ +static void +DefineVirtualRelation(char *relname, List *tlist) +{ + CreateStmt createStmt; + List *attrList, *t; + TargetEntry *entry; + Resdom *res; + char *resname; + char *restypename; + + /* + * create a list with one entry per attribute of this relation. + * Each entry is a two element list. The first element is the + * name of the attribute (a string) and the second the name of the type + * (NOTE: a string, not a type id!). + */ + attrList = NIL; + if (tlist!=NIL) { + foreach (t, tlist ) { + ColumnDef *def = makeNode(ColumnDef); + TypeName *typename; + + /* + * find the names of the attribute & its type + */ + entry = lfirst(t); + res = entry->resdom; + resname = res->resname; + restypename = tname(get_id_type((long)res->restype)); + + typename = makeNode(TypeName); + + typename->name = pstrdup(restypename); + def->colname = pstrdup(resname); + + def->typename = typename; + + attrList = lappend(attrList, def); + } + } else { + elog ( WARN, "attempted to define virtual relation with no attrs"); + } + + /* + * now create the parametesr for keys/inheritance etc. + * All of them are nil... 
+ */ + createStmt.relname = relname; + createStmt.tableElts = attrList; +/* createStmt.tableType = NULL;*/ + createStmt.inhRelnames = NIL; + createStmt.archiveType = ARCH_NONE; + createStmt.location = -1; + createStmt.archiveLoc = -1; + + /* + * finally create the relation... + */ + DefineRelation(&createStmt); +} + +/*------------------------------------------------------------------ + * makeViewRetrieveRuleName + * + * Given a view name, returns the name for the 'on retrieve to "view"' + * rule. + * This routine is called when defining/removing a view. + * + * NOTE: it quarantees that the name is at most 15 chars long + * + * XXX it also means viewName cannot be 16 chars long! - ay 11/94 + *------------------------------------------------------------------ + */ +char * +MakeRetrieveViewRuleName(char *viewName) +{ +/* + char buf[100]; + + memset(buf, 0, sizeof(buf)); + sprintf(buf, "_RET%.*s", NAMEDATALEN, viewName->data); + buf[15] = '\0'; + namestrcpy(rule_name, buf); +*/ + + char *buf; + buf = palloc(strlen(viewName) + 5); + sprintf(buf, "_RET%s",viewName); + return buf; +} + +static RuleStmt * +FormViewRetrieveRule(char *viewName, Query *viewParse) +{ + RuleStmt *rule; + char *rname; + Attr *attr; + + /* + * Create a RuleStmt that corresponds to the suitable + * rewrite rule args for DefineQueryRewrite(); + */ + rule = makeNode(RuleStmt); + rname = MakeRetrieveViewRuleName(viewName); + + attr = makeNode(Attr); + attr->relname = pstrdup(viewName); +/* attr->refname = pstrdup(viewName);*/ + rule->rulename = pstrdup(rname); + rule->whereClause = NULL; + rule->event = CMD_SELECT; + rule->object = attr; + rule->instead = true; + rule->actions = lcons(viewParse, NIL); + + return rule; +} + +static void +DefineViewRules(char *viewName, Query *viewParse) +{ + RuleStmt *retrieve_rule = NULL; +#ifdef NOTYET + RuleStmt *replace_rule = NULL; + RuleStmt *append_rule = NULL; + RuleStmt *delete_rule = NULL; +#endif + + retrieve_rule = + FormViewRetrieveRule(viewName, viewParse); + +#ifdef NOTYET + + replace_rule = + FormViewReplaceRule(viewName, viewParse); + append_rule = + FormViewAppendRule(viewName, viewParse); + delete_rule = + FormViewDeleteRule(viewName, viewParse); + +#endif + + DefineQueryRewrite(retrieve_rule); + +#ifdef NOTYET + DefineQueryRewrite(replace_rule); + DefineQueryRewrite(append_rule); + DefineQueryRewrite(delete_rule); +#endif + +} + +/*--------------------------------------------------------------- + * UpdateRangeTableOfViewParse + * + * Update the range table of the given parsetree. + * This update consists of adding two new entries IN THE BEGINNING + * of the range table (otherwise the rule system will die a slow, + * horrible and painful death, and we do not want that now, do we?) + * one for the CURRENT relation and one for the NEW one (both of + * them refer in fact to the "view" relation). + * + * Of course we must also increase the 'varnos' of all the Var nodes + * by 2... + * + * NOTE: these are destructive changes. It would be difficult to + * make a complete copy of the parse tree and make the changes + * in the copy. + *--------------------------------------------------------------- + */ +static void +UpdateRangeTableOfViewParse(char *viewName, Query *viewParse) +{ + List *old_rt; + List *new_rt; + RangeTblEntry *rt_entry1, *rt_entry2; + + /* + * first offset all var nodes by 2 + */ + OffsetVarNodes((Node*)viewParse->targetList, 2); + OffsetVarNodes(viewParse->qual, 2); + + /* + * find the old range table... 
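+ * (After the OffsetVarNodes() calls above, a Var that pointed at rtable position 1 now points at position 3, which lines up with the two entries prepended below.)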
+ */ + old_rt = viewParse->rtable; + + /* + * create the 2 new range table entries and form the new + * range table... + * CURRENT first, then NEW.... + */ + rt_entry1 = + makeRangeTableEntry((char*)viewName, FALSE, NULL, "*CURRENT*"); + rt_entry2 = + makeRangeTableEntry((char*)viewName, FALSE, NULL, "*NEW*"); + new_rt = lcons(rt_entry2, old_rt); + new_rt = lcons(rt_entry1, new_rt); + + /* + * Now the tricky part.... + * Update the range table in place... Be careful here, or + * hell breaks loooooooooooooOOOOOOOOOOOOOOOOOOSE! + */ + viewParse->rtable = new_rt; +} + +/*------------------------------------------------------------------- + * DefineView + * + * - takes a "viewname", "parsetree" pair and then + * 1) construct the "virtual" relation + * 2) commit the command but NOT the transaction, + * so that the relation exists + * before the rules are defined. + * 2) define the "n" rules specified in the PRS2 paper + * over the "virtual" relation + *------------------------------------------------------------------- + */ +void +DefineView(char *viewName, Query *viewParse) +{ + List *viewTlist; + + viewTlist = viewParse->targetList; + + /* + * Create the "view" relation + * NOTE: if it already exists, the xaxt will be aborted. + */ + DefineVirtualRelation(viewName, viewTlist); + + /* + * The relation we have just created is not visible + * to any other commands running with the same transaction & + * command id. + * So, increment the command id counter (but do NOT pfree any + * memory!!!!) + */ + CommandCounterIncrement(); + + /* + * The range table of 'viewParse' does not contain entries + * for the "CURRENT" and "NEW" relations. + * So... add them! + * NOTE: we make the update in place! After this call 'viewParse' + * will never be what it used to be... + */ + UpdateRangeTableOfViewParse(viewName, viewParse); + DefineViewRules(viewName, viewParse); +} + +/*------------------------------------------------------------------ + * RemoveView + * + * Remove a view given its name + *------------------------------------------------------------------ + */ +void +RemoveView(char *viewName) +{ + char* rname; + + /* + * first remove all the "view" rules... + * Currently we only have one! + */ + rname = MakeRetrieveViewRuleName(viewName); + RemoveRewriteRule(rname); + + /* + * we don't really need that, but just in case... + */ + CommandCounterIncrement(); + + /* + * now remove the relation. 
+ */ + heap_destroy(viewName); + pfree(rname); +} diff --git a/src/backend/commands/view.h b/src/backend/commands/view.h new file mode 100644 index 00000000000..15151237715 --- /dev/null +++ b/src/backend/commands/view.h @@ -0,0 +1,20 @@ +/*------------------------------------------------------------------------- + * + * view.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: view.h,v 1.1.1.1 1996/07/09 06:21:23 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef VIEW_H +#define VIEW_H + +extern char *MakeRetrieveViewRuleName(char *view_name); +extern void DefineView(char *view_name, Query *view_parse); +extern void RemoveView(char *view_name); + +#endif /* VIEW_H */ diff --git a/src/backend/executor/Makefile.inc b/src/backend/executor/Makefile.inc new file mode 100644 index 00000000000..211e725cec4 --- /dev/null +++ b/src/backend/executor/Makefile.inc @@ -0,0 +1,29 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the executor module +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/executor/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:24 scrappy Exp $ +# +#------------------------------------------------------------------------- + +VPATH:= $(VPATH):$(CURDIR)/executor + +SRCS_EXECUTOR= execAmi.c execFlatten.c execJunk.c execMain.c \ + execProcnode.c execQual.c execScan.c execTuples.c \ + execUtils.c functions.c nodeAppend.c nodeAgg.c nodeHash.c \ + nodeHashjoin.c nodeIndexscan.c nodeMaterial.c nodeMergejoin.c \ + nodeNestloop.c nodeResult.c nodeSeqscan.c nodeSort.c \ + nodeUnique.c nodeTee.c nodeGroup.c + +HEADERS+= execFlatten.h execdebug.h execdefs.h execdesc.h \ + executor.h functions.h hashjoin.h nodeAgg.h nodeAppend.h \ + nodeHash.h nodeHashjoin.h nodeIndexscan.h nodeMaterial.h \ + nodeMergejoin.h nodeNestloop.h nodeResult.h \ + nodeSeqscan.h nodeSort.h nodeUnique.h tuptable.h nodeTee.h \ + nodeGroup.h + diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c new file mode 100644 index 00000000000..08d3e70d8b9 --- /dev/null +++ b/src/backend/executor/execAmi.c @@ -0,0 +1,439 @@ +/*------------------------------------------------------------------------- + * + * execAmi.c-- + * miscellanious executor access method routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/execAmi.c,v 1.1.1.1 1996/07/09 06:21:24 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * + * ExecOpenScanR \ / amopen + * ExecBeginScan \ / ambeginscan + * ExecCloseR \ / amclose + * ExecInsert \ executor interface / aminsert + * ExecReScanNode / to access methods \ amrescan + * ExecReScanR / \ amrescan + * ExecMarkPos / \ ammarkpos + * ExecRestrPos / \ amrestpos + * + * ExecCreatR function to create temporary relations + * + */ +#include /* for sprintf() */ +#include "executor/executor.h" +#include "storage/smgr.h" +#include "executor/nodeSeqscan.h" +#include "executor/nodeIndexscan.h" +#include "executor/nodeSort.h" +#include "executor/nodeTee.h" +#include "optimizer/internal.h" /* for _TEMP_RELATION_ID_ */ + +/* ---------------------------------------------------------------- + * ExecOpenScanR + * + * old comments: + * Parameters: + * relation -- relation to be opened and scanned. 
+ * nkeys -- number of keys + * skeys -- keys to restrict scanning + * isindex -- if this is true, the relation is the relid of + * an index relation, else it is an index into the + * range table. + * Returns the relation as(relDesc scanDesc) + * If this structure is changed, need to modify the access macros + * defined in execInt.h. + * ---------------------------------------------------------------- + */ +void +ExecOpenScanR(Oid relOid, + int nkeys, + ScanKey skeys, + bool isindex, + ScanDirection dir, + TimeQual timeRange, + Relation *returnRelation, /* return */ + Pointer *returnScanDesc) /* return */ +{ + Relation relation; + Pointer scanDesc; + + /* ---------------- + * note: scanDesc returned by ExecBeginScan can be either + * a HeapScanDesc or an IndexScanDesc so for now we + * make it a Pointer. There should be a better scan + * abstraction someday -cim 9/9/89 + * ---------------- + */ + relation = ExecOpenR(relOid, isindex); + scanDesc = ExecBeginScan(relation, + nkeys, + skeys, + isindex, + dir, + timeRange); + + if (returnRelation != NULL) + *returnRelation = relation; + if (scanDesc != NULL) + *returnScanDesc = scanDesc; +} + +/* ---------------------------------------------------------------- + * ExecOpenR + * + * returns a relation descriptor given an object id. + * ---------------------------------------------------------------- + */ +Relation +ExecOpenR(Oid relationOid, bool isindex) +{ + Relation relation; + relation = (Relation) NULL; + + /* ---------------- + * open the relation with the correct call depending + * on whether this is a heap relation or an index relation. + * ---------------- + */ + if (isindex) { + relation = index_open(relationOid); + } else + relation = heap_open(relationOid); + + if (relation == NULL) + elog(DEBUG, "ExecOpenR: relation == NULL, heap_open failed."); + + return relation; +} + +/* ---------------------------------------------------------------- + * ExecBeginScan + * + * beginscans a relation in current direction. + * + * XXX fix parameters to AMbeginscan (and btbeginscan) + * currently we need to pass a flag stating whether + * or not the scan should begin at an endpoint of + * the relation.. Right now we always pass false + * -cim 9/14/89 + * ---------------------------------------------------------------- + */ +Pointer +ExecBeginScan(Relation relation, + int nkeys, + ScanKey skeys, + bool isindex, + ScanDirection dir, + TimeQual time_range) +{ + Pointer scanDesc; + + scanDesc = NULL; + + /* ---------------- + * open the appropriate type of scan. + * + * Note: ambeginscan()'s second arg is a boolean indicating + * that the scan should be done in reverse.. That is, + * if you pass it true, then the scan is backward. + * ---------------- + */ + if (isindex) { + scanDesc = (Pointer) index_beginscan(relation, + false, /* see above comment */ + nkeys, + skeys); + } else { + scanDesc = (Pointer) heap_beginscan(relation, + ScanDirectionIsBackward(dir), + time_range, + nkeys, + skeys); + } + + if (scanDesc == NULL) + elog(DEBUG, "ExecBeginScan: scanDesc = NULL, heap_beginscan failed."); + + + return scanDesc; +} + +/* ---------------------------------------------------------------- + * ExecCloseR + * + * closes the relation and scan descriptor for a scan or sort + * node. Also closes index relations and scans for index scans. 
+ * + * old comments + * closes the relation indicated in 'relID' + * ---------------------------------------------------------------- + */ +void +ExecCloseR(Plan *node) +{ + CommonScanState *state; + Relation relation; + HeapScanDesc scanDesc; + + /* ---------------- + * shut down the heap scan and close the heap relation + * ---------------- + */ + switch (nodeTag(node)) { + + case T_SeqScan: + state = ((SeqScan *)node)->scanstate; + break; + + case T_IndexScan: + state = ((IndexScan *)node)->scan.scanstate; + break; + + case T_Material: + state = &(((Material *)node)->matstate->csstate); + break; + + case T_Sort: + state = &(((Sort *)node)->sortstate->csstate); + break; + + case T_Agg: + state = &(((Agg *)node)->aggstate->csstate); + break; + + default: + elog(DEBUG, "ExecCloseR: not a scan, material, or sort node!"); + return; + } + + relation = state->css_currentRelation; + scanDesc = state->css_currentScanDesc; + + if (scanDesc != NULL) + heap_endscan(scanDesc); + + if (relation != NULL) + heap_close(relation); + + /* ---------------- + * if this is an index scan then we have to take care + * of the index relations as well.. + * ---------------- + */ + if (nodeTag(node) == T_IndexScan) { + IndexScan *iscan= (IndexScan *)node; + IndexScanState *indexstate; + int numIndices; + RelationPtr indexRelationDescs; + IndexScanDescPtr indexScanDescs; + int i; + + indexstate = iscan->indxstate; + numIndices = indexstate->iss_NumIndices; + indexRelationDescs = indexstate->iss_RelationDescs; + indexScanDescs = indexstate->iss_ScanDescs; + + for (i = 0; iiterexpr; + + /* + * Really Iter nodes are only needed for C functions, postquel function + * by their nature return 1 result at a time. For now we are only worrying + * about postquel functions, c functions will come later. + */ + return ExecEvalExpr(expression, econtext, resultIsNull, iterIsDone); +} + +void +ExecEvalFjoin(TargetEntry *tlist, + ExprContext *econtext, + bool *isNullVect, + bool *fj_isDone) +{ + +#ifdef SETS_FIXED + bool isDone; + int curNode; + List *tlistP; + + Fjoin *fjNode = tlist->fjoin; + DatumPtr resVect = fjNode->fj_results; + BoolPtr alwaysDone = fjNode->fj_alwaysDone; + + if (fj_isDone) *fj_isDone = false; + /* + * For the next tuple produced by the plan, we need to re-initialize + * the Fjoin node. + */ + if (!fjNode->fj_initialized) + { + /* + * Initialize all of the Outer nodes + */ + curNode = 1; + foreach(tlistP, lnext(tlist)) + { + TargetEntry *tle = lfirst(tlistP); + + resVect[curNode] = ExecEvalIter((Iter*)tle->expr, + econtext, + &isNullVect[curNode], + &isDone); + if (isDone) + isNullVect[curNode] = alwaysDone[curNode] = true; + else + alwaysDone[curNode] = false; + + curNode++; + } + + /* + * Initialize the inner node + */ + resVect[0] = ExecEvalIter((Iter*)fjNode->fj_innerNode->expr, + econtext, + &isNullVect[0], + &isDone); + if (isDone) + isNullVect[0] = alwaysDone[0] = true; + else + alwaysDone[0] = false; + + /* + * Mark the Fjoin as initialized now. + */ + fjNode->fj_initialized = TRUE; + + /* + * If the inner node is always done, then we are done for now + */ + if (isDone) + return; + } + else + { + /* + * If we're already initialized, all we need to do is get the + * next inner result and pair it up with the existing outer node + * result vector. Watch out for the degenerate case, where the + * inner node never returns results. + */ + + /* + * Fill in nulls for every function that is always done. 
+ */ + for (curNode=fjNode->fj_nNodes-1; curNode >= 0; curNode--) + isNullVect[curNode] = alwaysDone[curNode]; + + if (alwaysDone[0] == true) + { + *fj_isDone = FjoinBumpOuterNodes(tlist, + econtext, + resVect, + isNullVect); + return; + } + else + resVect[0] = ExecEvalIter((Iter*)fjNode->fj_innerNode->expr, + econtext, + &isNullVect[0], + &isDone); + } + + /* + * if the inner node is done + */ + if (isDone) + { + *fj_isDone = FjoinBumpOuterNodes(tlist, + econtext, + resVect, + isNullVect); + if (*fj_isDone) + return; + + resVect[0] = ExecEvalIter((Iter*)fjNode->fj_innerNode->expr, + econtext, + &isNullVect[0], + &isDone); + + } +#endif + return; +} + +bool +FjoinBumpOuterNodes(TargetEntry *tlist, + ExprContext *econtext, + DatumPtr results, + char *nulls) +{ +#ifdef SETS_FIXED + bool funcIsDone = true; + Fjoin *fjNode = tlist->fjoin; + char *alwaysDone = fjNode->fj_alwaysDone; + List *outerList = lnext(tlist); + List *trailers = lnext(tlist); + int trailNode = 1; + int curNode = 1; + + /* + * Run through list of functions until we get to one that isn't yet + * done returning values. Watch out for funcs that are always done. + */ + while ((funcIsDone == true) && (outerList != NIL)) + { + TargetEntry *tle = lfirst(outerList); + + if (alwaysDone[curNode] == true) + nulls[curNode] = 'n'; + else + results[curNode] = ExecEvalIter((Iter)tle->expr, + econtext, + &nulls[curNode], + &funcIsDone); + curNode++; + outerList = lnext(outerList); + } + + /* + * If every function is done, then we are done flattening. + * Mark the Fjoin node unitialized, it is time to get the + * next tuple from the plan and redo all of the flattening. + */ + if (funcIsDone) + { + set_fj_initialized(fjNode, false); + return (true); + } + + /* + * We found a function that wasn't done. Now re-run every function + * before it. As usual watch out for functions that are always done. + */ + trailNode = 1; + while (trailNode != curNode-1) + { + TargetEntry *tle = lfirst(trailers); + + if (alwaysDone[trailNode] != true) + results[trailNode] = ExecEvalIter((Iter)tle->expr, + econtext, + &nulls[trailNode], + &funcIsDone); + trailNode++; + trailers = lnext(trailers); + } + return false; +#endif + return false; +} diff --git a/src/backend/executor/execFlatten.h b/src/backend/executor/execFlatten.h new file mode 100644 index 00000000000..fe06823619f --- /dev/null +++ b/src/backend/executor/execFlatten.h @@ -0,0 +1,26 @@ +/*------------------------------------------------------------------------- + * + * execFlatten.h-- + * prototypes for execFlatten.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: execFlatten.h,v 1.1.1.1 1996/07/09 06:21:24 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef EXECFLATTEN_H +#define EXECFLATTEN_H + +extern Datum ExecEvalIter(Iter *iterNode, ExprContext *econtext, bool *resultIsNull, bool *iterIsDone); + +extern void ExecEvalFjoin(TargetEntry *tlist, ExprContext *econtext, bool *isNullVect, bool *fj_isDone); + +extern bool FjoinBumpOuterNodes(TargetEntry *tlist, ExprContext *econtext, DatumPtr results, char *nulls); + + +#endif /* EXECFLATTEN_H */ + + + diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c new file mode 100644 index 00000000000..7ee1543299a --- /dev/null +++ b/src/backend/executor/execJunk.c @@ -0,0 +1,389 @@ +/*------------------------------------------------------------------------- + * + * junk.c-- + * Junk attribute support stuff.... 
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/execJunk.c,v 1.1.1.1 1996/07/09 06:21:24 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "utils/palloc.h" +#include "executor/executor.h" +#include "nodes/relation.h" +#include "optimizer/tlist.h" /* for MakeTLE */ + +/*------------------------------------------------------------------------- + * XXX this stuff should be rewritten to take advantage + * of ExecProject() and the ProjectionInfo node. + * -cim 6/3/91 + * + * An attribute of a tuple living inside the executor, can be + * either a normal attribute or a "junk" attribute. "junk" attributes + * never make it out of the executor, i.e. they are never printed, + * returned or stored in disk. Their only purpose in life is to + * store some information useful only to the executor, mainly the values + * of some system attributes like "ctid" or rule locks. + * + * The general idea is the following: A target list consists of a list of + * Resdom nodes & expression pairs. Each Resdom node has an attribute + * called 'resjunk'. If the value of this attribute is 1 then the + * corresponding attribute is a "junk" attribute. + * + * When we initialize a plan we call 'ExecInitJunkFilter' to create + * and store the appropriate information in the 'es_junkFilter' attribute of + * EState. + * + * We then execute the plan ignoring the "resjunk" attributes. + * + * Finally, when at the top level we get back a tuple, we can call + * 'ExecGetJunkAttribute' to retrieve the value of the junk attributes we + * are interested in, and 'ExecRemoveJunk' to remove all the junk attributes + * from a tuple. This new "clean" tuple is then printed, replaced, deleted + * or inserted. + * + *------------------------------------------------------------------------- + */ + +/*------------------------------------------------------------------------- + * ExecInitJunkFilter + * + * Initialize the Junk filter. + *------------------------------------------------------------------------- + */ +JunkFilter * +ExecInitJunkFilter(List *targetList) +{ + JunkFilter *junkfilter; + List *cleanTargetList; + int len, cleanLength; + TupleDesc tupType, cleanTupType; + List *t; + TargetEntry *tle; + Resdom *resdom, *cleanResdom; + int resjunk; + AttrNumber cleanResno; + AttrNumber *cleanMap; + Size size; + Node *expr; + + /* --------------------- + * First find the "clean" target list, i.e. all the entries + * in the original target list which have a zero 'resjunk' + * NOTE: make copy of the Resdom nodes, because we have + * to change the 'resno's... + * --------------------- + */ + cleanTargetList = NIL; + cleanResno = 1; + + foreach (t, targetList) { + TargetEntry *rtarget = lfirst(t); + if (rtarget->resdom != NULL) { + resdom = rtarget->resdom; + expr = rtarget->expr; + resjunk = resdom->resjunk; + if (resjunk == 0) { + /* + * make a copy of the resdom node, changing its resno. 
+ */ + cleanResdom = (Resdom *) copyObject(resdom); + cleanResdom->resno = cleanResno; + cleanResno ++; + /* + * create a new target list entry + */ + tle = makeNode(TargetEntry); + tle->resdom = cleanResdom; + tle->expr = expr; + cleanTargetList = lappend(cleanTargetList, tle); + } + } + else { +#ifdef SETS_FIXED + List *fjListP; + Fjoin *cleanFjoin; + List *cleanFjList; + List *fjList = lfirst(t); + Fjoin *fjNode = (Fjoin *)tl_node(fjList); + + cleanFjoin = (Fjoin)copyObject((Node) fjNode); + cleanFjList = lcons(cleanFjoin, NIL); + + resdom = (Resdom) lfirst(get_fj_innerNode(fjNode)); + expr = lsecond(get_fj_innerNode(fjNode)); + cleanResdom = (Resdom) copyObject((Node) resdom); + set_resno(cleanResdom, cleanResno); + cleanResno++; + tle = (List) MakeTLE(cleanResdom, (Expr) expr); + set_fj_innerNode(cleanFjoin, tle); + + foreach(fjListP, lnext(fjList)) { + TargetEntry *tle = lfirst(fjListP); + + resdom = tle->resdom; + expr = tle->expr; + cleanResdom = (Resdom*) copyObject((Node) resdom); + cleanResno++; + cleanResdom->Resno = cleanResno; + /* + * create a new target list entry + */ + tle = (List) MakeTLE(cleanResdom, (Expr) expr); + cleanFjList = lappend(cleanFjList, tle); + } + lappend(cleanTargetList, cleanFjList); +#endif + } + } + + /* --------------------- + * Now calculate the tuple types for the original and the clean tuple + * + * XXX ExecTypeFromTL should be used sparingly. Don't we already + * have the tupType corresponding to the targetlist we are passed? + * -cim 5/31/91 + * --------------------- + */ + tupType = (TupleDesc) ExecTypeFromTL(targetList); + cleanTupType = (TupleDesc) ExecTypeFromTL(cleanTargetList); + + len = ExecTargetListLength(targetList); + cleanLength = ExecTargetListLength(cleanTargetList); + + /* --------------------- + * Now calculate the "map" between the original tuples attributes + * and the "clean" tuple's attributes. + * + * The "map" is an array of "cleanLength" attribute numbers, i.e. + * one entry for every attribute of the "clean" tuple. + * The value of this entry is the attribute number of the corresponding + * attribute of the "original" tuple. + * --------------------- + */ + if (cleanLength > 0) { + size = cleanLength * sizeof(AttrNumber); + cleanMap = (AttrNumber*) palloc(size); + cleanResno = 1; + foreach (t, targetList) { + TargetEntry *tle = lfirst(t); + if (tle->resdom != NULL) { + resdom = tle->resdom; + expr = tle->expr; + resjunk = resdom->resjunk; + if (resjunk == 0) { + cleanMap[cleanResno-1] = resdom->resno; + cleanResno ++; + } + } else { +#ifdef SETS_FIXED + List fjListP; + List fjList = lfirst(t); + Fjoin fjNode = (Fjoin)lfirst(fjList); + + /* what the hell is this????? */ + resdom = (Resdom) lfirst(get_fj_innerNode(fjNode)); +#endif + + cleanMap[cleanResno-1] = tle->resdom->resno; + cleanResno++; + +#ifdef SETS_FIXED + foreach(fjListP, lnext(fjList)) { + TargetEntry *tle = lfirst(fjListP); + + resdom = tle->resdom; + cleanMap[cleanResno-1] = resdom->resno; + cleanResno++; + } +#endif + } + } + } else { + cleanMap = NULL; + } + + /* --------------------- + * Finally create and initialize the JunkFilter. 
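+ *
+ *     (Sketch of how the finished filter is meant to be used, based on
+ *      ExecutePlan() in execMain.c; the variable names here are
+ *      illustrative only:
+ *
+ *          JunkFilter *jf = ExecInitJunkFilter(targetList);
+ *          ...
+ *          if (ExecGetJunkAttribute(jf, slot, "ctid", &datum, &isNull))
+ *              ... use the junk attribute ...
+ *          newTuple = ExecRemoveJunk(jf, slot);
+ *     )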
+ * --------------------- + */ + junkfilter = makeNode(JunkFilter); + + junkfilter->jf_targetList = targetList; + junkfilter->jf_length = len; + junkfilter->jf_tupType = tupType; + junkfilter->jf_cleanTargetList = cleanTargetList; + junkfilter->jf_cleanLength = cleanLength; + junkfilter->jf_cleanTupType = cleanTupType; + junkfilter->jf_cleanMap = cleanMap; + + return(junkfilter); + +} + +/*------------------------------------------------------------------------- + * ExecGetJunkAttribute + * + * Given a tuple (slot), the junk filter and a junk attribute's name, + * extract & return the value of this attribute. + * + * It returns false iff no junk attribute with such name was found. + * + * NOTE: isNull might be NULL ! + *------------------------------------------------------------------------- + */ +bool +ExecGetJunkAttribute(JunkFilter *junkfilter, + TupleTableSlot *slot, + char *attrName, + Datum *value, + bool *isNull) +{ + List *targetList; + List *t; + Resdom *resdom; + AttrNumber resno; + char *resname; + int resjunk; + TupleDesc tupType; + HeapTuple tuple; + + /* --------------------- + * first look in the junkfilter's target list for + * an attribute with the given name + * --------------------- + */ + resno = InvalidAttrNumber; + targetList = junkfilter->jf_targetList; + + foreach (t, targetList) { + TargetEntry *tle = lfirst(t); + resdom = tle->resdom; + resname = resdom->resname; + resjunk = resdom->resjunk; + if (resjunk != 0 && (strcmp(resname, attrName) == 0)) { + /* We found it ! */ + resno = resdom->resno; + break; + } + } + + if (resno == InvalidAttrNumber) { + /* Ooops! We couldn't find this attribute... */ + return(false); + } + + /* --------------------- + * Now extract the attribute value from the tuple. + * --------------------- + */ + tuple = slot->val; + tupType = (TupleDesc) junkfilter->jf_tupType; + + *value = (Datum) + heap_getattr(tuple, InvalidBuffer, resno, tupType, isNull); + + return true; +} + +/*------------------------------------------------------------------------- + * ExecRemoveJunk + * + * Construct and return a tuple with all the junk attributes removed. + *------------------------------------------------------------------------- + */ +HeapTuple +ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot) +{ + HeapTuple tuple; + HeapTuple cleanTuple; + AttrNumber *cleanMap; + TupleDesc cleanTupType; + TupleDesc tupType; + int cleanLength; + bool isNull; + int i; + Size size; + Datum *values; + char *nulls; + Datum values_array[64]; + char nulls_array[64]; + + /* ---------------- + * get info from the slot and the junk filter + * ---------------- + */ + tuple = slot->val; + + tupType = (TupleDesc) junkfilter->jf_tupType; + cleanTupType = (TupleDesc) junkfilter->jf_cleanTupType; + cleanLength = junkfilter->jf_cleanLength; + cleanMap = junkfilter->jf_cleanMap; + + /* --------------------- + * Handle the trivial case first. + * --------------------- + */ + if (cleanLength == 0) + return (HeapTuple) NULL; + + /* --------------------- + * Create the arrays that will hold the attribute values + * and the null information for the new "clean" tuple. + * + * Note: we use memory on the stack to optimize things when + * we are dealing with a small number of tuples. + * for large tuples we just use palloc. 
+ * --------------------- + */ + if (cleanLength > 64) { + size = cleanLength * sizeof(Datum); + values = (Datum *) palloc(size); + + size = cleanLength * sizeof(char); + nulls = (char *) palloc(size); + } else { + values = values_array; + nulls = nulls_array; + } + + /* --------------------- + * Exctract one by one all the values of the "clean" tuple. + * --------------------- + */ + for (i=0; i 64) { + pfree(values); + pfree(nulls); + } + + return(cleanTuple); +} + diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c new file mode 100644 index 00000000000..07aac34accb --- /dev/null +++ b/src/backend/executor/execMain.c @@ -0,0 +1,1023 @@ +/*------------------------------------------------------------------------- + * + * execMain.c-- + * top level executor interface routines + * + * INTERFACE ROUTINES + * ExecutorStart() + * ExecutorRun() + * ExecutorEnd() + * + * The old ExecutorMain() has been replaced by ExecutorStart(), + * ExecutorRun() and ExecutorEnd() + * + * These three procedures are the external interfaces to the executor. + * In each case, the query descriptor and the execution state is required + * as arguments + * + * ExecutorStart() must be called at the beginning of any execution of any + * query plan and ExecutorEnd() should always be called at the end of + * execution of a plan. + * + * ExecutorRun accepts 'feature' and 'count' arguments that specify whether + * the plan is to be executed forwards, backwards, and for how many tuples. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "executor/executor.h" +#include "utils/builtins.h" +#include "utils/palloc.h" +#include "utils/acl.h" +#include "parser/parsetree.h" /* rt_fetch() */ +#include "storage/bufmgr.h" +#include "commands/async.h" +/* #include "access/localam.h" */ +#include "optimizer/var.h" + + +/* decls for local routines only used within this module */ +static void ExecCheckPerms(CmdType operation, int resultRelation, List *rangeTable, + Query *parseTree); +static TupleDesc InitPlan(CmdType operation, Query *parseTree, + Plan *plan, EState *estate); +static void EndPlan(Plan *plan, EState *estate); +static TupleTableSlot *ExecutePlan(EState *estate, Plan *plan, + Query *parseTree, CmdType operation, + int numberTuples, int direction, + void (*printfunc)()); +static void ExecRetrieve(TupleTableSlot *slot, void (*printfunc)(), + Relation intoRelationDesc); +static void ExecAppend(TupleTableSlot *slot,ItemPointer tupleid, + EState *estate); +static void ExecDelete(TupleTableSlot *slot, ItemPointer tupleid, + EState *estate); +static void ExecReplace(TupleTableSlot *slot, ItemPointer tupleid, + EState *estate, Query *parseTree); + +/* end of local decls */ + +/* ---------------------------------------------------------------- + * ExecutorStart + * + * This routine must be called at the beginning of any execution of any + * query plan + * + * returns (AttrInfo*) which describes the attributes of the tuples to + * be returned by the query. 
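+ *
+ *      As a rough usage sketch (not taken verbatim from any caller; the
+ *      variable names are illustrative and the QueryDesc/EState are
+ *      assumed to have been built elsewhere), the three entry points
+ *      are meant to be called in sequence:
+ *
+ *          TupleDesc tupdesc = ExecutorStart(queryDesc, estate);
+ *          (void) ExecutorRun(queryDesc, estate, EXEC_RUN, 0);
+ *          ExecutorEnd(queryDesc, estate);
+ *
+ *      With EXEC_RUN the 'count' argument is ignored and all tuples are
+ *      retrieved; EXEC_FOR and EXEC_BACK use 'count' to limit how many
+ *      tuples are fetched forwards or backwards.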
+ * + * ---------------------------------------------------------------- + */ +TupleDesc +ExecutorStart(QueryDesc *queryDesc, EState *estate) +{ + TupleDesc result; + + /* sanity checks */ + Assert(queryDesc!=NULL); + + result = InitPlan(queryDesc->operation, + queryDesc->parsetree, + queryDesc->plantree, + estate); + + /* reset buffer refcount. the current refcounts + * are saved and will be restored when ExecutorEnd is called + * + * this makes sure that when ExecutorRun's are + * called recursively as for postquel functions, + * the buffers pinned by one ExecutorRun will not be + * unpinned by another ExecutorRun. + */ + BufferRefCountReset(estate->es_refcount); + + return result; +} + +/* ---------------------------------------------------------------- + * ExecutorRun + * + * This is the main routine of the executor module. It accepts + * the query descriptor from the traffic cop and executes the + * query plan. + * + * ExecutorStart must have been called already. + * + * the different features supported are: + * EXEC_RUN: retrieve all tuples in the forward direction + * EXEC_FOR: retrieve 'count' number of tuples in the forward dir + * EXEC_BACK: retrieve 'count' number of tuples in the backward dir + * EXEC_RETONE: return one tuple but don't 'retrieve' it + * used in postquel function processing + * + * + * ---------------------------------------------------------------- + */ +TupleTableSlot* +ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature, int count) +{ + CmdType operation; + Query *parseTree; + Plan *plan; + TupleTableSlot *result; + CommandDest dest; + void (*destination)(); + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(queryDesc!=NULL); + + /* ---------------- + * extract information from the query descriptor + * and the query feature. + * ---------------- + */ + operation = queryDesc->operation; + parseTree = queryDesc->parsetree; + plan = queryDesc->plantree; + dest = queryDesc->dest; + destination = (void (*)()) DestToFunction(dest); + + switch(feature) { + + case EXEC_RUN: + result = ExecutePlan(estate, + plan, + parseTree, + operation, + ALL_TUPLES, + EXEC_FRWD, + destination); + break; + case EXEC_FOR: + result = ExecutePlan(estate, + plan, + parseTree, + operation, + count, + EXEC_FRWD, + destination); + break; + + /* ---------------- + * retrieve next n "backward" tuples + * ---------------- + */ + case EXEC_BACK: + result = ExecutePlan(estate, + plan, + parseTree, + operation, + count, + EXEC_BKWD, + destination); + break; + + /* ---------------- + * return one tuple but don't "retrieve" it. + * (this is used by the rule manager..) -cim 9/14/89 + * ---------------- + */ + case EXEC_RETONE: + result = ExecutePlan(estate, + plan, + parseTree, + operation, + ONE_TUPLE, + EXEC_FRWD, + destination); + break; + default: + elog(DEBUG, "ExecutorRun: Unknown feature %d", feature); + break; + } + + return result; +} + +/* ---------------------------------------------------------------- + * ExecutorEnd + * + * This routine must be called at the end of any execution of any + * query plan + * + * returns (AttrInfo*) which describes the attributes of the tuples to + * be returned by the query. + * + * ---------------------------------------------------------------- + */ +void +ExecutorEnd(QueryDesc *queryDesc, EState *estate) +{ + /* sanity checks */ + Assert(queryDesc!=NULL); + + EndPlan(queryDesc->plantree, estate); + + /* restore saved refcounts. 
*/ + BufferRefCountRestore(estate->es_refcount); +} + +/* =============================================================== + * =============================================================== + static routines follow + * =============================================================== + * =============================================================== + */ + +static void +ExecCheckPerms(CmdType operation, + int resultRelation, + List *rangeTable, + Query *parseTree) +{ + int i = 1; + Oid relid; + HeapTuple htp; + List *lp; + List *qvars, *tvars; + int32 ok = 1; + char *opstr; + NameData rname; + char *userName; + +#define CHECK(MODE) pg_aclcheck(rname.data, userName, MODE) + + userName = GetPgUserName(); + + foreach (lp, rangeTable) { + RangeTblEntry *rte = lfirst(lp); + + relid = rte->relid; + htp = SearchSysCacheTuple(RELOID, + ObjectIdGetDatum(relid), + 0,0,0); + if (!HeapTupleIsValid(htp)) + elog(WARN, "ExecCheckPerms: bogus RT relid: %d", + relid); + strncpy(rname.data, + ((Form_pg_class) GETSTRUCT(htp))->relname.data, + NAMEDATALEN); + if (i == resultRelation) { /* this is the result relation */ + qvars = pull_varnos(parseTree->qual); + tvars = pull_varnos((Node*)parseTree->targetList); + if (intMember(resultRelation, qvars) || + intMember(resultRelation, tvars)) { + /* result relation is scanned */ + ok = CHECK(ACL_RD); + opstr = "read"; + if (!ok) + break; + } + switch (operation) { + case CMD_INSERT: + ok = CHECK(ACL_AP) || + CHECK(ACL_WR); + opstr = "append"; + break; + case CMD_NOTIFY: /* what does this mean?? -- jw, 1/6/94 */ + case CMD_DELETE: + case CMD_UPDATE: + ok = CHECK(ACL_WR); + opstr = "write"; + break; + default: + elog(WARN, "ExecCheckPerms: bogus operation %d", + operation); + } + } else { + /* XXX NOTIFY?? */ + ok = CHECK(ACL_RD); + opstr = "read"; + } + if (!ok) + break; + ++i; + } + if (!ok) { +/* + elog(WARN, "%s on \"%-.*s\": permission denied", opstr, + NAMEDATALEN, rname.data); +*/ + elog(WARN, "%s %s", rname.data, ACL_NO_PRIV_WARNING); + } +} + + +/* ---------------------------------------------------------------- + * InitPlan + * + * Initializes the query plan: open files, allocate storage + * and start up the rule manager + * ---------------------------------------------------------------- + */ +static TupleDesc +InitPlan(CmdType operation, Query *parseTree, Plan *plan, EState *estate) +{ + List *rangeTable; + int resultRelation; + Relation intoRelationDesc; + + TupleDesc tupType; + List *targetList; + int len; + + /* ---------------- + * get information from query descriptor + * ---------------- + */ + rangeTable = parseTree->rtable; + resultRelation = parseTree->resultRelation; + + /* ---------------- + * initialize the node's execution state + * ---------------- + */ + estate->es_range_table = rangeTable; + + /* ---------------- + * initialize the BaseId counter so node base_id's + * are assigned correctly. Someday baseid's will have to + * be stored someplace other than estate because they + * should be unique per query planned. + * ---------------- + */ + estate->es_BaseId = 1; + + /* ---------------- + * initialize result relation stuff + * ---------------- + */ + + if (resultRelation != 0 && operation != CMD_SELECT) { + /* ---------------- + * if we have a result relation, open it and + * initialize the result relation info stuff. 
+ * ---------------- + */ + RelationInfo *resultRelationInfo; + Index resultRelationIndex; + RangeTblEntry *rtentry; + Oid resultRelationOid; + Relation resultRelationDesc; + + resultRelationIndex = resultRelation; + rtentry = rt_fetch(resultRelationIndex, rangeTable); + resultRelationOid = rtentry->relid; + resultRelationDesc = heap_open(resultRelationOid); + + /* Write-lock the result relation right away: if the relation + is used in a subsequent scan, we won't have to elevate the + read-lock set by heap_beginscan to a write-lock (needed by + heap_insert, heap_delete and heap_replace). + This will hopefully prevent some deadlocks. - 01/24/94 */ + RelationSetLockForWrite(resultRelationDesc); + + resultRelationInfo = makeNode(RelationInfo); + resultRelationInfo->ri_RangeTableIndex = resultRelationIndex; + resultRelationInfo->ri_RelationDesc = resultRelationDesc; + resultRelationInfo->ri_NumIndices = 0; + resultRelationInfo->ri_IndexRelationDescs = NULL; + resultRelationInfo->ri_IndexRelationInfo = NULL; + + /* ---------------- + * open indices on result relation and save descriptors + * in the result relation information.. + * ---------------- + */ + ExecOpenIndices(resultRelationOid, resultRelationInfo); + + estate->es_result_relation_info = resultRelationInfo; + } else { + /* ---------------- + * if no result relation, then set state appropriately + * ---------------- + */ + estate->es_result_relation_info = NULL; + } + +#ifndef NO_SECURITY + ExecCheckPerms(operation, resultRelation, rangeTable, parseTree); +#endif + + /* ---------------- + * initialize the executor "tuple" table. + * ---------------- + */ + { + int nSlots = ExecCountSlotsNode(plan); + TupleTable tupleTable = ExecCreateTupleTable(nSlots+10); /* why add ten? - jolly */ + + estate->es_tupleTable = tupleTable; + } + + /* ---------------- + * initialize the private state information for + * all the nodes in the query tree. This opens + * files, allocates storage and leaves us ready + * to start processing tuples.. + * ---------------- + */ + ExecInitNode(plan, estate, NULL); + + /* ---------------- + * get the tuple descriptor describing the type + * of tuples to return.. (this is especially important + * if we are creating a relation with "retrieve into") + * ---------------- + */ + tupType = ExecGetTupType(plan); /* tuple descriptor */ + targetList = plan->targetlist; + len = ExecTargetListLength(targetList); /* number of attributes */ + + /* ---------------- + * now that we have the target list, initialize the junk filter + * if this is a REPLACE or a DELETE query. + * We also init the junk filter if this is an append query + * (there might be some rule lock info there...) + * NOTE: in the future we might want to initialize the junk + * filter for all queries. + * ---------------- + */ + if (operation == CMD_UPDATE || operation == CMD_DELETE || + operation == CMD_INSERT) { + + JunkFilter *j = (JunkFilter*) ExecInitJunkFilter(targetList); + estate->es_junkFilter = j; + } else + estate->es_junkFilter = NULL; + + /* ---------------- + * initialize the "into" relation + * ---------------- + */ + intoRelationDesc = (Relation) NULL; + + if (operation == CMD_SELECT) { + char *intoName; + char archiveMode; + Oid intoRelationId; + + if (!parseTree->isPortal) { + /* + * a select into table + */ + if (parseTree->into != NULL) { + /* ---------------- + * create the "into" relation + * + * note: there is currently no way for the user to + * specify the desired archive mode of the + * "into" relation... 
+ * ---------------- + */ + intoName = parseTree->into; + archiveMode = 'n'; + + intoRelationId = heap_create(intoName, + intoName, /* not used */ + archiveMode, + DEFAULT_SMGR, + tupType); + + /* ---------------- + * XXX rather than having to call setheapoverride(true) + * and then back to false, we should change the + * arguments to heap_open() instead.. + * ---------------- + */ + setheapoverride(true); + + intoRelationDesc = heap_open(intoRelationId); + + setheapoverride(false); + } + } + } + + estate->es_into_relation_descriptor = intoRelationDesc; + + /* ---------------- + * return the type information.. + * ---------------- + */ +/* + attinfo = (AttrInfo *)palloc(sizeof(AttrInfo)); + attinfo->numAttr = len; + attinfo->attrs = tupType->attrs; +*/ + + return tupType; +} + +/* ---------------------------------------------------------------- + * EndPlan + * + * Cleans up the query plan -- closes files and free up storages + * ---------------------------------------------------------------- + */ +static void +EndPlan(Plan *plan, EState *estate) +{ + RelationInfo *resultRelationInfo; + Relation intoRelationDesc; + + /* ---------------- + * get information from state + * ---------------- + */ + resultRelationInfo = estate->es_result_relation_info; + intoRelationDesc = estate->es_into_relation_descriptor; + + /* ---------------- + * shut down the query + * ---------------- + */ + ExecEndNode(plan, plan); + + /* ---------------- + * destroy the executor "tuple" table. + * ---------------- + */ + { + TupleTable tupleTable = (TupleTable) estate->es_tupleTable; + ExecDestroyTupleTable(tupleTable,true); /* was missing last arg */ + estate->es_tupleTable = NULL; + } + + /* ---------------- + * close the result relations if necessary + * ---------------- + */ + if (resultRelationInfo != NULL) { + Relation resultRelationDesc; + + resultRelationDesc = resultRelationInfo->ri_RelationDesc; + heap_close(resultRelationDesc); + + /* ---------------- + * close indices on the result relation + * ---------------- + */ + ExecCloseIndices(resultRelationInfo); + } + + /* ---------------- + * close the "into" relation if necessary + * ---------------- + */ + if (intoRelationDesc != NULL) { + heap_close(intoRelationDesc); + } +} + +/* ---------------------------------------------------------------- + * ExecutePlan + * + * processes the query plan to retrieve 'tupleCount' tuples in the + * direction specified. + * Retrieves all tuples if tupleCount is 0 + * + * result is either a slot containing a tuple in the case + * of a RETRIEVE or NULL otherwise. + * + * ---------------------------------------------------------------- + */ + +/* the ctid attribute is a 'junk' attribute that is removed before the + user can see it*/ + +static TupleTableSlot * +ExecutePlan(EState *estate, + Plan *plan, + Query *parseTree, + CmdType operation, + int numberTuples, + int direction, + void (*printfunc)()) +{ + Relation intoRelationDesc; + JunkFilter *junkfilter; + + TupleTableSlot *slot; + ItemPointer tupleid = NULL; + ItemPointerData tuple_ctid; + int current_tuple_count; + TupleTableSlot *result; + + /* ---------------- + * get information + * ---------------- + */ + intoRelationDesc = estate->es_into_relation_descriptor; + + /* ---------------- + * initialize local variables + * ---------------- + */ + slot = NULL; + current_tuple_count = 0; + result = NULL; + + /* ---------------- + * Set the direction. 
+ * ---------------- + */ + estate->es_direction = direction; + + /* ---------------- + * Loop until we've processed the proper number + * of tuples from the plan.. + * ---------------- + */ + + for(;;) { + if (operation != CMD_NOTIFY) { + /* ---------------- + * Execute the plan and obtain a tuple + * ---------------- + */ + /* at the top level, the parent of a plan (2nd arg) is itself */ + slot = ExecProcNode(plan,plan); + + /* ---------------- + * if the tuple is null, then we assume + * there is nothing more to process so + * we just return null... + * ---------------- + */ + if (TupIsNull(slot)) { + result = NULL; + break; + } + } + + /* ---------------- + * if we have a junk filter, then project a new + * tuple with the junk removed. + * + * Store this new "clean" tuple in the place of the + * original tuple. + * + * Also, extract all the junk ifnormation we need. + * ---------------- + */ + if ((junkfilter = estate->es_junkFilter) != (JunkFilter*)NULL) { + Datum datum; +/* NameData attrName; */ + HeapTuple newTuple; + bool isNull; + + /* --------------- + * extract the 'ctid' junk attribute. + * --------------- + */ + if (operation == CMD_UPDATE || operation == CMD_DELETE) { + if (! ExecGetJunkAttribute(junkfilter, + slot, + "ctid", + &datum, + &isNull)) + elog(WARN,"ExecutePlan: NO (junk) `ctid' was found!"); + + if (isNull) + elog(WARN,"ExecutePlan: (junk) `ctid' is NULL!"); + + tupleid = (ItemPointer) DatumGetPointer(datum); + tuple_ctid = *tupleid; /* make sure we don't free the ctid!! */ + tupleid = &tuple_ctid; + } + + /* --------------- + * Finally create a new "clean" tuple with all junk attributes + * removed + * --------------- + */ + newTuple = ExecRemoveJunk(junkfilter, slot); + + slot = ExecStoreTuple(newTuple, /* tuple to store */ + slot, /* destination slot */ + InvalidBuffer,/* this tuple has no buffer */ + true); /* tuple should be pfreed */ + } /* if (junkfilter... */ + + /* ---------------- + * now that we have a tuple, do the appropriate thing + * with it.. either return it to the user, add + * it to a relation someplace, delete it from a + * relation, or modify some of it's attributes. + * ---------------- + */ + + switch(operation) { + case CMD_SELECT: + ExecRetrieve(slot, /* slot containing tuple */ + printfunc, /* print function */ + intoRelationDesc); /* "into" relation */ + result = slot; + break; + + case CMD_INSERT: + ExecAppend(slot, tupleid, estate); + result = NULL; + break; + + case CMD_DELETE: + ExecDelete(slot, tupleid, estate); + result = NULL; + break; + + case CMD_UPDATE: + ExecReplace(slot, tupleid, estate, parseTree); + result = NULL; + break; + + /* Total hack. I'm ignoring any accessor functions for + Relation, RelationTupleForm, NameData. + Assuming that NameData.data has offset 0. + */ + case CMD_NOTIFY: { + RelationInfo *rInfo = estate->es_result_relation_info; + Relation rDesc = rInfo->ri_RelationDesc; + Async_Notify(rDesc->rd_rel->relname.data); + result = NULL; + current_tuple_count = 0; + numberTuples = 1; + elog(DEBUG, "ExecNotify %s",&rDesc->rd_rel->relname); + } + break; + + default: + elog(DEBUG, "ExecutePlan: unknown operation in queryDesc"); + result = NULL; + break; + } + /* ---------------- + * check our tuple count.. if we've returned the + * proper number then return, else loop again and + * process more tuples.. 
+ * ---------------- + */ + current_tuple_count += 1; + if (numberTuples == current_tuple_count) + break; + } + + /* ---------------- + * here, result is either a slot containing a tuple in the case + * of a RETRIEVE or NULL otherwise. + * ---------------- + */ + return result; +} + +/* ---------------------------------------------------------------- + * ExecRetrieve + * + * RETRIEVEs are easy.. we just pass the tuple to the appropriate + * print function. The only complexity is when we do a + * "retrieve into", in which case we insert the tuple into + * the appropriate relation (note: this is a newly created relation + * so we don't need to worry about indices or locks.) + * ---------------------------------------------------------------- + */ +static void +ExecRetrieve(TupleTableSlot *slot, + void (*printfunc)(), + Relation intoRelationDesc) +{ + HeapTuple tuple; + TupleDesc attrtype; + + /* ---------------- + * get the heap tuple out of the tuple table slot + * ---------------- + */ + tuple = slot->val; + attrtype = slot->ttc_tupleDescriptor; + + /* ---------------- + * insert the tuple into the "into relation" + * ---------------- + */ + if (intoRelationDesc != NULL) { + heap_insert (intoRelationDesc, tuple); + IncrAppended(); + } + + /* ---------------- + * send the tuple to the front end (or the screen) + * ---------------- + */ + (*printfunc)(tuple, attrtype); + IncrRetrieved(); +} + +/* ---------------------------------------------------------------- + * ExecAppend + * + * APPENDs are trickier.. we have to insert the tuple into + * the base relation and insert appropriate tuples into the + * index relations. + * ---------------------------------------------------------------- + */ + +static void +ExecAppend(TupleTableSlot *slot, + ItemPointer tupleid, + EState *estate) +{ + HeapTuple tuple; + RelationInfo *resultRelationInfo; + Relation resultRelationDesc; + int numIndices; + Oid newId; + + /* ---------------- + * get the heap tuple out of the tuple table slot + * ---------------- + */ + tuple = slot->val; + + /* ---------------- + * get information on the result relation + * ---------------- + */ + resultRelationInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelationInfo->ri_RelationDesc; + + /* ---------------- + * have to add code to preform unique checking here. + * cim -12/1/89 + * ---------------- + */ + + /* ---------------- + * insert the tuple + * ---------------- + */ + newId = heap_insert(resultRelationDesc, /* relation desc */ + tuple); /* heap tuple */ + IncrAppended(); + UpdateAppendOid(newId); + + /* ---------------- + * process indices + * + * Note: heap_insert adds a new tuple to a relation. As a side + * effect, the tupleid of the new tuple is placed in the new + * tuple's t_ctid field. + * ---------------- + */ + numIndices = resultRelationInfo->ri_NumIndices; + if (numIndices > 0) { + ExecInsertIndexTuples(slot, &(tuple->t_ctid), estate); + } +} + +/* ---------------------------------------------------------------- + * ExecDelete + * + * DELETE is like append, we delete the tuple and its + * index tuples. 
+ * ---------------------------------------------------------------- + */ +static void +ExecDelete(TupleTableSlot *slot, + ItemPointer tupleid, + EState *estate) +{ + RelationInfo *resultRelationInfo; + Relation resultRelationDesc; + + /* ---------------- + * get the result relation information + * ---------------- + */ + resultRelationInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelationInfo->ri_RelationDesc; + + /* ---------------- + * delete the tuple + * ---------------- + */ + (void) heap_delete(resultRelationDesc, /* relation desc */ + tupleid); /* item pointer to tuple */ + + IncrDeleted(); + + /* ---------------- + * Note: Normally one would think that we have to + * delete index tuples associated with the + * heap tuple now.. + * + * ... but in POSTGRES, we have no need to do this + * because the vacuum daemon automatically + * opens an index scan and deletes index tuples + * when it finds deleted heap tuples. -cim 9/27/89 + * ---------------- + */ + +} + +/* ---------------------------------------------------------------- + * ExecReplace + * + * note: we can't run replace queries with transactions + * off because replaces are actually appends and our + * scan will mistakenly loop forever, replacing the tuple + * it just appended.. This should be fixed but until it + * is, we don't want to get stuck in an infinite loop + * which corrupts your database.. + * ---------------------------------------------------------------- + */ +static void +ExecReplace(TupleTableSlot *slot, + ItemPointer tupleid, + EState *estate, + Query *parseTree) +{ + HeapTuple tuple; + RelationInfo *resultRelationInfo; + Relation resultRelationDesc; + int numIndices; + + /* ---------------- + * abort the operation if not running transactions + * ---------------- + */ + if (IsBootstrapProcessingMode()) { + elog(DEBUG, "ExecReplace: replace can't run without transactions"); + return; + } + + /* ---------------- + * get the heap tuple out of the tuple table slot + * ---------------- + */ + tuple = slot->val; + + /* ---------------- + * get the result relation information + * ---------------- + */ + resultRelationInfo = estate->es_result_relation_info; + resultRelationDesc = resultRelationInfo->ri_RelationDesc; + + /* ---------------- + * have to add code to preform unique checking here. + * in the event of unique tuples, this becomes a deletion + * of the original tuple affected by the replace. + * cim -12/1/89 + * ---------------- + */ + + /* ---------------- + * replace the heap tuple + * + * Don't want to continue if our heap_replace didn't actually + * do a replace. This would be the case if heap_replace + * detected a non-functional update. -kw 12/30/93 + * ---------------- + */ + if (heap_replace(resultRelationDesc, /* relation desc */ + tupleid, /* item ptr of tuple to replace */ + tuple)) { /* replacement heap tuple */ + return; + } + + IncrReplaced(); + + /* ---------------- + * Note: instead of having to update the old index tuples + * associated with the heap tuple, all we do is form + * and insert new index tuples.. This is because + * replaces are actually deletes and inserts and + * index tuple deletion is done automagically by + * the vaccuum deamon.. All we do is insert new + * index tuples. -cim 9/27/89 + * ---------------- + */ + + /* ---------------- + * process indices + * + * heap_replace updates a tuple in the base relation by invalidating + * it and then appending a new tuple to the relation. 
As a side + * effect, the tupleid of the new tuple is placed in the new + * tuple's t_ctid field. So we now insert index tuples using + * the new tupleid stored there. + * ---------------- + */ + numIndices = resultRelationInfo->ri_NumIndices; + if (numIndices > 0) { + ExecInsertIndexTuples(slot, &(tuple->t_ctid), estate); + } +} diff --git a/src/backend/executor/execProcnode.c b/src/backend/executor/execProcnode.c new file mode 100644 index 00000000000..11a6f63a778 --- /dev/null +++ b/src/backend/executor/execProcnode.c @@ -0,0 +1,477 @@ +/*------------------------------------------------------------------------- + * + * execProcnode.c-- + * contains dispatch functions which call the appropriate "initialize", + * "get a tuple", and "cleanup" routines for the given node type. + * If the node has children, then it will presumably call ExecInitNode, + * ExecProcNode, or ExecEndNode on it's subnodes and do the appropriate + * processing.. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/execProcnode.c,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecInitNode - initialize a plan node and it's subplans + * ExecProcNode - get a tuple by executing the plan node + * ExecEndNode - shut down a plan node and it's subplans + * + * NOTES + * This used to be three files. It is now all combined into + * one file so that it is easier to keep ExecInitNode, ExecProcNode, + * and ExecEndNode in sync when new nodes are added. + * + * EXAMPLE + * suppose we want the age of the manager of the shoe department and + * the number of employees in that department. so we have the query: + * + * retrieve (DEPT.no_emps, EMP.age) + * where EMP.name = DEPT.mgr and + * DEPT.name = "shoe" + * + * Suppose the planner gives us the following plan: + * + * Nest Loop (DEPT.mgr = EMP.name) + * / \ + * / \ + * Seq Scan Seq Scan + * DEPT EMP + * (name = "shoe") + * + * ExecStart() is called first. + * It calls InitPlan() which calls ExecInitNode() on + * the root of the plan -- the nest loop node. + * + * * ExecInitNode() notices that it is looking at a nest loop and + * as the code below demonstrates, it calls ExecInitNestLoop(). + * Eventually this calls ExecInitNode() on the right and left subplans + * and so forth until the entire plan is initialized. + * + * * Then when ExecRun() is called, it calls ExecutePlan() which + * calls ExecProcNode() repeatedly on the top node of the plan. + * Each time this happens, ExecProcNode() will end up calling + * ExecNestLoop(), which calls ExecProcNode() on its subplans. + * Each of these subplans is a sequential scan so ExecSeqScan() is + * called. The slots returned by ExecSeqScan() may contain + * tuples which contain the attributes ExecNestLoop() uses to + * form the tuples it returns. + * + * * Eventually ExecSeqScan() stops returning tuples and the nest + * loop join ends. Lastly, ExecEnd() calls ExecEndNode() which + * calls ExecEndNestLoop() which in turn calls ExecEndNode() on + * its subplans which result in ExecEndSeqScan(). + * + * This should show how the executor works by having + * ExecInitNode(), ExecProcNode() and ExecEndNode() dispatch + * their work to the appopriate node support routines which may + * in turn call these routines themselves on their subplans. 
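+ *
+ *      A condensed sketch of that driving loop (the setup of 'plan' and
+ *      'estate' is assumed and not shown; see InitPlan(), ExecutePlan()
+ *      and EndPlan() in execMain.c for the real callers):
+ *
+ *          ExecInitNode(plan, estate, NULL);
+ *          for (;;) {
+ *              TupleTableSlot *slot = ExecProcNode(plan, plan);
+ *              if (TupIsNull(slot))
+ *                  break;
+ *              ... hand the tuple to ExecRetrieve, ExecAppend, etc. ...
+ *          }
+ *          ExecEndNode(plan, plan);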
+ * + */ +#include "executor/executor.h" +#include "executor/nodeResult.h" +#include "executor/nodeAppend.h" +#include "executor/nodeSeqscan.h" +#include "executor/nodeIndexscan.h" +#include "executor/nodeNestloop.h" +#include "executor/nodeMergejoin.h" +#include "executor/nodeMaterial.h" +#include "executor/nodeSort.h" +#include "executor/nodeUnique.h" +#include "executor/nodeGroup.h" +#include "executor/nodeAgg.h" +#include "executor/nodeHash.h" +#include "executor/nodeHashjoin.h" +#include "executor/nodeTee.h" + +/* ------------------------------------------------------------------------ + * ExecInitNode + * + * Recursively initializes all the nodes in the plan rooted + * at 'node'. + * + * Initial States: + * 'node' is the plan produced by the query planner + * + * returns TRUE/FALSE on whether the plan was successfully initialized + * ------------------------------------------------------------------------ + */ +bool +ExecInitNode(Plan *node, EState *estate, Plan *parent) +{ + bool result; + + /* ---------------- + * do nothing when we get to the end + * of a leaf on tree. + * ---------------- + */ + if (node == NULL) + return FALSE; + + switch(nodeTag(node)) { + /* ---------------- + * control nodes + * ---------------- + */ + case T_Result: + result = ExecInitResult((Result *)node, estate, parent); + break; + + case T_Append: + result = ExecInitAppend((Append *)node, estate, parent); + break; + + /* ---------------- + * scan nodes + * ---------------- + */ + case T_SeqScan: + result = ExecInitSeqScan((SeqScan *)node, estate, parent); + break; + + case T_IndexScan: + result = ExecInitIndexScan((IndexScan *)node, estate, parent); + break; + + /* ---------------- + * join nodes + * ---------------- + */ + case T_NestLoop: + result = ExecInitNestLoop((NestLoop *)node, estate, parent); + break; + + case T_MergeJoin: + result = ExecInitMergeJoin((MergeJoin *)node, estate, parent); + break; + + /* ---------------- + * materialization nodes + * ---------------- + */ + case T_Material: + result = ExecInitMaterial((Material *)node, estate, parent); + break; + + case T_Sort: + result = ExecInitSort((Sort *)node, estate, parent); + break; + + case T_Unique: + result = ExecInitUnique((Unique *)node, estate, parent); + break; + + case T_Group: + result = ExecInitGroup((Group *)node, estate, parent); + break; + + case T_Agg: + result = ExecInitAgg((Agg *)node, estate, parent); + break; + + case T_Hash: + result = ExecInitHash((Hash *)node, estate, parent); + break; + + case T_HashJoin: + result = ExecInitHashJoin((HashJoin *)node, estate, parent); + break; + + case T_Tee: + result = ExecInitTee((Tee*)node, estate, parent); + break; + + default: + elog(DEBUG, "ExecInitNode: node not yet supported: %d", + nodeTag(node)); + result = FALSE; + } + + return result; +} + + +/* ---------------------------------------------------------------- + * ExecProcNode + * + * Initial States: + * the query tree must be initialized once by calling ExecInit. + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecProcNode(Plan *node, Plan *parent) +{ + TupleTableSlot *result; + + /* ---------------- + * deal with NULL nodes.. 
+ * ---------------- + */ + if (node == NULL) + return NULL; + + switch(nodeTag(node)) { + /* ---------------- + * control nodes + * ---------------- + */ + case T_Result: + result = ExecResult((Result *)node); + break; + + case T_Append: + result = ExecProcAppend((Append *)node); + break; + + /* ---------------- + * scan nodes + * ---------------- + */ + case T_SeqScan: + result = ExecSeqScan((SeqScan *)node); + break; + + case T_IndexScan: + result = ExecIndexScan((IndexScan *)node); + break; + + /* ---------------- + * join nodes + * ---------------- + */ + case T_NestLoop: + result = ExecNestLoop((NestLoop *)node, parent); + break; + + case T_MergeJoin: + result = ExecMergeJoin((MergeJoin *)node); + break; + + /* ---------------- + * materialization nodes + * ---------------- + */ + case T_Material: + result = ExecMaterial((Material *)node); + break; + + case T_Sort: + result = ExecSort((Sort *)node); + break; + + case T_Unique: + result = ExecUnique((Unique *)node); + break; + + case T_Group: + result = ExecGroup((Group *)node); + break; + + case T_Agg: + result = ExecAgg((Agg *)node); + break; + + case T_Hash: + result = ExecHash((Hash *)node); + break; + + case T_HashJoin: + result = ExecHashJoin((HashJoin *)node); + break; + + case T_Tee: + result = ExecTee((Tee*)node, parent); + break; + + default: + elog(DEBUG, "ExecProcNode: node not yet supported: %d", + nodeTag(node)); + result = FALSE; + } + + return result; +} + +int +ExecCountSlotsNode(Plan *node) +{ + if (node == (Plan *)NULL) + return 0; + + switch(nodeTag(node)) { + /* ---------------- + * control nodes + * ---------------- + */ + case T_Result: + return ExecCountSlotsResult((Result *)node); + + case T_Append: + return ExecCountSlotsAppend((Append *)node); + + /* ---------------- + * scan nodes + * ---------------- + */ + case T_SeqScan: + return ExecCountSlotsSeqScan((SeqScan *)node); + + case T_IndexScan: + return ExecCountSlotsIndexScan((IndexScan *)node); + + /* ---------------- + * join nodes + * ---------------- + */ + case T_NestLoop: + return ExecCountSlotsNestLoop((NestLoop *)node); + + case T_MergeJoin: + return ExecCountSlotsMergeJoin((MergeJoin *)node); + + /* ---------------- + * materialization nodes + * ---------------- + */ + case T_Material: + return ExecCountSlotsMaterial((Material *)node); + + case T_Sort: + return ExecCountSlotsSort((Sort *)node); + + case T_Unique: + return ExecCountSlotsUnique((Unique *)node); + + case T_Group: + return ExecCountSlotsGroup((Group *)node); + + case T_Agg: + return ExecCountSlotsAgg((Agg *)node); + + case T_Hash: + return ExecCountSlotsHash((Hash *)node); + + case T_HashJoin: + return ExecCountSlotsHashJoin((HashJoin *)node); + + case T_Tee: + return ExecCountSlotsTee((Tee*)node); + + default: + elog(WARN, "ExecCountSlotsNode: node not yet supported: %d", + nodeTag(node)); + break; + } + return 0; +} + +/* ---------------------------------------------------------------- + * ExecEndNode + * + * Recursively cleans up all the nodes in the plan rooted + * at 'node'. + * + * After this operation, the query plan will not be able to + * processed any further. This should be called only after + * the query plan has been fully executed. + * ---------------------------------------------------------------- + */ +void +ExecEndNode(Plan *node, Plan *parent) +{ + /* ---------------- + * do nothing when we get to the end + * of a leaf on tree. 
+ * ---------------- + */ + if (node == NULL) + return; + + switch(nodeTag(node)) { + /* ---------------- + * control nodes + * ---------------- + */ + case T_Result: + ExecEndResult((Result *)node); + break; + + case T_Append: + ExecEndAppend((Append *)node); + break; + + /* ---------------- + * scan nodes + * ---------------- + */ + case T_SeqScan: + ExecEndSeqScan((SeqScan *)node); + break; + + case T_IndexScan: + ExecEndIndexScan((IndexScan *)node); + break; + + /* ---------------- + * join nodes + * ---------------- + */ + case T_NestLoop: + ExecEndNestLoop((NestLoop *)node); + break; + + case T_MergeJoin: + ExecEndMergeJoin((MergeJoin *)node); + break; + + /* ---------------- + * materialization nodes + * ---------------- + */ + case T_Material: + ExecEndMaterial((Material *)node); + break; + + case T_Sort: + ExecEndSort((Sort *)node); + break; + + case T_Unique: + ExecEndUnique((Unique *)node); + break; + + case T_Group: + ExecEndGroup((Group *)node); + break; + + case T_Agg: + ExecEndAgg((Agg *)node); + break; + + /* ---------------- + * XXX add hooks to these + * ---------------- + */ + case T_Hash: + ExecEndHash((Hash *) node); + break; + + case T_HashJoin: + ExecEndHashJoin((HashJoin *) node); + break; + + case T_Tee: + ExecEndTee((Tee*) node, parent); + break; + + default: + elog(DEBUG, "ExecEndNode: node not yet supported", + nodeTag(node)); + break; + } +} diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c new file mode 100644 index 00000000000..104c1f2f506 --- /dev/null +++ b/src/backend/executor/execQual.c @@ -0,0 +1,1504 @@ +/*------------------------------------------------------------------------- + * + * execQual.c-- + * Routines to evaluate qualification and targetlist expressions + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/execQual.c,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecEvalExpr - evaluate an expression and return a datum + * ExecQual - return true/false if qualification is satisified + * ExecTargetList - form a new tuple by projecting the given tuple + * + * NOTES + * ExecEvalExpr() and ExecEvalVar() are hotspots. making these faster + * will speed up the entire system. Unfortunately they are currently + * implemented recursively.. Eliminating the recursion is bound to + * improve the speed of the executor. + * + * ExecTargetList() is used to make tuple projections. Rather then + * trying to speed it up, the execution plan should be pre-processed + * to facilitate attribute sharing between nodes wherever possible, + * instead of doing needless copying. -cim 5/31/91 + * + */ +#include "nodes/primnodes.h" +#include "nodes/relation.h" + +#include "optimizer/clauses.h" + +#include "nodes/memnodes.h" +#include "catalog/pg_language.h" +#include "executor/executor.h" +#include "executor/execFlatten.h" +#include "executor/functions.h" +#include "access/heapam.h" +#include "utils/memutils.h" +#include "utils/builtins.h" +#include "utils/palloc.h" +#include "utils/fcache.h" +#include "utils/fcache2.h" +#include "utils/array.h" + +/* ---------------- + * externs and constants + * ---------------- + */ + +/* + * XXX Used so we can get rid of use of Const nodes in the executor. + * Currently only used by ExecHashGetBucket and set only by ExecMakeVarConst + * and by ExecEvalArrayRef. 
+ */ +bool execConstByVal; +int execConstLen; + +/* static functions decls */ +static Datum ExecEvalAggreg(Aggreg *agg, ExprContext *econtext, bool *isNull); +static Datum ExecEvalArrayRef(ArrayRef *arrayRef, ExprContext *econtext, + bool *isNull, bool *isDone); + +/* -------------------------------- + * ExecEvalArrayRef + * + * This function takes an ArrayRef and returns a Const Node if it + * is an array reference or returns the changed Array Node if it is + * an array assignment. + * + * -------------------------------- + */ +static Datum +ExecEvalArrayRef(ArrayRef *arrayRef, + ExprContext *econtext, + bool *isNull, + bool *isDone) +{ + bool dummy; + int i = 0, j = 0; + ArrayType *array_scanner; + List *upperIndexpr, *lowerIndexpr; + Node *assgnexpr; + List *elt; + IntArray upper, lower; + int *lIndex; + char *dataPtr; + + execConstByVal = arrayRef->refelembyval; + *isNull = false; + array_scanner = (ArrayType*)ExecEvalExpr(arrayRef->refexpr, + econtext, + isNull, + isDone); + if (*isNull) return (Datum)NULL; + + upperIndexpr = arrayRef->refupperindexpr; + + foreach (elt, upperIndexpr) { + upper.indx[i++] = (int32)ExecEvalExpr((Node*)lfirst(elt), + econtext, + isNull, + &dummy); + if (*isNull) return (Datum)NULL; + } + + lowerIndexpr = arrayRef->reflowerindexpr; + lIndex = NULL; + if (lowerIndexpr != NIL) { + foreach (elt, lowerIndexpr) { + lower.indx[j++] = (int32)ExecEvalExpr((Node*)lfirst(elt), + econtext, + isNull, + &dummy); + if (*isNull) return (Datum)NULL; + } + if (i != j) + elog(WARN, + "ExecEvalArrayRef: upper and lower indices mismatch"); + lIndex = lower.indx; + } + + assgnexpr = arrayRef->refassgnexpr; + if (assgnexpr != NULL) { + dataPtr = (char*)ExecEvalExpr((Node *) + assgnexpr, econtext, + isNull, &dummy); + if (*isNull) return (Datum)NULL; + if (lIndex == NULL) + return (Datum) array_set(array_scanner, i, upper.indx, dataPtr, + arrayRef->refelembyval, + arrayRef->refelemlength, + arrayRef->refattrlength, isNull); + return (Datum) array_assgn(array_scanner, i, upper.indx, + lower.indx, + (ArrayType*)dataPtr, + arrayRef->refelembyval, + arrayRef->refelemlength, isNull); + } + if (lIndex == NULL) + return (Datum) array_ref(array_scanner, i, upper.indx, + arrayRef->refelembyval, + arrayRef->refelemlength, + arrayRef->refattrlength, isNull); + return (Datum) array_clip(array_scanner, i, upper.indx, lower.indx, + arrayRef->refelembyval, + arrayRef->refelemlength, isNull); +} + + +/* ---------------------------------------------------------------- + * ExecEvalAggreg + * + * Returns a Datum whose value is the value of the precomputed + * aggregate found in the given expression context. + * ---------------------------------------------------------------- + */ +static Datum +ExecEvalAggreg(Aggreg *agg, ExprContext *econtext, bool *isNull) +{ + + *isNull = econtext->ecxt_nulls[agg->aggno]; + return econtext->ecxt_values[agg->aggno]; +} + +/* ---------------------------------------------------------------- + * ExecEvalVar + * + * Returns a Datum whose value is the value of a range + * variable with respect to given expression context. 
+ * ---------------------------------------------------------------- + */ +Datum +ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull) +{ + Datum result; + TupleTableSlot *slot; + AttrNumber attnum; + HeapTuple heapTuple; + TupleDesc tuple_type; + Buffer buffer; + bool byval; + int16 len; + + /* ---------------- + * get the slot we want + * ---------------- + */ + switch(variable->varno) { + case INNER: /* get the tuple from the inner node */ + slot = econtext->ecxt_innertuple; + break; + + case OUTER: /* get the tuple from the outer node */ + slot = econtext->ecxt_outertuple; + break; + + default: /* get the tuple from the relation being scanned */ + slot = econtext->ecxt_scantuple; + break; + } + + /* ---------------- + * extract tuple information from the slot + * ---------------- + */ + heapTuple = slot->val; + tuple_type = slot->ttc_tupleDescriptor; + buffer = slot->ttc_buffer; + + attnum = variable->varattno; + + /* + * If the attribute number is invalid, then we are supposed to + * return the entire tuple, we give back a whole slot so that + * callers know what the tuple looks like. + */ + if (attnum == InvalidAttrNumber) + { + TupleTableSlot *tempSlot; + TupleDesc td; + HeapTuple tup; + + tempSlot = makeNode(TupleTableSlot); + tempSlot->ttc_shouldFree = false; + tempSlot->ttc_descIsNew = true; + tempSlot->ttc_tupleDescriptor = (TupleDesc)NULL, + tempSlot->ttc_buffer = InvalidBuffer; + tempSlot->ttc_whichplan = -1; + + tup = heap_copytuple(slot->val); + td = CreateTupleDescCopy(slot->ttc_tupleDescriptor); + + ExecSetSlotDescriptor(tempSlot, td); + + ExecStoreTuple(tup, tempSlot, InvalidBuffer, true); + return (Datum) tempSlot; + } + + result = (Datum) + heap_getattr(heapTuple, /* tuple containing attribute */ + buffer, /* buffer associated with tuple */ + attnum, /* attribute number of desired attribute */ + tuple_type, /* tuple descriptor of tuple */ + isNull); /* return: is attribute null? */ + + /* ---------------- + * return null if att is null + * ---------------- + */ + if (*isNull) + return (Datum) NULL; + + /* ---------------- + * get length and type information.. + * ??? what should we do about variable length attributes + * - variable length attributes have their length stored + * in the first 4 bytes of the memory pointed to by the + * returned value.. If we can determine that the type + * is a variable length type, we can do the right thing. + * -cim 9/15/89 + * ---------------- + */ + if (attnum < 0) { + /* ---------------- + * If this is a pseudo-att, we get the type and fake the length. + * There ought to be a routine to return the real lengths, so + * we'll mark this one ... XXX -mao + * ---------------- + */ + len = heap_sysattrlen(attnum); /* XXX see -mao above */ + byval = heap_sysattrbyval(attnum); /* XXX see -mao above */ + } else { + len = tuple_type->attrs[ attnum-1 ]->attlen; + byval = tuple_type->attrs[ attnum-1 ]->attbyval ? true : false ; + } + + execConstByVal = byval; + execConstLen = len; + + return result; +} + +/* ---------------------------------------------------------------- + * ExecEvalParam + * + * Returns the value of a parameter. A param node contains + * something like ($.name) and the expression context contains + * the current parameter bindings (name = "sam") (age = 34)... + * so our job is to replace the param node with the datum + * containing the appropriate information ("sam"). + * + * Q: if we have a parameter ($.foo) without a binding, i.e. 
+ * there is no (foo = xxx) in the parameter list info, + * is this a fatal error or should this be a "not available" + * (in which case we shoud return a Const node with the + * isnull flag) ? -cim 10/13/89 + * + * Minor modification: Param nodes now have an extra field, + * `paramkind' which specifies the type of parameter + * (see params.h). So while searching the paramList for + * a paramname/value pair, we have also to check for `kind'. + * + * NOTE: The last entry in `paramList' is always an + * entry with kind == PARAM_INVALID. + * ---------------------------------------------------------------- + */ +Datum +ExecEvalParam(Param *expression, ExprContext *econtext, bool *isNull) +{ + + char *thisParameterName; + int thisParameterKind; + AttrNumber thisParameterId; + int matchFound; + ParamListInfo paramList; + + thisParameterName = expression->paramname; + thisParameterKind = expression->paramkind; + thisParameterId = expression->paramid; + paramList = econtext->ecxt_param_list_info; + + *isNull = false; + /* + * search the list with the parameter info to find a matching name. + * An entry with an InvalidName denotes the last element in the array. + */ + matchFound = 0; + if (paramList != NULL) { + /* + * search for an entry in 'paramList' that matches + * the `expression'. + */ + while(paramList->kind != PARAM_INVALID && !matchFound) { + switch (thisParameterKind) { + case PARAM_NAMED: + if (thisParameterKind == paramList->kind && + strcmp(paramList->name, thisParameterName) == 0){ + matchFound = 1; + } + break; + case PARAM_NUM: + if (thisParameterKind == paramList->kind && + paramList->id == thisParameterId) { + matchFound = 1; + } + break; + case PARAM_OLD: + case PARAM_NEW: + if (thisParameterKind == paramList->kind && + paramList->id == thisParameterId) + { + matchFound = 1; + /* + * sanity check + */ + if (strcmp(paramList->name, thisParameterName) != 0){ + elog(WARN, + "ExecEvalParam: new/old params with same id & diff names"); + } + } + break; + default: + /* + * oops! this is not supposed to happen! + */ + elog(WARN, "ExecEvalParam: invalid paramkind %d", + thisParameterKind); + } + if (! matchFound) { + paramList++; + } + } /*while*/ + } /*if*/ + + if (!matchFound) { + /* + * ooops! we couldn't find this parameter + * in the parameter list. Signal an error + */ + elog(WARN, "ExecEvalParam: Unknown value for parameter %s", + thisParameterName); + } + + /* + * return the value. + */ + if (paramList->isnull) + { + *isNull = true; + return (Datum)NULL; + } + + if (expression->param_tlist != NIL) + { + HeapTuple tup; + Datum value; + List *tlist = expression->param_tlist; + TargetEntry *tle = (TargetEntry *)lfirst(tlist); + TupleTableSlot *slot = (TupleTableSlot *)paramList->value; + + tup = slot->val; + value = ProjectAttribute(slot->ttc_tupleDescriptor, + tle, tup, isNull); + return value; + } + return(paramList->value); +} + + +/* ---------------------------------------------------------------- + * ExecEvalOper / ExecEvalFunc support routines + * ---------------------------------------------------------------- + */ + +/* ---------------- + * GetAttributeByName + * GetAttributeByNum + * + * These are functions which return the value of the + * named attribute out of the tuple from the arg slot. User defined + * C functions which take a tuple as an argument are expected + * to use this. Ex: overpaid(EMP) might call GetAttributeByNum(). 
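/*
 * A standalone sketch of the lookup that ExecEvalParam performs: walk the
 * parameter list until the PARAM_INVALID sentinel, matching either by name
 * (PARAM_NAMED) or by id (PARAM_NUM).  The DemoParam type and the
 * DEMO_PARAM_* constants are invented for illustration; the real list lives
 * in ParamListInfo (params.h).
 */
#include <stdio.h>
#include <string.h>

enum { DEMO_PARAM_INVALID = 0, DEMO_PARAM_NAMED, DEMO_PARAM_NUM };

typedef struct {
    int         kind;
    const char *name;
    int         id;
    long        value;
} DemoParam;

/* scan until the sentinel entry, as ExecEvalParam does */
static const DemoParam *demo_param_lookup(const DemoParam *list,
                                          const char *name, int id)
{
    for (; list->kind != DEMO_PARAM_INVALID; list++) {
        if (list->kind == DEMO_PARAM_NAMED && name != NULL &&
            strcmp(list->name, name) == 0)
            return list;
        if (list->kind == DEMO_PARAM_NUM && list->id == id)
            return list;
    }
    return NULL;                    /* caller decides whether this is an error */
}

int main(void)
{
    DemoParam params[] = {
        { DEMO_PARAM_NAMED, "name", 0, 42 },
        { DEMO_PARAM_NUM,   NULL,   2,  7 },
        { DEMO_PARAM_INVALID, NULL, 0,  0 }    /* sentinel ends the list */
    };
    const DemoParam *p = demo_param_lookup(params, "name", -1);

    printf("found value %ld\n", p ? p->value : -1L);
    return 0;
}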
+ * ---------------- + */ +char * +GetAttributeByNum(TupleTableSlot *slot, + AttrNumber attrno, + bool *isNull) +{ + Datum retval; + + if (!AttributeNumberIsValid(attrno)) + elog(WARN, "GetAttributeByNum: Invalid attribute number"); + + if (!AttrNumberIsForUserDefinedAttr(attrno)) + elog(WARN, "GetAttributeByNum: cannot access system attributes here"); + + if (isNull == (bool *)NULL) + elog(WARN, "GetAttributeByNum: a NULL isNull flag was passed"); + + if (TupIsNull(slot)) + { + *isNull = true; + return (char *) NULL; + } + + retval = (Datum) + heap_getattr(slot->val, + slot->ttc_buffer, + attrno, + slot->ttc_tupleDescriptor, + isNull); + if (*isNull) + return (char *) NULL; + return (char *) retval; +} + +/* XXX char16 name for catalogs */ +char * +att_by_num(TupleTableSlot *slot, + AttrNumber attrno, + bool *isNull) +{ + return(GetAttributeByNum(slot, attrno, isNull)); +} + +char * +GetAttributeByName(TupleTableSlot *slot, char *attname, bool *isNull) +{ + AttrNumber attrno; + TupleDesc tupdesc; + HeapTuple tuple; + Datum retval; + int natts; + int i; + + if (attname == NULL) + elog(WARN, "GetAttributeByName: Invalid attribute name"); + + if (isNull == (bool *)NULL) + elog(WARN, "GetAttributeByName: a NULL isNull flag was passed"); + + if (TupIsNull(slot)) + { + *isNull = true; + return (char *) NULL; + } + + tupdesc = slot->ttc_tupleDescriptor; + tuple = slot->val; + + natts = tuple->t_natts; + + attrno = InvalidAttrNumber; + for (i=0;inatts;i++) { + if (namestrcmp(&(tupdesc->attrs[i]->attname), attname) == 0) { + attrno = tupdesc->attrs[i]->attnum; + break; + } + } + + if (attrno == InvalidAttrNumber) + elog(WARN, "GetAttributeByName: attribute %s not found", attname); + + retval = (Datum) + heap_getattr(slot->val, + slot->ttc_buffer, + attrno, + tupdesc, + isNull); + if (*isNull) + return (char *) NULL; + return (char *) retval; +} + +/* XXX char16 name for catalogs */ +char * +att_by_name(TupleTableSlot *slot, char *attname, bool *isNull) +{ + return(GetAttributeByName(slot, attname, isNull)); +} + +void +ExecEvalFuncArgs(FunctionCachePtr fcache, + ExprContext *econtext, + List *argList, + Datum argV[], + bool *argIsDone) +{ + int i; + bool argIsNull, *nullVect; + List *arg; + + nullVect = fcache->nullVect; + + i = 0; + foreach (arg, argList) { + /* ---------------- + * evaluate the expression, in general functions cannot take + * sets as arguments but we make an exception in the case of + * nested dot expressions. We have to watch out for this case + * here. + * ---------------- + */ + argV[i] = (Datum) + ExecEvalExpr((Node *) lfirst(arg), + econtext, + &argIsNull, + argIsDone); + if (! (*argIsDone)) + { + Assert(i == 0); + fcache->setArg = (char *)argV[0]; + fcache->hasSetArg = true; + } + if (argIsNull) + nullVect[i] = true; + else + nullVect[i] = false; + i++; + } +} + +/* ---------------- + * ExecMakeFunctionResult + * ---------------- + */ +Datum +ExecMakeFunctionResult(Node *node, + List *arguments, + ExprContext *econtext, + bool *isNull, + bool *isDone) +{ + Datum argv[MAXFMGRARGS]; + FunctionCachePtr fcache; + Func *funcNode = NULL; + Oper *operNode = NULL; + bool funcisset = false; + + /* + * This is kind of ugly, Func nodes now have targetlists so that + * we know when and what to project out from postquel function results. + * This means we have to pass the func node all the way down instead + * of using only the fcache struct as before. ExecMakeFunctionResult + * becomes a little bit more of a dual personality as a result. 
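/*
 * The comment above notes that user-defined C functions taking a whole tuple
 * argument (e.g. overpaid(EMP)) are expected to fetch fields through
 * GetAttributeByNum/GetAttributeByName.  A hedged sketch of such a function,
 * assuming the backend headers and the GetAttributeByName signature shown
 * above; the "salary" attribute, the 100000 threshold and the cast through
 * long are illustrative assumptions, not taken from this patch.
 */
#include "executor/executor.h"      /* TupleTableSlot, GetAttributeByName */

bool
overpaid(TupleTableSlot *emp)       /* hypothetical user-defined function */
{
    bool isNull;
    long salary;

    /* pull the (assumed) "salary" attribute out of the tuple slot */
    salary = (long) GetAttributeByName(emp, "salary", &isNull);
    if (isNull)
        return false;
    return salary > 100000;
}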
+ */ + if (IsA(node,Func)) + { + funcNode = (Func *)node; + fcache = funcNode->func_fcache; + } + else + { + operNode = (Oper *)node; + fcache = operNode->op_fcache; + } + + /* ---------------- + * arguments is a list of expressions to evaluate + * before passing to the function manager. + * We collect the results of evaluating the expressions + * into a datum array (argv) and pass this array to arrayFmgr() + * ---------------- + */ + if (fcache->nargs != 0) { + bool argDone; + + if (fcache->nargs > MAXFMGRARGS) + elog(WARN, "ExecMakeFunctionResult: too many arguments"); + + /* + * If the setArg in the fcache is set we have an argument + * returning a set of tuples (i.e. a nested dot expression). We + * don't want to evaluate the arguments again until the function + * is done. hasSetArg will always be false until we eval the args + * for the first time. We should set this in the parser. + */ + if ((fcache->hasSetArg) && fcache->setArg != NULL) + { + argv[0] = (Datum)fcache->setArg; + argDone = false; + } + else + ExecEvalFuncArgs(fcache, econtext, arguments, argv, &argDone); + + if ((fcache->hasSetArg) && (argDone)) { + if (isDone) *isDone = true; + return (Datum)NULL; + } + } + + /* If this function is really a set, we have to diddle with things. + * If the function has already been called at least once, then the + * setArg field of the fcache holds + * the OID of this set in pg_proc. (This is not quite legit, since + * the setArg field is really for functions which take sets of tuples + * as input - set functions take no inputs at all. But it's a nice + * place to stash this value, for now.) + * + * If this is the first call of the set's function, then + * the call to ExecEvalFuncArgs above just returned the OID of + * the pg_proc tuple which defines this set. So replace the existing + * funcid in the funcnode with the set's OID. Also, we want a new + * fcache which points to the right function, so get that, now that + * we have the right OID. Also zero out the argv, since the real + * set doesn't take any arguments. + */ + if (((Func *)node)->funcid == SetEvalRegProcedure) { + funcisset = true; + if (fcache->setArg) { + argv[0] = 0; + + ((Func *)node)->funcid = (Oid) PointerGetDatum(fcache->setArg); + + } else { + ((Func *)node)->funcid = (Oid) argv[0]; + setFcache(node, argv[0], NIL,econtext); + fcache = ((Func *)node)->func_fcache; + fcache->setArg = (char*)argv[0]; + argv[0] = (Datum)0; + } + } + + /* ---------------- + * now return the value gotten by calling the function manager, + * passing the function the evaluated parameter values. + * ---------------- + */ + if (fcache->language == SQLlanguageId) { + Datum result; + + Assert(funcNode); + result = postquel_function (funcNode, (char **) argv, isNull, isDone); + /* + * finagle the situation where we are iterating through all results + * in a nested dot function (whose argument function returns a set + * of tuples) and the current function finally finishes. We need to + * get the next argument in the set and run the function all over + * again. This is getting unclean. + */ + if ((*isDone) && (fcache->hasSetArg)) { + bool argDone; + + ExecEvalFuncArgs(fcache, econtext, arguments, argv, &argDone); + + if (argDone) { + fcache->setArg = (char *)NULL; + *isDone = true; + result = (Datum)NULL; + } + else + result = postquel_function(funcNode, + (char **) argv, + isNull, + isDone); + } + if (funcisset) { + /* reset the funcid so that next call to this routine will + * still recognize this func as a set. 
+ * Note that for now we assume that the set function in + * pg_proc must be a Postquel function - the funcid is + * not reset below for C functions. + */ + ((Func *)node)->funcid = SetEvalRegProcedure; + /* If we're done with the results of this function, get rid + * of its func cache. + */ + if (*isDone) { + ((Func *)node)->func_fcache = NULL; + } + } + return result; + } + else + { + int i; + + if (isDone) *isDone = true; + for (i = 0; i < fcache->nargs; i++) + if (fcache->nullVect[i] == true) *isNull = true; + + return((Datum) fmgr_c(fcache->func, fcache->foid, fcache->nargs, + (FmgrValues *) argv, isNull)); + } +} + + +/* ---------------------------------------------------------------- + * ExecEvalOper + * ExecEvalFunc + * + * Evaluate the functional result of a list of arguments by calling the + * function manager. Note that in the case of operator expressions, the + * optimizer had better have already replaced the operator OID with the + * appropriate function OID or we're hosed. + * + * old comments + * Presumably the function manager will not take null arguments, so we + * check for null arguments before sending the arguments to (fmgr). + * + * Returns the value of the functional expression. + * ---------------------------------------------------------------- + */ + +/* ---------------------------------------------------------------- + * ExecEvalOper + * ---------------------------------------------------------------- + */ +Datum +ExecEvalOper(Expr *opClause, ExprContext *econtext, bool *isNull) +{ + Oper *op; + List *argList; + FunctionCachePtr fcache; + bool isDone; + + /* ---------------- + * an opclause is a list (op args). (I think) + * + * we extract the oid of the function associated with + * the op and then pass the work onto ExecMakeFunctionResult + * which evaluates the arguments and returns the result of + * calling the function on the evaluated arguments. + * ---------------- + */ + op = (Oper *) opClause->oper; + argList = opClause->args; + + /* + * get the fcache from the Oper node. + * If it is NULL, then initialize it + */ + fcache = op->op_fcache; + if (fcache == NULL) { + setFcache((Node*)op, op->opid, argList, econtext); + fcache = op->op_fcache; + } + + /* ----------- + * call ExecMakeFunctionResult() with a dummy isDone that we ignore. + * We don't have operator whose arguments are sets. + * ----------- + */ + return + ExecMakeFunctionResult((Node *)op, argList, econtext, isNull, &isDone); +} + +/* ---------------------------------------------------------------- + * ExecEvalFunc + * ---------------------------------------------------------------- + */ + +Datum +ExecEvalFunc(Expr *funcClause, + ExprContext *econtext, + bool *isNull, + bool *isDone) +{ + Func *func; + List *argList; + FunctionCachePtr fcache; + + /* ---------------- + * an funcclause is a list (func args). (I think) + * + * we extract the oid of the function associated with + * the func node and then pass the work onto ExecMakeFunctionResult + * which evaluates the arguments and returns the result of + * calling the function on the evaluated arguments. + * + * this is nearly identical to the ExecEvalOper code. + * ---------------- + */ + func = (Func *)funcClause->oper; + argList = funcClause->args; + + /* + * get the fcache from the Func node. 
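/*
 * ExecEvalOper and ExecEvalFunc both keep a function cache hanging off the
 * node and build it on first use: if op_fcache/func_fcache is NULL, call
 * setFcache() and read it back.  A standalone sketch of that
 * initialize-on-first-use pattern; DemoCache, DemoFunc and the demo_* names
 * are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct DemoCache { int nargs; } DemoCache;
typedef struct DemoFunc  { int funcid; DemoCache *cache; } DemoFunc;

static DemoCache *demo_build_cache(int funcid, int nargs)
{
    DemoCache *c = malloc(sizeof(DemoCache));

    c->nargs = nargs;
    (void) funcid;                  /* a real cache would look the function up */
    return c;
}

/* same shape as ExecEvalFunc: reuse the cache if present, build it otherwise */
static int demo_eval(DemoFunc *f, int nargs)
{
    if (f->cache == NULL)           /* first call: set up the cache */
        f->cache = demo_build_cache(f->funcid, nargs);
    return f->cache->nargs;         /* later calls skip the setup */
}

int main(void)
{
    DemoFunc f = { 123, NULL };

    printf("%d %d\n", demo_eval(&f, 2), demo_eval(&f, 2));
    free(f.cache);
    return 0;
}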
+ * If it is NULL, then initialize it + */ + fcache = func->func_fcache; + if (fcache == NULL) { + setFcache((Node*)func, func->funcid, argList, econtext); + fcache = func->func_fcache; + } + + return + ExecMakeFunctionResult((Node*)func, argList, econtext, isNull, isDone); +} + +/* ---------------------------------------------------------------- + * ExecEvalNot + * ExecEvalOr + * ExecEvalAnd + * + * Evaluate boolean expressions. Evaluation of 'or' is + * short-circuited when the first true (or null) value is found. + * + * The query planner reformulates clause expressions in the + * qualification to conjunctive normal form. If we ever get + * an AND to evaluate, we can be sure that it's not a top-level + * clause in the qualification, but appears lower (as a function + * argument, for example), or in the target list. Not that you + * need to know this, mind you... + * ---------------------------------------------------------------- + */ +Datum +ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull) +{ + Datum expr_value; + Node *clause; + bool isDone; + + clause = lfirst(notclause->args); + + /* ---------------- + * We don't iterate over sets in the quals, so pass in an isDone + * flag, but ignore it. + * ---------------- + */ + expr_value = ExecEvalExpr(clause, econtext, isNull, &isDone); + + /* ---------------- + * if the expression evaluates to null, then we just + * cascade the null back to whoever called us. + * ---------------- + */ + if (*isNull) + return expr_value; + + /* ---------------- + * evaluation of 'not' is simple.. expr is false, then + * return 'true' and vice versa. + * ---------------- + */ + if (DatumGetInt32(expr_value) == 0) + return (Datum) true; + + return (Datum) false; +} + +/* ---------------------------------------------------------------- + * ExecEvalOr + * ---------------------------------------------------------------- + */ +Datum +ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull) +{ + List *clauses; + List *clause; + bool isDone; + bool IsNull; + Datum const_value; + + IsNull = false; + clauses = orExpr->args; + + /* ---------------- + * we use three valued logic functions here... + * we evaluate each of the clauses in turn, + * as soon as one is true we return that + * value. If none is true and none of the + * clauses evaluate to NULL we return + * the value of the last clause evaluated (which + * should be false) with *isNull set to false else + * if none is true and at least one clause evaluated + * to NULL we set *isNull flag to true - + * ---------------- + */ + foreach (clause, clauses) { + + /* ---------------- + * We don't iterate over sets in the quals, so pass in an isDone + * flag, but ignore it. + * ---------------- + */ + const_value = ExecEvalExpr((Node *) lfirst(clause), + econtext, + isNull, + &isDone); + + /* ---------------- + * if the expression evaluates to null, then we + * remember it in the local IsNull flag, if none of the + * clauses are true then we need to set *isNull + * to true again. + * ---------------- + */ + if (*isNull) + IsNull = *isNull; + + /* ---------------- + * if we have a true result, then we return it. 
+ * ---------------- + */ + if (DatumGetInt32(const_value) != 0) + return const_value; + } + + /* IsNull is true if at least one clause evaluated to NULL */ + *isNull = IsNull; + return const_value; +} + +/* ---------------------------------------------------------------- + * ExecEvalAnd + * ---------------------------------------------------------------- + */ +Datum +ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull) +{ + List *clauses; + List *clause; + Datum const_value; + bool isDone; + bool IsNull; + + IsNull = false; + + clauses = andExpr->args; + + /* ---------------- + * we evaluate each of the clauses in turn, + * as soon as one is false we return that + * value. If none are false or NULL then we return + * the value of the last clause evaluated, which + * should be true. + * ---------------- + */ + foreach (clause, clauses) { + + /* ---------------- + * We don't iterate over sets in the quals, so pass in an isDone + * flag, but ignore it. + * ---------------- + */ + const_value = ExecEvalExpr((Node *) lfirst(clause), + econtext, + isNull, + &isDone); + + /* ---------------- + * if the expression evaluates to null, then we + * remember it in IsNull, if none of the clauses after + * this evaluates to false we will have to set *isNull + * to true again. + * ---------------- + */ + if (*isNull) + IsNull = *isNull; + + /* ---------------- + * if we have a false result, then we return it, since the + * conjunction must be false. + * ---------------- + */ + if (DatumGetInt32(const_value) == 0) + return const_value; + } + + *isNull = IsNull; + return const_value; +} + +/* ---------------------------------------------------------------- + * ExecEvalExpr + * + * Recursively evaluate a targetlist or qualification expression. + * + * This routine is an inner loop routine and should be as fast + * as possible. + * + * Node comparison functions were replaced by macros for speed and to plug + * memory leaks incurred by using the planner's Lispy stuff for + * comparisons. Order of evaluation of node comparisons IS IMPORTANT; + * the macros do no checks. Order of evaluation: + * + * o an isnull check, largely to avoid coredumps since greg doubts this + * routine is called with a null ptr anyway in proper operation, but is + * not completely sure... + * o ExactNodeType checks. + * o clause checks or other checks where we look at the lfirst of something. + * ---------------------------------------------------------------- + */ +Datum +ExecEvalExpr(Node *expression, + ExprContext *econtext, + bool *isNull, + bool *isDone) +{ + Datum retDatum; + + *isNull = false; + + /* + * Some callers don't care about is done and only want 1 result. They + * indicate this by passing NULL + */ + if (isDone) + *isDone = true; + + /* ---------------- + * here we dispatch the work to the appropriate type + * of function given the type of our expression. 
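/*
 * ExecEvalOr and ExecEvalAnd implement three-valued logic: OR returns true
 * the moment any clause is true, AND returns false the moment any clause is
 * false, and a NULL clause is only reported if nothing short-circuits first.
 * A standalone sketch of those rules; the TriVal type and tri_* names are
 * invented for illustration.
 */
#include <stdio.h>

typedef enum { TV_FALSE = 0, TV_TRUE = 1, TV_NULL = 2 } TriVal;

/* OR over a clause list: true wins immediately, otherwise NULL beats false */
static TriVal tri_or(const TriVal *clauses, int n)
{
    int sawNull = 0;

    for (int i = 0; i < n; i++) {
        if (clauses[i] == TV_TRUE)
            return TV_TRUE;             /* short-circuit, as ExecEvalOr does */
        if (clauses[i] == TV_NULL)
            sawNull = 1;
    }
    return sawNull ? TV_NULL : TV_FALSE;
}

/* AND over a clause list: false wins immediately, otherwise NULL beats true */
static TriVal tri_and(const TriVal *clauses, int n)
{
    int sawNull = 0;

    for (int i = 0; i < n; i++) {
        if (clauses[i] == TV_FALSE)
            return TV_FALSE;            /* short-circuit, as ExecEvalAnd does */
        if (clauses[i] == TV_NULL)
            sawNull = 1;
    }
    return sawNull ? TV_NULL : TV_TRUE;
}

int main(void)
{
    TriVal c[] = { TV_FALSE, TV_NULL, TV_TRUE };

    printf("or=%d and=%d\n", tri_or(c, 3), tri_and(c, 3));   /* or=1 and=0 */
    return 0;
}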
+ * ---------------- + */ + if (expression == NULL) { + *isNull = true; + return (Datum) true; + } + + switch(nodeTag(expression)) { + case T_Var: + retDatum = (Datum) ExecEvalVar((Var *) expression, econtext, isNull); + break; + case T_Const: { + Const *con = (Const *)expression; + + if (con->constisnull) + *isNull = true; + retDatum = con->constvalue; + break; + } + case T_Param: + retDatum = (Datum)ExecEvalParam((Param *)expression, econtext, isNull); + break; + case T_Iter: + retDatum = (Datum) ExecEvalIter((Iter *) expression, + econtext, + isNull, + isDone); + break; + case T_Aggreg: + retDatum = (Datum) ExecEvalAggreg((Aggreg *)expression, + econtext, + isNull); + break; + case T_ArrayRef: + retDatum = (Datum) ExecEvalArrayRef((ArrayRef *) expression, + econtext, + isNull, + isDone); + break; + case T_Expr: { + Expr *expr = (Expr *)expression; + switch (expr->opType) { + case OP_EXPR: + retDatum = (Datum) ExecEvalOper(expr, econtext, isNull); + break; + case FUNC_EXPR: + retDatum = (Datum) ExecEvalFunc(expr, econtext, isNull, isDone); + break; + case OR_EXPR: + retDatum = (Datum) ExecEvalOr(expr, econtext, isNull); + break; + case AND_EXPR: + retDatum = (Datum) ExecEvalAnd(expr, econtext, isNull); + break; + case NOT_EXPR: + retDatum = (Datum) ExecEvalNot(expr, econtext, isNull); + break; + default: + elog(WARN, "ExecEvalExpr: unknown expression type"); + break; + } + break; + } + default: + elog(WARN, "ExecEvalExpr: unknown expression type"); + break; + } + + return retDatum; +} + +/* ---------------------------------------------------------------- + * ExecQual / ExecTargetList + * ---------------------------------------------------------------- + */ + +/* ---------------------------------------------------------------- + * ExecQualClause + * + * this is a workhorse for ExecQual. ExecQual has to deal + * with a list of qualifications, so it passes each qualification + * in the list to this function one at a time. ExecQualClause + * returns true when the qualification *fails* and false if + * the qualification succeeded (meaning we have to test the + * rest of the qualification) + * ---------------------------------------------------------------- + */ +bool +ExecQualClause(Node *clause, ExprContext *econtext) +{ + Datum expr_value; + bool isNull; + bool isDone; + + /* when there is a null clause, consider the qualification to be true */ + if (clause == NULL) + return true; + + /* + * pass isDone, but ignore it. We don't iterate over multiple + * returns in the qualifications. + */ + expr_value = (Datum) + ExecEvalExpr(clause, econtext, &isNull, &isDone); + + /* ---------------- + * this is interesting behaviour here. When a clause evaluates + * to null, then we consider this as passing the qualification. + * it seems kind of like, if the qual is NULL, then there's no + * qual.. + * ---------------- + */ + if (isNull) + return true; + + /* ---------------- + * remember, we return true when the qualification fails.. + * ---------------- + */ + if (DatumGetInt32(expr_value) == 0) + return true; + + return false; +} + +/* ---------------------------------------------------------------- + * ExecQual + * + * Evaluates a conjunctive boolean expression and returns t + * iff none of the subexpressions are false (or null). 
+ * ---------------------------------------------------------------- + */ +bool +ExecQual(List *qual, ExprContext *econtext) +{ + List *clause; + bool result; + + /* ---------------- + * debugging stuff + * ---------------- + */ + EV_printf("ExecQual: qual is "); + EV_nodeDisplay(qual); + EV_printf("\n"); + + IncrProcessed(); + + /* ---------------- + * return true immediately if no qual + * ---------------- + */ + if (qual == NIL) + return true; + + /* ---------------- + * a "qual" is a list of clauses. To evaluate the + * qual, we evaluate each of the clauses in the list. + * + * ExecQualClause returns true when we know the qualification + * *failed* so we just pass each clause in qual to it until + * we know the qual failed or there are no more clauses. + * ---------------- + */ + result = false; + foreach (clause, qual) { + result = ExecQualClause((Node *)lfirst(clause), econtext); + if (result == true) + break; + } + + /* ---------------- + * if result is true, then it means a clause failed so we + * return false. if result is false then it means no clause + * failed so we return true. + * ---------------- + */ + if (result == true) + return false; + + return true; +} + +int +ExecTargetListLength(List *targetlist) +{ + int len; + List *tl; + TargetEntry *curTle; + + len = 0; + foreach (tl, targetlist) { + curTle = lfirst(tl); + + if (curTle->resdom != NULL) + len++; + else + len += curTle->fjoin->fj_nNodes; + } + return len; +} + +/* ---------------------------------------------------------------- + * ExecTargetList + * + * Evaluates a targetlist with respect to the current + * expression context and return a tuple. + * ---------------------------------------------------------------- + */ +static HeapTuple +ExecTargetList(List *targetlist, + int nodomains, + TupleDesc targettype, + Datum *values, + ExprContext *econtext, + bool *isDone) +{ + char nulls_array[64]; + bool fjNullArray[64]; + bool *fjIsNull; + char *null_head; + List *tl; + TargetEntry *tle; + Node *expr; + Resdom *resdom; + AttrNumber resind; + Datum constvalue; + HeapTuple newTuple; + bool isNull; + + /* ---------------- + * debugging stuff + * ---------------- + */ + EV_printf("ExecTargetList: tl is "); + EV_nodeDisplay(targetlist); + EV_printf("\n"); + + /* ---------------- + * Return a dummy tuple if the targetlist is empty . + * the dummy tuple is necessary to differentiate + * between passing and failing the qualification. + * ---------------- + */ + if (targetlist == NIL) { + /* ---------------- + * I now think that the only time this makes + * any sence is when we run a delete query. Then + * we need to return something other than nil + * so we know to delete the tuple associated + * with the saved tupleid.. see what ExecutePlan + * does with the returned tuple.. -cim 9/21/89 + * + * It could also happen in queries like: + * retrieve (foo.all) where bar.a = 3 + * + * is this a new phenomenon? it might cause bogus behavior + * if we try to free this tuple later!! I put a hook in + * ExecProject to watch out for this case -mer 24 Aug 1992 + * ---------------- + */ + CXT1_printf("ExecTargetList: context is %d\n", CurrentMemoryContext); + *isDone = true; + return (HeapTuple) true; + } + + /* ---------------- + * allocate an array of char's to hold the "null" information + * only if we have a really large targetlist. otherwise we use + * the stack. 
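/*
 * ExecQual treats the qual as an implicitly ANDed clause list: ExecQualClause
 * reports "failed" for a clause that comes out false or NULL, and ExecQual
 * rejects the tuple on the first such clause (an empty qual always passes).
 * A standalone sketch of that filtering contract; demo_exec_qual and the
 * 1/0/-1 encoding of true/false/null are invented for illustration.
 */
#include <stdio.h>

/* clause results: 1 = true, 0 = false, -1 = null (unknown) */
static int demo_exec_qual(const int *clauses, int n)
{
    for (int i = 0; i < n; i++) {
        if (clauses[i] == 0 || clauses[i] == -1)
            return 0;               /* false or null: the whole qual fails */
    }
    return 1;                       /* empty qual, or every clause was true */
}

int main(void)
{
    int ok[]   = { 1, 1, 1 };
    int fail[] = { 1, -1, 1 };

    printf("%d %d\n", demo_exec_qual(ok, 3), demo_exec_qual(fail, 3));  /* 1 0 */
    return 0;
}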
+ * ---------------- + */ + if (nodomains > 64) { + null_head = (char *) palloc(nodomains+1); + fjIsNull = (bool *) palloc(nodomains+1); + } else { + null_head = &nulls_array[0]; + fjIsNull = &fjNullArray[0]; + } + + /* ---------------- + * evaluate all the expressions in the target list + * ---------------- + */ + EV_printf("ExecTargetList: setting target list values\n"); + + *isDone = true; + foreach (tl, targetlist) { + /* ---------------- + * remember, a target list is a list of lists: + * + * (( expr) ( expr) ...) + * + * tl is a pointer to successive cdr's of the targetlist + * tle is a pointer to the target list entry in tl + * ---------------- + */ + tle = lfirst(tl); + + if (tle->resdom != NULL) { + expr = tle->expr; + resdom = tle->resdom; + resind = resdom->resno - 1; + constvalue = (Datum) ExecEvalExpr(expr, + econtext, + &isNull, + isDone); + + if ((IsA(expr,Iter)) && (*isDone)) + return (HeapTuple)NULL; + + values[resind] = constvalue; + + if (!isNull) + null_head[resind] = ' '; + else + null_head[resind] = 'n'; + }else { + int curNode; + Resdom *fjRes; + List *fjTlist = (List *)tle->expr; + Fjoin *fjNode = tle->fjoin; + int nNodes = fjNode->fj_nNodes; + DatumPtr results = fjNode->fj_results; + + ExecEvalFjoin(tle, econtext, fjIsNull, isDone); + if (*isDone) + return (HeapTuple)NULL; + + /* + * get the result from the inner node + */ + fjRes = (Resdom *)fjNode->fj_innerNode; + resind = fjRes->resno - 1; + if (fjIsNull[0]) + null_head[resind] = 'n'; + else { + null_head[resind] = ' '; + values[resind] = results[0]; + } + + /* + * Get results from all of the outer nodes + */ + for (curNode = 1; + curNode < nNodes; + curNode++, fjTlist = lnext(fjTlist)) + { +#if 0 /* what is this?? */ + Node *outernode = lfirst(fjTlist); + fjRes = (Resdom *)outernode->iterexpr; +#endif + resind = fjRes->resno - 1; + if (fjIsNull[curNode]) { + null_head[resind] = 'n'; + }else { + null_head[resind] = ' '; + values[resind] = results[curNode]; + } + } + } + } + + /* ---------------- + * form the new result tuple (in the "normal" context) + * ---------------- + */ + newTuple = (HeapTuple) + heap_formtuple(targettype, values, null_head); + + /* ---------------- + * free the nulls array if we allocated one.. + * ---------------- + */ + if (nodomains > 64) pfree(null_head); + + return + newTuple; +} + +/* ---------------------------------------------------------------- + * ExecProject + * + * projects a tuple based in projection info and stores + * it in the specified tuple table slot. + * + * Note: someday soon the executor can be extended to eliminate + * redundant projections by storing pointers to datums + * in the tuple table and then passing these around when + * possible. this should make things much quicker. 
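/*
 * ExecTargetList records per-attribute null information as a character array
 * (' ' for a value, 'n' for a null) next to the values array, using an
 * on-stack buffer for up to 64 attributes and palloc'ing a bigger one
 * otherwise.  A standalone sketch of that stack-or-heap pattern; malloc
 * stands in for palloc and the demo_* names are invented.
 */
#include <stdio.h>
#include <stdlib.h>

#define SMALL_TLIST 64

/* build the null-flag string the way ExecTargetList does */
static void demo_fill_nulls(int natts, const int *isnull)
{
    char  stack_nulls[SMALL_TLIST];
    char *nulls = (natts > SMALL_TLIST) ? malloc(natts + 1) : stack_nulls;

    for (int i = 0; i < natts; i++)
        nulls[i] = isnull[i] ? 'n' : ' ';

    printf("[%.*s]\n", natts, nulls);       /* e.g. "[ n ]" for 3 attributes */

    if (natts > SMALL_TLIST)                /* only free what was heap-allocated */
        free(nulls);
}

int main(void)
{
    int isnull[3] = { 0, 1, 0 };

    demo_fill_nulls(3, isnull);
    return 0;
}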
+ * -cim 6/3/91 + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecProject(ProjectionInfo *projInfo, bool *isDone) +{ + TupleTableSlot *slot; + List *targetlist; + int len; + TupleDesc tupType; + Datum *tupValue; + ExprContext *econtext; + HeapTuple newTuple; + + /* ---------------- + * sanity checks + * ---------------- + */ + if (projInfo == NULL) + return (TupleTableSlot *) NULL; + + /* ---------------- + * get the projection info we want + * ---------------- + */ + slot = projInfo->pi_slot; + targetlist = projInfo->pi_targetlist; + len = projInfo->pi_len; + tupType = slot->ttc_tupleDescriptor; + + tupValue = projInfo->pi_tupValue; + econtext = projInfo->pi_exprContext; + + if (targetlist == NIL) { + *isDone = true; + return (TupleTableSlot *) NULL; + } + + /* ---------------- + * form a new (result) tuple + * ---------------- + */ + newTuple = ExecTargetList(targetlist, + len, + tupType, + tupValue, + econtext, + isDone); + + /* ---------------- + * store the tuple in the projection slot and return the slot. + * + * If there's no projection target list we don't want to pfree + * the bogus tuple that ExecTargetList passes back to us. + * -mer 24 Aug 1992 + * ---------------- + */ + return (TupleTableSlot *) + ExecStoreTuple(newTuple, /* tuple to store */ + slot, /* slot to store in */ + InvalidBuffer, /* tuple has no buffer */ + true); +} + diff --git a/src/backend/executor/execScan.c b/src/backend/executor/execScan.c new file mode 100644 index 00000000000..96dd5551289 --- /dev/null +++ b/src/backend/executor/execScan.c @@ -0,0 +1,136 @@ +/*------------------------------------------------------------------------- + * + * execScan.c-- + * This code provides support for generalized relation scans. ExecScan + * is passed a node and a pointer to a function to "do the right thing" + * and return a tuple from the relation. ExecScan then does the tedious + * stuff - checking the qualification and projecting the tuple + * appropriately. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/execScan.c,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +#include +#include "executor/executor.h" + +/* ---------------------------------------------------------------- + * ExecScan + * + * Scans the relation using the 'access method' indicated and + * returns the next qualifying tuple in the direction specified + * in the global variable ExecDirection. + * The access method returns the next tuple and execScan() is + * responisble for checking the tuple returned against the qual-clause. + * + * Conditions: + * -- the "cursor" maintained by the AMI is positioned at the tuple + * returned previously. + * + * Initial States: + * -- the relation indicated is opened for scanning so that the + * "cursor" is positioned before the first qualifying tuple. + * + * May need to put startmmgr and endmmgr in here. 
+ * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecScan(Scan *node, + TupleTableSlot* (*accessMtd)()) /* function returning a tuple */ +{ + CommonScanState *scanstate; + EState *estate; + List *qual; + bool isDone; + + TupleTableSlot *slot; + TupleTableSlot *resultSlot; + HeapTuple newTuple; + + ExprContext *econtext; + ProjectionInfo *projInfo; + + + /* ---------------- + * initialize misc variables + * ---------------- + */ + newTuple = NULL; + slot = NULL; + + estate = node->plan.state; + scanstate = node->scanstate; + + /* ---------------- + * get the expression context + * ---------------- + */ + econtext = scanstate->cstate.cs_ExprContext; + + /* ---------------- + * initialize fields in ExprContext which don't change + * in the course of the scan.. + * ---------------- + */ + qual = node->plan.qual; + econtext->ecxt_relation = scanstate->css_currentRelation; + econtext->ecxt_relid = node->scanrelid; + + if (scanstate->cstate.cs_TupFromTlist) { + projInfo = scanstate->cstate.cs_ProjInfo; + resultSlot = ExecProject(projInfo, &isDone); + if (!isDone) + return resultSlot; + } + /* + * get a tuple from the access method + * loop until we obtain a tuple which passes the qualification. + */ + for(;;) { + slot = (TupleTableSlot *) (*accessMtd)(node); + + /* ---------------- + * if the slot returned by the accessMtd contains + * NULL, then it means there is nothing more to scan + * so we just return the empty slot. + * ---------------- + */ + if (TupIsNull(slot)) return slot; + + /* ---------------- + * place the current tuple into the expr context + * ---------------- + */ + econtext->ecxt_scantuple = slot; + + /* ---------------- + * check that the current tuple satisfies the qual-clause + * if our qualification succeeds then we + * leave the loop. + * ---------------- + */ + + /* add a check for non-nil qual here to avoid a + function call to ExecQual() when the qual is nil */ + if (!qual || ExecQual(qual, econtext) == true) + break; + } + + /* ---------------- + * form a projection tuple, store it in the result tuple + * slot and return it. + * ---------------- + */ + projInfo = scanstate->cstate.cs_ProjInfo; + + resultSlot = ExecProject(projInfo, &isDone); + scanstate->cstate.cs_TupFromTlist = !isDone; + + return resultSlot; +} + diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c new file mode 100644 index 00000000000..8e7b5283dc6 --- /dev/null +++ b/src/backend/executor/execTuples.c @@ -0,0 +1,1013 @@ +/*------------------------------------------------------------------------- + * + * execTuples.c-- + * Routines dealing with the executor tuple tables. These are used to + * ensure that the executor frees copies of tuples (made by + * ExecTargetList) properly. + * + * Routines dealing with the type information for tuples. Currently, + * the type information for a tuple is an array of FormData_pg_attribute. + * This information is needed by routines manipulating tuples + * (getattribute, formtuple, etc.). 
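/*
 * The heart of ExecScan above is a fetch/filter/project loop: ask the access
 * method for the next tuple, stop on an empty slot, skip tuples that fail the
 * qual, and project the ones that pass.  A standalone sketch of that loop
 * over a plain array; DemoTuple and demo_next are invented stand-ins for the
 * access-method callback.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct { int id; int qualifies; } DemoTuple;

/* "access method": hand back the next tuple, or NULL when the scan is done */
static DemoTuple *demo_next(DemoTuple *rel, int n, int *pos)
{
    return (*pos < n) ? &rel[(*pos)++] : NULL;
}

int main(void)
{
    DemoTuple rel[] = { { 1, 0 }, { 2, 1 }, { 3, 1 } };
    int       pos = 0;

    for (;;) {
        DemoTuple *t = demo_next(rel, 3, &pos);

        if (t == NULL)              /* nothing more to scan */
            break;
        if (!t->qualifies)          /* qual failed: keep looping */
            continue;
        printf("project tuple %d\n", t->id);    /* stands in for ExecProject */
    }
    return 0;
}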
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/execTuples.c,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * + * TABLE CREATE/DELETE + * ExecCreateTupleTable - create a new tuple table + * ExecDestroyTupleTable - destroy a table + * + * SLOT RESERVERATION + * ExecAllocTableSlot - find an available slot in the table + * + * SLOT ACCESSORS + * ExecStoreTuple - store a tuple in the table + * ExecFetchTuple - fetch a tuple from the table + * ExecClearTuple - clear contents of a table slot + * ExecSlotPolicy - return slot's tuple pfree policy + * ExecSetSlotPolicy - diddle the slot policy + * ExecSlotDescriptor - type of tuple in a slot + * ExecSetSlotDescriptor - set a slot's tuple descriptor + * ExecSetSlotDescriptorIsNew - diddle the slot-desc-is-new flag + * ExecSetNewSlotDescriptor - set a desc and the is-new-flag all at once + * ExecSlotBuffer - return buffer of tuple in slot + * ExecSetSlotBuffer - set the buffer for tuple in slot + * ExecIncrSlotBufferRefcnt - bump the refcnt of the slot buffer + * + * SLOT STATUS PREDICATES + * TupIsNull - true when slot contains no tuple + * ExecSlotDescriptorIsNew - true if we're now storing a different + * type of tuple in a slot + * + * CONVENIENCE INITIALIZATION ROUTINES + * ExecInitResultTupleSlot \ convience routines to initialize + * ExecInitScanTupleSlot \ the various tuple slots for nodes + * ExecInitMarkedTupleSlot / which store copies of tuples. + * ExecInitOuterTupleSlot / + * ExecInitHashTupleSlot / + * + * old routines: + * ExecGetTupType - get type of tuple returned by this node + * ExecTypeFromTL - form a TupleDesc from a target list + * + * EXAMPLE OF HOW TABLE ROUTINES WORK + * Suppose we have a query such as retrieve (EMP.name) and we have + * a single SeqScan node in the query plan. + * + * At ExecStart() + * ---------------- + * - InitPlan() calls ExecCreateTupleTable() to create the tuple + * table which will hold tuples processed by the executor. + * + * - ExecInitSeqScan() calls ExecInitScanTupleSlot() and + * ExecInitResultTupleSlot() to reserve places in the tuple + * table for the tuples returned by the access methods and the + * tuples resulting from preforming target list projections. + * + * During ExecRun() + * ---------------- + * - SeqNext() calls ExecStoreTuple() to place the tuple returned + * by the access methods into the scan tuple slot. + * + * - ExecSeqScan() calls ExecStoreTuple() to take the result + * tuple from ExecTargetList() and place it into the result tuple + * slot. + * + * - ExecutePlan() calls ExecRetrieve() which gets the tuple out of + * the slot passed to it by calling ExecFetchTuple(). this tuple + * is then returned. + * + * At ExecEnd() + * ---------------- + * - EndPlan() calls ExecDestroyTupleTable() to clean up any remaining + * tuples left over from executing the query. + * + * The important thing to watch in the executor code is how pointers + * to the slots containing tuples are passed instead of the tuples + * themselves. This facilitates the communication of related information + * (such as whether or not a tuple should be pfreed, what buffer contains + * this tuple, the tuple's tuple descriptor, etc). Note that much of + * this information is also kept in the ExprContext of each node. + * Soon the executor will be redesigned and ExprContext's will contain + * only slot pointers. 
-cim 3/14/91 + * + * NOTES + * The tuple table stuff is relatively new, put here to alleviate + * the process growth problems in the executor. The other routines + * are old (from the original lisp system) and may someday become + * obsolete. -cim 6/23/90 + * + * In the implementation of nested-dot queries such as + * "retrieve (EMP.hobbies.all)", a single scan may return tuples + * of many types, so now we return pointers to tuple descriptors + * along with tuples returned via the tuple table. This means + * we now have a bunch of routines to diddle the slot descriptors + * too. -cim 1/18/90 + * + * The tuple table stuff depends on the executor/tuptable.h macros, + * and the TupleTableSlot node in execnodes.h. + * + */ + +#include "executor/executor.h" +#undef ExecStoreTuple + +#include "access/tupdesc.h" +#include "utils/palloc.h" +#include "utils/lsyscache.h" +#include "storage/bufmgr.h" +#include "parser/catalog_utils.h" + +/* ---------------------------------------------------------------- + * tuple table create/delete functions + * ---------------------------------------------------------------- + */ +/* -------------------------------- + * ExecCreateTupleTable + * + * This creates a new tuple table of the specified initial + * size. If the size is insufficient, ExecAllocTableSlot() + * will grow the table as necessary. + * + * This should be used by InitPlan() to allocate the table. + * The table's address will be stored in the EState structure. + * -------------------------------- + */ +TupleTable /* return: address of table */ +ExecCreateTupleTable(int initialSize) /* initial number of slots in table */ +{ + TupleTable newtable; /* newly allocated table */ + TupleTableSlot* array; /* newly allocated slot array */ + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(initialSize >= 1); + + /* ---------------- + * Now allocate our new table along with space for the pointers + * to the tuples. + */ + + newtable = (TupleTable) palloc(sizeof(TupleTableData)); + array = (TupleTableSlot*) palloc(initialSize * sizeof(TupleTableSlot)); + + /* ---------------- + * clean out the slots we just allocated + * ---------------- + */ + memset(array, 0, initialSize * sizeof(TupleTableSlot)); + + /* ---------------- + * initialize the new table and return it to the caller. + * ---------------- + */ + newtable->size = initialSize; + newtable->next = 0; + newtable->array = array; + + return newtable; +} + +/* -------------------------------- + * ExecDestroyTupleTable + * + * This pfrees the storage assigned to the tuple table and + * optionally pfrees the contents of the table also. + * It is expected that this routine be called by EndPlan(). + * -------------------------------- + */ +void +ExecDestroyTupleTable(TupleTable table, /* tuple table */ + bool shouldFree) /* true if we should free slot contents */ +{ + int next; /* next avaliable slot */ + TupleTableSlot *array; /* start of table array */ + int i; /* counter */ + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(table != NULL); + + /* ---------------- + * get information from the table + * ---------------- + */ + array = table->array; + next = table->next; + + /* ---------------- + * first free all the valid pointers in the tuple array + * if that's what the caller wants.. + * + * Note: we do nothing about the Buffer and Tuple Descriptor's + * we store in the slots. 
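/*
 * The example above walks the tuple table through its life: create it in
 * InitPlan, reserve slots in the node init routines, store tuples into the
 * slots while the plan runs, and destroy the table in EndPlan.  A standalone
 * mimic of that create/alloc/store/destroy cycle; DemoSlot, DemoTable and
 * malloc/free are invented stand-ins for the real slot structures and palloc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { void *val; int shouldFree; } DemoSlot;
typedef struct { int size, next; DemoSlot *array; } DemoTable;

static DemoTable *demo_create(int n)                /* cf. ExecCreateTupleTable */
{
    DemoTable *t = malloc(sizeof(DemoTable));

    t->size = n;
    t->next = 0;
    t->array = calloc(n, sizeof(DemoSlot));
    return t;
}

static DemoSlot *demo_alloc_slot(DemoTable *t)      /* cf. ExecAllocTableSlot */
{
    return (t->next < t->size) ? &t->array[t->next++] : NULL;
}

static void demo_store(DemoSlot *s, void *tup, int shouldFree)  /* cf. ExecStoreTuple */
{
    if (s->shouldFree && s->val)
        free(s->val);                               /* clear old contents first */
    s->val = tup;
    s->shouldFree = shouldFree;
}

static void demo_destroy(DemoTable *t)              /* cf. ExecDestroyTupleTable */
{
    for (int i = 0; i < t->next; i++)
        if (t->array[i].shouldFree && t->array[i].val)
            free(t->array[i].val);
    free(t->array);
    free(t);
}

int main(void)
{
    DemoTable *table = demo_create(4);
    DemoSlot  *slot  = demo_alloc_slot(table);

    demo_store(slot, strdup("a tuple"), 1);
    printf("%s\n", (char *) slot->val);
    demo_destroy(table);
    return 0;
}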
This may have to change (ex: we should + * probably worry about pfreeing tuple descs too) -cim 3/14/91 + * ---------------- + */ + if (shouldFree) + for (i = 0; i < next; i++) { + TupleTableSlot slot; + HeapTuple tuple; + + slot = array[i]; + tuple = slot.val; + + if (tuple != NULL) { + slot.val = (HeapTuple)NULL; + if (slot.ttc_shouldFree) { + /* ---------------- + * since a tuple may contain a pointer to + * lock information allocated along with the + * tuple, we have to be careful to free any + * rule locks also -cim 1/17/90 + * ---------------- + */ + pfree(tuple); + } + } + } + + /* ---------------- + * finally free the tuple array and the table itself. + * ---------------- + */ + pfree(array); + pfree(table); + +} + + +/* ---------------------------------------------------------------- + * tuple table slot reservation functions + * ---------------------------------------------------------------- + */ +/* -------------------------------- + * ExecAllocTableSlot + * + * This routine is used to reserve slots in the table for + * use by the various plan nodes. It is expected to be + * called by the node init routines (ex: ExecInitNestLoop). + * once per slot needed by the node. Not all nodes need + * slots (some just pass tuples around). + * -------------------------------- + */ +TupleTableSlot* /* return: the slot allocated in the tuple table */ +ExecAllocTableSlot(TupleTable table) +{ + int slotnum; /* new slot number */ + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(table != NULL); + + /* ---------------- + * if our table is full we have to allocate a larger + * size table. Since ExecAllocTableSlot() is only called + * before the table is ever used to store tuples, we don't + * have to worry about the contents of the old table. + * If this changes, then we will have to preserve the contents. + * -cim 6/23/90 + * + * Unfortunately, we *cannot* do this. All of the nodes in + * the plan that have already initialized their slots will have + * pointers into _freed_ memory. This leads to bad ends. We + * now count the number of slots we will need and create all the + * slots we will need ahead of time. The if below should never + * happen now. Give a WARN if it does. -mer 4 Aug 1992 + * ---------------- + */ + if (table->next >= table->size) { + /* + * int newsize = NewTableSize(table->size); + * + * pfree(table->array); + * table->array = (Pointer) palloc(newsize * TableSlotSize); + * bzero(table->array, newsize * TableSlotSize); + * table->size = newsize; + */ + elog(NOTICE, "Plan requires more slots than are available"); + elog(WARN, "send mail to your local executor guru to fix this"); + } + + /* ---------------- + * at this point, space in the table is guaranteed so we + * reserve the next slot, initialize and return it. + * ---------------- + */ + slotnum = table->next; + table->next++; + + table->array[slotnum].type = T_TupleTableSlot; + + return &(table->array[slotnum]); +} + +/* ---------------------------------------------------------------- + * tuple table slot accessor functions + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * ExecStoreTuple + * + * This function is used to store a tuple into a specified + * slot in the tuple table. Note: the only slots which should + * be called with shouldFree == false are those slots used to + * store tuples not allocated with pfree(). 
Currently the + * seqscan and indexscan nodes use this for the tuples returned + * by amgetattr, which are actually pointers onto disk pages. + * -------------------------------- + */ +TupleTableSlot* /* return: slot passed */ +ExecStoreTuple(HeapTuple tuple, /* tuple to store */ + TupleTableSlot* slot, /* slot in which to store tuple */ + Buffer buffer, /* buffer associated with tuple */ + bool shouldFree) /* true if we call pfree() when we gc. */ +{ + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(slot != NULL); + + /* clear out the slot first */ + ExecClearTuple(slot); + + /* ---------------- + * store the new tuple into the specified slot and + * return the slot into which we stored the tuple. + * ---------------- + */ + slot->val = tuple; + slot->ttc_buffer = buffer; + slot->ttc_shouldFree = shouldFree; + + return slot; +} + +/* -------------------------------- + * ExecClearTuple + * + * This function is used to clear out a slot in the tuple table. + * -------------------------------- + */ +TupleTableSlot* /* return: slot passed */ +ExecClearTuple(TupleTableSlot* slot) /* slot in which to store tuple */ +{ + HeapTuple oldtuple; /* prior contents of slot */ + + /* ---------------- + * sanity checks + * ---------------- + */ + Assert(slot != NULL); + + /* ---------------- + * get information from the tuple table + * ---------------- + */ + oldtuple = slot->val; + + /* ---------------- + * free the old contents of the specified slot if necessary. + * ---------------- + */ + if (slot->ttc_shouldFree && oldtuple != NULL) { + /* ---------------- + * since a tuple may contain a pointer to + * lock information allocated along with the + * tuple, we have to be careful to free any + * rule locks also -cim 1/17/90 + * ---------------- + */ + pfree(oldtuple); + } + + /* ---------------- + * store NULL into the specified slot and return the slot. + * - also set buffer to InvalidBuffer -cim 3/14/91 + * ---------------- + */ + slot->val = (HeapTuple)NULL; + + if (BufferIsValid(slot->ttc_buffer)) + ReleaseBuffer(slot->ttc_buffer); + + slot->ttc_buffer = InvalidBuffer; + slot->ttc_shouldFree = true; + + return slot; +} + + +/* -------------------------------- + * ExecSlotPolicy + * + * This function is used to get the call/don't call pfree + * setting of a slot. Most executor routines don't need this. + * It's only when you do tricky things like marking tuples for + * merge joins that you need to diddle the slot policy. + * -------------------------------- + */ +bool /* return: slot policy */ +ExecSlotPolicy(TupleTableSlot* slot) /* slot to inspect */ +{ + return slot->ttc_shouldFree; +} + +/* -------------------------------- + * ExecSetSlotPolicy + * + * This function is used to change the call/don't call pfree + * setting of a slot. Most executor routines don't need this. + * It's only when you do tricky things like marking tuples for + * merge joins that you need to diddle the slot policy. + * -------------------------------- + */ +bool /* return: old slot policy */ +ExecSetSlotPolicy(TupleTableSlot* slot, /* slot to change */ + bool shouldFree) /* true if we call pfree() when we gc. */ +{ + bool old_shouldFree = slot->ttc_shouldFree; + slot->ttc_shouldFree = shouldFree; + + return old_shouldFree; +} + +/* -------------------------------- + * ExecSlotDescriptor + * + * This function is used to get the tuple descriptor associated + * with the slot's tuple. 
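/*
 * The shouldFree flag is an ownership marker: ExecClearTuple only pfrees the
 * stored tuple when the slot claims ownership, and code such as the mergejoin
 * marking logic flips the flag (ExecSetSlotPolicy) when a slot merely borrows
 * a tuple it must not free.  A standalone sketch of that rule; DemoSlot,
 * demo_clear and malloc/free are invented stand-ins for the slot machinery
 * and palloc/pfree.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { void *val; int shouldFree; } DemoSlot;

static void demo_clear(DemoSlot *s)         /* cf. ExecClearTuple */
{
    if (s->shouldFree && s->val)
        free(s->val);                       /* only the owner releases the tuple */
    s->val = NULL;
    s->shouldFree = 1;
}

int main(void)
{
    DemoSlot a = { strdup("shared tuple"), 1 };
    DemoSlot b = { a.val, 1 };

    /* two slots point at the same tuple: only one may own (and free) it */
    b.shouldFree = 0;                       /* cf. ExecSetSlotPolicy(slot, false) */

    demo_clear(&b);                         /* borrower: releases nothing */
    demo_clear(&a);                         /* owner: frees the tuple exactly once */
    printf("cleared without a double free\n");
    return 0;
}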
+ * + * Now a macro in tuptable.h -mer 5 March 1992 + * -------------------------------- + */ + +/* -------------------------------- + * ExecSetSlotDescriptor + * + * This function is used to set the tuple descriptor associated + * with the slot's tuple. + * -------------------------------- + */ +TupleDesc /* return: old slot tuple descriptor */ +ExecSetSlotDescriptor(TupleTableSlot *slot, /* slot to change */ + TupleDesc tupdesc) /* tuple descriptor */ +{ + TupleDesc old_tupdesc = slot->ttc_tupleDescriptor; + + slot->ttc_tupleDescriptor = tupdesc; + return old_tupdesc; +} + +/* -------------------------------- + * ExecSetSlotDescriptorIsNew + * + * This function is used to change the setting of the "isNew" flag + * -------------------------------- + */ +void +ExecSetSlotDescriptorIsNew(TupleTableSlot *slot,/* slot to change */ + bool isNew) /* "isNew" setting */ +{ + slot->ttc_descIsNew = isNew; +} + +/* -------------------------------- + * ExecSetNewSlotDescriptor + * + * This function is used to set the tuple descriptor associated + * with the slot's tuple, and set the "isNew" flag at the same time. + * -------------------------------- + */ +TupleDesc /* return: old slot tuple descriptor */ +ExecSetNewSlotDescriptor(TupleTableSlot *slot, /* slot to change */ + TupleDesc tupdesc) /* tuple descriptor */ +{ + TupleDesc old_tupdesc = slot->ttc_tupleDescriptor; + slot->ttc_tupleDescriptor = tupdesc; + slot->ttc_descIsNew = true; + + return old_tupdesc; +} + +/* -------------------------------- + * ExecSlotBuffer + * + * This function is used to get the tuple descriptor associated + * with the slot's tuple. Be very careful with this as it does not + * balance the reference counts. If the buffer returned is stored + * someplace else, then also use ExecIncrSlotBufferRefcnt(). + * + * Now a macro in tuptable.h + * -------------------------------- + */ + +/* -------------------------------- + * ExecSetSlotBuffer + * + * This function is used to set the tuple descriptor associated + * with the slot's tuple. Be very careful with this as it does not + * balance the reference counts. If we're using this then we should + * also use ExecIncrSlotBufferRefcnt(). + * -------------------------------- + */ +Buffer /* return: old slot buffer */ +ExecSetSlotBuffer(TupleTableSlot *slot, /* slot to change */ + Buffer b) /* tuple descriptor */ +{ + Buffer oldb = slot->ttc_buffer; + slot->ttc_buffer = b; + + return oldb; +} + +/* -------------------------------- + * ExecIncrSlotBufferRefcnt + * + * When we pass around buffers in the tuple table, we have to + * be careful to increment reference counts appropriately. + * This is used mainly in the mergejoin code. + * -------------------------------- + */ +void +ExecIncrSlotBufferRefcnt(TupleTableSlot *slot) /* slot to bump refcnt */ +{ +/* Buffer b = SlotBuffer((TupleTableSlot*) slot); */ + Buffer b = slot->ttc_buffer; + if (BufferIsValid(b)) + IncrBufferRefCount(b); +} + +/* ---------------------------------------------------------------- + * tuple table slot status predicates + * ---------------------------------------------------------------- + */ + +/* ---------------- + * TupIsNull + * + * This is used mainly to detect when there are no more + * tuples to process. 
+ * ---------------- + */ +bool /* return: true if tuple in slot is NULL */ +TupIsNull(TupleTableSlot* slot) /* slot to check */ +{ + HeapTuple tuple; /* contents of slot (returned) */ + + /* ---------------- + * if the slot itself is null then we return true + * ---------------- + */ + if (slot == NULL) + return true; + + /* ---------------- + * get information from the slot and return true or + * false depending on the contents of the slot. + * ---------------- + */ + tuple = slot->val; + + return + (tuple == NULL ? true : false); +} + +/* -------------------------------- + * ExecSlotDescriptorIsNew + * + * This function is used to check if the tuple descriptor + * associated with this slot has just changed. ie: we are + * now storing a new type of tuple in this slot + * -------------------------------- + */ +bool /* return: descriptor "is new" */ +ExecSlotDescriptorIsNew(TupleTableSlot *slot) /* slot to inspect */ +{ +/* bool isNew = SlotTupleDescriptorIsNew((TupleTableSlot*) slot); + return isNew; */ + return slot->ttc_descIsNew; +} + +/* ---------------------------------------------------------------- + * convenience initialization routines + * ---------------------------------------------------------------- + */ +/* -------------------------------- + * ExecInit{Result,Scan,Raw,Marked,Outer,Hash}TupleSlot + * + * These are convenience routines to initialize the specfied slot + * in nodes inheriting the appropriate state. + * -------------------------------- + */ +#define INIT_SLOT_DEFS \ + TupleTable tupleTable; \ + TupleTableSlot* slot + +#define INIT_SLOT_ALLOC \ + tupleTable = (TupleTable) estate->es_tupleTable; \ + slot = ExecAllocTableSlot(tupleTable); \ + slot->val = (HeapTuple)NULL; \ + slot->ttc_shouldFree = true; \ + slot->ttc_tupleDescriptor = (TupleDesc)NULL; \ + slot->ttc_whichplan = -1;\ + slot->ttc_descIsNew = true; + +/* ---------------- + * ExecInitResultTupleSlot + * ---------------- + */ +void +ExecInitResultTupleSlot(EState *estate, CommonState *commonstate) +{ + INIT_SLOT_DEFS; + INIT_SLOT_ALLOC; + commonstate->cs_ResultTupleSlot = (TupleTableSlot *) slot; +} + +/* ---------------- + * ExecInitScanTupleSlot + * ---------------- + */ +void +ExecInitScanTupleSlot(EState *estate, CommonScanState *commonscanstate) +{ + INIT_SLOT_DEFS; + INIT_SLOT_ALLOC; + commonscanstate->css_ScanTupleSlot = (TupleTableSlot *)slot; +} + +/* ---------------- + * ExecInitMarkedTupleSlot + * ---------------- + */ +void +ExecInitMarkedTupleSlot(EState *estate, MergeJoinState *mergestate) +{ + INIT_SLOT_DEFS; + INIT_SLOT_ALLOC; + mergestate->mj_MarkedTupleSlot = (TupleTableSlot *) slot; +} + +/* ---------------- + * ExecInitOuterTupleSlot + * ---------------- + */ +void +ExecInitOuterTupleSlot(EState *estate, HashJoinState *hashstate) +{ + INIT_SLOT_DEFS; + INIT_SLOT_ALLOC; + hashstate->hj_OuterTupleSlot = slot; +} + +/* ---------------- + * ExecInitHashTupleSlot + * ---------------- + */ +void +ExecInitHashTupleSlot(EState *estate, HashJoinState *hashstate) +{ + INIT_SLOT_DEFS; + INIT_SLOT_ALLOC; + hashstate->hj_HashTupleSlot = slot; +} + +TupleTableSlot * +NodeGetResultTupleSlot(Plan *node) +{ + TupleTableSlot *slot; + + switch(nodeTag(node)) { + + case T_Result: + { + ResultState *resstate = ((Result *)node)->resstate; + slot = resstate->cstate.cs_ResultTupleSlot; + } + break; + + case T_SeqScan: + { + CommonScanState *scanstate = ((SeqScan *)node)->scanstate; + slot = scanstate->cstate.cs_ResultTupleSlot; + } + break; + + case T_NestLoop: + { + NestLoopState *nlstate = ((NestLoop 
*)node)->nlstate; + slot = nlstate->jstate.cs_ResultTupleSlot; + } + break; + + case T_Append: + { + Append *n = (Append *)node; + AppendState *unionstate; + List *unionplans; + int whichplan; + Plan *subplan; + + unionstate = n->unionstate; + unionplans = n->unionplans; + whichplan = unionstate->as_whichplan; + + subplan = (Plan*) nth(whichplan, unionplans); + slot = NodeGetResultTupleSlot(subplan); + break; + } + + case T_IndexScan: + { + CommonScanState *scanstate = ((IndexScan *)node)->scan.scanstate; + slot = scanstate->cstate.cs_ResultTupleSlot; + } + break; + + case T_Material: + { + MaterialState *matstate = ((Material *)node)->matstate; + slot = matstate->csstate.css_ScanTupleSlot; + } + break; + + case T_Sort: + { + SortState *sortstate = ((Sort *)node)->sortstate; + slot = sortstate->csstate.css_ScanTupleSlot; + } + break; + + case T_Agg: + { + AggState *aggstate = ((Agg *)node)->aggstate; + slot = aggstate->csstate.cstate.cs_ResultTupleSlot; + } + break; + + case T_Group: + { + GroupState *grpstate = ((Group *)node)->grpstate; + slot = grpstate->csstate.cstate.cs_ResultTupleSlot; + } + break; + + case T_Hash: + { + HashState *hashstate = ((Hash *)node)->hashstate; + slot = hashstate->cstate.cs_ResultTupleSlot; + } + break; + + case T_Unique: + { + UniqueState *uniquestate = ((Unique *)node)->uniquestate; + slot = uniquestate->cs_ResultTupleSlot; + } + break; + + case T_MergeJoin: + { + MergeJoinState *mergestate = ((MergeJoin *)node)->mergestate; + slot = mergestate->jstate.cs_ResultTupleSlot; + } + break; + + case T_HashJoin: + { + HashJoinState *hashjoinstate = ((HashJoin *)node)->hashjoinstate; + slot = hashjoinstate->jstate.cs_ResultTupleSlot; + } + break; + + case T_Tee: + { + TeeState *teestate = ((Tee*)node)->teestate; + slot = teestate->cstate.cs_ResultTupleSlot; + } + break; + + default: + /* ---------------- + * should never get here + * ---------------- + */ + elog(WARN, "NodeGetResultTupleSlot: node not yet supported: %d ", + nodeTag(node)); + + return NULL; + } + return slot; +} + +/* ---------------------------------------------------------------- + * ExecGetTupType + * + * this gives you the tuple descriptor for tuples returned + * by this node. I really wish I could ditch this routine, + * but since not all nodes store their type info in the same + * place, we have to do something special for each node type. + * + * Soon, the system will have to adapt to deal with changing + * tuple descriptors as we deal with dynamic tuple types + * being returned from procedure nodes. Perhaps then this + * routine can be retired. -cim 6/3/91 + * + * old comments + * This routine just gets the type information out of the + * node's state. If you already have a node's state, you + * can get this information directly, but this is a useful + * routine if you want to get the type information from + * the node's inner or outer subplan easily without having + * to inspect the subplan.. 
-cim 10/16/89 + * + * Assume that for existential nodes, we get the targetlist out + * of the right node's targetlist + * ---------------------------------------------------------------- + */ + +TupleDesc +ExecGetTupType(Plan *node) +{ + TupleTableSlot *slot; + TupleDesc tupType; + + if (node == NULL) + return NULL; + + slot = NodeGetResultTupleSlot(node); + tupType = slot->ttc_tupleDescriptor; + return tupType; +} + +/* +TupleDesc +ExecCopyTupType(TupleDesc td, int natts) +{ + TupleDesc newTd; + int i; + + newTd = CreateTemplateTupleDesc(natts); + i = 0; + while (i < natts) + { + newTd[i] = + (AttributeTupleForm)palloc(sizeof(FormData_pg_attribute)); + memmove(newTd[i], td[i], sizeof(FormData_pg_attribute)); + i++; + } + return newTd; +} +*/ + +/* ---------------------------------------------------------------- + * ExecTypeFromTL + * + * Currently there are about 4 different places where we create + * TupleDescriptors. They should all be merged, or perhaps + * be rewritten to call BuildDesc(). + * + * old comments + * Forms attribute type info from the target list in the node. + * It assumes all domains are individually specified in the target list. + * It fails if the target list contains something like Emp.all + * which represents all the attributes from EMP relation. + * + * Conditions: + * The inner and outer subtrees should be initialized because it + * might be necessary to know the type infos of the subtrees. + * ---------------------------------------------------------------- + */ +TupleDesc +ExecTypeFromTL(List *targetList) +{ + List *tlcdr; + TupleDesc typeInfo; + Resdom *resdom; + Oid restype; + int len; + + /* ---------------- + * examine targetlist - if empty then return NULL + * ---------------- + */ + len = ExecTargetListLength(targetList); + + if (len == 0) + return NULL; + + /* ---------------- + * allocate a new typeInfo + * ---------------- + */ + typeInfo = CreateTemplateTupleDesc(len); + + /* ---------------- + * notes: get resdom from (resdom expr) + * get_typbyval comes from src/lib/l-lisp/lsyscache.c + * ---------------- + */ + tlcdr = targetList; + while (tlcdr != NIL) { + TargetEntry *tle = lfirst(tlcdr); + if (tle->resdom != NULL) { + resdom = tle->resdom; + restype = resdom->restype; + + TupleDescInitEntry(typeInfo, + resdom->resno, + resdom->resname, + get_id_typname(restype), + 0, + false); + +/* + ExecSetTypeInfo(resdom->resno - 1, + typeInfo, + (Oid) restype, + resdom->resno, + resdom->reslen, + resdom->resname->data, + get_typbyval(restype), + get_typalign(restype)); +*/ + } + else { + Resdom *fjRes; + List *fjTlistP; + List *fjList = lfirst(tlcdr); +#ifdef SETS_FIXED + TargetEntry *tle; + Fjoin *fjNode = ((TargetEntry *)lfirst(fjList))->fjoin; + + tle = fjNode->fj_innerNode; /* ??? 
*/ +#endif + fjRes = tle->resdom; + restype = fjRes->restype; + + TupleDescInitEntry(typeInfo, + fjRes->resno, + fjRes->resname, + get_id_typname(restype), + 0, + false); +/* + ExecSetTypeInfo(fjRes->resno - 1, + typeInfo, + (Oid) restype, + fjRes->resno, + fjRes->reslen, + (char *) fjRes->resname, + get_typbyval(restype), + get_typalign(restype)); +*/ + + foreach(fjTlistP, lnext(fjList)) { + TargetEntry *fjTle = lfirst(fjTlistP); + + fjRes = fjTle->resdom; + + TupleDescInitEntry(typeInfo, + fjRes->resno, + fjRes->resname, + get_id_typname(restype), + 0, + false); + +/* + ExecSetTypeInfo(fjRes->resno - 1, + typeInfo, + (Oid) fjRes->restype, + fjRes->resno, + fjRes->reslen, + (char *) fjRes->resname, + get_typbyval(fjRes->restype), + get_typalign(fjRes->restype)); +*/ + } + } + + tlcdr = lnext(tlcdr); + } + + return typeInfo; +} + + diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c new file mode 100644 index 00000000000..8d1108aca25 --- /dev/null +++ b/src/backend/executor/execUtils.c @@ -0,0 +1,1092 @@ +/*------------------------------------------------------------------------- + * + * execUtils.c-- + * miscellanious executor utility routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/execUtils.c,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecAssignNodeBaseInfo \ + * ExecAssignDebugHooks > preforms misc work done in all the + * ExecAssignExprContext / init node routines. + * + * ExecGetTypeInfo | old execCStructs interface + * ExecMakeTypeInfo | code from the version 1 + * ExecOrderTypeInfo | lisp system. These should + * ExecSetTypeInfo | go away or be updated soon. + * ExecFreeTypeInfo | -cim 11/1/89 + * ExecTupleAttributes / + * + + * QueryDescGetTypeInfo - moved here from main.c + * am not sure what uses it -cim 10/12/89 + * + * ExecGetIndexKeyInfo \ + * ExecOpenIndices | referenced by InitPlan, EndPlan, + * ExecCloseIndices | ExecAppend, ExecReplace + * ExecFormIndexTuple | + * ExecInsertIndexTuple / + * + * NOTES + * This file has traditionally been the place to stick misc. + * executor support stuff that doesn't really go anyplace else. + * + */ + +#include "executor/executor.h" +#include "access/itup.h" +#include "optimizer/clauses.h" +#include "utils/palloc.h" +#include "commands/command.h" +#include "catalog/index.h" + +/* ---------------------------------------------------------------- + * global counters for number of tuples processed, retrieved, + * appended, replaced, deleted. + * ---------------------------------------------------------------- + */ +int NTupleProcessed; +int NTupleRetrieved; +int NTupleReplaced; +int NTupleAppended; +int NTupleDeleted; +int NIndexTupleInserted; +extern int NIndexTupleProcessed; /* have to be defined in the access + method level so that the cinterface.a + will link ok. 
*/ + +/* ---------------------------------------------------------------- + * statistic functions + * ---------------------------------------------------------------- + */ + +/* ---------------------------------------------------------------- + * ResetTupleCount + * ---------------------------------------------------------------- + */ +void +ResetTupleCount() +{ + NTupleProcessed = 0; + NTupleRetrieved = 0; + NTupleAppended = 0; + NTupleDeleted = 0; + NTupleReplaced = 0; + NIndexTupleProcessed = 0; +} + +/* ---------------------------------------------------------------- + * PrintTupleCount + * ---------------------------------------------------------------- + */ +void +DisplayTupleCount(FILE *statfp) +{ + if (NTupleProcessed > 0) + fprintf(statfp, "!\t%d tuple%s processed, ", NTupleProcessed, + (NTupleProcessed == 1) ? "" : "s"); + else { + fprintf(statfp, "!\tno tuples processed.\n"); + return; + } + if (NIndexTupleProcessed > 0) + fprintf(statfp, "%d indextuple%s processed, ", NIndexTupleProcessed, + (NIndexTupleProcessed == 1) ? "" : "s"); + if (NIndexTupleInserted > 0) + fprintf(statfp, "%d indextuple%s inserted, ", NIndexTupleInserted, + (NIndexTupleInserted == 1) ? "" : "s"); + if (NTupleRetrieved > 0) + fprintf(statfp, "%d tuple%s retrieved. ", NTupleRetrieved, + (NTupleRetrieved == 1) ? "" : "s"); + if (NTupleAppended > 0) + fprintf(statfp, "%d tuple%s appended. ", NTupleAppended, + (NTupleAppended == 1) ? "" : "s"); + if (NTupleDeleted > 0) + fprintf(statfp, "%d tuple%s deleted. ", NTupleDeleted, + (NTupleDeleted == 1) ? "" : "s"); + if (NTupleReplaced > 0) + fprintf(statfp, "%d tuple%s replaced. ", NTupleReplaced, + (NTupleReplaced == 1) ? "" : "s"); + fprintf(statfp, "\n"); +} + +/* ---------------------------------------------------------------- + * miscellanious init node support functions + * + * ExecAssignNodeBaseInfo - assigns the baseid field of the node + * ExecAssignDebugHooks - assigns the node's debugging hooks + * ExecAssignExprContext - assigns the node's expression context + * ---------------------------------------------------------------- + */ + +/* ---------------- + * ExecAssignNodeBaseInfo + * + * as it says, this assigns the baseid field of the node and + * increments the counter in the estate. In addition, it initializes + * the base_parent field of the basenode. + * ---------------- + */ +void +ExecAssignNodeBaseInfo(EState *estate, CommonState *cstate, Plan *parent) +{ + int baseId; + + baseId = estate->es_BaseId; + cstate->cs_base_id = baseId; + estate->es_BaseId = baseId + 1; +} + +/* ---------------- + * ExecAssignExprContext + * + * This initializes the ExprContext field. It is only necessary + * to do this for nodes which use ExecQual or ExecTargetList + * because those routines depend on econtext. Other nodes which + * dont have to evaluate expressions don't need to do this. 
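A sketch of how the init-node helpers in this file are typically combined inside a node's ExecInit routine; the skeleton below is hypothetical (no such example_ function exists in the tree) and `nodestate` stands in for a concrete CommonState-derived state record.

/* Hypothetical ExecInitFoo-style skeleton showing the usual assignment order. */
static void
example_init_common_state(Plan *node, EState *estate, Plan *parent,
                          CommonState *nodestate)
{
    /* base id and expression-evaluation context */
    ExecAssignNodeBaseInfo(estate, nodestate, parent);
    ExecAssignExprContext(estate, nodestate);

    /* result slot, its tuple type, and the projection built from the
     * node's target list */
    ExecInitResultTupleSlot(estate, nodestate);
    ExecAssignResultTypeFromTL(node, nodestate);
    ExecAssignProjectionInfo(node, nodestate);
}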
+ * ---------------- + */ +void +ExecAssignExprContext(EState *estate, CommonState *commonstate) +{ + ExprContext *econtext; + ParamListInfo paraminfo; + List *rangeTable; + + paraminfo = estate->es_param_list_info; + rangeTable = estate->es_range_table; + + econtext = makeNode(ExprContext); + econtext->ecxt_scantuple = NULL; /* scan tuple slot */ + econtext->ecxt_innertuple = NULL; /* inner tuple slot */ + econtext->ecxt_outertuple = NULL; /* outer tuple slot */ + econtext->ecxt_relation = NULL; /* relation */ + econtext->ecxt_relid = 0; /* relid */ + econtext->ecxt_param_list_info = paraminfo; /* param list info */ + econtext->ecxt_range_table = rangeTable; /* range table */ + + commonstate->cs_ExprContext = econtext; +} + +/* ---------------------------------------------------------------- + * Result slot tuple type and ProjectionInfo support + * ---------------------------------------------------------------- + */ + +/* ---------------- + * ExecAssignResultType + * ---------------- + */ +void +ExecAssignResultType(CommonState *commonstate, + TupleDesc tupDesc) +{ + TupleTableSlot *slot; + + slot = commonstate->cs_ResultTupleSlot; + slot->ttc_tupleDescriptor = tupDesc; +} + +/* ---------------- + * ExecAssignResultTypeFromOuterPlan + * ---------------- + */ +void +ExecAssignResultTypeFromOuterPlan(Plan *node, CommonState *commonstate) +{ + Plan *outerPlan; + TupleDesc tupDesc; + + outerPlan = outerPlan(node); + tupDesc = ExecGetTupType(outerPlan); + + ExecAssignResultType(commonstate, tupDesc); +} + +/* ---------------- + * ExecAssignResultTypeFromTL + * ---------------- + */ +void +ExecAssignResultTypeFromTL(Plan *node, CommonState *commonstate) +{ + List *targetList; + int i; + int len; + List *tl; + TargetEntry *tle; + List *fjtl; + TupleDesc origTupDesc; + + targetList = node->targetlist; + origTupDesc = ExecTypeFromTL(targetList); + len = ExecTargetListLength(targetList); + + fjtl = NIL; + tl = targetList; + i = 0; + while (tl != NIL || fjtl != NIL) { + if (fjtl != NIL) { + tle = lfirst(fjtl); + fjtl = lnext(fjtl); + } + else { + tle = lfirst(tl); + tl = lnext(tl); + } +#ifdef SETS_FIXED + if (!tl_is_resdom(tle)) { + Fjoin *fj = (Fjoin *)lfirst(tle); + /* it is a FJoin */ + fjtl = lnext(tle); + tle = fj->fj_innerNode; + } +#endif + i++; + } + if (len > 0) { + ExecAssignResultType(commonstate, + origTupDesc); + } + else + ExecAssignResultType(commonstate, + (TupleDesc)NULL); +} + +/* ---------------- + * ExecGetResultType + * ---------------- + */ +TupleDesc +ExecGetResultType(CommonState *commonstate) +{ + TupleTableSlot *slot = commonstate->cs_ResultTupleSlot; + + return slot->ttc_tupleDescriptor; +} + +/* ---------------- + * ExecFreeResultType + * ---------------- + */ +void +ExecFreeResultType(CommonState *commonstate) +{ + TupleTableSlot *slot; + TupleDesc tupType; + + slot = commonstate->cs_ResultTupleSlot; + tupType = slot->ttc_tupleDescriptor; + +/* ExecFreeTypeInfo(tupType); */ + pfree(tupType); +} + + +/* ---------------- + * ExecAssignProjectionInfo + forms the projection information from the node's targetlist + * ---------------- + */ +void +ExecAssignProjectionInfo(Plan *node, CommonState *commonstate) +{ + ProjectionInfo *projInfo; + List *targetList; + int len; + + targetList = node->targetlist; + len = ExecTargetListLength(targetList); + + projInfo = makeNode(ProjectionInfo); + projInfo->pi_targetlist = targetList; + projInfo->pi_len = len; + projInfo->pi_tupValue = + (len <= 0) ? 
NULL : (Datum *) palloc(sizeof(Datum) * len); + projInfo->pi_exprContext = commonstate->cs_ExprContext; + projInfo->pi_slot = commonstate->cs_ResultTupleSlot; + + commonstate->cs_ProjInfo = projInfo; +} + + +/* ---------------- + * ExecFreeProjectionInfo + * ---------------- + */ +void +ExecFreeProjectionInfo(CommonState *commonstate) +{ + ProjectionInfo *projInfo; + + /* ---------------- + * get projection info. if NULL then this node has + * none so we just return. + * ---------------- + */ + projInfo = commonstate->cs_ProjInfo; + if (projInfo == NULL) + return; + + /* ---------------- + * clean up memory used. + * ---------------- + */ + if (projInfo->pi_tupValue != NULL) + pfree(projInfo->pi_tupValue); + + pfree(projInfo); + commonstate->cs_ProjInfo = NULL; +} + +/* ---------------------------------------------------------------- + * the following scan type support functions are for + * those nodes which are stubborn and return tuples in + * their Scan tuple slot instead of their Result tuple + * slot.. luck fur us, these nodes do not do projections + * so we don't have to worry about getting the ProjectionInfo + * right for them... -cim 6/3/91 + * ---------------------------------------------------------------- + */ + +/* ---------------- + * ExecGetScanType + * ---------------- + */ +TupleDesc +ExecGetScanType(CommonScanState *csstate) +{ + TupleTableSlot *slot = csstate->css_ScanTupleSlot; + return slot->ttc_tupleDescriptor; +} + +/* ---------------- + * ExecFreeScanType + * ---------------- + */ +void +ExecFreeScanType(CommonScanState *csstate) +{ + TupleTableSlot *slot; + TupleDesc tupType; + + slot = csstate->css_ScanTupleSlot; + tupType = slot->ttc_tupleDescriptor; + +/* ExecFreeTypeInfo(tupType); */ + pfree(tupType); +} + +/* ---------------- + * ExecAssignScanType + * ---------------- + */ +void +ExecAssignScanType(CommonScanState *csstate, + TupleDesc tupDesc) +{ + TupleTableSlot *slot; + + slot = (TupleTableSlot *) csstate->css_ScanTupleSlot; + slot->ttc_tupleDescriptor = tupDesc; +} + +/* ---------------- + * ExecAssignScanTypeFromOuterPlan + * ---------------- + */ +void +ExecAssignScanTypeFromOuterPlan(Plan *node, CommonScanState *csstate) +{ + Plan *outerPlan; + TupleDesc tupDesc; + + outerPlan = outerPlan(node); + tupDesc = ExecGetTupType(outerPlan); + + ExecAssignScanType(csstate, tupDesc); +} + + +/* ---------------------------------------------------------------- + * ExecTypeFromTL support routines. + * + * these routines are used mainly from ExecTypeFromTL. + * -cim 6/12/90 + * + * old comments + * Routines dealing with the structure 'attribute' which conatains + * the type information about attributes in a tuple: + * + * ExecMakeTypeInfo(noType) -- + * returns pointer to array of 'noType' structure 'attribute'. + * ExecSetTypeInfo(index, typeInfo, attNum, attLen) -- + * sets the element indexed by 'index' in typeInfo with + * the values: attNum, attLen. + * ExecFreeTypeInfo(typeInfo) -- + * frees the structure 'typeInfo'. + * ---------------------------------------------------------------- + */ + +/* ---------------- + * ExecSetTypeInfo + * + * This initializes fields of a single attribute in a + * tuple descriptor from the specified parameters. + * + * XXX this duplicates much of the functionality of TupleDescInitEntry. + * the routines should be moved to the same place and be rewritten + * to share common code. 
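To make the overlap with TupleDescInitEntry noted above concrete, here is a sketch of building a small descriptor the way ExecTypeFromTL does. The attribute and type names ("id", "int4", ...) are invented, and passing them as plain strings is an assumption read off the calls in ExecTypeFromTL.

/* Hypothetical two-attribute descriptor built via TupleDescInitEntry. */
static TupleDesc
example_build_tupdesc(void)
{
    TupleDesc desc = CreateTemplateTupleDesc(2);

    /* attribute numbers are 1-based; attdim 0 and attisset false, as above */
    TupleDescInitEntry(desc, 1, "id", "int4", 0, false);
    TupleDescInitEntry(desc, 2, "name", "text", 0, false);

    return desc;
}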
+ * ---------------- + */ +#if 0 +void +ExecSetTypeInfo(int index, + TupleDesc typeInfo, + Oid typeID, + int attNum, + int attLen, + char *attName, + bool attbyVal, + char attalign) +{ + AttributeTupleForm att; + + /* ---------------- + * get attribute pointer and preform a sanity check.. + * ---------------- + */ + att = typeInfo[index]; + if (att == NULL) + elog(WARN, "ExecSetTypeInfo: trying to assign through NULL"); + + /* ---------------- + * assign values to the tuple descriptor, being careful not + * to copy a null attName.. + * + * XXX it is unknown exactly what information is needed to + * initialize the attribute struct correctly so for now + * we use 0. this should be fixed -- otherwise we run the + * risk of using garbage data. -cim 5/5/91 + * ---------------- + */ + att->attrelid = 0; /* dummy value */ + + if (attName != (char *) NULL) + strncpy(att->attname.data, attName, NAMEDATALEN); + else + memset(att->attname.data,0,NAMEDATALEN); + + att->atttypid = typeID; + att->attdefrel = 0; /* dummy value */ + att->attnvals = 0; /* dummy value */ + att->atttyparg = 0; /* dummy value */ + att->attlen = attLen; + att->attnum = attNum; + att->attbound = 0; /* dummy value */ + att->attbyval = attbyVal; + att->attcanindex = 0; /* dummy value */ + att->attproc = 0; /* dummy value */ + att->attnelems = 0; /* dummy value */ + att->attcacheoff = -1; + att->attisset = false; + att->attalign = attalign; +} + +/* ---------------- + * ExecFreeTypeInfo frees the array of attrbutes + * created by ExecMakeTypeInfo and returned by ExecTypeFromTL... + * ---------------- + */ +void +ExecFreeTypeInfo(TupleDesc typeInfo) +{ + /* ---------------- + * do nothing if asked to free a null pointer + * ---------------- + */ + if (typeInfo == NULL) + return; + + /* ---------------- + * the entire array of typeinfo pointers created by + * ExecMakeTypeInfo was allocated with a single palloc() + * so we can deallocate the whole array with a single pfree(). + * (we should not try and free all the elements in the array) + * -cim 6/12/90 + * ---------------- + */ + pfree(typeInfo); +} + + +/* ---------------------------------------------------------------- + * QueryDescGetTypeInfo + * + *| I don't know how this is used, all I know is that it + *| appeared one day in main.c so I moved it here. -cim 11/1/89 + * ---------------------------------------------------------------- + */ +TupleDesc +QueryDescGetTypeInfo(QueryDesc *queryDesc) +{ + Plan *plan; + TupleDesc tupleType; + List *targetList; + AttrInfo *attinfo = (AttrInfo *)palloc(sizeof(AttrInfo)); + + plan = queryDesc->plantree; + tupleType = (TupleDesc) ExecGetTupType(plan); +/* + targetList = plan->targetlist; + + attinfo->numAttr = ExecTargetListLength(targetList); + attinfo->attrs = tupleType; +*/ + attinfo->numAttr = tupleType->natts; + attinfo->attrs = tupleType->attrs; + return attinfo; +} +#endif + +/* ---------------------------------------------------------------- + * ExecInsertIndexTuples support + * ---------------------------------------------------------------- + */ +/* ---------------------------------------------------------------- + * ExecGetIndexKeyInfo + * + * Extracts the index key attribute numbers from + * an index tuple form (i.e. a tuple from the pg_index relation) + * into an array of attribute numbers. The array and the + * size of the array are returned to the caller via return + * parameters. 
+ * ---------------------------------------------------------------- + */ +void +ExecGetIndexKeyInfo(IndexTupleForm indexTuple, + int *numAttsOutP, + AttrNumber **attsOutP, + FuncIndexInfoPtr fInfoP) +{ + int i; + int numKeys; + AttrNumber *attKeys; + + /* ---------------- + * check parameters + * ---------------- + */ + if (numAttsOutP == NULL && attsOutP == NULL) { + elog(DEBUG, "ExecGetIndexKeyInfo: %s", + "invalid parameters: numAttsOutP and attsOutP must be non-NULL"); + } + + /* ---------------- + * set the procid for a possible functional index. + * ---------------- + */ + FIsetProcOid(fInfoP, indexTuple->indproc); + + /* ---------------- + * count the number of keys.. + * ---------------- + */ + numKeys = 0; + for (i=0; i<8 && indexTuple->indkey[i] != 0; i++) + numKeys++; + + /* ---------------- + * place the number of keys in the caller's return area, + * or the number of arguments for a functional index. + * + * If we have a functional index then the number of + * attributes defined in the index must be 1 (the function's + * single return value). + * ---------------- + */ + if (FIgetProcOid(fInfoP) != InvalidOid) { + FIsetnArgs(fInfoP, numKeys); + (*numAttsOutP) = 1; + } + else + (*numAttsOutP) = numKeys; + + if (numKeys < 1) { + elog(DEBUG, "ExecGetIndexKeyInfo: %s", + "all index key attribute numbers are zero!"); + (*attsOutP) = NULL; + return; + } + + /* ---------------- + * allocate and fill in array of key attribute numbers + * ---------------- + */ + CXT1_printf("ExecGetIndexKeyInfo: context is %d\n", CurrentMemoryContext); + + attKeys = (AttrNumber*) + palloc(numKeys * sizeof(AttrNumber)); + + for (i=0; i<numKeys; i++) + attKeys[i] = indexTuple->indkey[i]; + + /* ---------------- + * return array to caller. + * ---------------- + */ + (*attsOutP) = attKeys; +} + +/* ---------------------------------------------------------------- + * ExecOpenIndices + * + * Here we scan the pg_index relation to find indices + * associated with a given heap relation oid. Since we + * don't know in advance how many indices we have, we + * form lists containing the information we need from + * pg_index and then process these lists. + * + * Note: much of this code duplicates effort done by + * the IndexCatalogInformation function in plancat.c + * because IndexCatalogInformation is poorly written. + * + * It would be much better if the functionality provided + * by this function and IndexCatalogInformation were + * in the form of a small set of orthogonal routines.. + * If you are trying to understand this, I suggest you + * look at the code to IndexCatalogInformation and + * FormIndexTuple..
-cim 9/27/89 + * ---------------------------------------------------------------- + */ +void +ExecOpenIndices(Oid resultRelationOid, + RelationInfo *resultRelationInfo) +{ + Relation indexRd; + HeapScanDesc indexSd; + ScanKeyData key; + HeapTuple tuple; + IndexTupleForm indexStruct; + Oid indexOid; + List *oidList; + List *nkeyList; + List *keyList; + List *fiList; + char *predString; + List *predList; + List *indexoid; + List *numkeys; + List *indexkeys; + List *indexfuncs; + List *indexpreds; + int len; + + RelationPtr relationDescs; + IndexInfo **indexInfoArray; + FuncIndexInfoPtr fInfoP; + int numKeyAtts; + AttrNumber *indexKeyAtts; + PredInfo *predicate; + int i; + + /* ---------------- + * open pg_index + * ---------------- + */ + indexRd = heap_openr(IndexRelationName); + + /* ---------------- + * form a scan key + * ---------------- + */ + ScanKeyEntryInitialize(&key, 0, Anum_pg_index_indrelid, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(resultRelationOid)); + + /* ---------------- + * scan the index relation, looking for indices for our + * result relation.. + * ---------------- + */ + indexSd = heap_beginscan(indexRd, /* scan desc */ + false, /* scan backward flag */ + NowTimeQual, /* time qual */ + 1, /* number scan keys */ + &key); /* scan keys */ + + oidList = NIL; + nkeyList = NIL; + keyList = NIL; + fiList = NIL; + predList = NIL; + + while(tuple = heap_getnext(indexSd, /* scan desc */ + false, /* scan backward flag */ + NULL), /* return: buffer */ + HeapTupleIsValid(tuple)) { + + /* ---------------- + * For each index relation we find, extract the information + * we need and store it in a list.. + * + * first get the oid of the index relation from the tuple + * ---------------- + */ + indexStruct = (IndexTupleForm) GETSTRUCT(tuple); + indexOid = indexStruct->indexrelid; + + /* ---------------- + * allocate space for functional index information. + * ---------------- + */ + fInfoP = (FuncIndexInfoPtr)palloc( sizeof(*fInfoP) ); + + /* ---------------- + * next get the index key information from the tuple + * ---------------- + */ + ExecGetIndexKeyInfo(indexStruct, + &numKeyAtts, + &indexKeyAtts, + fInfoP); + + /* ---------------- + * next get the index predicate from the tuple + * ---------------- + */ + if (VARSIZE(&indexStruct->indpred) != 0) { + predString = fmgr(F_TEXTOUT, &indexStruct->indpred); + predicate = (PredInfo*)stringToNode(predString); + pfree(predString); + } else { + predicate = NULL; + } + + /* ---------------- + * save the index information into lists + * ---------------- + */ + oidList = lconsi(indexOid, oidList); + nkeyList = lconsi(numKeyAtts, nkeyList); + keyList = lcons(indexKeyAtts, keyList); + fiList = lcons(fInfoP, fiList); + predList = lcons(predicate, predList); + } + + /* ---------------- + * we have the info we need so close the pg_index relation.. + * ---------------- + */ + heap_endscan(indexSd); + heap_close(indexRd); + + /* ---------------- + * Now that we've collected the index information into three + * lists, we open the index relations and store the descriptors + * and the key information into arrays. 
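A sketch (all names hypothetical) of the bracketing that ExecOpenIndices is written for: InitPlan-style code opens the result relation's indices once, ExecAppend-style code mirrors each new heap tuple into them, and EndPlan-style code closes everything.

/* Hypothetical outline of index maintenance around a result relation. */
static void
example_index_maintenance(Oid resultRelationOid,
                          RelationInfo *resultRelationInfo,
                          TupleTableSlot *slot,
                          ItemPointer tupleid,
                          EState *estate)
{
    /* fill resultRelationInfo with index relation descs and key info */
    ExecOpenIndices(resultRelationOid, resultRelationInfo);

    /* normally called once per tuple appended to the heap relation */
    ExecInsertIndexTuples(slot, tupleid, estate);

    /* release the index relation descriptors */
    ExecCloseIndices(resultRelationInfo);
}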
+ * ---------------- + */ + len = length(oidList); + if (len > 0) { + /* ---------------- + * allocate space for relation descs + * ---------------- + */ + CXT1_printf("ExecOpenIndices: context is %d\n", CurrentMemoryContext); + relationDescs = (RelationPtr) + palloc(len * sizeof(Relation)); + + /* ---------------- + * initialize index info array + * ---------------- + */ + CXT1_printf("ExecOpenIndices: context is %d\n", CurrentMemoryContext); + indexInfoArray = (IndexInfo**) + palloc(len * sizeof(IndexInfo*)); + + for (i=0; i<len; i++) { + IndexInfo *ii = makeNode(IndexInfo); + ii->ii_NumKeyAttributes = 0; + ii->ii_KeyAttributeNumbers = (AttrNumber*) NULL; + ii->ii_FuncIndexInfo = (FuncIndexInfoPtr) NULL; + ii->ii_Predicate = NULL; + indexInfoArray[i] = ii; + } + + /* ---------------- + * attempt to open each of the indices. If we succeed, + * then store the index relation descriptor into the + * relation descriptor array. + * ---------------- + */ + i = 0; + foreach (indexoid, oidList) { + Relation indexDesc; + + indexOid = lfirsti(indexoid); + indexDesc = index_open(indexOid); + if (indexDesc != NULL) + relationDescs[i++] = indexDesc; + } + + /* ---------------- + * store the relation descriptor array and number of + * descs into the result relation info. + * ---------------- + */ + resultRelationInfo->ri_NumIndices = i; + resultRelationInfo->ri_IndexRelationDescs = relationDescs; + + /* ---------------- + * store the index key information collected in our + * lists into the index info array + * ---------------- + */ + i = 0; + foreach (numkeys, nkeyList) { + numKeyAtts = lfirsti(numkeys); + indexInfoArray[i++]->ii_NumKeyAttributes = numKeyAtts; + } + + i = 0; + foreach (indexkeys, keyList) { + indexKeyAtts = (AttrNumber*) lfirst(indexkeys); + indexInfoArray[i++]->ii_KeyAttributeNumbers = indexKeyAtts; + } + + i = 0; + foreach (indexfuncs, fiList) { + FuncIndexInfoPtr fiP = (FuncIndexInfoPtr)lfirst(indexfuncs); + indexInfoArray[i++]->ii_FuncIndexInfo = fiP; + } + + i = 0; + foreach (indexpreds, predList) { + indexInfoArray[i++]->ii_Predicate = lfirst(indexpreds); + } + /* ---------------- + * store the index info array into relation info + * ---------------- + */ + resultRelationInfo->ri_IndexRelationInfo = indexInfoArray; + } + + /* ---------------- + * All done, resultRelationInfo now contains complete information + * on the indices associated with the result relation. + * ---------------- + */ + + /* should free oidList, nkeyList and keyList here */ + /* OK - let's do it -jolly */ + freeList(oidList); + freeList(nkeyList); + freeList(keyList); + freeList(fiList); + freeList(predList); +} + +/* ---------------------------------------------------------------- + * ExecCloseIndices + * + * Close the index relations stored in resultRelationInfo + * ---------------------------------------------------------------- + */ +void +ExecCloseIndices(RelationInfo *resultRelationInfo) +{ + int i; + int numIndices; + RelationPtr relationDescs; + + numIndices = resultRelationInfo->ri_NumIndices; + relationDescs = resultRelationInfo->ri_IndexRelationDescs; + + for (i=0; i<numIndices; i++) + index_close(relationDescs[i]); +} + +/* ---------------------------------------------------------------- + * ExecFormIndexTuple + * ---------------------------------------------------------------- + */ +IndexTuple +ExecFormIndexTuple(HeapTuple heapTuple, + Relation heapRelation, + Relation indexRelation, + IndexInfo *indexInfo) +{ + IndexTuple indexTuple; + TupleDesc heapDescriptor; + TupleDesc indexDescriptor; + Datum *datum; + char *nulls; + int numberOfAttributes; + AttrNumber *keyAttributeNumbers; + FuncIndexInfoPtr fInfoP; + + /* ---------------- + * get information from the index info structure + * ---------------- + */ + numberOfAttributes = indexInfo->ii_NumKeyAttributes; + keyAttributeNumbers = indexInfo->ii_KeyAttributeNumbers; + fInfoP = indexInfo->ii_FuncIndexInfo; + + /* ---------------- + * datum and null are arrays in which we collect the index attributes + * when forming a new index tuple.
+ * ---------------- + */ + CXT1_printf("ExecFormIndexTuple: context is %d\n", CurrentMemoryContext); + datum = (Datum *) palloc(numberOfAttributes * sizeof *datum); + nulls = (char *) palloc(numberOfAttributes * sizeof *nulls); + + /* ---------------- + * get the tuple descriptors from the relations so we know + * how to form the index tuples.. + * ---------------- + */ + heapDescriptor = RelationGetTupleDescriptor(heapRelation); + indexDescriptor = RelationGetTupleDescriptor(indexRelation); + + /* ---------------- + * FormIndexDatum fills in its datum and null parameters + * with attribute information taken from the given heap tuple. + * ---------------- + */ + FormIndexDatum(numberOfAttributes, /* num attributes */ + keyAttributeNumbers, /* array of att nums to extract */ + heapTuple, /* tuple from base relation */ + heapDescriptor, /* heap tuple's descriptor */ + InvalidBuffer, /* buffer associated with heap tuple */ + datum, /* return: array of attributes */ + nulls, /* return: array of char's */ + fInfoP); /* functional index information */ + + indexTuple = index_formtuple(indexDescriptor, + datum, + nulls); + + /* ---------------- + * free temporary arrays + * + * XXX should store these in the IndexInfo instead of allocating + * and freeing on every insertion, but efficency here is not + * that important and FormIndexTuple is wasteful anyways.. + * -cim 9/27/89 + * ---------------- + */ + pfree(nulls); + pfree(datum); + + return indexTuple; +} + +/* ---------------------------------------------------------------- + * ExecInsertIndexTuples + * + * This routine takes care of inserting index tuples + * into all the relations indexing the result relation + * when a heap tuple is inserted into the result relation. + * Much of this code should be moved into the genam + * stuff as it only exists here because the genam stuff + * doesn't provide the functionality needed by the + * executor.. -cim 9/27/89 + * ---------------------------------------------------------------- + */ +void +ExecInsertIndexTuples(TupleTableSlot *slot, + ItemPointer tupleid, + EState *estate) +{ + HeapTuple heapTuple; + RelationInfo *resultRelationInfo; + int i; + int numIndices; + RelationPtr relationDescs; + Relation heapRelation; + IndexInfo **indexInfoArray; + Node *predicate; + bool satisfied; + ExprContext *econtext; + IndexTuple indexTuple; + InsertIndexResult result; + + heapTuple = slot->val; + + /* ---------------- + * get information from the result relation info structure. 
+ * ---------------- + */ + resultRelationInfo = estate->es_result_relation_info; + numIndices = resultRelationInfo->ri_NumIndices; + relationDescs = resultRelationInfo->ri_IndexRelationDescs; + indexInfoArray = resultRelationInfo->ri_IndexRelationInfo; + heapRelation = resultRelationInfo->ri_RelationDesc; + + /* ---------------- + * for each index, form and insert the index tuple + * ---------------- + */ + econtext = NULL; + for (i=0; i<numIndices; i++) { + predicate = indexInfoArray[i]->ii_Predicate; + if (predicate != NULL) { + if (econtext == NULL) { + econtext = makeNode(ExprContext); + } + econtext->ecxt_scantuple = slot; + + /* Skip this index-update if the predicate isn't satisfied */ + satisfied = ExecQual((List*)predicate, econtext); + if (satisfied == false) + continue; + } + + indexTuple = ExecFormIndexTuple(heapTuple, + heapRelation, + relationDescs[i], + indexInfoArray[i]); + + indexTuple->t_tid = (*tupleid); /* structure assignment */ + + result = index_insert(relationDescs[i], /* index relation */ + indexTuple); /* index tuple */ + + /* ---------------- + * keep track of index inserts for debugging + * ---------------- + */ + IncrIndexInserted(); + + /* ---------------- + * free index tuple after insertion + * ---------------- + */ + if (result) pfree(result); + pfree(indexTuple); + } + if (econtext != NULL) pfree(econtext); +} + diff --git a/src/backend/executor/execdebug.h b/src/backend/executor/execdebug.h new file mode 100644 index 00000000000..b5200ca2577 --- /dev/null +++ b/src/backend/executor/execdebug.h @@ -0,0 +1,377 @@ +/*------------------------------------------------------------------------- + * + * execdebug.h-- + * #defines governing debugging behaviour in the executor + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: execdebug.h,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef EXECDEBUG_H +#define EXECDEBUG_H + +/* ---------------------------------------------------------------- + * debugging defines. + * + * If you want certain debugging behaviour, then #define + * the variable to 1, else #undef it. -cim 10/26/89 + * ---------------------------------------------------------------- + */ + +/* ---------------- + * EXEC_DEBUGSTORETUP is for tuple table debugging - this + * will print a message every time we call ExecStoreTuple. + * -cim 3/20/91 + * ---------------- + */ +#undef EXEC_DEBUGSTORETUP + +/* ---------------- + * EXEC_TUPLECOUNT is a #define which causes the + * executor to keep track of tuple counts. This might be + * causing some problems with the decstation stuff so + * you might want to undefine this if you are doing work + * on the decs - cim 10/20/89 + * ---------------- + */ +#undef EXEC_TUPLECOUNT + +/* ---------------- + * EXEC_SHOWBUFSTATS controls whether or not buffer statistics + * are shown for each query. -cim 2/9/89 + * ---------------- + */ +#undef EXEC_SHOWBUFSTATS + +/* ---------------- + * EXEC_CONTEXTDEBUG turns on the printing of debugging information + * by CXT_printf() calls regarding which memory context is the + * CurrentMemoryContext for palloc() calls. + * ---------------- + */ +#undef EXEC_CONTEXTDEBUG + +/* ---------------- + * EXEC_RETURNSIZE is a compile flag governing the + * behaviour of lispFmgr.. See ExecMakeFunctionResult(). + * Undefining this avoids a problem in the system cache. + * + * Note: undefining this means that there is incorrect + * information in the const nodes corresponding + * to function (or operator) results.
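The predicate test inside ExecInsertIndexTuples above is worth isolating; here is a sketch of it as a hypothetical helper, assuming the caller supplies the ExprContext.

/* Hypothetical helper: true when the tuple in `slot` satisfies a partial
 * index predicate, mirroring the test in ExecInsertIndexTuples. */
static bool
example_predicate_holds(Node *predicate, TupleTableSlot *slot,
                        ExprContext *econtext)
{
    if (predicate == NULL)
        return true;                    /* not a partial index */

    econtext->ecxt_scantuple = slot;    /* predicate Vars see the new tuple */
    return ExecQual((List *) predicate, econtext);
}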
The thing is, + * 99% of the time this is fine because when you do + * something like x = emp.sal + 1, you already know + * the type and size of x so the fact that + didn't + * return the correct size doesn't matter. + * With variable length stuff the size is stored in + * the first few bytes of the data so again, it's + * not likely to matter. + * ---------------- + */ +#undef EXEC_RETURNSIZE + +/* ---------------- + * EXEC_UTILSDEBUG is a flag which turns on debugging of the + * executor utilities by EU_printf() in eutils.c + * ---------------- + */ +#undef EXEC_UTILSDEBUG + +/* ---------------- + * EXEC_NESTLOOPDEBUG is a flag which turns on debugging of the + * nest loop node by NL_printf() and ENL_printf() in nestloop.c + * ---------------- + */ +#undef EXEC_NESTLOOPDEBUG + +/* ---------------- + * EXEC_PROCDEBUG is a flag which turns on debugging of + * ExecProcNode() by PN_printf() in procnode.c + * ---------------- + */ +#undef EXEC_PROCDEBUG + +/* ---------------- + * EXEC_EVALDEBUG is a flag which turns on debugging of + * ExecEval and ExecTargetList() stuff by EV_printf() in qual.c + * ---------------- + */ +#undef EXEC_EVALDEBUG + +/* ---------------- + * EXEC_SCANDEBUG is a flag which turns on debugging of + * the ExecSeqScan() stuff by S_printf() in seqscan.c + * ---------------- + */ +#undef EXEC_SCANDEBUG + +/* ---------------- + * EXEC_SORTDEBUG is a flag which turns on debugging of + * the ExecSort() stuff by SO_printf() in sort.c + * ---------------- + */ +#undef EXEC_SORTDEBUG + +/* ---------------- + * EXEC_MERGEJOINDEBUG is a flag which turns on debugging of + * the ExecMergeJoin() stuff by MJ_printf() in mergejoin.c + * ---------------- + */ +#undef EXEC_MERGEJOINDEBUG + +/* ---------------- + * EXEC_MERGEJOINPFREE is a flag which causes merge joins + * to pfree intermittant tuples (which is the proper thing) + * Not defining this means we avoid menory management problems + * at the cost of doing deallocation of stuff only at the + * end of the transaction + * ---------------- + */ +#undef EXEC_MERGEJOINPFREE + +/* ---------------- + * EXEC_DEBUGINTERACTIVE is a flag which enables the + * user to issue "DEBUG" commands from an interactive + * backend. + * ---------------- + */ +#undef EXEC_DEBUGINTERACTIVE + +/* ---------------- + * EXEC_DEBUGVARIABLEFILE is string, which if defined will + * be loaded when the executor is initialized. If this + * string is not defined then nothing will be loaded.. + * + * Example: + * + * #define EXEC_DEBUGVARIABLEFILE "/a/postgres/cimarron/.pg_debugvars" + # + * Note: since these variables are read at execution time, + * they can't affect the first query.. this hack should be + * replaced by something better sometime. -cim 11/2/89 + * ---------------- + */ +#undef EXEC_DEBUGVARIABLEFILE + +/* ---------------------------------------------------------------- + * #defines controlled by above definitions + * + * Note: most of these are "incomplete" because I didn't + * need the ones not defined. More should be added + * only as necessary -cim 10/26/89 + * ---------------------------------------------------------------- + */ +#define T_OR_F(b) (b ? "true" : "false") +#define NULL_OR_TUPLE(slot) (TupIsNull(slot) ? 
"null" : "a tuple") + + +/* #define EXEC_TUPLECOUNT - XXX take out for now for executor stubbing -- jolly*/ +/* ---------------- + * tuple count debugging defines + * ---------------- + */ +#ifdef EXEC_TUPLECOUNT +extern int NTupleProcessed; +extern int NTupleRetrieved; +extern int NTupleReplaced; +extern int NTupleAppended; +extern int NTupleDeleted; +extern int NIndexTupleProcessed; +extern int NIndexTupleInserted; + +#define IncrRetrieved() NTupleRetrieved++ +#define IncrAppended() NTupleAppended++ +#define IncrDeleted() NTupleDeleted++ +#define IncrReplaced() NTupleReplaced++ +#define IncrInserted() NTupleInserted++ +#define IncrProcessed() NTupleProcessed++ +#define IncrIndexProcessed() NIndexTupleProcessed++ +#define IncrIndexInserted() NIndexTupleInserted++ +#else +#define IncrRetrieved() +#define IncrAppended() +#define IncrDeleted() +#define IncrReplaced() +#define IncrInserted() +#define IncrProcessed() +#define IncrIndexProcessed() +#define IncrIndexInserted() +#endif /* EXEC_TUPLECOUNT */ + +/* ---------------- + * memory context debugging defines + * ---------------- + */ +#ifdef EXEC_CONTEXTDEBUG +#define CXT_printf(s) printf(s) +#define CXT1_printf(s, a) printf(s, a) +#else +#define CXT_printf(s) +#define CXT1_printf(s, a) +#endif /* EXEC_CONTEXTDEBUG */ + +/* ---------------- + * eutils debugging defines + * ---------------- + */ +#ifdef EXEC_UTILSDEBUG +#define EU_nodeDisplay(l) nodeDisplay(l, 0) +#define EU_printf(s) printf(s) +#define EU1_printf(s, a) printf(s, a) +#define EU4_printf(s, a, b, c, d) printf(s, a, b, c, d) +#else +#define EU_nodeDisplay(l) +#define EU_printf(s) +#define EU1_printf(s, a) +#define EU4_printf(s, a, b, c, d) +#endif /* EXEC_UTILSDEBUG */ + + +/* ---------------- + * nest loop debugging defines + * ---------------- + */ +#ifdef EXEC_NESTLOOPDEBUG +#define NL_nodeDisplay(l) nodeDisplay(l, 0) +#define NL_printf(s) printf(s) +#define NL1_printf(s, a) printf(s, a) +#define NL4_printf(s, a, b, c, d) printf(s, a, b, c, d) +#define ENL1_printf(message) printf("ExecNestLoop: %s\n", message) +#else +#define NL_nodeDisplay(l) +#define NL_printf(s) +#define NL1_printf(s, a) +#define NL4_printf(s, a, b, c, d) +#define ENL1_printf(message) +#endif /* EXEC_NESTLOOPDEBUG */ + +/* ---------------- + * proc node debugging defines + * ---------------- + */ +#ifdef EXEC_PROCDEBUG +#define PN_printf(s) printf(s) +#define PN1_printf(s, p) printf(s, p) +#else +#define PN_printf(s) +#define PN1_printf(s, p) +#endif /* EXEC_PROCDEBUG */ + +/* ---------------- + * exec eval / target list debugging defines + * ---------------- + */ +#ifdef EXEC_EVALDEBUG +#define EV_nodeDisplay(l) nodeDisplay(l, 0) +#define EV_printf(s) printf(s) +#define EV1_printf(s, a) printf(s, a) +#define EV5_printf(s, a, b, c, d, e) printf(s, a, b, c, d, e) +#else +#define EV_nodeDisplay(l) +#define EV_printf(s) +#define EV1_printf(s, a) +#define EV5_printf(s, a, b, c, d, e) +#endif /* EXEC_EVALDEBUG */ + +/* ---------------- + * scan debugging defines + * ---------------- + */ +#ifdef EXEC_SCANDEBUG +#define S_nodeDisplay(l) nodeDisplay(l, 0) +#define S_printf(s) printf(s) +#define S1_printf(s, p) printf(s, p) +#else +#define S_nodeDisplay(l) +#define S_printf(s) +#define S1_printf(s, p) +#endif /* EXEC_SCANDEBUG */ + +/* ---------------- + * sort node debugging defines + * ---------------- + */ +#ifdef EXEC_SORTDEBUG +#define SO_nodeDisplay(l) nodeDisplay(l, 0) +#define SO_printf(s) printf(s) +#define SO1_printf(s, p) printf(s, p) +#else +#define SO_nodeDisplay(l) +#define SO_printf(s) +#define 
SO1_printf(s, p) +#endif /* EXEC_SORTDEBUG */ + +/* ---------------- + * merge join debugging defines + * ---------------- + */ +#ifdef EXEC_MERGEJOINDEBUG +#define MJ_nodeDisplay(l) nodeDisplay(l, 0) +#define MJ_printf(s) printf(s) +#define MJ1_printf(s, p) printf(s, p) +#define MJ2_printf(s, p1, p2) printf(s, p1, p2) +#define MJ_debugtup(tuple, type) debugtup(tuple, type) +#define MJ_dump(context, state) ExecMergeTupleDump(econtext, state) +#define MJ_DEBUG_QUAL(clause, res) \ + MJ2_printf(" ExecQual(%s, econtext) returns %s\n", \ + CppAsString(clause), T_OR_F(res)); + +#define MJ_DEBUG_MERGE_COMPARE(qual, res) \ + MJ2_printf(" MergeCompare(mergeclauses, %s, ..) returns %s\n", \ + CppAsString(qual), T_OR_F(res)); + +#define MJ_DEBUG_PROC_NODE(slot) \ + MJ2_printf(" %s = ExecProcNode(innerPlan) returns %s\n", \ + CppAsString(slot), NULL_OR_TUPLE(slot)); +#else +#define MJ_nodeDisplay(l) +#define MJ_printf(s) +#define MJ1_printf(s, p) +#define MJ2_printf(s, p1, p2) +#define MJ_debugtup(tuple, type) +#define MJ_dump(context, state) +#define MJ_DEBUG_QUAL(clause, res) +#define MJ_DEBUG_MERGE_COMPARE(qual, res) +#define MJ_DEBUG_PROC_NODE(slot) +#endif /* EXEC_MERGEJOINDEBUG */ + +/* ---------------------------------------------------------------- + * DO NOT DEFINE THESE EVER OR YOU WILL BURN! + * ---------------------------------------------------------------- + */ +/* ---------------- + * DOESNOTWORK is currently placed around memory manager + * code that is known to cause problems. Code in between + * is likely not converted and probably won't work anyways. + * ---------------- + */ +#undef DOESNOTWORK + +/* ---------------- + * PERHAPSNEVER is placed around the "scan attribute" + * support code for the rule manager because for now we + * do things inefficiently. The correct solution to our + * problem is to add code to the parser/planner to save + * attribute information for the rule manager rather than + * have the executor have to grope through the entire plan + * for it so if we ever decide to make things better, + * we should probably delete the stuff in between PERHAPSNEVER.. + * ---------------- + */ +#undef PERHAPSNEVER + +/* ---------------- + * NOTYET is placed around any code not yet implemented + * in the executor. Only remove these when actually implementing + * said code. 
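A sketch (hypothetical wrapper; stdio assumed) of what the EXEC_TUPLECOUNT machinery earlier in this header buys: with the flag defined, the Incr* macros bump the globals declared in execUtils.c and DisplayTupleCount reports them; with it undefined, every call compiles away to nothing.

#include <stdio.h>

/* Hypothetical wrapper around one query's worth of statistics. */
static void
example_report_tuple_stats(FILE *statfp)
{
    ResetTupleCount();          /* zero the global counters */

    /* ... run the plan; the executor calls IncrProcessed(),
     *     IncrRetrieved(), IncrIndexInserted(), ... along the way ... */

    DisplayTupleCount(statfp);  /* prints the counts, or "no tuples processed." */
}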
+ * ---------------- + */ +#undef NOTYET + +extern long NDirectFileRead; +extern long NDirectFileWrite; + +#endif /* ExecDebugIncluded */ diff --git a/src/backend/executor/execdefs.h b/src/backend/executor/execdefs.h new file mode 100644 index 00000000000..5aec485c95c --- /dev/null +++ b/src/backend/executor/execdefs.h @@ -0,0 +1,54 @@ +/*------------------------------------------------------------------------- + * + * execdefs.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: execdefs.h,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef EXECDEFS_H +#define EXECDEFS_H + +/* ---------------- + * executor scan direction definitions + * ---------------- + */ +#define EXEC_FRWD 1 /* Scan forward */ +#define EXEC_BKWD -1 /* Scan backward */ + +/* ---------------- + * ExecutePlan() tuplecount definitions + * ---------------- + */ +#define ALL_TUPLES 0 /* return all tuples */ +#define ONE_TUPLE 1 /* return only one tuple */ + +/* ---------------- + * constants used by ExecMain + * ---------------- + */ +#define EXEC_RUN 3 +#define EXEC_FOR 4 +#define EXEC_BACK 5 +#define EXEC_RETONE 6 +#define EXEC_RESULT 7 + +/* ---------------- + * Merge Join states + * ---------------- + */ +#define EXEC_MJ_INITIALIZE 1 +#define EXEC_MJ_JOINMARK 2 +#define EXEC_MJ_JOINTEST 3 +#define EXEC_MJ_JOINTUPLES 4 +#define EXEC_MJ_NEXTOUTER 5 +#define EXEC_MJ_TESTOUTER 6 +#define EXEC_MJ_NEXTINNER 7 +#define EXEC_MJ_SKIPINNER 8 +#define EXEC_MJ_SKIPOUTER 9 + +#endif /* EXECDEFS_H */ diff --git a/src/backend/executor/execdesc.h b/src/backend/executor/execdesc.h new file mode 100644 index 00000000000..54752625f55 --- /dev/null +++ b/src/backend/executor/execdesc.h @@ -0,0 +1,38 @@ +/*------------------------------------------------------------------------- + * + * execdesc.h-- + * plan and query descriptor accessor macros used by the executor + * and related modules. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: execdesc.h,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef EXECDESC_H +#define EXECDESC_H + +#include "nodes/parsenodes.h" +#include "nodes/plannodes.h" +#include "tcop/dest.h" + +/* ---------------- + * query descriptor: + * a QueryDesc encapsulates everything that the executor + * needs to execute the query + * --------------------- + */ +typedef struct QueryDesc { + CmdType operation; /* CMD_SELECT, CMD_UPDATE, etc. 
*/ + Query *parsetree; + Plan *plantree; + CommandDest dest; /* the destination output of the execution */ +} QueryDesc; + +/* in pquery.c */ +extern QueryDesc *CreateQueryDesc(Query *parsetree, Plan *plantree, + CommandDest dest); + +#endif /* EXECDESC_H */ diff --git a/src/backend/executor/executor.h b/src/backend/executor/executor.h new file mode 100644 index 00000000000..65caf098f13 --- /dev/null +++ b/src/backend/executor/executor.h @@ -0,0 +1,229 @@ +/*------------------------------------------------------------------------- + * + * executor.h-- + * support for the POSTGRES executor module + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: executor.h,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef EXECUTOR_H +#define EXECUTOR_H + +/* ---------------------------------------------------------------- + * #includes + * ---------------------------------------------------------------- + */ +#include +#include + +#include "postgres.h" +#include "nodes/pg_list.h" + +/* ---------------- + * executor debugging definitions are kept in a separate file + * so people can customize what debugging they want to see and not + * have this information clobbered every time a new version of + * executor.h is checked in -cim 10/26/89 + * ---------------- + */ +#include "executor/execdebug.h" + +#include "access/heapam.h" +#include "access/htup.h" +#include "access/istrat.h" +#include "access/itup.h" +#include "access/skey.h" +#include "utils/tqual.h" +#include "catalog/catname.h" +#include "utils/syscache.h" +#include "executor/execdefs.h" +#include "executor/tuptable.h" + +#include "nodes/parsenodes.h" + +#include "storage/buf.h" +#include "miscadmin.h" +#include "fmgr.h" +#include "utils/elog.h" +#include "utils/mcxt.h" +#include "utils/memutils.h" +#include "utils/rel.h" + +#include "catalog/pg_index.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "catalog/pg_aggregate.h" + +#include "access/printtup.h" +#include "nodes/primnodes.h" +#include "nodes/plannodes.h" +#include "nodes/execnodes.h" + +#include "tcop/dest.h" +#include "storage/smgr.h" + +#include "access/genam.h" +#include "executor/execdesc.h" + +/* + * prototypes from functions in execAmi.c + */ +extern void ExecOpenScanR(Oid relOid, int nkeys, ScanKey skeys, bool isindex, + ScanDirection dir, TimeQual timeRange, + Relation *returnRelation, Pointer *returnScanDesc); +extern Relation ExecOpenR(Oid relationOid, bool isindex); +extern Pointer ExecBeginScan(Relation relation, int nkeys, ScanKey skeys, + bool isindex, ScanDirection dir, TimeQual time_range); +extern void ExecCloseR(Plan *node); +extern void ExecReScan(Plan *node, ExprContext *exprCtxt, Plan *parent); +extern HeapScanDesc ExecReScanR(Relation relDesc, HeapScanDesc scanDesc, + ScanDirection direction, int nkeys, ScanKey skeys); +extern void ExecMarkPos(Plan *node); +extern void ExecRestrPos(Plan *node); +extern Relation ExecCreatR(TupleDesc tupType, Oid relationOid); + +/* + * prototypes from functions in execJunk.c + */ +extern JunkFilter *ExecInitJunkFilter(List *targetList); +extern bool ExecGetJunkAttribute(JunkFilter *junkfilter, TupleTableSlot *slot, + char *attrName, Datum *value, bool *isNull); +extern HeapTuple ExecRemoveJunk(JunkFilter *junkfilter, TupleTableSlot *slot); + + +/* + * prototypes from functions in execMain.c + */ +extern TupleDesc ExecutorStart(QueryDesc *queryDesc, EState *estate); +extern TupleTableSlot* 
ExecutorRun(QueryDesc *queryDesc, EState *estate, int feature, int count); +extern void ExecutorEnd(QueryDesc *queryDesc, EState *estate); + +/* + * prototypes from functions in execProcnode.c + */ +extern bool ExecInitNode(Plan *node, EState *estate, Plan *parent); +extern TupleTableSlot *ExecProcNode(Plan *node, Plan *parent); +extern int ExecCountSlotsNode(Plan *node); +extern void ExecEndNode(Plan *node, Plan *parent); + +/* + * prototypes from functions in execQual.c + */ +extern bool execConstByVal; +extern int execConstLen; + +extern Datum ExecExtractResult(TupleTableSlot *slot, AttrNumber attnum, + bool *isNull); +extern Datum ExecEvalVar(Var *variable, ExprContext *econtext, bool *isNull); +extern Datum ExecEvalParam(Param *expression, ExprContext *econtext, + bool *isNull); +extern char *GetAttributeByNum(TupleTableSlot *slot, AttrNumber attrno, + bool *isNull); +extern char *att_by_num(TupleTableSlot *slot, AttrNumber attrno, + bool *isNull); +/* stop here */ +extern char *GetAttributeByName(TupleTableSlot *slot, char *attname, + bool *isNull); +extern char *att_by_name(TupleTableSlot *slot, char *attname, bool *isNull); +extern void ExecEvalFuncArgs(FunctionCachePtr fcache, ExprContext *econtext, + List *argList, Datum argV[], bool *argIsDone); +extern Datum ExecMakeFunctionResult(Node *node, List *arguments, + ExprContext *econtext, bool *isNull, bool *isDone); +extern Datum ExecEvalOper(Expr *opClause, ExprContext *econtext, + bool *isNull); +extern Datum ExecEvalFunc(Expr *funcClause, ExprContext *econtext, + bool *isNull, bool *isDone); +extern Datum ExecEvalNot(Expr *notclause, ExprContext *econtext, bool *isNull); +extern Datum ExecEvalOr(Expr *orExpr, ExprContext *econtext, bool *isNull); +extern Datum ExecEvalAnd(Expr *andExpr, ExprContext *econtext, bool *isNull); +extern Datum ExecEvalExpr(Node *expression, ExprContext *econtext, bool *isNull, + bool *isDone); +extern bool ExecQualClause(Node *clause, ExprContext *econtext); +extern bool ExecQual(List *qual, ExprContext *econtext); +extern int ExecTargetListLength(List *targetlist); +extern TupleTableSlot *ExecProject(ProjectionInfo *projInfo, bool *isDone); + +/* + * prototypes from functions in execScan.c + */ +extern TupleTableSlot *ExecScan(Scan *node, TupleTableSlot* (*accessMtd)()); + +/* + * prototypes from functions in execTuples.c + */ +extern TupleTable ExecCreateTupleTable(int initialSize); +extern void ExecDestroyTupleTable(TupleTable table, bool shouldFree); +extern TupleTableSlot* ExecAllocTableSlot(TupleTable table); +extern TupleTableSlot* ExecStoreTuple(HeapTuple tuple, + TupleTableSlot *slot, + Buffer buffer, + bool shouldFree); +extern TupleTableSlot* ExecClearTuple(TupleTableSlot* slot); +extern bool ExecSlotPolicy(TupleTableSlot *slot); +extern bool ExecSetSlotPolicy(TupleTableSlot *slot, bool shouldFree); +extern TupleDesc ExecSetSlotDescriptor(TupleTableSlot *slot, + TupleDesc tupdesc); +extern void ExecSetSlotDescriptorIsNew(TupleTableSlot *slot, bool isNew); +extern TupleDesc ExecSetNewSlotDescriptor(TupleTableSlot *slot, + TupleDesc tupdesc); +extern Buffer ExecSetSlotBuffer(TupleTableSlot *slot, Buffer b); +extern void ExecIncrSlotBufferRefcnt(TupleTableSlot *slot); +extern bool TupIsNull(TupleTableSlot* slot); +extern bool ExecSlotDescriptorIsNew(TupleTableSlot *slot); +extern void ExecInitResultTupleSlot(EState *estate, CommonState *commonstate); +extern void ExecInitScanTupleSlot(EState *estate, + CommonScanState *commonscanstate); +extern void ExecInitMarkedTupleSlot(EState *estate, 
MergeJoinState *mergestate); +extern void ExecInitOuterTupleSlot(EState *estate, HashJoinState *hashstate); +extern void ExecInitHashTupleSlot(EState *estate, HashJoinState *hashstate); +extern TupleTableSlot *NodeGetResultTupleSlot(Plan *node); + +extern TupleDesc ExecGetTupType(Plan *node); +extern TupleDesc ExecTypeFromTL(List *targetList); + +/* + * prototypes from functions in execTuples.c + */ +extern void ResetTupleCount(); +extern void DisplayTupleCount(FILE *statfp); +extern void ExecAssignNodeBaseInfo(EState *estate, CommonState *basenode, + Plan *parent); +extern void ExecAssignExprContext(EState *estate, CommonState *commonstate); +extern void ExecAssignResultType(CommonState *commonstate, + TupleDesc tupDesc); +extern void ExecAssignResultTypeFromOuterPlan(Plan *node, + CommonState *commonstate); +extern void ExecAssignResultTypeFromTL(Plan *node, CommonState *commonstate); +extern TupleDesc ExecGetResultType(CommonState *commonstate); +extern void ExecFreeResultType(CommonState *commonstate); +extern void ExecAssignProjectionInfo(Plan *node, CommonState *commonstate); +extern void ExecFreeProjectionInfo(CommonState *commonstate); +extern TupleDesc ExecGetScanType(CommonScanState *csstate); +extern void ExecFreeScanType(CommonScanState *csstate); +extern void ExecAssignScanType(CommonScanState *csstate, + TupleDesc tupDesc); +extern void ExecAssignScanTypeFromOuterPlan(Plan *node, + CommonScanState *csstate); +extern AttributeTupleForm ExecGetTypeInfo(Relation relDesc); + +extern void ExecGetIndexKeyInfo(IndexTupleForm indexTuple, int *numAttsOutP, + AttrNumber **attsOutP, FuncIndexInfoPtr fInfoP); +extern void ExecOpenIndices(Oid resultRelationOid, + RelationInfo *resultRelationInfo); +extern void ExecCloseIndices(RelationInfo *resultRelationInfo); +extern IndexTuple ExecFormIndexTuple(HeapTuple heapTuple, + Relation heapRelation, Relation indexRelation, IndexInfo *indexInfo); +extern void ExecInsertIndexTuples(TupleTableSlot *slot, ItemPointer tupleid, + EState *estate); + + +/* ---------------------------------------------------------------- + * the end + * ---------------------------------------------------------------- + */ + +#endif /* EXECUTOR_H */ diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c new file mode 100644 index 00000000000..2f6e29d8277 --- /dev/null +++ b/src/backend/executor/functions.c @@ -0,0 +1,388 @@ +/*------------------------------------------------------------------------- + * + * functions.c-- + * Routines to handle functions called from the executor + * Putting this stuff in fmgr makes the postmaster a mess.... 
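
The tuple-table prototypes listed above (ExecStoreTuple, ExecClearTuple, ExecSetSlotPolicy) all revolve around a slot that may or may not own the tuple it currently holds. The following is a minimal editorial sketch of that ownership convention only; ToySlot, toy_store and toy_clear are invented names and none of this is the real TupleTableSlot machinery.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A toy stand-in for a tuple slot: one heap-allocated "tuple" plus a
 * flag saying whether the slot must free it when it is replaced. */
typedef struct ToySlot {
    char *tuple;      /* NULL means the slot is empty */
    int   shouldFree; /* slot owns the tuple */
} ToySlot;

/* Store a new tuple, releasing the old one first if the slot owned it
 * (the role of the shouldFree argument of ExecStoreTuple). */
static void toy_store(ToySlot *slot, char *tuple, int shouldFree)
{
    if (slot->tuple != NULL && slot->shouldFree)
        free(slot->tuple);
    slot->tuple = tuple;
    slot->shouldFree = shouldFree;
}

/* Empty the slot, freeing the tuple only when the slot owns it
 * (the role of ExecClearTuple). */
static void toy_clear(ToySlot *slot)
{
    if (slot->tuple != NULL && slot->shouldFree)
        free(slot->tuple);
    slot->tuple = NULL;
    slot->shouldFree = 0;
}

int main(void)
{
    ToySlot slot = {NULL, 0};
    char *copy = malloc(32);

    if (copy == NULL)
        return 1;
    strcpy(copy, "copied tuple");

    toy_store(&slot, copy, 1);          /* slot owns the copy */
    printf("slot holds: %s\n", slot.tuple);
    toy_clear(&slot);                   /* copy is freed here */
    return 0;
}
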
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/functions.c,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/primnodes.h" +#include "nodes/relation.h" +#include "nodes/execnodes.h" +#include "nodes/plannodes.h" + +#include "catalog/pg_proc.h" +#include "parser/parse_query.h" +#include "tcop/pquery.h" +#include "tcop/tcopprot.h" +#include "nodes/params.h" +#include "fmgr.h" +#include "utils/fcache.h" +#include "utils/datum.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/syscache.h" +#include "catalog/pg_language.h" +#include "access/heapam.h" +#include "access/xact.h" +#include "executor/executor.h" +#include "executor/functions.h" + +#undef new + +typedef enum {F_EXEC_START, F_EXEC_RUN, F_EXEC_DONE} ExecStatus; + +typedef struct local_es { + QueryDesc *qd; + EState *estate; + struct local_es *next; + ExecStatus status; +} execution_state; + +#define LAST_POSTQUEL_COMMAND(es) ((es)->next == (execution_state *)NULL) + +/* non-export function prototypes */ +static TupleDesc postquel_start(execution_state *es); +static execution_state *init_execution_state(FunctionCachePtr fcache, + char *args[]); +static TupleTableSlot *postquel_getnext(execution_state *es); +static void postquel_end(execution_state *es); +static void postquel_sub_params(execution_state *es, int nargs, + char *args[], bool *nullV); +static Datum postquel_execute(execution_state *es, FunctionCachePtr fcache, + List *fTlist, char **args, bool *isNull); + + +Datum +ProjectAttribute(TupleDesc TD, + TargetEntry *tlist, + HeapTuple tup, + bool *isnullP) +{ + Datum val,valueP; + Var *attrVar = (Var *)tlist->expr; + AttrNumber attrno = attrVar->varattno; + + + val = PointerGetDatum(heap_getattr(tup, + InvalidBuffer, + attrno, + TD, + isnullP)); + if (*isnullP) + return (Datum) NULL; + + valueP = datumCopy(val, + TD->attrs[attrno-1]->atttypid, + TD->attrs[attrno-1]->attbyval, + (Size) TD->attrs[attrno-1]->attlen); + return valueP; +} + +static execution_state * +init_execution_state(FunctionCachePtr fcache, + char *args[]) +{ + execution_state *newes; + execution_state *nextes; + execution_state *preves; + QueryTreeList *queryTree_list; + int i; + List *planTree_list; + int nargs; + + nargs = fcache->nargs; + + newes = (execution_state *) palloc(sizeof(execution_state)); + nextes = newes; + preves = (execution_state *)NULL; + + + planTree_list = (List *) + pg_plan(fcache->src, fcache->argOidVect, nargs, &queryTree_list, None); + + for (i=0; i < queryTree_list->len; i++) { + EState *estate; + Query *queryTree = (Query*) (queryTree_list->qtrees[i]); + Plan *planTree = lfirst(planTree_list); + + if (!nextes) + nextes = (execution_state *) palloc(sizeof(execution_state)); + if (preves) + preves->next = nextes; + + nextes->next = NULL; + nextes->status = F_EXEC_START; + nextes->qd = CreateQueryDesc(queryTree, + planTree, + None); + estate = CreateExecutorState(); + + if (nargs > 0) { + int i; + ParamListInfo paramLI; + + paramLI = + (ParamListInfo)palloc((nargs+1)*sizeof(ParamListInfoData)); + + memset(paramLI, 0, nargs*sizeof(ParamListInfoData)); + + estate->es_param_list_info = paramLI; + + for (i=0; ikind = PARAM_NUM; + paramLI->id = i+1; + paramLI->isnull = false; + paramLI->value = (Datum) NULL; + } + paramLI->kind = PARAM_INVALID; + } + else + estate->es_param_list_info = (ParamListInfo)NULL; + 
nextes->estate = estate; + preves = nextes; + nextes = (execution_state *)NULL; + + planTree_list = lnext(planTree_list); + } + + return newes; +} + +static TupleDesc +postquel_start(execution_state *es) +{ + return ExecutorStart(es->qd, es->estate); +} + +static TupleTableSlot * +postquel_getnext(execution_state *es) +{ + int feature; + + feature = (LAST_POSTQUEL_COMMAND(es)) ? EXEC_RETONE : EXEC_RUN; + + return ExecutorRun(es->qd, es->estate, feature, 0); +} + +static void +postquel_end(execution_state *es) +{ + ExecutorEnd(es->qd, es->estate); +} + +static void +postquel_sub_params(execution_state *es, + int nargs, + char *args[], + bool *nullV) +{ + ParamListInfo paramLI; + EState *estate; + + estate = es->estate; + paramLI = estate->es_param_list_info; + + while (paramLI->kind != PARAM_INVALID) { + if (paramLI->kind == PARAM_NUM) { + Assert(paramLI->id <= nargs); + paramLI->value = (Datum)args[(paramLI->id - 1)]; + paramLI->isnull = nullV[(paramLI->id - 1)]; + } + paramLI++; + } +} + +static TupleTableSlot * +copy_function_result(FunctionCachePtr fcache, + TupleTableSlot *resultSlot) +{ + TupleTableSlot *funcSlot; + TupleDesc resultTd; + HeapTuple newTuple; + HeapTuple oldTuple; + + Assert(! TupIsNull(resultSlot)); + oldTuple = resultSlot->val; + + funcSlot = (TupleTableSlot*)fcache->funcSlot; + + if (funcSlot == (TupleTableSlot*)NULL) + return resultSlot; + + resultTd = resultSlot->ttc_tupleDescriptor; + /* + * When the funcSlot is NULL we have to initialize the funcSlot's + * tuple descriptor. + */ + if (TupIsNull(funcSlot)) { + int i= 0; + TupleDesc funcTd = funcSlot->ttc_tupleDescriptor; + + while (i < oldTuple->t_natts) { + funcTd->attrs[i] = + (AttributeTupleForm)palloc(ATTRIBUTE_TUPLE_SIZE); + memmove(funcTd->attrs[i], + resultTd->attrs[i], + ATTRIBUTE_TUPLE_SIZE); + i++; + } + } + + newTuple = heap_copytuple(oldTuple); + + return ExecStoreTuple(newTuple,funcSlot,InvalidBuffer,true); +} + +static Datum +postquel_execute(execution_state *es, + FunctionCachePtr fcache, + List *fTlist, + char **args, + bool *isNull) +{ + TupleTableSlot *slot; + Datum value; + + if (es->status == F_EXEC_START) + { + (void) postquel_start(es); + es->status = F_EXEC_RUN; + } + + if (fcache->nargs > 0) + postquel_sub_params(es, fcache->nargs, args, fcache->nullVect); + + slot = postquel_getnext(es); + + if (TupIsNull(slot)) { + postquel_end(es); + es->status = F_EXEC_DONE; + *isNull = true; + /* + * If this isn't the last command for the function + * we have to increment the command + * counter so that subsequent commands can see changes made + * by previous ones. + */ + if (!LAST_POSTQUEL_COMMAND(es)) CommandCounterIncrement(); + return (Datum)NULL; + } + + if (LAST_POSTQUEL_COMMAND(es)) { + TupleTableSlot *resSlot; + + /* + * Copy the result. copy_function_result is smart enough + * to do nothing when no action is called for. This helps + * reduce the logic and code redundancy here. + */ + resSlot = copy_function_result(fcache, slot); + if (fTlist != NIL) { + HeapTuple tup; + TargetEntry *tle = lfirst(fTlist); + + tup = resSlot->val; + value = ProjectAttribute(resSlot->ttc_tupleDescriptor, + tle, + tup, + isNull); + }else { + value = (Datum)resSlot; + *isNull = false; + } + + /* + * If this is a single valued function we have to end the + * function execution now. 
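
postquel_execute above runs a function body as a chain of commands, where intermediate commands are executed only for their effects and only the last command's value is handed back to the caller. The reduced model below imitates that control flow with toy types (ToyCmd, run_function); it is an editorial sketch, not the real fcache/QueryDesc machinery.

#include <stdio.h>

typedef enum { TOY_START, TOY_RUN, TOY_DONE } ToyStatus;

/* One "command" of a toy multi-command function; here a command simply
 * yields an integer. */
typedef struct ToyCmd {
    int            result;
    ToyStatus      status;
    struct ToyCmd *next;
} ToyCmd;

#define LAST_CMD(c) ((c)->next == NULL)

/* Run every command in order; only the final command's value is kept,
 * mirroring LAST_POSTQUEL_COMMAND in the code above. */
static int run_function(ToyCmd *cmds)
{
    int result = 0;
    ToyCmd *c;

    for (c = cmds; c != NULL; c = c->next) {
        c->status = TOY_RUN;
        if (LAST_CMD(c))
            result = c->result;
        /* a real implementation would bump the command counter here so
         * later commands can see the effects of earlier ones */
        c->status = TOY_DONE;
    }
    return result;
}

int main(void)
{
    ToyCmd c3 = {30, TOY_START, NULL};
    ToyCmd c2 = {20, TOY_START, &c3};
    ToyCmd c1 = {10, TOY_START, &c2};

    printf("function result: %d\n", run_function(&c1));  /* prints 30 */
    return 0;
}
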
+ */ + if (fcache->oneResult) { + postquel_end(es); + es->status = F_EXEC_DONE; + } + + return value; + } + /* + * If this isn't the last command for the function, we don't + * return any results, but we have to increment the command + * counter so that subsequent commands can see changes made + * by previous ones. + */ + CommandCounterIncrement(); + return (Datum)NULL; +} + +Datum +postquel_function(Func *funcNode, char **args, bool *isNull, bool *isDone) +{ + execution_state *es; + Datum result; + FunctionCachePtr fcache = funcNode->func_fcache; + + es = (execution_state *) fcache->func_state; + if (es == NULL) + { + es = init_execution_state(fcache, args); + fcache->func_state = (char *) es; + } + + while (es && es->status == F_EXEC_DONE) + es = es->next; + + Assert(es); + /* + * Execute each command in the function one after another until we're + * executing the final command and get a result or we run out of + * commands. + */ + while (es != (execution_state *)NULL) + { + result = postquel_execute(es, + fcache, + funcNode->func_tlist, + args, + isNull); + if (es->status != F_EXEC_DONE) + break; + es = es->next; + } + + /* + * If we've gone through every command in this function, we are done. + */ + if (es == (execution_state *)NULL) + { + /* + * Reset the execution states to start over again + */ + es = (execution_state *)fcache->func_state; + while (es) + { + es->status = F_EXEC_START; + es = es->next; + } + /* + * Let caller know we're finished. + */ + *isDone = true; + return (fcache->oneResult) ? result : (Datum)NULL; + } + /* + * If we got a result from a command within the function it has + * to be the final command. All others shouldn't be returing + * anything. + */ + Assert ( LAST_POSTQUEL_COMMAND(es) ); + *isDone = false; + + return result; +} diff --git a/src/backend/executor/functions.h b/src/backend/executor/functions.h new file mode 100644 index 00000000000..1a1a88b36a1 --- /dev/null +++ b/src/backend/executor/functions.h @@ -0,0 +1,22 @@ +/*------------------------------------------------------------------------- + * + * functions.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: functions.h,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef FUNCTIONS_H +#define FUNCTIONS_H + +extern Datum ProjectAttribute(TupleDesc TD, TargetEntry *tlist, + HeapTuple tup, bool *isnullP); + +extern Datum postquel_function(Func *funcNode, char **args, + bool *isNull, bool *isDone); + +#endif /* FUNCTIONS_H */ diff --git a/src/backend/executor/hashjoin.h b/src/backend/executor/hashjoin.h new file mode 100644 index 00000000000..e7ae086fe16 --- /dev/null +++ b/src/backend/executor/hashjoin.h @@ -0,0 +1,82 @@ +/*------------------------------------------------------------------------- + * + * hashjoin.h-- + * internal structures for hash table and buckets + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: hashjoin.h,v 1.1.1.1 1996/07/09 06:21:25 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef HASHJOIN_H +#define HASHJOIN_H + +#include "access/htup.h" +#include "storage/ipc.h" + +/* ----------------- + * have to use relative address as pointers in the hashtable + * because the hashtable may reallocate in difference processes + * ----------------- + */ +typedef int RelativeAddr; + +/* ------------------ + * the relative addresses are always relative to the head of the + * 
hashtable, the following macro converts them to absolute address. + * ------------------ + */ +#define ABSADDR(X) ((X) < 0 ? NULL: (char*)hashtable + X) +#define RELADDR(X) (RelativeAddr)((char*)(X) - (char*)hashtable) + +typedef char **charPP; +typedef int *intP; + +/* ---------------------------------------------------------------- + * hash-join hash table structures + * ---------------------------------------------------------------- + */ +typedef struct HashTableData { + int nbuckets; + int totalbuckets; + int bucketsize; + IpcMemoryId shmid; + RelativeAddr top; /* char* */ + RelativeAddr bottom; /* char* */ + RelativeAddr overflownext; /* char* */ + RelativeAddr batch; /* char* */ + RelativeAddr readbuf; /* char* */ + int nbatch; + RelativeAddr outerbatchNames; /* RelativeAddr* */ + RelativeAddr outerbatchPos; /* RelativeAddr* */ + RelativeAddr innerbatchNames; /* RelativeAddr* */ + RelativeAddr innerbatchPos; /* RelativeAddr* */ + RelativeAddr innerbatchSizes; /* int* */ + int curbatch; + int nprocess; + int pcount; +} HashTableData; /* real hash table follows here */ + +typedef HashTableData *HashJoinTable; + +typedef struct OverflowTupleData { + RelativeAddr tuple; /* HeapTuple */ + RelativeAddr next; /* struct OverflowTupleData * */ +} OverflowTupleData; /* real tuple follows here */ + +typedef OverflowTupleData *OverflowTuple; + +typedef struct HashBucketData { + RelativeAddr top; /* HeapTuple */ + RelativeAddr bottom; /* HeapTuple */ + RelativeAddr firstotuple; /* OverflowTuple */ + RelativeAddr lastotuple; /* OverflowTuple */ +} HashBucketData; /* real bucket follows here */ + +typedef HashBucketData *HashBucket; + +#define HASH_PERMISSION 0700 + +#endif /* HASHJOIN_H */ diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c new file mode 100644 index 00000000000..ee187367c74 --- /dev/null +++ b/src/backend/executor/nodeAgg.c @@ -0,0 +1,558 @@ +/*------------------------------------------------------------------------- + * + * nodeAgg.c-- + * Routines to handle aggregate nodes. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * NOTE + * The implementation of Agg node has been reworked to handle legal + * SQL aggregates. (Do not expect POSTQUEL semantics.) -- ay 2/95 + * + * IDENTIFICATION + * /usr/local/devel/pglite/cvs/src/backend/executor/nodeAgg.c,v 1.13 1995/08/01 20:19:07 jolly Exp + * + *------------------------------------------------------------------------- + */ +#include "access/heapam.h" +#include "catalog/pg_aggregate.h" +#include "catalog/catalog.h" +#include "executor/executor.h" +#include "executor/nodeAgg.h" +#include "storage/bufmgr.h" +#include "utils/palloc.h" +#include "parser/catalog_utils.h" + +/* + * AggFuncInfo - + * keeps the transition functions information around + */ +typedef struct AggFuncInfo { + Oid xfn1_oid; + Oid xfn2_oid; + Oid finalfn_oid; + func_ptr xfn1; + func_ptr xfn2; + func_ptr finalfn; + int xfn1_nargs; + int xfn2_nargs; + int finalfn_nargs; +} AggFuncInfo; + +static Datum aggGetAttr(TupleTableSlot *tuple, Aggreg *agg, bool *isNull); + + +/* --------------------------------------- + * + * ExecAgg - + * + * ExecAgg receives tuples from its outer subplan and aggregates over + * the appropriate attribute for each (unique) aggregate in the target + * list. (The number of tuples to aggregate over depends on whether a + * GROUP BY clause is present. It might be the number of tuples in a + * group or all the tuples that satisfy the qualifications.) 
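
The ABSADDR/RELADDR macros in hashjoin.h above keep offsets rather than raw pointers so the table contents stay valid even if the underlying block sits at a different address in another process or is moved. A small standalone sketch of the same idea over a plain malloc'd arena follows; Arena, arena_alloc and ABS are invented names, and the bump-style allocator is essentially what hashTableAlloc does later in nodeHash.c.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef long RelAddr;             /* offset from the start of the arena */

typedef struct Arena {
    char   *base;                 /* may change when the arena moves */
    RelAddr top;                  /* next free offset */
    long    size;
} Arena;

/* convert an offset into a usable pointer against the current base */
#define ABS(a, off) ((a)->base + (off))

/* bump allocator: hand out the current top as a relative address */
static RelAddr arena_alloc(Arena *a, long nbytes)
{
    RelAddr p = a->top;
    a->top += nbytes;
    return p;
}

int main(void)
{
    Arena   a;
    RelAddr msg;
    char   *moved;

    a.size = 1024;
    a.base = malloc(a.size);
    a.top = 0;
    if (a.base == NULL)
        return 1;

    msg = arena_alloc(&a, 32);
    strcpy(ABS(&a, msg), "offsets survive relocation");

    /* relocate the arena: raw pointers would dangle, offsets do not */
    moved = realloc(a.base, 2048);
    if (moved != NULL)
        a.base = moved;

    printf("%s\n", ABS(&a, msg));
    free(a.base);
    return 0;
}
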
The value of + * each aggregate is stored in the expression context for ExecProject to + * evaluate the result tuple. + * + * ExecAgg evaluates each aggregate in the following steps: (initcond1, + * initcond2 are the initial values and sfunc1, sfunc2, and finalfunc are + * the transition functions.) + * + * value1[i] = initcond1 + * value2[i] = initcond2 + * forall tuples do + * value1[i] = sfunc1(aggregate_attribute, value1[i]) + * value2[i] = sfunc2(value2[i]) + * value1[i] = finalfunc(value1[i], value2[i]) + * + * If the outer subplan is a Group node, ExecAgg returns as many tuples + * as there are groups. + * + * XXX handling of NULL doesn't work + * + * OLD COMMENTS + * + * XXX Aggregates should probably have another option: what to do + * with transfn2 if we hit a null value. "count" (transfn1 = null, + * transfn2 = increment) will want to have transfn2 called; "avg" + * (transfn1 = add, transfn2 = increment) will not. -pma 1/3/93 + * + * ------------------------------------------ + */ +TupleTableSlot * +ExecAgg(Agg *node) +{ + AggState *aggstate; + EState *estate; + Aggreg **aggregates; + Plan *outerPlan; + int i, nagg; + Datum *value1, *value2; + int *noInitValue; + AggFuncInfo *aggFuncInfo; + long nTuplesAgged = 0; + ExprContext *econtext; + ProjectionInfo *projInfo; + TupleTableSlot *resultSlot; + HeapTuple oneTuple; + char* nulls; + bool isDone; + bool isNull = FALSE, isNull1 = FALSE, isNull2 = FALSE; + + /* --------------------- + * get state info from node + * --------------------- + */ + aggstate = node->aggstate; + if (aggstate->agg_done) + return NULL; + + estate = node->plan.state; + econtext = aggstate->csstate.cstate.cs_ExprContext; + aggregates = node->aggs; + nagg = node->numAgg; + + value1 = node->aggstate->csstate.cstate.cs_ExprContext->ecxt_values; + nulls = node->aggstate->csstate.cstate.cs_ExprContext->ecxt_nulls; + + value2 = (Datum *)palloc(sizeof(Datum) * nagg); + memset(value2, 0, sizeof(Datum) * nagg); + + aggFuncInfo = (AggFuncInfo *)palloc(sizeof(AggFuncInfo) * nagg); + memset(aggFuncInfo, 0, sizeof(AggFuncInfo) * nagg); + + noInitValue = (int *)palloc(sizeof(int) * nagg); + memset(noInitValue, 0, sizeof(noInitValue) * nagg); + + outerPlan = outerPlan(node); + oneTuple = NULL; + + projInfo = aggstate->csstate.cstate.cs_ProjInfo; + + for(i = 0; i < nagg; i++) { + Aggreg *agg; + char *aggname; + HeapTuple aggTuple; + Form_pg_aggregate aggp; + Oid xfn1_oid, xfn2_oid, finalfn_oid; + func_ptr xfn1_ptr, xfn2_ptr, finalfn_ptr; + int xfn1_nargs, xfn2_nargs, finalfn_nargs; + + agg = aggregates[i]; + + /* --------------------- + * find transfer functions of all the aggregates and initialize + * their initial values + * --------------------- + */ + aggname = agg->aggname; + aggTuple = SearchSysCacheTuple(AGGNAME, + PointerGetDatum(aggname), + ObjectIdGetDatum(agg->basetype), + 0,0); + if (!HeapTupleIsValid(aggTuple)) + elog(WARN, "ExecAgg: cache lookup failed for aggregate \"%s\"(%s)", + aggname, + tname(get_id_type(agg->basetype))); + aggp = (Form_pg_aggregate) GETSTRUCT(aggTuple); + + xfn1_oid = aggp->aggtransfn1; + xfn2_oid = aggp->aggtransfn2; + finalfn_oid = aggp->aggfinalfn; + + if (OidIsValid(finalfn_oid)) { + fmgr_info(finalfn_oid, &finalfn_ptr, &finalfn_nargs); + aggFuncInfo[i].finalfn_oid = finalfn_oid; + aggFuncInfo[i].finalfn = finalfn_ptr; + aggFuncInfo[i].finalfn_nargs = finalfn_nargs; + } + + if (OidIsValid(xfn2_oid)) { + fmgr_info(xfn2_oid, &xfn2_ptr, &xfn2_nargs); + aggFuncInfo[i].xfn2_oid = xfn2_oid; + aggFuncInfo[i].xfn2 = xfn2_ptr; + 
aggFuncInfo[i].xfn2_nargs = xfn2_nargs; + value2[i] = (Datum)AggNameGetInitVal((char*)aggname, + aggp->aggbasetype, + 2, + &isNull2); + /* ------------------------------------------ + * If there is a second transition function, its initial + * value must exist -- as it does not depend on data values, + * we have no other way of determining an initial value. + * ------------------------------------------ + */ + if (isNull2) + elog(WARN, "ExecAgg: agginitval2 is null"); + } + + if (OidIsValid(xfn1_oid)) { + fmgr_info(xfn1_oid, &xfn1_ptr, &xfn1_nargs); + aggFuncInfo[i].xfn1_oid = xfn1_oid; + aggFuncInfo[i].xfn1 = xfn1_ptr; + aggFuncInfo[i].xfn1_nargs = xfn1_nargs; + value1[i] = (Datum)AggNameGetInitVal((char*)aggname, + aggp->aggbasetype, + 1, + &isNull1); + + /* ------------------------------------------ + * If the initial value for the first transition function + * doesn't exist in the pg_aggregate table then we let + * the first value returned from the outer procNode become + * the initial value. (This is useful for aggregates like + * max{} and min{}.) + * ------------------------------------------ + */ + if (isNull1) { + noInitValue[i] = 1; + nulls[i] = 1; + } + } + } + + /* ---------------- + * for each tuple from the the outer plan, apply all the aggregates + * ---------------- + */ + for (;;) { + HeapTuple outerTuple = NULL; + TupleTableSlot *outerslot; + + isNull = isNull1 = isNull2 = 0; + outerslot = ExecProcNode(outerPlan, (Plan*)node); + if (outerslot) outerTuple = outerslot->val; + if (!HeapTupleIsValid(outerTuple)) { + /* when the outerplan doesn't return a single tuple, + create a dummy heaptuple anyway + because we still need to return a valid aggregate value. + The value returned will be the initial values of the + transition functions */ + if (nTuplesAgged == 0) { + TupleDesc tupType; + Datum *tupValue; + char* null_array; + + tupType = aggstate->csstate.css_ScanTupleSlot->ttc_tupleDescriptor; + tupValue = projInfo->pi_tupValue; + + /* initially, set all the values to NULL */ + null_array = malloc(nagg); + for (i=0;ixfn1) { + if (noInitValue[i]) { + /* + * value1 and value2 has not been initialized. This + * is the first non-NULL value. We use it as the + * initial value. + */ + /* but we can't just use it straight, we have + to make a copy of it since the tuple from which + it came will be freed on the next iteration + of the scan */ + attnum = ((Var*)aggregates[i]->target)->varattno; + attlen = outerslot->ttc_tupleDescriptor->attrs[attnum-1]->attlen; + if (attlen == -1) { + /* variable length */ + attlen = VARSIZE((struct varlena*) newVal); + } + value1[i] = (Datum)palloc(attlen); + if (outerslot->ttc_tupleDescriptor->attrs[attnum-1]->attbyval) + value1[i] = newVal; + else + memmove((char*) (value1[i]), (char*) (newVal), attlen); + /* value1[i] = newVal; */ + noInitValue[i] = 0; + nulls[i] = 0; + } else { + /* + * apply the transition functions. 
+ */ + args[0] = value1[i]; + args[1] = newVal; + value1[i] = + (Datum)fmgr_c(aggfns->xfn1, aggfns->xfn1_oid, + aggfns->xfn1_nargs, (FmgrValues *)args, + &isNull1); + Assert(!isNull1); + } + } + + if (aggfns->xfn2) { + Datum xfn2_val = value2[i]; + + value2[i] = + (Datum)fmgr_c(aggfns->xfn2, aggfns->xfn2_oid, + aggfns->xfn2_nargs, + (FmgrValues *)&xfn2_val, &isNull2); + Assert(!isNull2); + } + } + + /* + * keep this for the projection (we only need one of these - + * all the tuples we aggregate over share the same group column) + */ + if (!oneTuple) { + oneTuple = heap_copytuple(outerslot->val); + } + + nTuplesAgged++; + } + + /* -------------- + * finalize the aggregate (if necessary), and get the resultant value + * -------------- + */ + for(i = 0; i < nagg; i++) { + char *args[2]; + AggFuncInfo *aggfns = &aggFuncInfo[i]; + + if (aggfns->finalfn && nTuplesAgged > 0) { + if (aggfns->finalfn_nargs > 1) { + args[0] = (char*)value1[i]; + args[1] = (char*)value2[i]; + } else if (aggfns->xfn1) { + args[0] = (char*)value1[i]; + } else if (aggfns->xfn2) { + args[0] = (char*)value2[i]; + } else + elog(WARN, "ExecAgg: no valid transition functions??"); + value1[i] = + (Datum)fmgr_c(aggfns->finalfn, aggfns->finalfn_oid, + aggfns->finalfn_nargs, (FmgrValues *) args, + &(nulls[i])); + } else if (aggfns->xfn1) { + /* + * value in the right place, ignore. (If you remove this + * case, fix the else part. -ay 2/95) + */ + } else if (aggfns->xfn2) { + value1[i] = value2[i]; + } else + elog(WARN, "ExecAgg: no valid transition functions??"); + } + + /* + * whether the aggregation is done depends on whether we are doing + * aggregation over groups or the entire table + */ + if (nodeTag(outerPlan)==T_Group) { + /* aggregation over groups */ + aggstate->agg_done = ((Group*)outerPlan)->grpstate->grp_done; + } else { + aggstate->agg_done = TRUE; + } + + /* ---------------- + * form a projection tuple, store it in the result tuple + * slot and return it. 
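
The ExecAgg comments above describe each aggregate as (initcond1, initcond2, sfunc1, sfunc2, finalfunc): sfunc1 folds the attribute into value1, sfunc2 advances value2, and finalfunc combines the two. The toy program below works that scheme through by hand for avg (sum, count, divide) and for max, where value1 has no initial value and is seeded by the first input just as the noInitValue path does; the variable names and input data are made up for the illustration.

#include <stdio.h>

int main(void)
{
    int input[] = {4, 9, 2, 7};
    int n = sizeof(input) / sizeof(input[0]);
    int i;

    /* avg: sfunc1 adds the attribute into value1, sfunc2 increments
     * value2, finalfunc divides.  Both initial values are zero. */
    double avg_value1 = 0.0;      /* initcond1 */
    long   avg_value2 = 0;        /* initcond2 */

    /* max: only sfunc1 exists and there is no initial value; the first
     * input becomes value1 (the noInitValue case in ExecAgg). */
    int max_value1 = 0;
    int max_seeded = 0;

    for (i = 0; i < n; i++) {
        avg_value1 += input[i];           /* value1 = sfunc1(attr, value1) */
        avg_value2 += 1;                  /* value2 = sfunc2(value2)       */

        if (!max_seeded) {
            max_value1 = input[i];        /* first value seeds value1 */
            max_seeded = 1;
        } else if (input[i] > max_value1) {
            max_value1 = input[i];        /* sfunc1 = larger-of */
        }
    }

    printf("avg = %g\n", avg_value1 / avg_value2);   /* finalfunc: 5.5 */
    printf("max = %d\n", max_value1);                /* no finalfunc: 9 */
    return 0;
}
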
+ * ---------------- + */ + ExecStoreTuple(oneTuple, + aggstate->csstate.css_ScanTupleSlot, + InvalidBuffer, + false); + econtext->ecxt_scantuple = aggstate->csstate.css_ScanTupleSlot; + resultSlot = ExecProject(projInfo, &isDone); + + if (oneTuple) + pfree(oneTuple); + + return resultSlot; +} + +/* ----------------- + * ExecInitAgg + * + * Creates the run-time information for the agg node produced by the + * planner and initializes its outer subtree + * ----------------- + */ +bool +ExecInitAgg(Agg *node, EState *estate, Plan *parent) +{ + AggState *aggstate; + Plan *outerPlan; + ExprContext *econtext; + + /* + * assign the node's execution state + */ + node->plan.state = estate; + + /* + * create state structure + */ + aggstate = makeNode(AggState); + node->aggstate = aggstate; + aggstate->agg_done = FALSE; + + /* + * assign node's base id and create expression context + */ + ExecAssignNodeBaseInfo(estate, &aggstate->csstate.cstate, + (Plan*) parent); + ExecAssignExprContext(estate, &aggstate->csstate.cstate); + +#define AGG_NSLOTS 2 + /* + * tuple table initialization + */ + ExecInitScanTupleSlot(estate, &aggstate->csstate); + ExecInitResultTupleSlot(estate, &aggstate->csstate.cstate); + + econtext = aggstate->csstate.cstate.cs_ExprContext; + econtext->ecxt_values = + (Datum *)palloc(sizeof(Datum) * node->numAgg); + memset(econtext->ecxt_values, 0, sizeof(Datum) * node->numAgg); + econtext->ecxt_nulls = (char *)palloc(node->numAgg); + memset(econtext->ecxt_nulls, 0, node->numAgg); + + /* + * initializes child nodes + */ + outerPlan = outerPlan(node); + ExecInitNode(outerPlan, estate, (Plan *)node); + + /* ---------------- + * initialize tuple type. + * ---------------- + */ + ExecAssignScanTypeFromOuterPlan((Plan *) node, &aggstate->csstate); + + /* + * Initialize tuple type for both result and scan. + * This node does no projection + */ + ExecAssignResultTypeFromTL((Plan*) node, &aggstate->csstate.cstate); + ExecAssignProjectionInfo((Plan*)node, &aggstate->csstate.cstate); + + return TRUE; +} + +int +ExecCountSlotsAgg(Agg *node) +{ + return ExecCountSlotsNode(outerPlan(node)) + + ExecCountSlotsNode(innerPlan(node)) + + AGG_NSLOTS; +} + +/* ------------------------ + * ExecEndAgg(node) + * + * ----------------------- + */ +void +ExecEndAgg(Agg *node) +{ + AggState *aggstate; + Plan *outerPlan; + + aggstate = node->aggstate; + + ExecFreeProjectionInfo(&aggstate->csstate.cstate); + + outerPlan = outerPlan(node); + ExecEndNode(outerPlan, (Plan*)node); + + /* clean up tuple table */ + ExecClearTuple(aggstate->csstate.css_ScanTupleSlot); +} + + +/***************************************************************************** + * Support Routines + *****************************************************************************/ + +/* + * aggGetAttr - + * get the attribute (specified in the Var node in agg) to aggregate + * over from the tuple + */ +static Datum +aggGetAttr(TupleTableSlot *slot, + Aggreg *agg, + bool *isNull) +{ + Datum result; + AttrNumber attnum; + HeapTuple heapTuple; + TupleDesc tuple_type; + Buffer buffer; + + /* ---------------- + * extract tuple information from the slot + * ---------------- + */ + heapTuple = slot->val; + tuple_type = slot->ttc_tupleDescriptor; + buffer = slot->ttc_buffer; + + attnum = ((Var*)agg->target)->varattno; + + /* + * If the attribute number is invalid, then we are supposed to + * return the entire tuple, we give back a whole slot so that + * callers know what the tuple looks like. 
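
ExecCountSlotsAgg above follows a convention used by every node type in this patch: a node reports its own slot requirement (AGG_NSLOTS here) plus whatever its subtrees report, so the caller can size the tuple table up front. The toy recursion below shows that accounting over an invented ToyPlan tree; it is an editorial sketch, not the real Plan node layout.

#include <stdio.h>

typedef struct ToyPlan {
    const char     *name;
    int             own_slots;   /* e.g. AGG_NSLOTS, APPEND_NSLOTS, ... */
    struct ToyPlan *outer;       /* left subtree  */
    struct ToyPlan *inner;       /* right subtree */
} ToyPlan;

/* Sum the node's own slots with those of its subtrees, the same shape
 * as ExecCountSlotsAgg / ExecCountSlotsAppend / ExecCountSlotsHash. */
static int count_slots(ToyPlan *node)
{
    if (node == NULL)
        return 0;
    return node->own_slots
         + count_slots(node->outer)
         + count_slots(node->inner);
}

int main(void)
{
    ToyPlan scan = {"scan", 1, NULL, NULL};
    ToyPlan sort = {"sort", 1, &scan, NULL};
    ToyPlan agg  = {"agg",  2, &sort, NULL};   /* 2 matches AGG_NSLOTS */

    printf("tuple table needs %d slots\n", count_slots(&agg));  /* 4 */
    return 0;
}
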
+ */ + if (attnum == InvalidAttrNumber) { + TupleTableSlot *tempSlot; + TupleDesc td; + HeapTuple tup; + + tempSlot = makeNode(TupleTableSlot); + tempSlot->ttc_shouldFree = false; + tempSlot->ttc_descIsNew = true; + tempSlot->ttc_tupleDescriptor = (TupleDesc)NULL, + tempSlot->ttc_buffer = InvalidBuffer; + tempSlot->ttc_whichplan = -1; + + tup = heap_copytuple(slot->val); + td = CreateTupleDescCopy(slot->ttc_tupleDescriptor); + + ExecSetSlotDescriptor(tempSlot, td); + + ExecStoreTuple(tup, tempSlot, InvalidBuffer, true); + return (Datum) tempSlot; + } + + result = (Datum) + heap_getattr(heapTuple, /* tuple containing attribute */ + buffer, /* buffer associated with tuple */ + attnum, /* attribute number of desired attribute */ + tuple_type, /* tuple descriptor of tuple */ + isNull); /* return: is attribute null? */ + + /* ---------------- + * return null if att is null + * ---------------- + */ + if (*isNull) + return (Datum) NULL; + + return result; +} diff --git a/src/backend/executor/nodeAgg.h b/src/backend/executor/nodeAgg.h new file mode 100644 index 00000000000..51c2b2b2270 --- /dev/null +++ b/src/backend/executor/nodeAgg.h @@ -0,0 +1,21 @@ +/*------------------------------------------------------------------------- + * + * nodeAgg.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeAgg.h,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEAGG_H +#define NODEAGG_H + +extern TupleTableSlot *ExecAgg(Agg *node); +extern bool ExecInitAgg(Agg *node, EState *estate, Plan *parent); +extern int ExecCountSlotsAgg(Agg *node); +extern void ExecEndAgg(Agg *node); + +#endif /* NODEAGG_H */ diff --git a/src/backend/executor/nodeAppend.c b/src/backend/executor/nodeAppend.c new file mode 100644 index 00000000000..0a6cd5d01bb --- /dev/null +++ b/src/backend/executor/nodeAppend.c @@ -0,0 +1,483 @@ +/*------------------------------------------------------------------------- + * + * nodeAppend.c-- + * routines to handle append nodes. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* INTERFACE ROUTINES + * ExecInitAppend - initialize the append node + * ExecProcAppend - retrieve the next tuple from the node + * ExecEndAppend - shut down the append node + * + * NOTES + * Each append node contains a list of one or more subplans which + * must be iteratively processed (forwards or backwards). + * Tuples are retrieved by executing the 'whichplan'th subplan + * until the subplan stops returning tuples, at which point that + * plan is shut down and the next started up. + * + * Append nodes don't make use of their left and right + * subtrees, rather they maintain a list of subplans so + * a typical append node looks like this in the plan tree: + * + * ... + * / + * Append -------+------+------+--- nil + * / \ | | | + * nil nil ... ... ... + * subplans + * + * Append nodes are currently used to support inheritance + * queries, where several relations need to be scanned. 
+ * For example, in our standard person/student/employee/student-emp + * example, where student and employee inherit from person + * and student-emp inherits from student and employee, the + * query: + * + * retrieve (e.name) from e in person* + * + * generates the plan: + * + * | + * Append -------+-------+--------+--------+ + * / \ | | | | + * nil nil Scan Scan Scan Scan + * | | | | + * person employee student student-emp + */ + +#include "executor/executor.h" +#include "executor/nodeAppend.h" +#include "executor/nodeIndexscan.h" +#include "utils/palloc.h" +#include "parser/parsetree.h" /* for rt_store() macro */ + +/* ---------------------------------------------------------------- + * exec-append-initialize-next + * + * Sets up the append node state (i.e. the append state node) + * for the "next" scan. + * + * Returns t iff there is a "next" scan to process. + * ---------------------------------------------------------------- + */ +bool +exec_append_initialize_next(Append *node) +{ + EState *estate; + AppendState *unionstate; + TupleTableSlot *result_slot; + List *rangeTable; + + int whichplan; + int nplans; + List *rtentries; + ResTarget *rtentry; + + Index unionrelid; + + /* ---------------- + * get information from the append node + * ---------------- + */ + estate = node->plan.state; + unionstate = node->unionstate; + result_slot = unionstate->cstate.cs_ResultTupleSlot; + rangeTable = estate->es_range_table; + + whichplan = unionstate->as_whichplan; + nplans = unionstate->as_nplans; + rtentries = node->unionrtentries; + + if (whichplan < 0) { + /* ---------------- + * if scanning in reverse, we start at + * the last scan in the list and then + * proceed back to the first.. in any case + * we inform ExecProcAppend that we are + * at the end of the line by returning FALSE + * ---------------- + */ + unionstate->as_whichplan = 0; + return FALSE; + + } else if (whichplan >= nplans) { + /* ---------------- + * as above, end the scan if we go beyond + * the last scan in our list.. + * ---------------- + */ + unionstate->as_whichplan = nplans - 1; + return FALSE; + + } else { + /* ---------------- + * initialize the scan + * (and update the range table appropriately) + * (doesn't this leave the range table hosed for anybody upstream + * of the Append node??? - jolly ) + * ---------------- + */ + if (node->unionrelid > 0) { + rtentry = nth(whichplan, rtentries); + if (rtentry == NULL) + elog(DEBUG, "exec_append_initialize_next: rtentry is nil"); + + unionrelid = node->unionrelid; + + rt_store(unionrelid, rangeTable, rtentry); + + if (unionstate->as_junkFilter_list) { + estate->es_junkFilter = + (JunkFilter*)nth(whichplan, + unionstate->as_junkFilter_list); + } + if (unionstate->as_result_relation_info_list) { + estate->es_result_relation_info = + (RelationInfo*) nth(whichplan, + unionstate->as_result_relation_info_list); + } + result_slot->ttc_whichplan = whichplan; + } + + return TRUE; + } +} + +/* ---------------------------------------------------------------- + * ExecInitAppend + * + * Begins all of the subscans of the append node, storing the + * scan structures in the 'initialized' vector of the append-state + * structure. + * + * (This is potentially wasteful, since the entire result of the + * append node may not be scanned, but this way all of the + * structures get allocated in the executor's top level memory + * block instead of that of the call to ExecProcAppend.) + * + * Returns the scan result of the first scan. 
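
The nodeAppend.c comments above describe an Append node as a list of subplans processed one after another: pull from the current subplan until it is exhausted, advance whichplan, and report end-of-scan only when the whole list has been consumed. The compact model below iterates over plain arrays standing in for subplans; AppendIter and its helpers are invented names for the sketch.

#include <stdio.h>

/* Each "subplan" is just an array of ints here. */
typedef struct SubPlan {
    const int *rows;
    int        nrows;
    int        pos;
} SubPlan;

typedef struct AppendIter {
    SubPlan *plans;
    int      nplans;
    int      whichplan;          /* index of the subplan being scanned */
} AppendIter;

/* Return the next row, switching subplans when the current one is
 * exhausted; NULL means every subplan has been exhausted. */
static const int *append_next(AppendIter *it)
{
    while (it->whichplan < it->nplans) {
        SubPlan *sp = &it->plans[it->whichplan];

        if (sp->pos < sp->nrows)
            return &sp->rows[sp->pos++];
        it->whichplan++;         /* initialize the "next" scan */
    }
    return NULL;
}

int main(void)
{
    int person[]   = {1, 2};
    int employee[] = {3};
    int student[]  = {4, 5};
    SubPlan plans[] = {
        {person,   2, 0},
        {employee, 1, 0},
        {student,  2, 0},
    };
    AppendIter it = {plans, 3, 0};
    const int *row;

    while ((row = append_next(&it)) != NULL)
        printf("%d ", *row);     /* prints 1 2 3 4 5 */
    printf("\n");
    return 0;
}
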
+ * ---------------------------------------------------------------- + */ +bool +ExecInitAppend(Append *node, EState *estate, Plan *parent) +{ + AppendState *unionstate; + int nplans; + List *resultList; + List *rtentries; + List *unionplans; + bool *initialized; + int i; + Plan *initNode; + List *junkList; + RelationInfo *es_rri = estate->es_result_relation_info; + + /* ---------------- + * assign execution state to node and get information + * for append state + * ---------------- + */ + node->plan.state = estate; + + unionplans = node->unionplans; + nplans = length(unionplans); + rtentries = node->unionrtentries; + + CXT1_printf("ExecInitAppend: context is %d\n", CurrentMemoryContext); + initialized = (bool *)palloc(nplans * sizeof(bool)); + + /* ---------------- + * create new AppendState for our append node + * ---------------- + */ + unionstate = makeNode(AppendState); + unionstate->as_whichplan = 0; + unionstate->as_nplans = nplans; + unionstate->as_initialized = initialized; + unionstate->as_rtentries = rtentries; + + node->unionstate = unionstate; + + /* ---------------- + * Miscellanious initialization + * + * + assign node's base_id + * + assign debugging hooks + * + * Append plans don't have expression contexts because they + * never call ExecQual or ExecTargetList. + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &unionstate->cstate, parent); + +#define APPEND_NSLOTS 1 + /* ---------------- + * append nodes still have Result slots, which hold pointers + * to tuples, so we have to initialize them.. + * ---------------- + */ + ExecInitResultTupleSlot(estate, &unionstate->cstate); + + /* + * If the inherits rtentry is the result relation, we have to make + * a result relation info list for all inheritors so we can update + * their indices and put the result tuples in the right place etc. + * + * e.g. replace p (age = p.age + 1) from p in person* + */ + if ((es_rri != (RelationInfo*)NULL) && + (node->unionrelid == es_rri->ri_RangeTableIndex)) + { + RelationInfo *rri; + List *rtentryP; + + foreach(rtentryP,rtentries) + { + Oid reloid; + RangeTblEntry *rtentry = lfirst(rtentryP); + + reloid = rtentry->relid; + rri = makeNode(RelationInfo); + rri->ri_RangeTableIndex = es_rri->ri_RangeTableIndex; + rri->ri_RelationDesc = heap_open(reloid); + rri->ri_NumIndices = 0; + rri->ri_IndexRelationDescs = NULL; /* index descs */ + rri->ri_IndexRelationInfo = NULL; /* index key info */ + + resultList = lcons(rri,resultList); + ExecOpenIndices(reloid, rri); + } + unionstate->as_result_relation_info_list = resultList; + } + /* ---------------- + * call ExecInitNode on each of the plans in our list + * and save the results into the array "initialized" + * ---------------- + */ + junkList = NIL; + + for(i = 0; i < nplans ; i++ ) { + JunkFilter *j; + List *targetList; + /* ---------------- + * NOTE: we first modify range table in + * exec_append_initialize_next() and + * then initialize the subnode, + * since it may use the range table. 
+ * ---------------- + */ + unionstate->as_whichplan = i; + exec_append_initialize_next(node); + + initNode = (Plan *) nth(i, unionplans); + initialized[i] = ExecInitNode(initNode, estate, (Plan*) node); + + /* --------------- + * Each targetlist in the subplan may need its own junk filter + * + * This is true only when the reln being replaced/deleted is + * the one that we're looking at the subclasses of + * --------------- + */ + if ((es_rri != (RelationInfo*)NULL) && + (node->unionrelid == es_rri->ri_RangeTableIndex)) { + + targetList = initNode->targetlist; + j = (JunkFilter *) ExecInitJunkFilter(targetList); + junkList = lappend(junkList, j); + } + + } + unionstate->as_junkFilter_list = junkList; + if (junkList != NIL) + estate->es_junkFilter = (JunkFilter *)lfirst(junkList); + + /* ---------------- + * initialize the return type from the appropriate subplan. + * ---------------- + */ + initNode = (Plan *) nth(0, unionplans); + ExecAssignResultType(&unionstate->cstate, +/* ExecGetExecTupDesc(initNode), */ + ExecGetTupType(initNode)); + unionstate->cstate.cs_ProjInfo = NULL; + + /* ---------------- + * return the result from the first subplan's initialization + * ---------------- + */ + unionstate->as_whichplan = 0; + exec_append_initialize_next(node); +#if 0 + result = (List *) initialized[0]; +#endif + return TRUE; +} + +int +ExecCountSlotsAppend(Append *node) +{ + List *plan; + List *unionplans = node->unionplans; + int nSlots = 0; + + foreach (plan,unionplans) { + nSlots += ExecCountSlotsNode((Plan *)lfirst(plan)); + } + return nSlots + APPEND_NSLOTS; +} + +/* ---------------------------------------------------------------- + * ExecProcAppend + * + * Handles the iteration over the multiple scans. + * + * NOTE: Can't call this ExecAppend, that name is used in execMain.l + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecProcAppend(Append *node) +{ + EState *estate; + AppendState *unionstate; + + int whichplan; + List *unionplans; + Plan *subnode; + TupleTableSlot *result; + TupleTableSlot *result_slot; + ScanDirection direction; + + /* ---------------- + * get information from the node + * ---------------- + */ + unionstate = node->unionstate; + estate = node->plan.state; + direction = estate->es_direction; + + unionplans = node->unionplans; + whichplan = unionstate->as_whichplan; + result_slot = unionstate->cstate.cs_ResultTupleSlot; + + /* ---------------- + * figure out which subplan we are currently processing + * ---------------- + */ + subnode = (Plan *) nth(whichplan, unionplans); + + if (subnode == NULL) + elog(DEBUG, "ExecProcAppend: subnode is NULL"); + + /* ---------------- + * get a tuple from the subplan + * ---------------- + */ + result = ExecProcNode(subnode, (Plan*)node); + + if (! TupIsNull(result)) { + /* ---------------- + * if the subplan gave us something then place a copy of + * whatever we get into our result slot and return it, else.. + * ---------------- + */ + return ExecStoreTuple(result->val, + result_slot, result->ttc_buffer, false); + + } else { + /* ---------------- + * .. go on to the "next" subplan in the appropriate + * direction and try processing again (recursively) + * ---------------- + */ + whichplan = unionstate->as_whichplan; + + if (ScanDirectionIsForward(direction)) + { + unionstate->as_whichplan = whichplan + 1; + } + else + { + unionstate->as_whichplan = whichplan - 1; + } + + /* ---------------- + * return something from next node or an empty slot + * all of our subplans have been exhausted. 
+ * ---------------- + */ + if (exec_append_initialize_next(node)) { + ExecSetSlotDescriptorIsNew(result_slot, true); + return + ExecProcAppend(node); + } else + return ExecClearTuple(result_slot); + } +} + +/* ---------------------------------------------------------------- + * ExecEndAppend + * + * Shuts down the subscans of the append node. + * + * Returns nothing of interest. + * ---------------------------------------------------------------- + */ +void +ExecEndAppend(Append *node) +{ + AppendState *unionstate; + int nplans; + List *unionplans; + bool *initialized; + int i; + List *resultRelationInfoList; + RelationInfo *resultRelationInfo; + + /* ---------------- + * get information from the node + * ---------------- + */ + unionstate = node->unionstate; + unionplans = node->unionplans; + nplans = unionstate->as_nplans; + initialized = unionstate->as_initialized; + + /* ---------------- + * shut down each of the subscans + * ---------------- + */ + for(i = 0; i < nplans; i++) { + if (initialized[i]==TRUE) { + ExecEndNode( (Plan *) nth(i, unionplans), (Plan*)node ); + } + } + + /* ---------------- + * close out the different result relations + * ---------------- + */ + resultRelationInfoList = unionstate->as_result_relation_info_list; + while (resultRelationInfoList != NIL) { + Relation resultRelationDesc; + + resultRelationInfo = (RelationInfo*) lfirst(resultRelationInfoList); + resultRelationDesc = resultRelationInfo->ri_RelationDesc; + heap_close(resultRelationDesc); + pfree(resultRelationInfo); + resultRelationInfoList = lnext(resultRelationInfoList); + } + if (unionstate->as_result_relation_info_list) + pfree(unionstate->as_result_relation_info_list); + + /* XXX should free unionstate->as_rtentries and unionstate->as_junkfilter_list here */ +} + diff --git a/src/backend/executor/nodeAppend.h b/src/backend/executor/nodeAppend.h new file mode 100644 index 00000000000..fd2cdbbe81e --- /dev/null +++ b/src/backend/executor/nodeAppend.h @@ -0,0 +1,22 @@ +/*------------------------------------------------------------------------- + * + * nodeAppend.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeAppend.h,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEAPPEND_H +#define NODEAPPEND_H + +extern bool exec_append_initialize_next(Append *node); +extern bool ExecInitAppend(Append *node, EState *estate, Plan *parent); +extern int ExecCountSlotsAppend(Append *node); +extern TupleTableSlot *ExecProcAppend(Append *node); +extern void ExecEndAppend(Append *node); + +#endif /* NODEAPPEND_H */ diff --git a/src/backend/executor/nodeGroup.c b/src/backend/executor/nodeGroup.c new file mode 100644 index 00000000000..e9b0847b5f9 --- /dev/null +++ b/src/backend/executor/nodeGroup.c @@ -0,0 +1,407 @@ +/*------------------------------------------------------------------------- + * + * nodeGroup.c-- + * Routines to handle group nodes (used for queries with GROUP BY clause). + * + * Copyright (c) 1994, Regents of the University of California + * + * + * DESCRIPTION + * The Group node is designed for handling queries with a GROUP BY clause. + * It's outer plan must be a sort node. It assumes that the tuples it gets + * back from the outer plan is sorted in the order specified by the group + * columns. (ie. 
tuples from the same group are consecutive) + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeGroup.c,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "access/heapam.h" +#include "catalog/catalog.h" +#include "executor/executor.h" +#include "executor/nodeGroup.h" + +static TupleTableSlot *ExecGroupEveryTuple(Group *node); +static TupleTableSlot *ExecGroupOneTuple(Group *node); +static bool sameGroup(TupleTableSlot *oldslot, TupleTableSlot *newslot, + int numCols, AttrNumber *grpColIdx, TupleDesc tupdesc); + +/* --------------------------------------- + * ExecGroup - + * + * There are two modes in which tuples are returned by ExecGroup. If + * tuplePerGroup is TRUE, every tuple from the same group will be + * returned, followed by a NULL at the end of each group. This is + * useful for Agg node which needs to aggregate over tuples of the same + * group. (eg. SELECT salary, count{*} FROM emp GROUP BY salary) + * + * If tuplePerGroup is FALSE, only one tuple per group is returned. The + * tuple returned contains only the group columns. NULL is returned only + * at the end when no more groups is present. This is useful when + * the query does not involve aggregates. (eg. SELECT salary FROM emp + * GROUP BY salary) + * ------------------------------------------ + */ +TupleTableSlot * +ExecGroup(Group *node) +{ + if (node->tuplePerGroup) + return ExecGroupEveryTuple(node); + else + return ExecGroupOneTuple(node); +} + +/* + * ExecGroupEveryTuple - + * return every tuple with a NULL between each group + */ +static TupleTableSlot * +ExecGroupEveryTuple(Group *node) +{ + GroupState *grpstate; + EState *estate; + ExprContext *econtext; + + HeapTuple outerTuple = NULL; + TupleTableSlot *outerslot, *lastslot; + ProjectionInfo *projInfo; + TupleTableSlot *resultSlot; + + bool isDone; + + /* --------------------- + * get state info from node + * --------------------- + */ + grpstate = node->grpstate; + if (grpstate->grp_done) + return NULL; + + estate = node->plan.state; + + econtext = grpstate->csstate.cstate.cs_ExprContext; + + if (grpstate->grp_useLastTuple) { + /* + * we haven't returned last tuple yet because it is not of the + * same group + */ + grpstate->grp_useLastTuple = FALSE; + + ExecStoreTuple(grpstate->grp_lastSlot->val, + grpstate->csstate.css_ScanTupleSlot, + grpstate->grp_lastSlot->ttc_buffer, + false); + } else { + outerslot = ExecProcNode(outerPlan(node), (Plan*)node); + if (outerslot) + outerTuple = outerslot->val; + if (!HeapTupleIsValid(outerTuple)) { + grpstate->grp_done = TRUE; + return NULL; + } + + /* ---------------- + * Compare with last tuple and see if this tuple is of + * the same group. + * ---------------- + */ + lastslot = grpstate->csstate.css_ScanTupleSlot; + + if (lastslot->val != NULL && + (!sameGroup(lastslot, outerslot, + node->numCols, node->grpColIdx, + ExecGetScanType(&grpstate->csstate)))) { +/* ExecGetResultType(&grpstate->csstate.cstate)))) {*/ + + grpstate->grp_useLastTuple = TRUE; + + /* save it for next time */ + grpstate->grp_lastSlot = outerslot; + + /* + * signifies the end of the group + */ + return NULL; + } + + ExecStoreTuple(outerTuple, + grpstate->csstate.css_ScanTupleSlot, + outerslot->ttc_buffer, + false); + } + + /* ---------------- + * form a projection tuple, store it in the result tuple + * slot and return it. 
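
ExecGroup above relies on the outer plan delivering tuples already sorted on the grouping columns, so a group ends exactly when the next tuple stops matching the previous one; the tuple read "one too far" is stashed (grp_useLastTuple, grp_lastSlot) and replayed on the next call. With a whole array in hand the stash is unnecessary, so the toy program below just compares each value with its predecessor; it shows the two output modes described in the comment, with invented data and names.

#include <stdio.h>

int main(void)
{
    /* already sorted on the grouping column, as ExecGroup requires */
    int salary[] = {100, 100, 200, 300, 300, 300};
    int n = sizeof(salary) / sizeof(salary[0]);
    int i;

    /* mode 1: tuplePerGroup = true -- emit every row, with a marker
     * between groups (the NULL that ExecGroupEveryTuple returns). */
    printf("every tuple:   ");
    for (i = 0; i < n; i++) {
        if (i > 0 && salary[i] != salary[i - 1])
            printf("| ");                /* end-of-group marker */
        printf("%d ", salary[i]);
    }
    printf("\n");

    /* mode 2: tuplePerGroup = false -- one representative per group,
     * like ExecGroupOneTuple. */
    printf("one per group: ");
    for (i = 0; i < n; i++) {
        if (i == 0 || salary[i] != salary[i - 1])
            printf("%d ", salary[i]);
    }
    printf("\n");
    return 0;
}
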
+ * ---------------- + */ + projInfo = grpstate->csstate.cstate.cs_ProjInfo; + + econtext->ecxt_scantuple = grpstate->csstate.css_ScanTupleSlot; + resultSlot = ExecProject(projInfo, &isDone); + + return resultSlot; +} + +/* + * ExecGroupOneTuple - + * returns one tuple per group, a NULL at the end when there are no more + * tuples. + */ +static TupleTableSlot * +ExecGroupOneTuple(Group *node) +{ + GroupState *grpstate; + EState *estate; + ExprContext *econtext; + + HeapTuple outerTuple = NULL; + TupleTableSlot *outerslot, *lastslot; + ProjectionInfo *projInfo; + TupleTableSlot *resultSlot; + + bool isDone; + + /* --------------------- + * get state info from node + * --------------------- + */ + grpstate = node->grpstate; + if (grpstate->grp_done) + return NULL; + + estate = node->plan.state; + + econtext = node->grpstate->csstate.cstate.cs_ExprContext; + + if (grpstate->grp_useLastTuple) { + grpstate->grp_useLastTuple = FALSE; + ExecStoreTuple(grpstate->grp_lastSlot->val, + grpstate->csstate.css_ScanTupleSlot, + grpstate->grp_lastSlot->ttc_buffer, + false); + } else { + outerslot = ExecProcNode(outerPlan(node), (Plan*)node); + if (outerslot) outerTuple = outerslot->val; + if (!HeapTupleIsValid(outerTuple)) { + grpstate->grp_done = TRUE; + return NULL; + } + ExecStoreTuple(outerTuple, + grpstate->csstate.css_ScanTupleSlot, + outerslot->ttc_buffer, + false); + } + lastslot = grpstate->csstate.css_ScanTupleSlot; + + /* + * find all tuples that belong to a group + */ + for(;;) { + outerslot = ExecProcNode(outerPlan(node), (Plan*)node); + outerTuple = (outerslot) ? outerslot->val : NULL; + if (!HeapTupleIsValid(outerTuple)) { + /* + * we have at least one tuple (lastslot) if we reach here + */ + grpstate->grp_done = TRUE; + + /* return lastslot */ + break; + } + + /* ---------------- + * Compare with last tuple and see if this tuple is of + * the same group. + * ---------------- + */ + if ((!sameGroup(lastslot, outerslot, + node->numCols, node->grpColIdx, + ExecGetScanType(&grpstate->csstate)))) { +/* ExecGetResultType(&grpstate->csstate.cstate)))) {*/ + + grpstate->grp_useLastTuple = TRUE; + + /* save it for next time */ + grpstate->grp_lastSlot = outerslot; + + /* return lastslot */ + break; + } + + ExecStoreTuple(outerTuple, + grpstate->csstate.css_ScanTupleSlot, + outerslot->ttc_buffer, + false); + + lastslot = grpstate->csstate.css_ScanTupleSlot; + } + + ExecStoreTuple(lastslot->val, + grpstate->csstate.css_ScanTupleSlot, + lastslot->ttc_buffer, + false); + + /* ---------------- + * form a projection tuple, store it in the result tuple + * slot and return it. 
+ * ---------------- + */ + projInfo = grpstate->csstate.cstate.cs_ProjInfo; + + econtext->ecxt_scantuple = lastslot; + resultSlot = ExecProject(projInfo, &isDone); + + return resultSlot; +} + +/* ----------------- + * ExecInitGroup + * + * Creates the run-time information for the group node produced by the + * planner and initializes its outer subtree + * ----------------- + */ +bool +ExecInitGroup(Group *node, EState *estate, Plan *parent) +{ + GroupState *grpstate; + Plan *outerPlan; + + /* + * assign the node's execution state + */ + node->plan.state = estate; + + /* + * create state structure + */ + grpstate = makeNode(GroupState); + node->grpstate = grpstate; + grpstate->grp_useLastTuple = FALSE; + grpstate->grp_done = FALSE; + + /* + * assign node's base id and create expression context + */ + ExecAssignNodeBaseInfo(estate, &grpstate->csstate.cstate, + (Plan*) parent); + ExecAssignExprContext(estate, &grpstate->csstate.cstate); + +#define GROUP_NSLOTS 2 + /* + * tuple table initialization + */ + ExecInitScanTupleSlot(estate, &grpstate->csstate); + ExecInitResultTupleSlot(estate, &grpstate->csstate.cstate); + + /* + * initializes child nodes + */ + outerPlan = outerPlan(node); + ExecInitNode(outerPlan, estate, (Plan *)node); + + /* ---------------- + * initialize tuple type. + * ---------------- + */ + ExecAssignScanTypeFromOuterPlan((Plan *) node, &grpstate->csstate); + + /* + * Initialize tuple type for both result and scan. + * This node does no projection + */ + ExecAssignResultTypeFromTL((Plan*) node, &grpstate->csstate.cstate); + ExecAssignProjectionInfo((Plan*)node, &grpstate->csstate.cstate); + + return TRUE; +} + +int +ExecCountSlotsGroup(Group *node) +{ + return ExecCountSlotsNode(outerPlan(node)) + GROUP_NSLOTS; +} + +/* ------------------------ + * ExecEndGroup(node) + * + * ----------------------- + */ +void +ExecEndGroup(Group *node) +{ + GroupState *grpstate; + Plan *outerPlan; + + grpstate = node->grpstate; + + ExecFreeProjectionInfo(&grpstate->csstate.cstate); + + outerPlan = outerPlan(node); + ExecEndNode(outerPlan, (Plan*)node); + + /* clean up tuple table */ + ExecClearTuple(grpstate->csstate.css_ScanTupleSlot); +} + +/***************************************************************************** + * + *****************************************************************************/ + +/* + * code swiped from nodeUnique.c + */ +static bool +sameGroup(TupleTableSlot *oldslot, + TupleTableSlot *newslot, + int numCols, + AttrNumber *grpColIdx, + TupleDesc tupdesc) +{ + bool isNull1,isNull2; + char *attr1, *attr2; + char *val1, *val2; + int i; + AttrNumber att; + Oid typoutput; + + for(i = 0; i < numCols; i++) { + att = grpColIdx[i]; + typoutput = typtoout((Oid)tupdesc->attrs[att-1]->atttypid); + + attr1 = heap_getattr(oldslot->val, + InvalidBuffer, + att, + tupdesc, + &isNull1); + + attr2 = heap_getattr(newslot->val, + InvalidBuffer, + att, + tupdesc, + &isNull2); + + if (isNull1 == isNull2) { + if (isNull1) /* both are null, they are equal */ + continue; + + val1 = fmgr(typoutput, attr1, + gettypelem(tupdesc->attrs[att-1]->atttypid)); + val2 = fmgr(typoutput, attr2, + gettypelem(tupdesc->attrs[att-1]->atttypid)); + + /* now, val1 and val2 are ascii representations so we can + use strcmp for comparison */ + if (strcmp(val1,val2) != 0) + return FALSE; + } else { + /* one is null and the other isn't, they aren't equal */ + return FALSE; + } + } + + return TRUE; +} diff --git a/src/backend/executor/nodeGroup.h b/src/backend/executor/nodeGroup.h new file mode 100644 index 
00000000000..067028ea8e1 --- /dev/null +++ b/src/backend/executor/nodeGroup.h @@ -0,0 +1,21 @@ +/*------------------------------------------------------------------------- + * + * nodeGroup.h-- + * prototypes for nodeGroup.c + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeGroup.h,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEGROUP_H +#define NODEGROUP_H + +extern TupleTableSlot *ExecGroup(Group *node); +extern bool ExecInitGroup(Group *node, EState *estate, Plan *parent); +extern int ExecCountSlotsGroup(Group *node); +extern void ExecEndGroup(Group *node); + +#endif /* NODEGROUP_H */ diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c new file mode 100644 index 00000000000..55a5e1f0276 --- /dev/null +++ b/src/backend/executor/nodeHash.c @@ -0,0 +1,828 @@ +/*------------------------------------------------------------------------- + * + * nodeHash.c-- + * Routines to hash relations for hashjoin + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecHash - generate an in-memory hash table of the relation + * ExecInitHash - initialize node and subnodes.. + * ExecEndHash - shutdown node and subnodes + * + */ + +#include /* for sprintf() */ +#include +#include +#include "storage/fd.h" /* for SEEK_ */ +#include "storage/ipc.h" +#include "storage/bufmgr.h" /* for BLCKSZ */ +#include "executor/executor.h" +#include "executor/nodeHash.h" +#include "executor/nodeHashjoin.h" +#include "utils/palloc.h" + +extern int NBuffers; +static int HashTBSize; + +static void mk_hj_temp(char *tempname); +static int hashFunc(char *key, int len); + +/* ---------------------------------------------------------------- + * ExecHash + * + * build hash table for hashjoin, all do partitioning if more + * than one batches are required. + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecHash(Hash *node) +{ + EState *estate; + HashState *hashstate; + Plan *outerNode; + Var *hashkey; + HashJoinTable hashtable; + TupleTableSlot *slot; + ExprContext *econtext; + + int nbatch; + File *batches; + RelativeAddr *batchPos; + int *batchSizes; + int i; + RelativeAddr *innerbatchNames; + + /* ---------------- + * get state info from node + * ---------------- + */ + + hashstate = node->hashstate; + estate = node->plan.state; + outerNode = outerPlan(node); + + hashtable = node->hashtable; + if (hashtable == NULL) + elog(WARN, "ExecHash: hash table is NULL."); + + nbatch = hashtable->nbatch; + + if (nbatch > 0) { /* if needs hash partition */ + innerbatchNames = (RelativeAddr *) ABSADDR(hashtable->innerbatchNames); + + /* -------------- + * allocate space for the file descriptors of batch files + * then open the batch files in the current processes. 
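
ExecHash above either builds the in-memory table directly or, when nbatch > 0, routes tuples into per-batch files so each batch can be joined separately later. The sketch below models only that partitioning decision: keys whose hash falls in the in-memory range are kept, the rest are counted against one of nbatch overflow buckets standing in for the temp files. The hash function, bucket counts and key values are arbitrary choices for the example, not taken from the sources.

#include <stdio.h>

#define NKEPT_BUCKETS 4          /* stands in for the first-pass buckets  */
#define NBATCH        3          /* stands in for the overflow batch files */

static unsigned toy_hash(unsigned key)
{
    return key * 2654435761u;    /* any reasonable mixing function will do */
}

int main(void)
{
    unsigned keys[] = {12, 7, 99, 3, 42, 57, 8, 23, 64, 5};
    int nkeys = sizeof(keys) / sizeof(keys[0]);
    int batch_sizes[NBATCH] = {0, 0, 0};
    int kept = 0;
    int i;

    for (i = 0; i < nkeys; i++) {
        unsigned bucket = toy_hash(keys[i]) % (NKEPT_BUCKETS + NBATCH);

        if (bucket < NKEPT_BUCKETS)
            kept++;                                  /* stays in memory   */
        else
            batch_sizes[bucket - NKEPT_BUCKETS]++;   /* would be written  */
    }

    printf("kept in memory: %d\n", kept);
    for (i = 0; i < NBATCH; i++)
        printf("batch %d: %d tuples\n", i, batch_sizes[i]);
    return 0;
}
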
+ * -------------- + */ + batches = (File*)palloc(nbatch * sizeof(File)); + for (i=0; ihashBatches = batches; + batchPos = (RelativeAddr*) ABSADDR(hashtable->innerbatchPos); + batchSizes = (int*) ABSADDR(hashtable->innerbatchSizes); + } + + /* ---------------- + * set expression context + * ---------------- + */ + hashkey = node->hashkey; + econtext = hashstate->cstate.cs_ExprContext; + + /* ---------------- + * get tuple and insert into the hash table + * ---------------- + */ + for (;;) { + slot = ExecProcNode(outerNode, (Plan*)node); + if (TupIsNull(slot)) + break; + + econtext->ecxt_innertuple = slot; + ExecHashTableInsert(hashtable, econtext, hashkey, + hashstate->hashBatches); + + ExecClearTuple(slot); + } + + /* + * end of build phase, flush all the last pages of the batches. + */ + for (i=0; ibatch)+i*BLCKSZ,BLCKSZ) < 0) + perror("FileWrite"); + NDirectFileWrite++; + } + + /* --------------------- + * Return the slot so that we have the tuple descriptor + * when we need to save/restore them. -Jeff 11 July 1991 + * --------------------- + */ + return slot; +} + +/* ---------------------------------------------------------------- + * ExecInitHash + * + * Init routine for Hash node + * ---------------------------------------------------------------- + */ +bool +ExecInitHash(Hash *node, EState *estate, Plan *parent) +{ + HashState *hashstate; + Plan *outerPlan; + + SO1_printf("ExecInitHash: %s\n", + "initializing hash node"); + + /* ---------------- + * assign the node's execution state + * ---------------- + */ + node->plan.state = estate; + + /* ---------------- + * create state structure + * ---------------- + */ + hashstate = makeNode(HashState); + node->hashstate = hashstate; + hashstate->hashBatches = NULL; + + /* ---------------- + * Miscellanious initialization + * + * + assign node's base_id + * + assign debugging hooks and + * + create expression context for node + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &hashstate->cstate, parent); + ExecAssignExprContext(estate, &hashstate->cstate); + +#define HASH_NSLOTS 1 + /* ---------------- + * initialize our result slot + * ---------------- + */ + ExecInitResultTupleSlot(estate, &hashstate->cstate); + + /* ---------------- + * initializes child nodes + * ---------------- + */ + outerPlan = outerPlan(node); + ExecInitNode(outerPlan, estate, (Plan *)node); + + /* ---------------- + * initialize tuple type. no need to initialize projection + * info because this node doesn't do projections + * ---------------- + */ + ExecAssignResultTypeFromOuterPlan((Plan *) node, &hashstate->cstate); + hashstate->cstate.cs_ProjInfo = NULL; + + return TRUE; +} + +int +ExecCountSlotsHash(Hash *node) +{ + return ExecCountSlotsNode(outerPlan(node)) + + ExecCountSlotsNode(innerPlan(node)) + + HASH_NSLOTS; +} + +/* --------------------------------------------------------------- + * ExecEndHash + * + * clean up routine for Hash node + * ---------------------------------------------------------------- + */ +void +ExecEndHash(Hash *node) +{ + HashState *hashstate; + Plan *outerPlan; + File *batches; + + /* ---------------- + * get info from the hash state + * ---------------- + */ + hashstate = node->hashstate; + batches = hashstate->hashBatches; + if (batches != NULL) + pfree(batches); + + /* ---------------- + * free projection info. no need to free result type info + * because that came from the outer plan... 
+ * ---------------- + */ + ExecFreeProjectionInfo(&hashstate->cstate); + + /* ---------------- + * shut down the subplan + * ---------------- + */ + outerPlan = outerPlan(node); + ExecEndNode(outerPlan, (Plan*)node); +} + +RelativeAddr +hashTableAlloc(int size, HashJoinTable hashtable) +{ + RelativeAddr p; + p = hashtable->top; + hashtable->top += size; + return p; +} + +/* ---------------------------------------------------------------- + * ExecHashTableCreate + * + * create a hashtable in shared memory for hashjoin. + * ---------------------------------------------------------------- + */ +#define NTUP_PER_BUCKET 10 +#define FUDGE_FAC 1.5 + +HashJoinTable +ExecHashTableCreate(Hash *node) +{ + Plan *outerNode; + int nbatch; + int ntuples; + int tupsize; + IpcMemoryId shmid; + HashJoinTable hashtable; + HashBucket bucket; + int nbuckets; + int totalbuckets; + int bucketsize; + int i; + RelativeAddr *outerbatchNames; + RelativeAddr *outerbatchPos; + RelativeAddr *innerbatchNames; + RelativeAddr *innerbatchPos; + int *innerbatchSizes; + RelativeAddr tempname; + + nbatch = -1; + HashTBSize = NBuffers/2; + while (nbatch < 0) { + /* + * determine number of batches for the hashjoin + */ + HashTBSize *= 2; + nbatch = ExecHashPartition(node); + } + /* ---------------- + * get information about the size of the relation + * ---------------- + */ + outerNode = outerPlan(node); + ntuples = outerNode->plan_size; + if (ntuples <= 0) + ntuples = 1000; /* XXX just a hack */ + tupsize = outerNode->plan_width + sizeof(HeapTupleData); + + /* + * totalbuckets is the total number of hash buckets needed for + * the entire relation + */ + totalbuckets = ceil((double)ntuples/NTUP_PER_BUCKET); + bucketsize = LONGALIGN (NTUP_PER_BUCKET * tupsize + sizeof(*bucket)); + + /* + * nbuckets is the number of hash buckets for the first pass + * of hybrid hashjoin + */ + nbuckets = (HashTBSize - nbatch) * BLCKSZ / (bucketsize * FUDGE_FAC); + if (totalbuckets < nbuckets) + totalbuckets = nbuckets; + if (nbatch == 0) + nbuckets = totalbuckets; +#ifdef HJDEBUG + printf("nbatch = %d, totalbuckets = %d, nbuckets = %d\n", nbatch, totalbuckets, nbuckets); +#endif + + /* ---------------- + * in non-parallel machines, we don't need to put the hash table + * in the shared memory. We just palloc it. + * ---------------- + */ + hashtable = (HashJoinTable)palloc((HashTBSize+1)*BLCKSZ); + shmid = 0; + + if (hashtable == NULL) { + elog(WARN, "not enough memory for hashjoin."); + } + /* ---------------- + * initialize the hash table header + * ---------------- + */ + hashtable->nbuckets = nbuckets; + hashtable->totalbuckets = totalbuckets; + hashtable->bucketsize = bucketsize; + hashtable->shmid = shmid; + hashtable->top = sizeof(HashTableData); + hashtable->bottom = HashTBSize * BLCKSZ; + /* + * hashtable->readbuf has to be long aligned!!! 
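The sizing arithmetic used here (ExecHashPartition for the batch count, then the bucket computation in ExecHashTableCreate) can be checked with a short standalone program; every concrete number below, including the block size, workspace pages, tuple width and header overhead, is an assumption for illustration only.

    /* worked example of the nbatch / nbuckets arithmetic; numbers are assumed */
    #include <stdio.h>
    #include <math.h>

    #define BLCKSZ          8192
    #define NTUP_PER_BUCKET 10
    #define FUDGE_FAC       1.5

    int main(void)
    {
        int ntuples    = 10000;          /* outer plan_size (assumed) */
        int tupsize    = 140 + 40;       /* plan_width + tuple header (assumed) */
        int hashtbsize = 64;             /* workspace pages, stands in for HashTBSize */
        int pages, nbatch, totalbuckets, bucketsize, nbuckets;

        pages = (int) ceil((double) ntuples * tupsize * FUDGE_FAC / BLCKSZ);
        if (ceil(sqrt((double) pages)) > hashtbsize)
            nbatch = -1;                 /* too little workspace: caller doubles it and retries */
        else if (pages <= hashtbsize)
            nbatch = 0;                  /* whole inner relation fits in memory */
        else
            nbatch = (int) ceil((double)(pages - hashtbsize) / (hashtbsize - 1));

        totalbuckets = (int) ceil((double) ntuples / NTUP_PER_BUCKET);
        bucketsize   = NTUP_PER_BUCKET * tupsize + 16;   /* + bucket header, alignment ignored */
        nbuckets     = (int)((hashtbsize - (nbatch > 0 ? nbatch : 0)) * BLCKSZ
                             / (bucketsize * FUDGE_FAC));
        if (totalbuckets < nbuckets)
            totalbuckets = nbuckets;
        if (nbatch == 0)
            nbuckets = totalbuckets;

        printf("pages=%d nbatch=%d totalbuckets=%d nbuckets=%d\n",
               pages, nbatch, totalbuckets, nbuckets);
        return 0;
    }

With these assumed inputs the run prints pages=330, nbatch=5, totalbuckets=1000 and nbuckets=177, i.e. five overflow batches with 177 buckets resident during the first pass.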
+ */ + hashtable->readbuf = hashtable->bottom; + hashtable->nbatch = nbatch; + hashtable->curbatch = 0; + hashtable->pcount = hashtable->nprocess = 0; + if (nbatch > 0) { + /* --------------- + * allocate and initialize the outer batches + * --------------- + */ + outerbatchNames = (RelativeAddr*)ABSADDR( + hashTableAlloc(nbatch * sizeof(RelativeAddr), hashtable)); + outerbatchPos = (RelativeAddr*)ABSADDR( + hashTableAlloc(nbatch * sizeof(RelativeAddr), hashtable)); + for (i=0; iouterbatchNames = RELADDR(outerbatchNames); + hashtable->outerbatchPos = RELADDR(outerbatchPos); + /* --------------- + * allocate and initialize the inner batches + * --------------- + */ + innerbatchNames = (RelativeAddr*)ABSADDR( + hashTableAlloc(nbatch * sizeof(RelativeAddr), hashtable)); + innerbatchPos = (RelativeAddr*)ABSADDR( + hashTableAlloc(nbatch * sizeof(RelativeAddr), hashtable)); + innerbatchSizes = (int*)ABSADDR( + hashTableAlloc(nbatch * sizeof(int), hashtable)); + for (i=0; iinnerbatchNames = RELADDR(innerbatchNames); + hashtable->innerbatchPos = RELADDR(innerbatchPos); + hashtable->innerbatchSizes = RELADDR(innerbatchSizes); + } + else { + hashtable->outerbatchNames = (RelativeAddr)NULL; + hashtable->outerbatchPos = (RelativeAddr)NULL; + hashtable->innerbatchNames = (RelativeAddr)NULL; + hashtable->innerbatchPos = (RelativeAddr)NULL; + hashtable->innerbatchSizes = (RelativeAddr)NULL; + } + + hashtable->batch = (RelativeAddr)LONGALIGN(hashtable->top + + bucketsize * nbuckets); + hashtable->overflownext=hashtable->batch + nbatch * BLCKSZ; + /* ---------------- + * initialize each hash bucket + * ---------------- + */ + bucket = (HashBucket)ABSADDR(hashtable->top); + for (i=0; itop = RELADDR((char*)bucket + sizeof(*bucket)); + bucket->bottom = bucket->top; + bucket->firstotuple = bucket->lastotuple = -1; + bucket = (HashBucket)LONGALIGN(((char*)bucket + bucketsize)); + } + return(hashtable); +} + +/* ---------------------------------------------------------------- + * ExecHashTableInsert + * + * insert a tuple into the hash table depending on the hash value + * it may just go to a tmp file for other batches + * ---------------------------------------------------------------- + */ +void +ExecHashTableInsert(HashJoinTable hashtable, + ExprContext *econtext, + Var *hashkey, + File *batches) +{ + TupleTableSlot *slot; + HeapTuple heapTuple; + HashBucket bucket; + int bucketno; + int nbatch; + int batchno; + char *buffer; + RelativeAddr *batchPos; + int *batchSizes; + char *pos; + + nbatch = hashtable->nbatch; + batchPos = (RelativeAddr*)ABSADDR(hashtable->innerbatchPos); + batchSizes = (int*)ABSADDR(hashtable->innerbatchSizes); + + slot = econtext->ecxt_innertuple; + heapTuple = slot->val; + +#ifdef HJDEBUG + printf("Inserting "); +#endif + + bucketno = ExecHashGetBucket(hashtable, econtext, hashkey); + + /* ---------------- + * decide whether to put the tuple in the hash table or a tmp file + * ---------------- + */ + if (bucketno < hashtable->nbuckets) { + /* --------------- + * put the tuple in hash table + * --------------- + */ + bucket = (HashBucket) + (ABSADDR(hashtable->top) + bucketno * hashtable->bucketsize); + if ((char*)LONGALIGN(ABSADDR(bucket->bottom)) + -(char*)bucket+heapTuple->t_len > hashtable->bucketsize) + ExecHashOverflowInsert(hashtable, bucket, heapTuple); + else { + memmove((char*)LONGALIGN(ABSADDR(bucket->bottom)), + heapTuple, + heapTuple->t_len); + bucket->bottom = + ((RelativeAddr)LONGALIGN(bucket->bottom) + heapTuple->t_len); + } + } + else { + /* ----------------- + * put 
the tuple into a tmp file for other batches + * ----------------- + */ + batchno = (float)(bucketno - hashtable->nbuckets)/ + (float)(hashtable->totalbuckets - hashtable->nbuckets) + * nbatch; + buffer = ABSADDR(hashtable->batch) + batchno * BLCKSZ; + batchSizes[batchno]++; + pos= (char *) + ExecHashJoinSaveTuple(heapTuple, + buffer, + batches[batchno], + (char*)ABSADDR(batchPos[batchno])); + batchPos[batchno] = RELADDR(pos); + } +} + +/* ---------------------------------------------------------------- + * ExecHashTableDestroy + * + * destroy a hash table + * ---------------------------------------------------------------- + */ +void +ExecHashTableDestroy(HashJoinTable hashtable) +{ + pfree(hashtable); +} + +/* ---------------------------------------------------------------- + * ExecHashGetBucket + * + * Get the hash value for a tuple + * ---------------------------------------------------------------- + */ +int +ExecHashGetBucket(HashJoinTable hashtable, + ExprContext *econtext, + Var *hashkey) +{ + int bucketno; + Datum keyval; + bool isNull; + + + /* ---------------- + * Get the join attribute value of the tuple + * ---------------- + */ + keyval = ExecEvalVar(hashkey, econtext, &isNull); + + /* ------------------ + * compute the hash function + * ------------------ + */ + if (execConstByVal) + bucketno = + hashFunc((char *) &keyval, execConstLen) % hashtable->totalbuckets; + else + bucketno = + hashFunc((char *) keyval, execConstLen) % hashtable->totalbuckets; +#ifdef HJDEBUG + if (bucketno >= hashtable->nbuckets) + printf("hash(%d) = %d SAVED\n", keyval, bucketno); + else + printf("hash(%d) = %d\n", keyval, bucketno); +#endif + + return(bucketno); +} + +/* ---------------------------------------------------------------- + * ExecHashOverflowInsert + * + * insert into the overflow area of a hash bucket + * ---------------------------------------------------------------- + */ +void +ExecHashOverflowInsert(HashJoinTable hashtable, + HashBucket bucket, + HeapTuple heapTuple) +{ + OverflowTuple otuple; + RelativeAddr newend; + OverflowTuple firstotuple; + OverflowTuple lastotuple; + + firstotuple = (OverflowTuple)ABSADDR(bucket->firstotuple); + lastotuple = (OverflowTuple)ABSADDR(bucket->lastotuple); + /* ---------------- + * see if we run out of overflow space + * ---------------- + */ + newend = (RelativeAddr)LONGALIGN(hashtable->overflownext + sizeof(*otuple) + + heapTuple->t_len); + if (newend > hashtable->bottom) { + elog(DEBUG, "hash table out of memory. 
expanding."); + /* ------------------ + * XXX this is a temporary hack + * eventually, recursive hash partitioning will be + * implemented + * ------------------ + */ + hashtable->readbuf = hashtable->bottom = 2 * hashtable->bottom; + hashtable = + (HashJoinTable)repalloc(hashtable, hashtable->bottom+BLCKSZ); + if (hashtable == NULL) { + perror("repalloc"); + elog(WARN, "can't expand hashtable."); + } + } + + /* ---------------- + * establish the overflow chain + * ---------------- + */ + otuple = (OverflowTuple)ABSADDR(hashtable->overflownext); + hashtable->overflownext = newend; + if (firstotuple == NULL) + bucket->firstotuple = bucket->lastotuple = RELADDR(otuple); + else { + lastotuple->next = RELADDR(otuple); + bucket->lastotuple = RELADDR(otuple); + } + + /* ---------------- + * copy the tuple into the overflow area + * ---------------- + */ + otuple->next = -1; + otuple->tuple = RELADDR(LONGALIGN(((char*)otuple + sizeof(*otuple)))); + memmove(ABSADDR(otuple->tuple), + heapTuple, + heapTuple->t_len); +} + +/* ---------------------------------------------------------------- + * ExecScanHashBucket + * + * scan a hash bucket of matches + * ---------------------------------------------------------------- + */ +HeapTuple +ExecScanHashBucket(HashJoinState *hjstate, + HashBucket bucket, + HeapTuple curtuple, + List *hjclauses, + ExprContext *econtext) +{ + HeapTuple heapTuple; + bool qualResult; + OverflowTuple otuple = NULL; + OverflowTuple curotuple; + TupleTableSlot *inntuple; + OverflowTuple firstotuple; + OverflowTuple lastotuple; + HashJoinTable hashtable; + + hashtable = hjstate->hj_HashTable; + firstotuple = (OverflowTuple)ABSADDR(bucket->firstotuple); + lastotuple = (OverflowTuple)ABSADDR(bucket->lastotuple); + + /* ---------------- + * search the hash bucket + * ---------------- + */ + if (curtuple == NULL || curtuple < (HeapTuple)ABSADDR(bucket->bottom)) { + if (curtuple == NULL) + heapTuple = (HeapTuple) + LONGALIGN(ABSADDR(bucket->top)); + else + heapTuple = (HeapTuple) + LONGALIGN(((char*)curtuple+curtuple->t_len)); + + while (heapTuple < (HeapTuple)ABSADDR(bucket->bottom)) { + + inntuple = ExecStoreTuple(heapTuple, /* tuple to store */ + hjstate->hj_HashTupleSlot, /* slot */ + InvalidBuffer,/* tuple has no buffer */ + false); /* do not pfree this tuple */ + + econtext->ecxt_innertuple = inntuple; + qualResult = ExecQual((List*)hjclauses, econtext); + + if (qualResult) + return heapTuple; + + heapTuple = (HeapTuple) + LONGALIGN(((char*)heapTuple+heapTuple->t_len)); + } + + if (firstotuple == NULL) + return NULL; + otuple = firstotuple; + } + + /* ---------------- + * search the overflow area of the hash bucket + * ---------------- + */ + if (otuple == NULL) { + curotuple = hjstate->hj_CurOTuple; + otuple = (OverflowTuple)ABSADDR(curotuple->next); + } + + while (otuple != NULL) { + heapTuple = (HeapTuple)ABSADDR(otuple->tuple); + + inntuple = ExecStoreTuple(heapTuple, /* tuple to store */ + hjstate->hj_HashTupleSlot, /* slot */ + InvalidBuffer, /* SP?? 
this tuple has no buffer */ + false); /* do not pfree this tuple */ + + econtext->ecxt_innertuple = inntuple; + qualResult = ExecQual((List*)hjclauses, econtext); + + if (qualResult) { + hjstate->hj_CurOTuple = otuple; + return heapTuple; + } + + otuple = (OverflowTuple)ABSADDR(otuple->next); + } + + /* ---------------- + * no match + * ---------------- + */ + return NULL; +} + +/* ---------------------------------------------------------------- + * hashFunc + * + * the hash function, copied from Margo + * ---------------------------------------------------------------- + */ +static int +hashFunc(char *key, int len) +{ + register unsigned int h; + register int l; + register unsigned char *k; + + /* + * If this is a variable length type, then 'k' points + * to a "struct varlena" and len == -1. + * NOTE: + * VARSIZE returns the "real" data length plus the sizeof the + * "vl_len" attribute of varlena (the length information). + * 'k' points to the beginning of the varlena struct, so + * we have to use "VARDATA" to find the beginning of the "real" + * data. + */ + if (len == -1) { + l = VARSIZE(key) - VARHDRSZ; + k = (unsigned char*) VARDATA(key); + } else { + l = len; + k = (unsigned char *) key; + } + + h = 0; + + /* + * Convert string to integer + */ + while (l--) h = h * PRIME1 ^ (*k++); + h %= PRIME2; + + return (h); +} + +/* ---------------------------------------------------------------- + * ExecHashPartition + * + * determine the number of batches needed for a hashjoin + * ---------------------------------------------------------------- + */ +int +ExecHashPartition(Hash *node) +{ + Plan *outerNode; + int b; + int pages; + int ntuples; + int tupsize; + + /* + * get size information for plan node + */ + outerNode = outerPlan(node); + ntuples = outerNode->plan_size; + if (ntuples == 0) ntuples = 1000; + tupsize = outerNode->plan_width + sizeof(HeapTupleData); + pages = ceil((double)ntuples * tupsize * FUDGE_FAC / BLCKSZ); + + /* + * if amount of buffer space below hashjoin threshold, + * return negative + */ + if (ceil(sqrt((double)pages)) > HashTBSize) + return -1; + if (pages <= HashTBSize) + b = 0; /* fit in memory, no partitioning */ + else + b = ceil((double)(pages - HashTBSize)/(double)(HashTBSize - 1)); + + return b; +} + +/* ---------------------------------------------------------------- + * ExecHashTableReset + * + * reset hash table header for new batch + * ---------------------------------------------------------------- + */ +void +ExecHashTableReset(HashJoinTable hashtable, int ntuples) +{ + int i; + HashBucket bucket; + + hashtable->nbuckets = hashtable->totalbuckets + = ceil((double)ntuples/NTUP_PER_BUCKET); + + hashtable->overflownext = hashtable->top + hashtable->bucketsize * + hashtable->nbuckets; + + bucket = (HashBucket)ABSADDR(hashtable->top); + for (i=0; inbuckets; i++) { + bucket->top = RELADDR((char*)bucket + sizeof(*bucket)); + bucket->bottom = bucket->top; + bucket->firstotuple = bucket->lastotuple = -1; + bucket = (HashBucket)((char*)bucket + hashtable->bucketsize); + } + hashtable->pcount = hashtable->nprocess; +} + +static int hjtmpcnt = 0; + +static void +mk_hj_temp(char *tempname) +{ + sprintf(tempname, "HJ%d.%d", getpid(), hjtmpcnt); + hjtmpcnt = (hjtmpcnt + 1) % 1000; +} + + + diff --git a/src/backend/executor/nodeHash.h b/src/backend/executor/nodeHash.h new file mode 100644 index 00000000000..cec479dbb01 --- /dev/null +++ b/src/backend/executor/nodeHash.h @@ -0,0 +1,35 @@ +/*------------------------------------------------------------------------- + * + * 
nodeHash.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeHash.h,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEHASH_H +#define NODEHASH_H + +extern TupleTableSlot *ExecHash(Hash *node); +extern bool ExecInitHash(Hash *node, EState *estate, Plan *parent); +extern int ExecCountSlotsHash(Hash *node); +extern void ExecEndHash(Hash *node); +extern RelativeAddr hashTableAlloc(int size, HashJoinTable hashtable); +extern HashJoinTable ExecHashTableCreate(Hash *node); +extern void ExecHashTableInsert(HashJoinTable hashtable, ExprContext *econtext, + Var *hashkey, File *batches); +extern void ExecHashTableDestroy(HashJoinTable hashtable); +extern int ExecHashGetBucket(HashJoinTable hashtable, ExprContext *econtext, + Var *hashkey); +extern void ExecHashOverflowInsert(HashJoinTable hashtable, HashBucket bucket, + HeapTuple heapTuple); +extern HeapTuple ExecScanHashBucket(HashJoinState *hjstate, HashBucket bucket, + HeapTuple curtuple, List *hjclauses, + ExprContext *econtext); +extern int ExecHashPartition(Hash *node); +extern void ExecHashTableReset(HashJoinTable hashtable, int ntuples); + +#endif /* NODEHASH_H */ diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c new file mode 100644 index 00000000000..7ed4c141b95 --- /dev/null +++ b/src/backend/executor/nodeHashjoin.c @@ -0,0 +1,792 @@ +/*------------------------------------------------------------------------- + * + * nodeHashjoin.c-- + * Routines to handle hash join nodes + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeHashjoin.c,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include + +#include "storage/bufmgr.h" /* for BLCKSZ */ +#include "storage/fd.h" /* for SEEK_ */ +#include "executor/executor.h" +#include "executor/nodeHash.h" +#include "executor/nodeHashjoin.h" + +#include "optimizer/clauses.h" /* for get_leftop */ + + +#include "utils/palloc.h" + +static TupleTableSlot * +ExecHashJoinOuterGetTuple(Plan *node, Plan* parent, HashJoinState *hjstate); + +static TupleTableSlot * +ExecHashJoinGetSavedTuple(HashJoinState *hjstate, char *buffer, + File file, TupleTableSlot *tupleSlot, int *block, char **position); + +/* ---------------------------------------------------------------- + * ExecHashJoin + * + * This function implements the Hybrid Hashjoin algorithm. + * recursive partitioning remains to be added. + * Note: the relation we build hash table on is the inner + * the other one is outer. 
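Both the build side above (ExecHashTableInsert via ExecHashGetBucket) and the probe side below hash the join key with hashFunc and reduce the result modulo the total bucket count. A minimal standalone version of that scheme is sketched here; PRIME1 and PRIME2 are defined elsewhere in the tree, so the values used are placeholders, and the varlena handling is reduced to a plain string.

    /* standalone sketch of the hashFunc scheme; PRIME1/PRIME2 values are assumed */
    #include <stdio.h>
    #include <string.h>

    #define PRIME1 37
    #define PRIME2 1048583

    static int
    hash_bytes(const char *key, int len, int nbuckets)
    {
        unsigned int h = 0;
        const unsigned char *k = (const unsigned char *) key;

        while (len--)
            h = h * PRIME1 ^ *k++;   /* fold each byte into the accumulator */
        h %= PRIME2;

        return (int) (h % nbuckets); /* ExecHashGetBucket then takes mod totalbuckets */
    }

    int main(void)
    {
        const char *keys[] = { "alpha", "beta", "gamma" };
        int i;

        for (i = 0; i < 3; i++)
            printf("%s -> bucket %d\n",
                   keys[i], hash_bytes(keys[i], (int) strlen(keys[i]), 177));
        return 0;
    }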
+ * ---------------------------------------------------------------- + */ +TupleTableSlot * /* return: a tuple or NULL */ +ExecHashJoin(HashJoin *node) +{ + HashJoinState *hjstate; + EState *estate; + Plan *outerNode; + Hash *hashNode; + List *hjclauses; + Expr *clause; + List *qual; + ScanDirection dir; + TupleTableSlot *inntuple; + Var *outerVar; + ExprContext *econtext; + + HashJoinTable hashtable; + int bucketno; + HashBucket bucket; + HeapTuple curtuple; + + bool qualResult; + + TupleTableSlot *outerTupleSlot; + TupleTableSlot *innerTupleSlot; + int nbatch; + int curbatch; + File *outerbatches; + RelativeAddr *outerbatchNames; + RelativeAddr *outerbatchPos; + Var *innerhashkey; + int batch; + int batchno; + char *buffer; + int i; + bool hashPhaseDone; + char *pos; + + /* ---------------- + * get information from HashJoin node + * ---------------- + */ + hjstate = node->hashjoinstate; + hjclauses = node->hashclauses; + clause = lfirst(hjclauses); + estate = node->join.state; + qual = node->join.qual; + hashNode = (Hash *)innerPlan(node); + outerNode = outerPlan(node); + hashPhaseDone = node->hashdone; + + dir = estate->es_direction; + + /* ----------------- + * get information from HashJoin state + * ----------------- + */ + hashtable = hjstate->hj_HashTable; + bucket = hjstate->hj_CurBucket; + curtuple = hjstate->hj_CurTuple; + + /* -------------------- + * initialize expression context + * -------------------- + */ + econtext = hjstate->jstate.cs_ExprContext; + + if (hjstate->jstate.cs_TupFromTlist) { + TupleTableSlot *result; + bool isDone; + + result = ExecProject(hjstate->jstate.cs_ProjInfo, &isDone); + if (!isDone) + return result; + } + /* ---------------- + * if this is the first call, build the hash table for inner relation + * ---------------- + */ + if (!hashPhaseDone) { /* if the hash phase not completed */ + hashtable = node->hashjointable; + if (hashtable == NULL) { /* if the hash table has not been created */ + /* ---------------- + * create the hash table + * ---------------- + */ + hashtable = ExecHashTableCreate(hashNode); + hjstate->hj_HashTable = hashtable; + innerhashkey = hashNode->hashkey; + hjstate->hj_InnerHashKey = innerhashkey; + + /* ---------------- + * execute the Hash node, to build the hash table + * ---------------- + */ + hashNode->hashtable = hashtable; + innerTupleSlot = ExecProcNode((Plan *)hashNode, (Plan*) node); + } + bucket = NULL; + curtuple = NULL; + curbatch = 0; + node->hashdone = true; + } + nbatch = hashtable->nbatch; + outerbatches = hjstate->hj_OuterBatches; + if (nbatch > 0 && outerbatches == NULL) { /* if needs hash partition */ + /* ----------------- + * allocate space for file descriptors of outer batch files + * then open the batch files in the current process + * ----------------- + */ + innerhashkey = hashNode->hashkey; + hjstate->hj_InnerHashKey = innerhashkey; + outerbatchNames = (RelativeAddr*) + ABSADDR(hashtable->outerbatchNames); + outerbatches = (File*) + palloc(nbatch * sizeof(File)); + for (i=0; ihj_OuterBatches = outerbatches; + + /* ------------------ + * get the inner batch file descriptors from the + * hash node + * ------------------ + */ + hjstate->hj_InnerBatches = + hashNode->hashstate->hashBatches; + } + outerbatchPos = (RelativeAddr*)ABSADDR(hashtable->outerbatchPos); + curbatch = hashtable->curbatch; + outerbatchNames = (RelativeAddr*)ABSADDR(hashtable->outerbatchNames); + + /* ---------------- + * Now get an outer tuple and probe into the hash table for matches + * ---------------- + */ + outerTupleSlot = 
hjstate->jstate.cs_OuterTupleSlot; + outerVar = get_leftop(clause); + + bucketno = -1; /* if bucketno remains -1, means use old outer tuple */ + if (TupIsNull(outerTupleSlot)) { + /* + * if the current outer tuple is nil, get a new one + */ + outerTupleSlot = (TupleTableSlot*) + ExecHashJoinOuterGetTuple(outerNode, (Plan*)node, hjstate); + + while (curbatch <= nbatch && TupIsNull(outerTupleSlot)) { + /* + * if the current batch runs out, switch to new batch + */ + curbatch = ExecHashJoinNewBatch(hjstate); + if (curbatch > nbatch) { + /* + * when the last batch runs out, clean up + */ + ExecHashTableDestroy(hashtable); + hjstate->hj_HashTable = NULL; + return NULL; + } + else + outerTupleSlot = (TupleTableSlot*) + ExecHashJoinOuterGetTuple(outerNode, (Plan*)node, hjstate); + } + /* + * now we get an outer tuple, find the corresponding bucket for + * this tuple from the hash table + */ + econtext->ecxt_outertuple = outerTupleSlot; + +#ifdef HJDEBUG + printf("Probing "); +#endif + bucketno = ExecHashGetBucket(hashtable, econtext, outerVar); + bucket=(HashBucket)(ABSADDR(hashtable->top) + + bucketno * hashtable->bucketsize); + } + + for (;;) { + /* ---------------- + * Now we've got an outer tuple and the corresponding hash bucket, + * but this tuple may not belong to the current batch. + * ---------------- + */ + if (curbatch == 0 && bucketno != -1) /* if this is the first pass */ + batch = ExecHashJoinGetBatch(bucketno, hashtable, nbatch); + else + batch = 0; + if (batch > 0) { + /* + * if the current outer tuple does not belong to + * the current batch, save to the tmp file for + * the corresponding batch. + */ + buffer = ABSADDR(hashtable->batch) + (batch - 1) * BLCKSZ; + batchno = batch - 1; + pos = ExecHashJoinSaveTuple(outerTupleSlot->val, + buffer, + outerbatches[batchno], + ABSADDR(outerbatchPos[batchno])); + + outerbatchPos[batchno] = RELADDR(pos); + } + else if (bucket != NULL) { + do { + /* + * scan the hash bucket for matches + */ + curtuple = ExecScanHashBucket(hjstate, + bucket, + curtuple, + hjclauses, + econtext); + + if (curtuple != NULL) { + /* + * we've got a match, but still need to test qpqual + */ + inntuple = ExecStoreTuple(curtuple, + hjstate->hj_HashTupleSlot, + InvalidBuffer, + false); /* don't pfree this tuple */ + + econtext->ecxt_innertuple = inntuple; + + /* ---------------- + * test to see if we pass the qualification + * ---------------- + */ + qualResult = ExecQual((List*)qual, econtext); + + /* ---------------- + * if we pass the qual, then save state for next call and + * have ExecProject form the projection, store it + * in the tuple table, and return the slot. + * ---------------- + */ + if (qualResult) { + ProjectionInfo *projInfo; + TupleTableSlot *result; + bool isDone; + + hjstate->hj_CurBucket = bucket; + hjstate->hj_CurTuple = curtuple; + hashtable->curbatch = curbatch; + hjstate->jstate.cs_OuterTupleSlot = outerTupleSlot; + + projInfo = hjstate->jstate.cs_ProjInfo; + result = ExecProject(projInfo, &isDone); + hjstate->jstate.cs_TupFromTlist = !isDone; + return result; + } + } + } + while (curtuple != NULL); + } + + /* ---------------- + * Now the current outer tuple has run out of matches, + * so we free it and get a new outer tuple. 
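The batch test in the loop above relies on the proportional bucket-to-batch mapping implemented by ExecHashJoinGetBatch later in this file: buckets below nbuckets stay in the in-memory table (batch 0) and the remaining bucket range is spread evenly over batches 1..nbatch. A standalone sketch of that mapping, with assumed table sizes:

    /* sketch of the bucket-to-batch mapping; table sizes are assumed */
    #include <stdio.h>

    static int
    bucket_to_batch(int bucketno, int nbuckets, int totalbuckets, int nbatch)
    {
        int b;

        if (bucketno < nbuckets || nbatch == 0)
            return 0;                        /* resident in the in-memory table */

        b = (int) ((float)(bucketno - nbuckets) /
                   (float)(totalbuckets - nbuckets) * nbatch);
        return b + 1;                        /* batches are numbered from 1 */
    }

    int main(void)
    {
        /* assumed sizes: 177 in-memory buckets, 1000 total, 5 batches */
        int samples[] = { 10, 176, 177, 500, 999 };
        int i;

        for (i = 0; i < 5; i++)
            printf("bucket %3d -> batch %d\n",
                   samples[i], bucket_to_batch(samples[i], 177, 1000, 5));
        return 0;
    }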
+ * ---------------- + */ + outerTupleSlot = (TupleTableSlot*) + ExecHashJoinOuterGetTuple(outerNode, (Plan*) node, hjstate); + + while (curbatch <= nbatch && TupIsNull(outerTupleSlot)) { + /* + * if the current batch runs out, switch to new batch + */ + curbatch = ExecHashJoinNewBatch(hjstate); + if (curbatch > nbatch) { + /* + * when the last batch runs out, clean up + */ + ExecHashTableDestroy(hashtable); + hjstate->hj_HashTable = NULL; + return NULL; + } + else + outerTupleSlot = (TupleTableSlot*) + ExecHashJoinOuterGetTuple(outerNode, (Plan*)node, hjstate); + } + + /* ---------------- + * Now get the corresponding hash bucket for the new + * outer tuple. + * ---------------- + */ + econtext->ecxt_outertuple = outerTupleSlot; +#ifdef HJDEBUG + printf("Probing "); +#endif + bucketno = ExecHashGetBucket(hashtable, econtext, outerVar); + bucket=(HashBucket)(ABSADDR(hashtable->top) + + bucketno * hashtable->bucketsize); + curtuple = NULL; + } +} + +/* ---------------------------------------------------------------- + * ExecInitHashJoin + * + * Init routine for HashJoin node. + * ---------------------------------------------------------------- + */ +bool /* return: initialization status */ +ExecInitHashJoin(HashJoin *node, EState *estate, Plan *parent) +{ + HashJoinState *hjstate; + Plan *outerNode; + Hash *hashNode; + + /* ---------------- + * assign the node's execution state + * ---------------- + */ + node->join.state = estate; + + /* ---------------- + * create state structure + * ---------------- + */ + hjstate = makeNode(HashJoinState); + + node->hashjoinstate = hjstate; + + /* ---------------- + * Miscellanious initialization + * + * + assign node's base_id + * + assign debugging hooks and + * + create expression context for node + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &hjstate->jstate, parent); + ExecAssignExprContext(estate, &hjstate->jstate); + +#define HASHJOIN_NSLOTS 2 + /* ---------------- + * tuple table initialization + * ---------------- + */ + ExecInitResultTupleSlot(estate, &hjstate->jstate); + ExecInitOuterTupleSlot(estate, hjstate); + + /* ---------------- + * initializes child nodes + * ---------------- + */ + outerNode = outerPlan((Plan *)node); + hashNode = (Hash*)innerPlan((Plan *)node); + + ExecInitNode(outerNode, estate, (Plan *) node); + ExecInitNode((Plan*)hashNode, estate, (Plan *) node); + + /* ---------------- + * now for some voodoo. our temporary tuple slot + * is actually the result tuple slot of the Hash node + * (which is our inner plan). we do this because Hash + * nodes don't return tuples via ExecProcNode() -- instead + * the hash join node uses ExecScanHashBucket() to get + * at the contents of the hash table. 
-cim 6/9/91 + * ---------------- + */ + { + HashState *hashstate = hashNode->hashstate; + TupleTableSlot *slot = + hashstate->cstate.cs_ResultTupleSlot; + hjstate->hj_HashTupleSlot = slot; + } + hjstate->hj_OuterTupleSlot->ttc_tupleDescriptor = + ExecGetTupType(outerNode); + +/* + hjstate->hj_OuterTupleSlot->ttc_execTupDescriptor = + ExecGetExecTupDesc(outerNode); +*/ + + /* ---------------- + * initialize tuple type and projection info + * ---------------- + */ + ExecAssignResultTypeFromTL((Plan*) node, &hjstate->jstate); + ExecAssignProjectionInfo((Plan*) node, &hjstate->jstate); + + /* ---------------- + * XXX comment me + * ---------------- + */ + + node->hashdone = false; + + hjstate->hj_HashTable = (HashJoinTable)NULL; + hjstate->hj_HashTableShmId = (IpcMemoryId)0; + hjstate->hj_CurBucket = (HashBucket )NULL; + hjstate->hj_CurTuple = (HeapTuple )NULL; + hjstate->hj_CurOTuple = (OverflowTuple )NULL; + hjstate->hj_InnerHashKey = (Var*)NULL; + hjstate->hj_OuterBatches = (File*)NULL; + hjstate->hj_InnerBatches = (File*)NULL; + hjstate->hj_OuterReadPos = (char*)NULL; + hjstate->hj_OuterReadBlk = (int)0; + + hjstate->jstate.cs_OuterTupleSlot = (TupleTableSlot*) NULL; + hjstate->jstate.cs_TupFromTlist = (bool) false; + + return TRUE; +} + +int +ExecCountSlotsHashJoin(HashJoin *node) +{ + return ExecCountSlotsNode(outerPlan(node)) + + ExecCountSlotsNode(innerPlan(node)) + + HASHJOIN_NSLOTS; +} + +/* ---------------------------------------------------------------- + * ExecEndHashJoin + * + * clean up routine for HashJoin node + * ---------------------------------------------------------------- + */ +void +ExecEndHashJoin(HashJoin *node) +{ + HashJoinState *hjstate; + + /* ---------------- + * get info from the HashJoin state + * ---------------- + */ + hjstate = node->hashjoinstate; + + /* ---------------- + * free hash table in case we end plan before all tuples are retrieved + * --------------- + */ + if (hjstate->hj_HashTable) { + ExecHashTableDestroy(hjstate->hj_HashTable); + hjstate->hj_HashTable = NULL; + } + + /* ---------------- + * Free the projection info and the scan attribute info + * + * Note: we don't ExecFreeResultType(hjstate) + * because the rule manager depends on the tupType + * returned by ExecMain(). So for now, this + * is freed at end-transaction time. -cim 6/2/91 + * ---------------- + */ + ExecFreeProjectionInfo(&hjstate->jstate); + + /* ---------------- + * clean up subtrees + * ---------------- + */ + ExecEndNode(outerPlan((Plan *) node), (Plan*)node); + ExecEndNode(innerPlan((Plan *) node), (Plan*)node); + + /* ---------------- + * clean out the tuple table + * ---------------- + */ + ExecClearTuple(hjstate->jstate.cs_ResultTupleSlot); + ExecClearTuple(hjstate->hj_OuterTupleSlot); + ExecClearTuple(hjstate->hj_HashTupleSlot); + +} + +/* ---------------------------------------------------------------- + * ExecHashJoinOuterGetTuple + * + * get the next outer tuple for hashjoin: either by + * executing a plan node as in the first pass, or from + * the tmp files for the hashjoin batches. 
+ * ---------------------------------------------------------------- + */ + +static TupleTableSlot * +ExecHashJoinOuterGetTuple(Plan *node, Plan* parent, HashJoinState *hjstate) +{ + TupleTableSlot *slot; + HashJoinTable hashtable; + int curbatch; + File *outerbatches; + char *outerreadPos; + int batchno; + char *outerreadBuf; + int outerreadBlk; + + hashtable = hjstate->hj_HashTable; + curbatch = hashtable->curbatch; + + if (curbatch == 0) { /* if it is the first pass */ + slot = ExecProcNode(node, parent); + return slot; + } + + /* + * otherwise, read from the tmp files + */ + outerbatches = hjstate->hj_OuterBatches; + outerreadPos = hjstate->hj_OuterReadPos; + outerreadBlk = hjstate->hj_OuterReadBlk; + outerreadBuf = ABSADDR(hashtable->readbuf); + batchno = curbatch - 1; + + slot = ExecHashJoinGetSavedTuple(hjstate, + outerreadBuf, + outerbatches[batchno], + hjstate->hj_OuterTupleSlot, + &outerreadBlk, + &outerreadPos); + + hjstate->hj_OuterReadPos = outerreadPos; + hjstate->hj_OuterReadBlk = outerreadBlk; + + return slot; +} + +/* ---------------------------------------------------------------- + * ExecHashJoinGetSavedTuple + * + * read the next tuple from a tmp file using a certain buffer + * ---------------------------------------------------------------- + */ + +static TupleTableSlot * +ExecHashJoinGetSavedTuple(HashJoinState *hjstate, + char *buffer, + File file, + TupleTableSlot *tupleSlot, + int *block, /* return parameter */ + char **position) /* return parameter */ +{ + char *bufstart; + char *bufend; + int cc; + HeapTuple heapTuple; + HashJoinTable hashtable; + + hashtable = hjstate->hj_HashTable; + bufend = buffer + *(long*)buffer; + bufstart = (char*)(buffer + sizeof(long)); + if ((*position == NULL) || (*position >= bufend)) { + if (*position == NULL) + (*block) = 0; + else + (*block)++; + FileSeek(file, *block * BLCKSZ, SEEK_SET); + cc = FileRead(file, buffer, BLCKSZ); + NDirectFileRead++; + if (cc < 0) + perror("FileRead"); + if (cc == 0) /* end of file */ + return NULL; + else + (*position) = bufstart; + } + heapTuple = (HeapTuple) (*position); + (*position) = (char*)LONGALIGN(*position + heapTuple->t_len); + + return ExecStoreTuple(heapTuple,tupleSlot,InvalidBuffer,false); +} + +/* ---------------------------------------------------------------- + * ExecHashJoinNewBatch + * + * switch to a new hashjoin batch + * ---------------------------------------------------------------- + */ +int +ExecHashJoinNewBatch(HashJoinState *hjstate) +{ + File *innerBatches; + File *outerBatches; + int *innerBatchSizes; + Var *innerhashkey; + HashJoinTable hashtable; + int nbatch; + char *readPos; + int readBlk; + char *readBuf; + TupleTableSlot *slot; + ExprContext *econtext; + int i; + int cc; + int newbatch; + + hashtable = hjstate->hj_HashTable; + outerBatches = hjstate->hj_OuterBatches; + innerBatches = hjstate->hj_InnerBatches; + nbatch = hashtable->nbatch; + newbatch = hashtable->curbatch + 1; + + /* ------------------ + * this is the last process, so it will do the cleanup and + * batch-switching. + * ------------------ + */ + if (newbatch == 1) { + /* + * if it is end of the first pass, flush all the last pages for + * the batches. 
+ */ + outerBatches = hjstate->hj_OuterBatches; + for (i=0; ibatch) + i * BLCKSZ, BLCKSZ); + NDirectFileWrite++; + if (cc < 0) + perror("FileWrite"); + } + } + if (newbatch > 1) { + /* + * remove the previous outer batch + */ + FileUnlink(outerBatches[newbatch - 2]); + } + /* + * rebuild the hash table for the new inner batch + */ + innerBatchSizes = (int*)ABSADDR(hashtable->innerbatchSizes); + /* -------------- + * skip over empty inner batches + * -------------- + */ + while (newbatch <= nbatch && innerBatchSizes[newbatch - 1] == 0) { + FileUnlink(outerBatches[newbatch-1]); + FileUnlink(innerBatches[newbatch-1]); + newbatch++; + } + if (newbatch > nbatch) { + hashtable->pcount = hashtable->nprocess; + + return newbatch; + } + ExecHashTableReset(hashtable, innerBatchSizes[newbatch - 1]); + + + econtext = hjstate->jstate.cs_ExprContext; + innerhashkey = hjstate->hj_InnerHashKey; + readPos = NULL; + readBlk = 0; + readBuf = ABSADDR(hashtable->readbuf); + + while ((slot = ExecHashJoinGetSavedTuple(hjstate, + readBuf, + innerBatches[newbatch-1], + hjstate->hj_HashTupleSlot, + &readBlk, + &readPos)) + && ! TupIsNull(slot)) { + econtext->ecxt_innertuple = slot; + ExecHashTableInsert(hashtable, econtext, innerhashkey,NULL); + /* possible bug - glass */ + } + + + /* ----------------- + * only the last process comes to this branch + * now all the processes have finished the build phase + * ---------------- + */ + + /* + * after we build the hash table, the inner batch is no longer needed + */ + FileUnlink(innerBatches[newbatch - 1]); + hjstate->hj_OuterReadPos = NULL; + hashtable->pcount = hashtable->nprocess; + + hashtable->curbatch = newbatch; + return newbatch; +} + +/* ---------------------------------------------------------------- + * ExecHashJoinGetBatch + * + * determine the batch number for a bucketno + * +----------------+-------+-------+ ... +-------+ + * 0 nbuckets totalbuckets + * batch 0 1 2 ... + * ---------------------------------------------------------------- + */ +int +ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable, int nbatch) +{ + int b; + if (bucketno < hashtable->nbuckets || nbatch == 0) + return 0; + + b = (float)(bucketno - hashtable->nbuckets) / + (float)(hashtable->totalbuckets - hashtable->nbuckets) * + nbatch; + return b+1; +} + +/* ---------------------------------------------------------------- + * ExecHashJoinSaveTuple + * + * save a tuple to a tmp file using a buffer. + * the first few bytes in a page is an offset to the end + * of the page. 
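The page layout described here (the first long of each BLCKSZ buffer records the offset of the end of the used area, with tuples appended long-aligned after it, which is exactly what ExecHashJoinGetSavedTuple walks when a batch is read back) can be modelled with a short standalone sketch; the length-prefixed Rec struct is a stand-in for HeapTuple, and the flush of a full page to the batch file is omitted.

    /* minimal model of the save/read page format; Rec stands in for HeapTuple */
    #include <stdio.h>
    #include <string.h>

    #define BLCKSZ 8192
    #define LONGALIGN(x) (((x) + sizeof(long) - 1) & ~(sizeof(long) - 1))

    typedef struct { long len; char data[64]; } Rec;

    static char *
    save_rec(char *buffer, char *position, const Rec *rec)
    {
        if (position == NULL)
            position = buffer + sizeof(long);         /* skip the end-offset word */
        /* a full page would be written to the batch file here; omitted */
        memcpy(position, rec, sizeof(Rec));
        position = buffer + LONGALIGN((size_t)(position - buffer) + sizeof(Rec));
        *(long *) buffer = (long)(position - buffer); /* remember end of used area */
        return position;
    }

    int main(void)
    {
        long  page[BLCKSZ / sizeof(long)];            /* long-aligned backing store */
        char *buffer = (char *) page;
        char *pos = NULL;
        char *rp;
        Rec   r;
        int   i;

        memset(&r, 0, sizeof(r));
        for (i = 0; i < 3; i++) {
            snprintf(r.data, sizeof(r.data), "tuple %d", i);
            r.len = (long) strlen(r.data) + 1;
            pos = save_rec(buffer, pos, &r);
        }

        /* reader: walk the records up to the stored end offset */
        for (rp = buffer + sizeof(long); rp < buffer + *(long *) buffer; ) {
            Rec *saved = (Rec *) rp;
            printf("%s (len %ld)\n", saved->data, saved->len);
            rp = buffer + LONGALIGN((size_t)(rp - buffer) + sizeof(Rec));
        }
        return 0;
    }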
+ * ---------------------------------------------------------------- + */ + +char * +ExecHashJoinSaveTuple(HeapTuple heapTuple, + char *buffer, + File file, + char *position) +{ + long *pageend; + char *pagestart; + char *pagebound; + int cc; + + pageend = (long*)buffer; + pagestart = (char*)(buffer + sizeof(long)); + pagebound = buffer + BLCKSZ; + if (position == NULL) + position = pagestart; + + if (position + heapTuple->t_len >= pagebound) { + cc = FileSeek(file, 0L, SEEK_END); + if (cc < 0) + perror("FileSeek"); + cc = FileWrite(file, buffer, BLCKSZ); + NDirectFileWrite++; + if (cc < 0) + perror("FileWrite"); + position = pagestart; + *pageend = 0; + } + memmove(position, heapTuple, heapTuple->t_len); + position = (char*)LONGALIGN(position + heapTuple->t_len); + *pageend = position - buffer; + + return position; +} diff --git a/src/backend/executor/nodeHashjoin.h b/src/backend/executor/nodeHashjoin.h new file mode 100644 index 00000000000..b8c12942b3b --- /dev/null +++ b/src/backend/executor/nodeHashjoin.h @@ -0,0 +1,33 @@ +/*------------------------------------------------------------------------- + * + * nodeHashjoin.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeHashjoin.h,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEHASHJOIN_H +#define NODEHASHJOIN_H + +extern TupleTableSlot *ExecHashJoin(HashJoin *node); + +extern bool ExecInitHashJoin(HashJoin *node, EState *estate, Plan *parent); + +extern int ExecCountSlotsHashJoin(HashJoin *node); + +extern void ExecEndHashJoin(HashJoin *node); + +extern int ExecHashJoinNewBatch(HashJoinState *hjstate); + +extern char *ExecHashJoinSaveTuple(HeapTuple heapTuple, char *buffer, + File file, char *position); + +extern int ExecHashJoinGetBatch(int bucketno, HashJoinTable hashtable, + int nbatch); + + +#endif /* NODEHASHJOIN_H */ diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c new file mode 100644 index 00000000000..758fabdefe5 --- /dev/null +++ b/src/backend/executor/nodeIndexscan.c @@ -0,0 +1,902 @@ +/*------------------------------------------------------------------------- + * + * nodeIndexscan.c-- + * Routines to support indexes and indexed scans of relations + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeIndexscan.c,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecInsertIndexTuples inserts tuples into indices on result relation + * + * ExecIndexScan scans a relation using indices + * ExecIndexNext using index to retrieve next tuple + * ExecInitIndexScan creates and initializes state info. + * ExecIndexReScan rescans the indexed relation. + * ExecEndIndexScan releases all storage. + * ExecIndexMarkPos marks scan position. + * ExecIndexRestrPos restores scan position. + * + * NOTES + * the code supporting ExecInsertIndexTuples should be + * collected and merged with the genam stuff. 
+ * + */ +#include "executor/executor.h" +#include "executor/nodeIndexscan.h" + +#include "optimizer/clauses.h" /* for get_op, get_leftop, get_rightop */ +#include "parser/parsetree.h" /* for rt_fetch() */ + +#include "access/skey.h" +#include "utils/palloc.h" +#include "catalog/index.h" +#include "storage/bufmgr.h" +#include "storage/lmgr.h" +#include "nodes/nodeFuncs.h" + +/* ---------------- + * Misc stuff to move to executor.h soon -cim 6/5/90 + * ---------------- + */ +#define NO_OP 0 +#define LEFT_OP 1 +#define RIGHT_OP 2 + +static TupleTableSlot *IndexNext(IndexScan *node); + +/* ---------------------------------------------------------------- + * IndexNext + * + * Retrieve a tuple from the IndexScan node's currentRelation + * using the indices in the IndexScanState information. + * + * note: the old code mentions 'Primary indices'. to my knowledge + * we only support a single secondary index. -cim 9/11/89 + * + * old comments: + * retrieve a tuple from relation using the indices given. + * The indices are used in the order they appear in 'indices'. + * The indices may be primary or secondary indices: + * * primary index -- scan the relation 'relID' using keys supplied. + * * secondary index -- scan the index relation to get the 'tid' for + * a tuple in the relation 'relID'. + * If the current index(pointed by 'indexPtr') fails to return a + * tuple, the next index in the indices is used. + * + * bug fix so that it should retrieve on a null scan key. + * ---------------------------------------------------------------- + */ +static TupleTableSlot * +IndexNext(IndexScan *node) +{ + EState *estate; + CommonScanState *scanstate; + IndexScanState *indexstate; + ScanDirection direction; + int indexPtr; + IndexScanDescPtr scanDescs; + IndexScanDesc scandesc; + Relation heapRelation; + RetrieveIndexResult result; + ItemPointer iptr; + HeapTuple tuple; + TupleTableSlot *slot; + Buffer buffer = InvalidBuffer; + + /* ---------------- + * extract necessary information from index scan node + * ---------------- + */ + estate = node->scan.plan.state; + direction = estate->es_direction; + scanstate = node->scan.scanstate; + indexstate = node->indxstate; + indexPtr = indexstate->iss_IndexPtr; + scanDescs = indexstate->iss_ScanDescs; + scandesc = scanDescs[ indexPtr ]; + heapRelation = scanstate->css_currentRelation; + + slot = scanstate->css_ScanTupleSlot; + + /* ---------------- + * ok, now that we have what we need, fetch an index tuple. + * ---------------- + */ + + for(;;) { + result = index_getnext(scandesc, direction); + /* ---------------- + * if scanning this index succeeded then return the + * appropriate heap tuple.. else return NULL. + * ---------------- + */ + if (result) { + iptr = &result->heap_iptr; + tuple = heap_fetch(heapRelation, + NowTimeQual, + iptr, + &buffer); + /* be tidy */ + pfree(result); + + if (tuple == NULL) { + /* ---------------- + * we found a deleted tuple, so keep on scanning.. + * ---------------- + */ + if (BufferIsValid(buffer)) + ReleaseBuffer(buffer); + continue; + } + + /* ---------------- + * store the scanned tuple in the scan tuple slot of + * the scan state. Eventually we will only do this and not + * return a tuple. Note: we pass 'false' because tuples + * returned by amgetnext are pointers onto disk pages and + * were not created with palloc() and so should not be pfree()'d. 
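The retry loop in IndexNext described below has a simple shape: ask the index for the next match, fetch the heap tuple, and keep scanning when the tuple turns out to be deleted. A toy stand-in (arrays instead of index_getnext and heap_fetch) showing just that control flow:

    /* toy version of the skip-deleted-tuples loop; the arrays replace the AM calls */
    #include <stdio.h>
    #include <stddef.h>

    typedef struct { int key; int deleted; } HeapEntry;

    static HeapEntry heap[] = {
        { 10, 1 },          /* deleted: the scan must skip it */
        { 11, 0 },
        { 12, 0 },
    };
    static int scanpos = 0; /* stands in for the index scan descriptor */

    static HeapEntry *
    index_next(void)
    {
        while (scanpos < (int)(sizeof(heap) / sizeof(heap[0]))) {
            HeapEntry *e = &heap[scanpos++];
            if (e->deleted)
                continue;   /* found a deleted tuple, keep on scanning */
            return e;       /* live tuple: hand it back to the caller */
        }
        return NULL;        /* index exhausted: end of scan */
    }

    int main(void)
    {
        HeapEntry *e;
        while ((e = index_next()) != NULL)
            printf("fetched key %d\n", e->key);
        return 0;
    }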
+ * ---------------- + */ + ExecStoreTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + buffer, /* buffer associated with tuple */ + false); /* don't pfree */ + + return slot; + } + + /* ---------------- + * if we get here it means the index scan failed so we + * are at the end of the scan.. + * ---------------- + */ + return ExecClearTuple(slot); + } +} + +/* ---------------------------------------------------------------- + * ExecIndexScan(node) + * + * old comments: + * Scans the relation using primary or secondary indices and returns + * the next qualifying tuple in the direction specified. + * It calls ExecScan() and passes it the access methods which returns + * the next tuple using the indices. + * + * Conditions: + * -- the "cursor" maintained by the AMI is positioned at the tuple + * returned previously. + * + * Initial States: + * -- the relation indicated is opened for scanning so that the + * "cursor" is positioned before the first qualifying tuple. + * -- all index realtions are opened for scanning. + * -- indexPtr points to the first index. + * -- state variable ruleFlag = nil. + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecIndexScan(IndexScan *node) +{ + TupleTableSlot *returnTuple; + + /* ---------------- + * use IndexNext as access method + * ---------------- + */ + returnTuple = ExecScan(&node->scan, IndexNext); + return returnTuple; +} + +/* ---------------------------------------------------------------- + * ExecIndexReScan(node) + * + * Recalculates the value of the scan keys whose value depends on + * information known at runtime and rescans the indexed relation. + * Updating the scan key was formerly done separately in + * ExecUpdateIndexScanKeys. Integrating it into ReScan + * makes rescans of indices and + * relations/general streams more uniform. + * + * ---------------------------------------------------------------- + */ +void +ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan* parent) +{ + EState *estate; + IndexScanState *indexstate; + ScanDirection direction; + IndexScanDescPtr scanDescs; + ScanKey *scanKeys; + IndexScanDesc sdesc; + ScanKey skey; + int numIndices; + int i; + + Pointer *runtimeKeyInfo; + int indexPtr; + int *numScanKeys; + List *indxqual; + List *qual; + int n_keys; + ScanKey scan_keys; + int *run_keys; + int j; + Expr *clause; + Node *scanexpr; + Datum scanvalue; + bool isNull; + bool isDone; + + indexstate = node->indxstate; + estate = node->scan.plan.state; + direction = estate->es_direction; + indexstate = node->indxstate; + numIndices = indexstate->iss_NumIndices; + scanDescs = indexstate->iss_ScanDescs; + scanKeys = indexstate->iss_ScanKeys; + + runtimeKeyInfo = (Pointer *) indexstate->iss_RuntimeKeyInfo; + + if (runtimeKeyInfo != NULL) { + /* + * get the index qualifications and + * recalculate the appropriate values + */ + indexPtr = indexstate->iss_IndexPtr; + indxqual = node->indxqual; + qual = nth(indexPtr, indxqual); + numScanKeys = indexstate->iss_NumScanKeys; + n_keys = numScanKeys[indexPtr]; + run_keys = (int *) runtimeKeyInfo[indexPtr]; + scan_keys = (ScanKey) scanKeys[indexPtr]; + + for (j=0; j < n_keys; j++) { + /* + * If we have a run-time key, then extract the run-time + * expression and evaluate it with respect to the current + * outer tuple. We then stick the result into the scan + * key. + */ + if (run_keys[j] != NO_OP) { + clause = nth(j, qual); + scanexpr = (run_keys[j] == RIGHT_OP) ? 
+ (Node*) get_rightop(clause) : (Node*) get_leftop(clause) ; + /* pass in isDone but ignore it. We don't iterate in quals */ + scanvalue = (Datum) + ExecEvalExpr(scanexpr, exprCtxt, &isNull, &isDone); + scan_keys[j].sk_argument = scanvalue; + } + } + } + + /* + * rescans all indices + * + * note: AMrescan assumes only one scan key. This may have + * to change if we ever decide to support multiple keys. + */ + for (i = 0; i < numIndices; i++) { + sdesc = scanDescs[ i ]; + skey = scanKeys[ i ]; + index_rescan(sdesc, direction, skey); + } + + /* ---------------- + * perhaps return something meaningful + * ---------------- + */ + return; +} + +/* ---------------------------------------------------------------- + * ExecEndIndexScan + * + * old comments + * Releases any storage allocated through C routines. + * Returns nothing. + * ---------------------------------------------------------------- + */ +void +ExecEndIndexScan(IndexScan *node) +{ + CommonScanState *scanstate; + IndexScanState *indexstate; + ScanKey *scanKeys; + int numIndices; + int i; + + scanstate = node->scan.scanstate; + indexstate = node->indxstate; + + /* ---------------- + * extract information from the node + * ---------------- + */ + numIndices = indexstate->iss_NumIndices; + scanKeys = indexstate->iss_ScanKeys; + + /* ---------------- + * Free the projection info and the scan attribute info + * + * Note: we don't ExecFreeResultType(scanstate) + * because the rule manager depends on the tupType + * returned by ExecMain(). So for now, this + * is freed at end-transaction time. -cim 6/2/91 + * ---------------- + */ + ExecFreeProjectionInfo(&scanstate->cstate); + + /* ---------------- + * close the heap and index relations + * ---------------- + */ + ExecCloseR((Plan *) node); + + /* ---------------- + * free the scan keys used in scanning the indices + * ---------------- + */ + for (i=0; icstate.cs_ResultTupleSlot); + ExecClearTuple(scanstate->css_ScanTupleSlot); +/* ExecClearTuple(scanstate->css_RawTupleSlot); */ +} + +/* ---------------------------------------------------------------- + * ExecIndexMarkPos + * + * old comments + * Marks scan position by marking the current index. + * Returns nothing. + * ---------------------------------------------------------------- + */ +void +ExecIndexMarkPos(IndexScan *node) +{ + IndexScanState *indexstate; + IndexScanDescPtr indexScanDescs; + IndexScanDesc scanDesc; + int indexPtr; + + indexstate = node->indxstate; + indexPtr = indexstate->iss_IndexPtr; + indexScanDescs = indexstate->iss_ScanDescs; + scanDesc = indexScanDescs[ indexPtr ]; + + /* ---------------- + * XXX access methods don't return marked positions so + * ---------------- + */ + IndexScanMarkPosition( scanDesc ); + return; +} + +/* ---------------------------------------------------------------- + * ExecIndexRestrPos + * + * old comments + * Restores scan position by restoring the current index. + * Returns nothing. 
+ * + * XXX Assumes previously marked scan position belongs to current index + * ---------------------------------------------------------------- + */ +void +ExecIndexRestrPos(IndexScan *node) +{ + IndexScanState *indexstate; + IndexScanDescPtr indexScanDescs; + IndexScanDesc scanDesc; + int indexPtr; + + indexstate = node->indxstate; + indexPtr = indexstate->iss_IndexPtr; + indexScanDescs = indexstate->iss_ScanDescs; + scanDesc = indexScanDescs[ indexPtr ]; + + IndexScanRestorePosition( scanDesc ); +} + +/* ---------------------------------------------------------------- + * ExecInitIndexScan + * + * Initializes the index scan's state information, creates + * scan keys, and opens the base and index relations. + * + * Note: index scans have 2 sets of state information because + * we have to keep track of the base relation and the + * index relations. + * + * old comments + * Creates the run-time state information for the node and + * sets the relation id to contain relevant decriptors. + * + * Parameters: + * node: IndexNode node produced by the planner. + * estate: the execution state initialized in InitPlan. + * ---------------------------------------------------------------- + */ +bool +ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent) +{ + IndexScanState *indexstate; + CommonScanState *scanstate; + List *indxqual; + List *indxid; + int i; + int numIndices; + int indexPtr; + ScanKey *scanKeys; + int *numScanKeys; + RelationPtr relationDescs; + IndexScanDescPtr scanDescs; + Pointer *runtimeKeyInfo; + bool have_runtime_keys; + List *rangeTable; + RangeTblEntry *rtentry; + Index relid; + Oid reloid; + TimeQual timeQual; + + Relation currentRelation; + HeapScanDesc currentScanDesc; + ScanDirection direction; + int baseid; + + /* ---------------- + * assign execution state to node + * ---------------- + */ + node->scan.plan.state = estate; + + /* -------------------------------- + * Part 1) initialize scan state + * + * create new CommonScanState for node + * -------------------------------- + */ + scanstate = makeNode(CommonScanState); +/* + scanstate->ss_ProcOuterFlag = false; + scanstate->ss_OldRelId = 0; +*/ + + node->scan.scanstate = scanstate; + + /* ---------------- + * assign node's base_id .. we don't use AssignNodeBaseid() because + * the increment is done later on after we assign the index scan's + * scanstate. see below. + * ---------------- + */ + baseid = estate->es_BaseId; +/* scanstate->csstate.cstate.bnode.base_id = baseid; */ + scanstate->cstate.cs_base_id = baseid; + + /* ---------------- + * create expression context for node + * ---------------- + */ + ExecAssignExprContext(estate, &scanstate->cstate); + +#define INDEXSCAN_NSLOTS 3 + /* ---------------- + * tuple table initialization + * ---------------- + */ + ExecInitResultTupleSlot(estate, &scanstate->cstate); + ExecInitScanTupleSlot(estate, scanstate); +/* ExecInitRawTupleSlot(estate, scanstate); */ + + /* ---------------- + * initialize projection info. result type comes from scan desc + * below.. 
+ * ---------------- + */ + ExecAssignProjectionInfo((Plan *) node, &scanstate->cstate); + + /* -------------------------------- + * Part 2) initialize index scan state + * + * create new IndexScanState for node + * -------------------------------- + */ + indexstate = makeNode(IndexScanState); + indexstate->iss_NumIndices = 0; + indexstate->iss_IndexPtr = 0; + indexstate->iss_ScanKeys = NULL; + indexstate->iss_NumScanKeys = NULL; + indexstate->iss_RuntimeKeyInfo = NULL; + indexstate->iss_RelationDescs = NULL; + indexstate->iss_ScanDescs = NULL; + + node->indxstate = indexstate; + + /* ---------------- + * assign base id to index scan state also + * ---------------- + */ + indexstate->cstate.cs_base_id = baseid; + baseid++; + estate->es_BaseId = baseid; + + /* ---------------- + * get the index node information + * ---------------- + */ + indxid = node->indxid; + indxqual = node->indxqual; + numIndices = length(indxid); + indexPtr = 0; + + CXT1_printf("ExecInitIndexScan: context is %d\n", CurrentMemoryContext); + + /* ---------------- + * scanKeys is used to keep track of the ScanKey's. This is needed + * because a single scan may use several indices and each index has + * its own ScanKey. + * ---------------- + */ + numScanKeys = (int *) palloc(numIndices * sizeof(int)); + scanKeys = (ScanKey *) palloc(numIndices * sizeof(ScanKey)); + relationDescs = (RelationPtr) palloc(numIndices * sizeof(Relation)); + scanDescs = (IndexScanDescPtr) palloc(numIndices * sizeof(IndexScanDesc)); + + /* ---------------- + * initialize runtime key info. + * ---------------- + */ + have_runtime_keys = false; + runtimeKeyInfo = (Pointer *) + palloc(numIndices * sizeof(Pointer)); + + /* ---------------- + * build the index scan keys from the index qualification + * ---------------- + */ + for (i=0; i < numIndices; i++) { + int j; + List *qual; + int n_keys; + ScanKey scan_keys; + int *run_keys; + + qual = nth(i, indxqual); + n_keys = length(qual); + scan_keys = (n_keys <= 0) ? NULL : + (ScanKey)palloc(n_keys * sizeof(ScanKeyData)); + + CXT1_printf("ExecInitIndexScan: context is %d\n", + CurrentMemoryContext); + + if (n_keys > 0) { + run_keys = (int *) palloc(n_keys * sizeof(int)); + } + + /* ---------------- + * for each opclause in the given qual, + * convert each qual's opclause into a single scan key + * ---------------- + */ + for (j=0; j < n_keys; j++) { + Expr *clause; /* one part of index qual */ + Oper *op; /* operator used in scan.. */ + Node *leftop; /* expr on lhs of operator */ + Node *rightop; /* expr on rhs ... */ + + int scanvar; /* which var identifies varattno */ + AttrNumber varattno; /* att number used in scan */ + Oid opid; /* operator id used in scan */ + Datum scanvalue; /* value used in scan (if const) */ + + /* ---------------- + * extract clause information from the qualification + * ---------------- + */ + clause = nth(j, qual); + + op = (Oper*)clause->oper; + if (!IsA(op,Oper)) + elog(WARN, "ExecInitIndexScan: op not an Oper!"); + + opid = op->opid; + + /* ---------------- + * Here we figure out the contents of the index qual. + * The usual case is (op var const) or (op const var) + * which means we form a scan key for the attribute + * listed in the var node and use the value of the const. + * + * If we don't have a const node, then it means that + * one of the var nodes refers to the "scan" tuple and + * is used to determine which attribute to scan, and the + * other expression is used to calculate the value used in + * scanning the index. 
+ * + * This means our index scan's scan key is a function of + * information obtained during the execution of the plan + * in which case we need to recalculate the index scan key + * at run time. + * + * Hence, we set have_runtime_keys to true and then set + * the appropriate flag in run_keys to LEFT_OP or RIGHT_OP. + * The corresponding scan keys are recomputed at run time. + * ---------------- + */ + + scanvar = NO_OP; + + /* ---------------- + * determine information in leftop + * ---------------- + */ + leftop = (Node*) get_leftop(clause); + + if (IsA(leftop,Var) && var_is_rel((Var*)leftop)) { + /* ---------------- + * if the leftop is a "rel-var", then it means + * that it is a var node which tells us which + * attribute to use for our scan key. + * ---------------- + */ + varattno = ((Var*) leftop)->varattno; + scanvar = LEFT_OP; + } else if (IsA(leftop,Const)) { + /* ---------------- + * if the leftop is a const node then it means + * it identifies the value to place in our scan key. + * ---------------- + */ + run_keys[ j ] = NO_OP; + scanvalue = ((Const*) leftop)->constvalue; + } else if (leftop != NULL && + is_funcclause(leftop) && + var_is_rel(lfirst(((Expr*)leftop)->args))) { + /* ---------------- + * if the leftop is a func node then it means + * it identifies the value to place in our scan key. + * Since functional indices have only one attribute + * the attno must always be set to 1. + * ---------------- + */ + varattno = 1; + scanvar = LEFT_OP; + + } else { + /* ---------------- + * otherwise, the leftop contains information usable + * at runtime to figure out the value to place in our + * scan key. + * ---------------- + */ + have_runtime_keys = true; + run_keys[ j ] = LEFT_OP; + scanvalue = Int32GetDatum((int32) true); + } + + /* ---------------- + * now determine information in rightop + * ---------------- + */ + rightop = (Node*) get_rightop(clause); + + if (IsA(rightop,Var) && var_is_rel((Var*)rightop)) { + /* ---------------- + * here we make sure only one op identifies the + * scan-attribute... + * ---------------- + */ + if (scanvar == LEFT_OP) + elog(WARN, "ExecInitIndexScan: %s", + "both left and right op's are rel-vars"); + + /* ---------------- + * if the rightop is a "rel-var", then it means + * that it is a var node which tells us which + * attribute to use for our scan key. + * ---------------- + */ + varattno = ((Var*) rightop)->varattno; + scanvar = RIGHT_OP; + + } else if (IsA(rightop,Const)) { + /* ---------------- + * if the leftop is a const node then it means + * it identifies the value to place in our scan key. + * ---------------- + */ + run_keys[ j ] = NO_OP; + scanvalue = ((Const*) rightop)->constvalue; + + } else if (rightop!=NULL && + is_funcclause(rightop) && + var_is_rel(lfirst(((Expr*)rightop)->args))) { + /* ---------------- + * if the rightop is a func node then it means + * it identifies the value to place in our scan key. + * Since functional indices have only one attribute + * the attno must always be set to 1. + * ---------------- + */ + if (scanvar == LEFT_OP) + elog(WARN, "ExecInitIndexScan: %s", + "both left and right ops are rel-vars"); + + varattno = 1; + scanvar = RIGHT_OP; + + } else { + /* ---------------- + * otherwise, the leftop contains information usable + * at runtime to figure out the value to place in our + * scan key. 
+ * ---------------- + */ + have_runtime_keys = true; + run_keys[ j ] = RIGHT_OP; + scanvalue = Int32GetDatum((int32) true); + } + + /* ---------------- + * now check that at least one op tells us the scan + * attribute... + * ---------------- + */ + if (scanvar == NO_OP) + elog(WARN, "ExecInitIndexScan: %s", + "neither leftop nor rightop refer to scan relation"); + + /* ---------------- + * initialize the scan key's fields appropriately + * ---------------- + */ + ScanKeyEntryInitialize(&scan_keys[j], + 0, + varattno, /* attribute number to scan */ + (RegProcedure) opid, /* reg proc to use */ + (Datum) scanvalue); /* constant */ + } + + /* ---------------- + * store the key information into our array. + * ---------------- + */ + numScanKeys[ i ] = n_keys; + scanKeys[ i ] = scan_keys; + runtimeKeyInfo[ i ] = (Pointer) run_keys; + } + + indexstate->iss_NumIndices = numIndices; + indexstate->iss_IndexPtr = indexPtr; + indexstate->iss_ScanKeys = scanKeys; + indexstate->iss_NumScanKeys = numScanKeys; + + /* ---------------- + * If all of our keys have the form (op var const) , then we have no + * runtime keys so we store NULL in the runtime key info. + * Otherwise runtime key info contains an array of pointers + * (one for each index) to arrays of flags (one for each key) + * which indicate that the qual needs to be evaluated at runtime. + * -cim 10/24/89 + * ---------------- + */ + if (have_runtime_keys) + { + indexstate->iss_RuntimeKeyInfo = (Pointer) runtimeKeyInfo; + } + else { + indexstate->iss_RuntimeKeyInfo = NULL; + for (i=0; i < numIndices; i++) { + List *qual; + int n_keys; + qual = nth(i, indxqual); + n_keys = length(qual); + if (n_keys > 0) + pfree(runtimeKeyInfo[i]); + } + pfree(runtimeKeyInfo); + } + + /* ---------------- + * get the range table and direction information + * from the execution state (these are needed to + * open the relations). + * ---------------- + */ + rangeTable = estate->es_range_table; + direction = estate->es_direction; + + /* ---------------- + * open the base relation + * ---------------- + */ + relid = node->scan.scanrelid; + rtentry = rt_fetch(relid, rangeTable); + reloid = rtentry->relid; + timeQual = rtentry->timeQual; + + ExecOpenScanR(reloid, /* relation */ + 0, /* nkeys */ + (ScanKey) NULL, /* scan key */ + 0, /* is index */ + direction, /* scan direction */ + timeQual, /* time qual */ + ¤tRelation, /* return: rel desc */ + (Pointer *) ¤tScanDesc); /* return: scan desc */ + + scanstate->css_currentRelation = currentRelation; + scanstate->css_currentScanDesc = currentScanDesc; + + + /* ---------------- + * get the scan type from the relation descriptor. + * ---------------- + */ + ExecAssignScanType(scanstate, RelationGetTupleDescriptor(currentRelation)); + ExecAssignResultTypeFromTL((Plan *) node, &scanstate->cstate); + + /* ---------------- + * index scans don't have subtrees.. + * ---------------- + */ +/* scanstate->ss_ProcOuterFlag = false; */ + + /* ---------------- + * open the index relations and initialize + * relation and scan descriptors. 
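/*
 * A minimal sketch of the clause classification performed above, using
 * simplified stand-in types (the real code walks Var/Const/Func nodes).
 * One side of each clause should be a var of the scanned relation and
 * names the attribute; if the other side is a constant the key value is
 * known now, otherwise the key must be recomputed at run time, which is
 * what the run_keys[] flags record.  (The real code also rejects clauses
 * where both sides are rel-vars.)
 */
typedef enum { OPERAND_RELVAR, OPERAND_CONST, OPERAND_OTHER } SketchOperandKind;

typedef struct SketchOperand {
    SketchOperandKind kind;
    int               attno;    /* valid when kind == OPERAND_RELVAR */
    long              value;    /* valid when kind == OPERAND_CONST  */
} SketchOperand;

/*
 * Returns 1 for a runtime key, 0 for a constant key, -1 if neither side
 * refers to the scanned relation (an error case above).
 */
static int
sketch_classify_clause(SketchOperand left, SketchOperand right,
                       int *attno, long *value)
{
    int have_scanvar = 0;
    int runtime = 0;

    if (left.kind == OPERAND_RELVAR) {
        *attno = left.attno;
        have_scanvar = 1;
    } else if (left.kind == OPERAND_CONST)
        *value = left.value;
    else
        runtime = 1;

    if (right.kind == OPERAND_RELVAR) {
        *attno = right.attno;
        have_scanvar = 1;
    } else if (right.kind == OPERAND_CONST)
        *value = right.value;
    else
        runtime = 1;

    return have_scanvar ? runtime : -1;
}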
+ * ---------------- + */ + for (i=0; i < numIndices; i++) { + Oid indexOid; + + indexOid = (Oid)nth(i, indxid); + + if (indexOid != 0) { + ExecOpenScanR(indexOid, /* relation */ + numScanKeys[ i ], /* nkeys */ + scanKeys[ i ], /* scan key */ + true, /* is index */ + direction, /* scan direction */ + timeQual, /* time qual */ + &(relationDescs[ i ]), /* return: rel desc */ + (Pointer *) &(scanDescs[ i ])); + /* return: scan desc */ + } + } + + indexstate->iss_RelationDescs = relationDescs; + indexstate->iss_ScanDescs = scanDescs; + + indexstate->cstate.cs_TupFromTlist = false; + + /* ---------------- + * all done. + * ---------------- + */ + return TRUE; +} + +int +ExecCountSlotsIndexScan(IndexScan *node) +{ + return ExecCountSlotsNode(outerPlan((Plan *)node)) + + ExecCountSlotsNode(innerPlan((Plan *)node)) + + INDEXSCAN_NSLOTS; +} diff --git a/src/backend/executor/nodeIndexscan.h b/src/backend/executor/nodeIndexscan.h new file mode 100644 index 00000000000..27bbff0a293 --- /dev/null +++ b/src/backend/executor/nodeIndexscan.h @@ -0,0 +1,32 @@ +/*------------------------------------------------------------------------- + * + * nodeIndexscan.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeIndexscan.h,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEINDEXSCAN_H +#define NODEINDEXSCAN_H + +extern TupleTableSlot *ExecIndexScan(IndexScan *node); + +extern void ExecIndexReScan(IndexScan *node, ExprContext *exprCtxt, Plan *parent); + +extern void ExecEndIndexScan(IndexScan *node); + +extern void ExecIndexMarkPos(IndexScan *node); + +extern void ExecIndexRestrPos(IndexScan *node); + +extern void ExecUpdateIndexScanKeys(IndexScan *node, ExprContext *econtext); + +extern bool ExecInitIndexScan(IndexScan *node, EState *estate, Plan *parent); + +extern int ExecCountSlotsIndexScan(IndexScan *node); + +#endif /* NODEINDEXSCAN_H */ diff --git a/src/backend/executor/nodeMaterial.c b/src/backend/executor/nodeMaterial.c new file mode 100644 index 00000000000..88fc93d2d4e --- /dev/null +++ b/src/backend/executor/nodeMaterial.c @@ -0,0 +1,392 @@ +/*------------------------------------------------------------------------- + * + * nodeMaterial.c-- + * Routines to handle materialization nodes. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeMaterial.c,v 1.1.1.1 1996/07/09 06:21:26 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecMaterial - generate a temporary relation + * ExecInitMaterial - initialize node and subnodes.. + * ExecEndMaterial - shutdown node and subnodes + * + */ + +#include "executor/executor.h" +#include "executor/nodeMaterial.h" +#include "catalog/catalog.h" +#include "optimizer/internal.h" /* for _TEMP_RELATION_ID_ */ + +/* ---------------------------------------------------------------- + * ExecMaterial + * + * The first time this is called, ExecMaterial retrieves tuples + * this node's outer subplan and inserts them into a temporary + * relation. After this is done, a flag is set indicating that + * the subplan has been materialized. Once the relation is + * materialized, the first tuple is then returned. Successive + * calls to ExecMaterial return successive tuples from the temp + * relation. 
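/*
 * The ExecCountSlots* routines (here and in the other node files below) all
 * follow one recursive pattern: a node needs its own constant number of
 * tuple slots (e.g. INDEXSCAN_NSLOTS) plus whatever its outer and inner
 * subtrees need.  A minimal sketch with a stand-in plan type:
 */
typedef struct SketchPlan {
    int                own_slots;      /* this node's NSLOTS constant     */
    struct SketchPlan *outer, *inner;  /* NULL when the child is absent   */
} SketchPlan;

static int
sketch_count_slots(const SketchPlan *p)
{
    if (p == NULL)
        return 0;
    return sketch_count_slots(p->outer)
         + sketch_count_slots(p->inner)
         + p->own_slots;
}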
+ * + * Initial State: + * + * ExecMaterial assumes the temporary relation has been + * created and openend by ExecInitMaterial during the prior + * InitPlan() phase. + * + * ---------------------------------------------------------------- + */ +TupleTableSlot * /* result tuple from subplan */ +ExecMaterial(Material *node) +{ + EState *estate; + MaterialState *matstate; + Plan *outerNode; + ScanDirection dir; + Relation tempRelation; + Relation currentRelation; + HeapScanDesc currentScanDesc; + HeapTuple heapTuple; + TupleTableSlot *slot; + Buffer buffer; + + /* ---------------- + * get state info from node + * ---------------- + */ + matstate = node->matstate; + estate = node->plan.state; + dir = estate->es_direction; + + /* ---------------- + * the first time we call this, we retrieve all tuples + * from the subplan into a temporary relation and then + * we sort the relation. Subsequent calls return tuples + * from the temporary relation. + * ---------------- + */ + + if (matstate->mat_Flag == false) { + /* ---------------- + * set all relations to be scanned in the forward direction + * while creating the temporary relation. + * ---------------- + */ + estate->es_direction = EXEC_FRWD; + + /* ---------------- + * if we couldn't create the temp or current relations then + * we print a warning and return NULL. + * ---------------- + */ + tempRelation = matstate->mat_TempRelation; + if (tempRelation == NULL) { + elog(DEBUG, "ExecMaterial: temp relation is NULL! aborting..."); + return NULL; + } + + currentRelation = matstate->csstate.css_currentRelation; + if (currentRelation == NULL) { + elog(DEBUG, "ExecMaterial: current relation is NULL! aborting..."); + return NULL; + } + + /* ---------------- + * retrieve tuples from the subplan and + * insert them in the temporary relation + * ---------------- + */ + outerNode = outerPlan((Plan *) node); + for (;;) { + slot = ExecProcNode(outerNode, (Plan*) node); + + heapTuple = slot->val; + if (heapTuple == NULL) + break; + + heap_insert(tempRelation, /* relation desc */ + heapTuple); /* heap tuple to insert */ + + ExecClearTuple( slot); + } + currentRelation = tempRelation; + + /* ---------------- + * restore to user specified direction + * ---------------- + */ + estate->es_direction = dir; + + /* ---------------- + * now initialize the scan descriptor to scan the + * sorted relation and update the sortstate information + * ---------------- + */ + currentScanDesc = heap_beginscan(currentRelation, /* relation */ + ScanDirectionIsBackward(dir), + /* bkwd flag */ + NowTimeQual, /* time qual */ + 0, /* num scan keys */ + NULL); /* scan keys */ + matstate->csstate.css_currentRelation = currentRelation; + matstate->csstate.css_currentScanDesc = currentScanDesc; + + ExecAssignScanType(&matstate->csstate, + RelationGetTupleDescriptor(currentRelation)); + + /* ---------------- + * finally set the sorted flag to true + * ---------------- + */ + matstate->mat_Flag = true; + } + + /* ---------------- + * at this point we know we have a sorted relation so + * we preform a simple scan on it with amgetnext().. + * ---------------- + */ + currentScanDesc = matstate->csstate.css_currentScanDesc; + + heapTuple = heap_getnext(currentScanDesc, /* scan desc */ + ScanDirectionIsBackward(dir), + /* bkwd flag */ + &buffer); /* return: buffer */ + + /* ---------------- + * put the tuple into the scan tuple slot and return the slot. + * Note: since the tuple is really a pointer to a page, we don't want + * to call pfree() on it.. 
+ * ---------------- + */ + slot = (TupleTableSlot *)matstate->csstate.css_ScanTupleSlot; + + return ExecStoreTuple(heapTuple, /* tuple to store */ + slot, /* slot to store in */ + buffer, /* buffer for this tuple */ + false); /* don't pfree this pointer */ + +} + +/* ---------------------------------------------------------------- + * ExecInitMaterial + * ---------------------------------------------------------------- + */ +bool /* initialization status */ +ExecInitMaterial(Material *node, EState *estate, Plan *parent) +{ + MaterialState *matstate; + Plan *outerPlan; + TupleDesc tupType; + Relation tempDesc; + int len; + + /* ---------------- + * assign the node's execution state + * ---------------- + */ + node->plan.state = estate; + + /* ---------------- + * create state structure + * ---------------- + */ + matstate = makeNode(MaterialState); + matstate->mat_Flag = false; + matstate->mat_TempRelation = NULL; + node->matstate = matstate; + + /* ---------------- + * Miscellanious initialization + * + * + assign node's base_id + * + assign debugging hooks and + * + assign result tuple slot + * + * Materialization nodes don't need ExprContexts because + * they never call ExecQual or ExecTargetList. + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &matstate->csstate.cstate, parent); + +#define MATERIAL_NSLOTS 1 + /* ---------------- + * tuple table initialization + * ---------------- + */ + ExecInitScanTupleSlot(estate, &matstate->csstate); + + /* ---------------- + * initializes child nodes + * ---------------- + */ + outerPlan = outerPlan((Plan *) node); + ExecInitNode(outerPlan, estate, (Plan *) node); + + /* ---------------- + * initialize matstate information + * ---------------- + */ + matstate->mat_Flag = false; + + /* ---------------- + * initialize tuple type. no need to initialize projection + * info because this node doesn't do projections. + * ---------------- + */ + ExecAssignScanTypeFromOuterPlan((Plan *) node, &matstate->csstate); + matstate->csstate.cstate.cs_ProjInfo = NULL; + + /* ---------------- + * get type information needed for ExecCreatR + * ---------------- + */ + tupType = ExecGetScanType(&matstate->csstate); + + /* ---------------- + * ExecCreatR wants it's second argument to be an object id of + * a relation in the range table or a _TEMP_RELATION_ID + * indicating that the relation is not in the range table. + * + * In the second case ExecCreatR creates a temp relation. + * (currently this is the only case we support -cim 10/16/89) + * ---------------- + */ + /* ---------------- + * create the temporary relation + * ---------------- + */ +/* len = ExecTargetListLength(node->plan.targetlist); */ + tempDesc = ExecCreatR(tupType, _TEMP_RELATION_ID_); + + /* ---------------- + * save the relation descriptor in the sortstate + * ---------------- + */ + matstate->mat_TempRelation = tempDesc; + matstate->csstate.css_currentRelation = tempDesc; + + /* ---------------- + * return relation oid of temporary relation in a list + * (someday -- for now we return LispTrue... cim 10/12/89) + * ---------------- + */ + return TRUE; +} + +int +ExecCountSlotsMaterial(Material *node) +{ + return ExecCountSlotsNode(outerPlan((Plan *)node)) + + ExecCountSlotsNode(innerPlan((Plan *)node)) + + MATERIAL_NSLOTS; +} + +/* ---------------------------------------------------------------- + * ExecEndMaterial + * + * old comments + * destroys the temporary relation. 
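/*
 * A self-contained sketch of the materialize-once pattern used by
 * ExecMaterial()/ExecInitMaterial() above: the first call drains the child
 * plan into a private store and sets a flag; that call and every later one
 * then just read from the store.  The "child plan" and the temporary
 * relation are faked with arrays here, purely for illustration.
 */
#include <stdio.h>

#define SKETCH_MAT_MAX 16

typedef struct SketchMat {
    int filled;                 /* like mat_Flag: materialized yet?      */
    int store[SKETCH_MAT_MAX];  /* stands in for the temporary relation  */
    int nstored;
    int readpos;                /* stands in for the heap scan position  */
} SketchMat;

static const int sketch_child_rows[] = { 7, 3, 9 };    /* fake subplan   */
static int sketch_child_pos = 0;

static int
sketch_child_next(int *row)                     /* fake ExecProcNode     */
{
    if (sketch_child_pos >= 3)
        return 0;
    *row = sketch_child_rows[sketch_child_pos++];
    return 1;
}

static int
sketch_material_next(SketchMat *m, int *row)
{
    if (!m->filled) {                   /* first call: drain the child   */
        int r;

        while (sketch_child_next(&r) && m->nstored < SKETCH_MAT_MAX)
            m->store[m->nstored++] = r;
        m->filled = 1;
    }
    if (m->readpos >= m->nstored)       /* store exhausted               */
        return 0;
    *row = m->store[m->readpos++];
    return 1;
}

int main(void)
{
    SketchMat m = { 0 };
    int row;

    while (sketch_material_next(&m, &row))
        printf("%d\n", row);            /* prints 7, 3, 9                */
    return 0;
}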
+ * ---------------------------------------------------------------- + */ +void +ExecEndMaterial(Material *node) +{ + MaterialState *matstate; + Relation tempRelation; + Plan *outerPlan; + + /* ---------------- + * get info from the material state + * ---------------- + */ + matstate = node->matstate; + tempRelation = matstate->mat_TempRelation; + + heap_destroyr(tempRelation); + + /* ---------------- + * close the temp relation and shut down the scan. + * ---------------- + */ + ExecCloseR((Plan *) node); + + /* ---------------- + * shut down the subplan + * ---------------- + */ + outerPlan = outerPlan((Plan *) node); + ExecEndNode(outerPlan, (Plan*) node); + + /* ---------------- + * clean out the tuple table + * ---------------- + */ + ExecClearTuple(matstate->csstate.css_ScanTupleSlot); +} + +#if 0 /* not used */ +/* ---------------------------------------------------------------- + * ExecMaterialMarkPos + * ---------------------------------------------------------------- + */ +List /* nothing of interest */ +ExecMaterialMarkPos(Material node) +{ + MaterialState matstate; + HeapScanDesc sdesc; + + /* ---------------- + * if we haven't materialized yet, just return NIL. + * ---------------- + */ + matstate = get_matstate(node); + if (get_mat_Flag(matstate) == false) + return NIL; + + /* ---------------- + * XXX access methods don't return positions yet so + * for now we return NIL. It's possible that + * they will never return positions for all I know -cim 10/16/89 + * ---------------- + */ + sdesc = get_css_currentScanDesc((CommonScanState)matstate); + heap_markpos(sdesc); + + return NIL; +} + +/* ---------------------------------------------------------------- + * ExecMaterialRestrPos + * ---------------------------------------------------------------- + */ +void +ExecMaterialRestrPos(Material node) +{ + MaterialState matstate; + HeapScanDesc sdesc; + + /* ---------------- + * if we haven't materialized yet, just return. 
+ * ---------------- + */ + matstate = get_matstate(node); + if (get_mat_Flag(matstate) == false) + return; + + /* ---------------- + * restore the scan to the previously marked position + * ---------------- + */ + sdesc = get_css_currentScanDesc((CommonScanState)matstate); + heap_restrpos(sdesc); +} +#endif + diff --git a/src/backend/executor/nodeMaterial.h b/src/backend/executor/nodeMaterial.h new file mode 100644 index 00000000000..d85b025b7bf --- /dev/null +++ b/src/backend/executor/nodeMaterial.h @@ -0,0 +1,23 @@ +/*------------------------------------------------------------------------- + * + * nodeMaterial.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeMaterial.h,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEMATERIAL_H +#define NODEMATERIAL_H + +extern TupleTableSlot *ExecMaterial(Material *node); +extern bool ExecInitMaterial(Material *node, EState *estate, Plan *parent); +extern int ExecCountSlotsMaterial(Material *node); +extern void ExecEndMaterial(Material *node); +extern List ExecMaterialMarkPos(Material *node); +extern void ExecMaterialRestrPos(Material *node); + +#endif /* NODEMATERIAL_H */ diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c new file mode 100644 index 00000000000..54a3aa28c4e --- /dev/null +++ b/src/backend/executor/nodeMergejoin.c @@ -0,0 +1,1194 @@ +/*------------------------------------------------------------------------- + * + * nodeMergejoin.c-- + * routines supporting merge joins + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeMergejoin.c,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecMergeJoin mergejoin outer and inner relations. + * ExecInitMergeJoin creates and initializes run time states + * ExecEndMergeJoin cleand up the node. + * + * NOTES + * Essential operation of the merge join algorithm is as follows: + * (** indicates the tuples satisify the merge clause). + * + * Join { - + * get initial outer and inner tuples INITIALIZE + * Skip Inner SKIPINNER + * mark inner position JOINMARK + * do forever { - + * while (outer ** inner) { JOINTEST + * join tuples JOINTUPLES + * advance inner position NEXTINNER + * } - + * advance outer position NEXTOUTER + * if (outer ** mark) { TESTOUTER + * restore inner position to mark TESTOUTER + * continue - + * } else { - + * Skip Outer SKIPOUTER + * mark inner position JOINMARK + * } - + * } - + * } - + * + * Skip Outer { SKIPOUTER + * if (inner ** outer) Join Tuples JOINTUPLES + * while (outer < inner) SKIPOUTER + * advance outer SKIPOUTER + * if (outer > inner) SKIPOUTER + * Skip Inner SKIPINNER + * } - + * + * Skip Inner { SKIPINNER + * if (inner ** outer) Join Tuples JOINTUPLES + * while (outer > inner) SKIPINNER + * advance inner SKIPINNER + * if (outer < inner) SKIPINNER + * Skip Outer SKIPOUTER + * } - + * + * Currently, the merge join operation is coded in the fashion + * of a state machine. At each state, we do something and then + * proceed to another state. This state is stored in the node's + * execution state information and is preserved across calls to + * ExecMergeJoin. -cim 10/31/89 + * + * Warning: This code is known to fail for inequality operations + * and is being redesigned. 
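/*
 * The header comment above describes ExecMergeJoin() as a state machine
 * whose current state is kept in the node's execution state and is
 * preserved across calls, so each call resumes where the previous one
 * returned a tuple.  A minimal resumable node in that style (the states
 * and payload below are made up for illustration only):
 */
typedef enum { SM_INITIALIZE, SM_EMIT, SM_DONE } SketchState;

typedef struct SketchNode {
    SketchState state;          /* preserved across calls, like mj_JoinState */
    int         next, limit;
} SketchNode;

/* Returns 1 and stores a "tuple" in *out, or 0 when the node is exhausted. */
static int
sketch_proc_node(SketchNode *n, int *out)
{
    for (;;) {
        switch (n->state) {
        case SM_INITIALIZE:             /* one-time setup on the first call */
            n->next = 0;
            n->state = SM_EMIT;
            break;
        case SM_EMIT:
            if (n->next >= n->limit) {
                n->state = SM_DONE;
                break;
            }
            *out = n->next++;
            return 1;                   /* resume here on the next call     */
        case SM_DONE:
            return 0;
        }
    }
}

/*
 * Usage: a caller initializes {SM_INITIALIZE, 0, limit} once and then calls
 * sketch_proc_node() until it returns 0 -- mirroring repeated calls to
 * ExecMergeJoin(), each of which returns one joined tuple.
 */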
Specifically, = and > work + * but the logic is not correct for <. Since mergejoins + * are no better then nestloops for inequalitys, the planner + * should not plan them anyways. Alternatively, the + * planner could just exchange the inner/outer relations + * if it ever sees a <... -cim 7/1/90 + * + * Update: The executor tuple table has long since alleviated the + * problem described above -cim 4/23/91 + * + */ +#include "executor/executor.h" +#include "executor/nodeMergejoin.h" +#include "utils/lsyscache.h" + +/* ---------------------------------------------------------------- + * MarkInnerTuple and RestoreInnerTuple macros + * + * when we "mark" a tuple, we place a pointer to it + * in the marked tuple slot. now there are two pointers + * to this tuple and we don't want it to be freed until + * next time we mark a tuple, so we move the policy to + * the marked tuple slot and set the inner tuple slot policy + * to false. + * + * But, when we restore the inner tuple, the marked tuple + * retains the policy. Basically once a tuple is marked, it + * should only be freed when we mark another tuple. -cim 9/27/90 + * + * Note: now that we store buffers in the tuple table, + * we have to also increment buffer reference counts + * correctly whenever we propagate an additional pointer + * to a buffer item. Later, when ExecStoreTuple() is + * called again on this slot, the refcnt is decremented + * when the old tuple is replaced. + * ---------------------------------------------------------------- + */ +#define MarkInnerTuple(innerTupleSlot, mergestate) \ +{ \ + bool shouldFree; \ + shouldFree = ExecSetSlotPolicy(innerTupleSlot, false); \ + ExecStoreTuple(innerTupleSlot->val, \ + mergestate->mj_MarkedTupleSlot, \ + innerTupleSlot->ttc_buffer, \ + shouldFree); \ + ExecIncrSlotBufferRefcnt(innerTupleSlot); \ +} + +#define RestoreInnerTuple(innerTupleSlot, markedTupleSlot) \ + ExecStoreTuple(markedTupleSlot->val, \ + innerTupleSlot, \ + markedTupleSlot->ttc_buffer, \ + false); \ + ExecIncrSlotBufferRefcnt(innerTupleSlot) + +/* ---------------------------------------------------------------- + * MJFormOSortopI + * + * This takes the mergeclause which is a qualification of the + * form ((= expr expr) (= expr expr) ...) and forms a new + * qualification like ((> expr expr) (> expr expr) ...) which + * is used by ExecMergeJoin() in order to determine if we should + * skip tuples. + * + * old comments + * The 'qual' must be of the form: + * {(= outerkey1 innerkey1)(= outerkey2 innerkey2) ...} + * The "sortOp outerkey innerkey" is formed by substituting the "=" + * by "sortOp". + * ---------------------------------------------------------------- + */ +static List * +MJFormOSortopI(List *qualList, Oid sortOp) +{ + List *qualCopy; + List *qualcdr; + Expr *qual; + Oper *op; + + /* ---------------- + * qualList is a list: ((op .. ..) ...) + * first we make a copy of it. copyObject() makes a deep copy + * so let's use it instead of the old fashoned lispCopy()... + * ---------------- + */ + qualCopy = (List*) copyObject((Node*) qualList); + + foreach (qualcdr, qualCopy) { + /* ---------------- + * first get the current (op .. ..) 
list + * ---------------- + */ + qual = lfirst(qualcdr); + + /* ---------------- + * now get at the op + * ---------------- + */ + op = (Oper*)qual->oper; + if (!IsA(op,Oper)) { + elog(DEBUG, "MJFormOSortopI: op not an Oper!"); + return NIL; + } + + /* ---------------- + * change it's opid and since Op nodes now carry around a + * cached pointer to the associated op function, we have + * to make sure we invalidate this. Otherwise you get bizarre + * behavior when someone runs a mergejoin with _exec_repeat_ > 1 + * -cim 4/23/91 + * ---------------- + */ + op->opid = sortOp; + op->op_fcache = NULL; + } + + return qualCopy; +} + +/* ---------------------------------------------------------------- + * MJFormISortopO + * + * This does the same thing as MJFormOSortopI() except that + * it also reverses the expressions in the qualifications. + * For example: ((= expr1 expr2)) produces ((> expr2 expr1)) + * + * old comments + * The 'qual' must be of the form: + * {(= outerkey1 innerkey1) (= outerkey2 innerkey2) ...} + * The 'sortOp innerkey1 outerkey" is formed by substituting the "=" + * by "sortOp" and reversing the positions of the keys. + * ---------------------------------------------------------------- + */ +List * +MJFormISortopO(List *qualList, Oid sortOp) +{ + List *ISortopO; + List *qualcdr; + + /* ---------------- + * first generate OSortopI, a list of the form + * ((op outer inner) (op outer inner) ... ) + * ---------------- + */ + ISortopO = MJFormOSortopI(qualList, sortOp); + + /* ---------------- + * now swap the cadr and caddr of each qual to form ISortopO, + * ((op inner outer) (op inner outer) ... ) + * ---------------- + */ + foreach (qualcdr, ISortopO) { + Expr *qual; + List *inner; + List *outer; + qual = lfirst(qualcdr); + + inner = lfirst(qual->args); + outer = lfirst(lnext(qual->args)); + lfirst(qual->args) = outer; + lfirst(lnext(qual->args)) = inner; + } + + return ISortopO; +} + +/* ---------------------------------------------------------------- + * MergeCompare + * + * Compare the keys according to 'compareQual' which is of the + * form: {(key1a > key2a)(key1b > key2b) ...}. + * + * (actually, it could also be the form (key1a < key2a)..) + * + * This is different from calling ExecQual because ExecQual returns + * true only if ALL the comparisions clauses are satisfied. + * However, there is an order of significance among the keys with + * the first keys being most significant. Therefore, the clauses + * are evaluated in order and the 'compareQual' is satisfied + * if (key1i > key2i) is true and (key1j = key2j) for 0 < j < i. + * ---------------------------------------------------------------- + */ +bool +MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext) +{ + List *clause; + List *eqclause; + Datum const_value; + bool isNull; + bool isDone; + + /* ---------------- + * if we have no compare qualification, return nil + * ---------------- + */ + if (compareQual == NIL) + return false; + + /* ---------------- + * for each pair of clauses, test them until + * our compare conditions are satisified + * ---------------- + */ + eqclause = eqQual; + foreach (clause, compareQual) { + /* ---------------- + * first test if our compare clause is satisified. + * if so then return true. ignore isDone, don't iterate in + * quals. 
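/*
 * A small sketch of what MJFormOSortopI()/MJFormISortopO() above derive
 * from the merge clauses, using a simplified clause struct instead of
 * Expr/Oper nodes: copy each "=" clause, substitute the sort operator,
 * and (for the ISortopO form) also swap the outer and inner operands.
 */
typedef struct SketchMergeClause {
    int op;                     /* operator id: "=" originally, sortOp after */
    int outer, inner;           /* stand-ins for the outer and inner keys    */
} SketchMergeClause;

static void
sketch_form_osortop_i(const SketchMergeClause *in, SketchMergeClause *out,
                      int n, int sortop)
{
    int i;

    for (i = 0; i < n; i++) {
        out[i] = in[i];         /* a deep copy in the real code */
        out[i].op = sortop;     /* "=" becomes the sort operator */
    }
}

static void
sketch_form_isortop_o(const SketchMergeClause *in, SketchMergeClause *out,
                      int n, int sortop)
{
    int i;

    sketch_form_osortop_i(in, out, n, sortop);
    for (i = 0; i < n; i++) {   /* additionally reverse the key positions */
        int tmp = out[i].outer;

        out[i].outer = out[i].inner;
        out[i].inner = tmp;
    }
}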
+ * ---------------- + */ + const_value = (Datum) + ExecEvalExpr((Node*) lfirst(clause), econtext, &isNull, &isDone); + + if (DatumGetInt32(const_value) != 0) + return true; + + /* ---------------- + * ok, the compare clause failed so we test if the keys + * are equal... if key1 != key2, we return false. + * otherwise key1 = key2 so we move on to the next pair of keys. + * + * ignore isDone, don't iterate in quals. + * ---------------- + */ + const_value = ExecEvalExpr((Node*) lfirst(eqclause), + econtext, + &isNull, + &isDone); + + if (DatumGetInt32(const_value) == 0) + return false; + eqclause = lnext(eqclause); + } + + /* ---------------- + * if we get here then it means none of our key greater-than + * conditions were satisified so we return false. + * ---------------- + */ + return false; +} + +/* ---------------------------------------------------------------- + * ExecMergeTupleDump + * + * This function is called through the MJ_dump() macro + * when EXEC_MERGEJOINDEBUG is defined + * ---------------------------------------------------------------- + */ +void +ExecMergeTupleDumpInner(ExprContext *econtext) +{ + TupleTableSlot *innerSlot; + + printf("==== inner tuple ====\n"); + innerSlot = econtext->ecxt_innertuple; + if (TupIsNull(innerSlot)) + printf("(nil)\n"); + else + debugtup(innerSlot->val, + innerSlot->ttc_tupleDescriptor); +} + +void +ExecMergeTupleDumpOuter(ExprContext *econtext) +{ + TupleTableSlot *outerSlot; + + printf("==== outer tuple ====\n"); + outerSlot = econtext->ecxt_outertuple; + if (TupIsNull(outerSlot)) + printf("(nil)\n"); + else + debugtup(outerSlot->val, + outerSlot->ttc_tupleDescriptor); +} + +void +ExecMergeTupleDumpMarked(ExprContext *econtext, + MergeJoinState *mergestate) +{ + TupleTableSlot *markedSlot; + + printf("==== marked tuple ====\n"); + markedSlot = mergestate->mj_MarkedTupleSlot; + + if (TupIsNull(markedSlot)) + printf("(nil)\n"); + else + debugtup(markedSlot->val, + markedSlot->ttc_tupleDescriptor); +} + +void +ExecMergeTupleDump(ExprContext *econtext, MergeJoinState *mergestate) +{ + printf("******** ExecMergeTupleDump ********\n"); + + ExecMergeTupleDumpInner(econtext); + ExecMergeTupleDumpOuter(econtext); + ExecMergeTupleDumpMarked(econtext, mergestate); + + printf("******** \n"); +} + +/* ---------------------------------------------------------------- + * ExecMergeJoin + * + * old comments + * Details of the merge-join routines: + * + * (1) ">" and "<" operators + * + * Merge-join is done by joining the inner and outer tuples satisfying + * the join clauses of the form ((= outerKey innerKey) ...). + * The join clauses is provided by the query planner and may contain + * more than one (= outerKey innerKey) clauses (for composite key). + * + * However, the query executor needs to know whether an outer + * tuple is "greater/smaller" than an inner tuple so that it can + * "synchronize" the two relations. For e.g., consider the following + * relations: + * + * outer: (0 ^1 1 2 5 5 5 6 6 7) current tuple: 1 + * inner: (1 ^3 5 5 5 5 6) current tuple: 3 + * + * To continue the merge-join, the executor needs to scan both inner + * and outer relations till the matching tuples 5. It needs to know + * that currently inner tuple 3 is "greater" than outer tuple 1 and + * therefore it should scan the outer relation first to find a + * matching tuple and so on. 
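/*
 * MergeCompare() implements a lexicographic test: the "skip" qualification
 * holds if, at some key position i, the compare clause succeeds while all
 * earlier positions compare equal.  A minimal sketch over integer keys
 * (the real code evaluates the compare and equality clause expressions
 * instead of comparing ints directly):
 */
static int
sketch_merge_compare(const int *key1, const int *key2, int nkeys)
{
    int i;

    for (i = 0; i < nkeys; i++) {
        if (key1[i] > key2[i])      /* the ">" (or "<") compare clause */
            return 1;
        if (key1[i] != key2[i])     /* equality clause failed: stop    */
            return 0;
    }
    return 0;                       /* all keys equal: not "greater"   */
}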
+ * + * Therefore, when initializing the merge-join node, the executor + * creates the "greater/smaller" clause by substituting the "=" + * operator in the join clauses with the sort operator used to + * sort the outer and inner relation forming (outerKey sortOp innerKey). + * The sort operator is "<" if the relations are in ascending order + * otherwise, it is ">" if the relations are in descending order. + * The opposite "smaller/greater" clause is formed by reversing the + * outer and inner keys forming (innerKey sortOp outerKey). + * + * (2) repositioning inner "cursor" + * + * Consider the above relations and suppose that the executor has + * just joined the first outer "5" with the last inner "5". The + * next step is of course to join the second outer "5" with all + * the inner "5's". This requires repositioning the inner "cursor" + * to point at the first inner "5". This is done by "marking" the + * first inner 5 and restore the "cursor" to it before joining + * with the second outer 5. The access method interface provides + * routines to mark and restore to a tuple. + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecMergeJoin(MergeJoin *node) +{ + EState *estate; + MergeJoinState *mergestate; + ScanDirection direction; + List *innerSkipQual; + List *outerSkipQual; + List *mergeclauses; + List *qual; + bool qualResult; + bool compareResult; + + Plan *innerPlan; + TupleTableSlot *innerTupleSlot; + + Plan *outerPlan; + TupleTableSlot *outerTupleSlot; + + TupleTableSlot *markedTupleSlot; + + ExprContext *econtext; + + /* ---------------- + * get information from node + * ---------------- + */ + mergestate = node->mergestate; + estate = node->join.state; + direction = estate->es_direction; + innerPlan = innerPlan((Plan *)node); + outerPlan = outerPlan((Plan *)node); + econtext = mergestate->jstate.cs_ExprContext; + mergeclauses = node->mergeclauses; + qual = node->join.qual; + + if (ScanDirectionIsForward(direction)) { + outerSkipQual = mergestate->mj_OSortopI; + innerSkipQual = mergestate->mj_ISortopO; + } else { + outerSkipQual = mergestate->mj_ISortopO; + innerSkipQual = mergestate->mj_OSortopI; + } + + /* ---------------- + * ok, everything is setup.. let's go to work + * ---------------- + */ + if (mergestate->jstate.cs_TupFromTlist) { + TupleTableSlot *result; + ProjectionInfo *projInfo; + bool isDone; + + projInfo = mergestate->jstate.cs_ProjInfo; + result = ExecProject(projInfo, &isDone); + if (!isDone) + return result; + } + for (;;) { + /* ---------------- + * get the current state of the join and do things accordingly. + * Note: The join states are highlighted with 32-* comments for + * improved readability. + * ---------------- + */ + MJ_dump(econtext, mergestate); + + switch (mergestate->mj_JoinState) { + /* ******************************** + * EXEC_MJ_INITIALIZE means that this is the first time + * ExecMergeJoin() has been called and so we have to + * initialize the inner, outer and marked tuples as well + * as various stuff in the expression context. + * ******************************** + */ + case EXEC_MJ_INITIALIZE: + MJ_printf("ExecMergeJoin: EXEC_MJ_INITIALIZE\n"); + /* ---------------- + * Note: at this point, if either of our inner or outer + * tuples are nil, then the join ends immediately because + * we know one of the subplans is empty. 
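/*
 * A compact, self-contained illustration of the synchronization and
 * mark/restore behaviour described above, joining two sorted integer
 * arrays on equality.  The arrays stand in for the sorted subplans, and a
 * saved array index stands in for ExecMarkPos()/ExecRestrPos(); the real
 * executor drives the same logic through the state machine below.
 */
#include <stdio.h>

static void
sketch_merge_join(const int *outer, int nouter, const int *inner, int ninner)
{
    int o = 0, i = 0;

    while (o < nouter && i < ninner) {
        if (outer[o] < inner[i])
            o++;                            /* skip outer tuples             */
        else if (outer[o] > inner[i])
            i++;                            /* skip inner tuples             */
        else {
            int mark = i;                   /* mark the start of the run     */

            do {
                int j;

                /* join the current outer tuple with the whole inner run,
                 * starting from the mark ("restore inner position to mark") */
                for (j = mark; j < ninner && inner[j] == outer[o]; j++)
                    printf("join %d with %d\n", outer[o], inner[j]);
                o++;                        /* advance to the next outer     */
            } while (o < nouter && outer[o] == inner[mark]);

            i = mark;                       /* outer key changed: move the   */
            while (i < ninner && inner[i] == outer[o - 1])
                i++;                        /* inner scan past the joined run */
        }
    }
}

int main(void)
{
    int outer[] = { 0, 1, 1, 2, 5, 5, 5, 6, 6, 7 };   /* the example above */
    int inner[] = { 1, 3, 5, 5, 5, 5, 6 };

    sketch_merge_join(outer, 10, inner, 7);
    return 0;
}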
+ * ---------------- + */ + innerTupleSlot = ExecProcNode(innerPlan, (Plan*)node); + if (TupIsNull(innerTupleSlot)) { + MJ_printf("ExecMergeJoin: **** inner tuple is nil ****\n"); + return NULL; + } + + outerTupleSlot = ExecProcNode(outerPlan, (Plan*)node); + if (TupIsNull(outerTupleSlot)) { + MJ_printf("ExecMergeJoin: **** outer tuple is nil ****\n"); + return NULL; + } + + /* ---------------- + * store the inner and outer tuple in the merge state + * ---------------- + */ + econtext->ecxt_innertuple = innerTupleSlot; + econtext->ecxt_outertuple = outerTupleSlot; + + /* ---------------- + * set the marked tuple to nil + * and initialize its tuple descriptor atttributes. + * -jeff 10 july 1991 + * ---------------- + */ + ExecClearTuple(mergestate->mj_MarkedTupleSlot); + mergestate->mj_MarkedTupleSlot->ttc_tupleDescriptor = + innerTupleSlot->ttc_tupleDescriptor; +/* + mergestate->mj_MarkedTupleSlot->ttc_execTupDescriptor = + innerTupleSlot->ttc_execTupDescriptor; +*/ + /* ---------------- + * initialize merge join state to skip inner tuples. + * ---------------- + */ + mergestate->mj_JoinState = EXEC_MJ_SKIPINNER; + break; + + /* ******************************** + * EXEC_MJ_JOINMARK means we have just found a new + * outer tuple and a possible matching inner tuple. + * This is the case after the INITIALIZE, SKIPOUTER + * or SKIPINNER states. + * ******************************** + */ + case EXEC_MJ_JOINMARK: + MJ_printf("ExecMergeJoin: EXEC_MJ_JOINMARK\n"); + ExecMarkPos(innerPlan); + + innerTupleSlot = econtext->ecxt_innertuple; + MarkInnerTuple(innerTupleSlot, mergestate); + + mergestate->mj_JoinState = EXEC_MJ_JOINTEST; + break; + + /* ******************************** + * EXEC_MJ_JOINTEST means we have two tuples which + * might satisify the merge clause, so we test them. + * + * If they do satisify, then we join them and move + * on to the next inner tuple (EXEC_MJ_JOINTUPLES). + * + * If they do not satisify then advance to next outer tuple. + * ******************************** + */ + case EXEC_MJ_JOINTEST: + MJ_printf("ExecMergeJoin: EXEC_MJ_JOINTEST\n"); + + qualResult = ExecQual((List*)mergeclauses, econtext); + MJ_DEBUG_QUAL(mergeclauses, qualResult); + + if (qualResult) + { + mergestate->mj_JoinState = EXEC_MJ_JOINTUPLES; + } + else + { + mergestate->mj_JoinState = EXEC_MJ_NEXTOUTER; + } + break; + + /* ******************************** + * EXEC_MJ_JOINTUPLES means we have two tuples which + * satisified the merge clause so we join them and then + * proceed to get the next inner tuple (EXEC_NEXT_INNER). + * ******************************** + */ + case EXEC_MJ_JOINTUPLES: + MJ_printf("ExecMergeJoin: EXEC_MJ_JOINTUPLES\n"); + mergestate->mj_JoinState = EXEC_MJ_NEXTINNER; + + qualResult = ExecQual((List*)qual, econtext); + MJ_DEBUG_QUAL(qual, qualResult); + + if (qualResult) { + /* ---------------- + * qualification succeeded. now form the desired + * projection tuple and return the slot containing it. + * ---------------- + */ + ProjectionInfo *projInfo; + TupleTableSlot *result; + bool isDone; + + MJ_printf("ExecMergeJoin: **** returning tuple ****\n"); + + projInfo = mergestate->jstate.cs_ProjInfo; + + result = ExecProject(projInfo, &isDone); + mergestate->jstate.cs_TupFromTlist = !isDone; + return result; + } + break; + + /* ******************************** + * EXEC_MJ_NEXTINNER means advance the inner scan + * to the next tuple. If the tuple is not nil, we then + * proceed to test it against the join qualification. 
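/*
 * A small sketch of the ownership rule behind the MarkInnerTuple step in
 * EXEC_MJ_JOINMARK above: when a second slot starts pointing at the same
 * tuple/buffer, the "free on replace" policy moves to the marked slot and
 * the buffer reference count is bumped, so the tuple survives until the
 * next mark.  The types below are stand-ins, not the real slot/buffer code.
 */
typedef struct SketchBuffer { int refcnt; } SketchBuffer;

typedef struct SketchSlot {
    SketchBuffer *buf;
    int           owns;     /* may this slot free/unpin on replacement? */
} SketchSlot;

static void
sketch_slot_replace(SketchSlot *slot)       /* like storing a new tuple  */
{
    if (slot->buf != NULL) {
        slot->buf->refcnt--;
        /* when refcnt drops to zero the buffer could be released here  */
    }
    slot->buf = NULL;
    slot->owns = 0;
}

static void
sketch_mark_inner(SketchSlot *inner, SketchSlot *marked)
{
    sketch_slot_replace(marked);            /* previous mark goes away   */
    marked->buf  = inner->buf;
    marked->owns = 1;                       /* policy moves to the mark  */
    inner->owns  = 0;
    if (inner->buf != NULL)
        inner->buf->refcnt++;               /* two slots share the buffer */
}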
+ * ******************************** + */ + case EXEC_MJ_NEXTINNER: + MJ_printf("ExecMergeJoin: EXEC_MJ_NEXTINNER\n"); + + /* ---------------- + * now we get the next inner tuple, if any + * ---------------- + */ + innerTupleSlot = ExecProcNode(innerPlan, (Plan*)node); + MJ_DEBUG_PROC_NODE(innerTupleSlot); + econtext->ecxt_innertuple = innerTupleSlot; + + if (TupIsNull(innerTupleSlot)) + { + mergestate->mj_JoinState = EXEC_MJ_NEXTOUTER; + } + else + { + mergestate->mj_JoinState = EXEC_MJ_JOINTEST; + } + break; + + /* ******************************** + * EXEC_MJ_NEXTOUTER means + * + * outer inner + * outer tuple - 5 5 - marked tuple + * 5 5 + * 6 6 - inner tuple + * 7 7 + * + * we know we just bumped into + * the first inner tuple > current outer tuple + * so get a new outer tuple and then proceed to test + * it against the marked tuple (EXEC_MJ_TESTOUTER) + * ******************************** + */ + case EXEC_MJ_NEXTOUTER: + MJ_printf("ExecMergeJoin: EXEC_MJ_NEXTOUTER\n"); + + outerTupleSlot = ExecProcNode(outerPlan, (Plan*)node); + MJ_DEBUG_PROC_NODE(outerTupleSlot); + econtext->ecxt_outertuple = outerTupleSlot; + + /* ---------------- + * if the outer tuple is null then we know + * we are done with the join + * ---------------- + */ + if (TupIsNull(outerTupleSlot)) { + MJ_printf("ExecMergeJoin: **** outer tuple is nil ****\n"); + return NULL; + } + + mergestate->mj_JoinState = EXEC_MJ_TESTOUTER; + break; + + /* ******************************** + * EXEC_MJ_TESTOUTER + * If the new outer tuple and the marked tuple satisify + * the merge clause then we know we have duplicates in + * the outer scan so we have to restore the inner scan + * to the marked tuple and proceed to join the new outer + * tuples with the inner tuples (EXEC_MJ_JOINTEST) + * + * This is the case when + * + * outer inner + * 4 5 - marked tuple + * outer tuple - 5 5 + * new outer tuple - 5 5 + * 6 8 - inner tuple + * 7 12 + * + * new outer tuple = marked tuple + * + * If the outer tuple fails the test, then we know we have + * to proceed to skip outer tuples until outer >= inner + * (EXEC_MJ_SKIPOUTER). + * + * This is the case when + * + * outer inner + * 5 5 - marked tuple + * outer tuple - 5 5 + * new outer tuple - 6 8 - inner tuple + * 7 12 + * + * new outer tuple > marked tuple + * + * ******************************** + */ + case EXEC_MJ_TESTOUTER: + MJ_printf("ExecMergeJoin: EXEC_MJ_TESTOUTER\n"); + + /* ---------------- + * here we compare the outer tuple with the marked inner tuple + * by using the marked tuple in place of the inner tuple. + * ---------------- + */ + innerTupleSlot = econtext->ecxt_innertuple; + markedTupleSlot = mergestate->mj_MarkedTupleSlot; + econtext->ecxt_innertuple = markedTupleSlot; + + qualResult = ExecQual((List*)mergeclauses, econtext); + MJ_DEBUG_QUAL(mergeclauses, qualResult); + + if (qualResult) { + /* ---------------- + * the merge clause matched so now we juggle the slots + * back the way they were and proceed to JOINTEST. + * ---------------- + */ + econtext->ecxt_innertuple = innerTupleSlot; + + RestoreInnerTuple(innerTupleSlot, markedTupleSlot); + + ExecRestrPos(innerPlan); + mergestate->mj_JoinState = EXEC_MJ_JOINTEST; + + } else { + /* ---------------- + * if the inner tuple was nil and the new outer + * tuple didn't match the marked outer tuple then + * we may have the case: + * + * outer inner + * 4 4 - marked tuple + * new outer - 5 4 + * 6 nil - inner tuple + * 7 + * + * which means that all subsequent outer tuples will be + * larger than our inner tuples. 
+ * ---------------- + */ + if (TupIsNull(innerTupleSlot)) { + MJ_printf("ExecMergeJoin: **** wierd case 1 ****\n"); + return NULL; + } + + /* ---------------- + * restore the inner tuple and continue on to + * skip outer tuples. + * ---------------- + */ + econtext->ecxt_innertuple = innerTupleSlot; + mergestate->mj_JoinState = EXEC_MJ_SKIPOUTER; + } + break; + + /* ******************************** + * EXEC_MJ_SKIPOUTER means skip over tuples in the outer plan + * until we find an outer tuple > current inner tuple. + * + * For example: + * + * outer inner + * 5 5 + * 5 5 + * outer tuple - 6 8 - inner tuple + * 7 12 + * 8 14 + * + * we have to advance the outer scan + * until we find the outer 8. + * + * ******************************** + */ + case EXEC_MJ_SKIPOUTER: + MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPOUTER\n"); + /* ---------------- + * before we advance, make sure the current tuples + * do not satisify the mergeclauses. If they do, then + * we update the marked tuple and go join them. + * ---------------- + */ + qualResult = ExecQual((List*)mergeclauses, econtext); + MJ_DEBUG_QUAL(mergeclauses, qualResult); + + if (qualResult) { + ExecMarkPos(innerPlan); + innerTupleSlot = econtext->ecxt_innertuple; + + MarkInnerTuple(innerTupleSlot, mergestate); + + mergestate->mj_JoinState = EXEC_MJ_JOINTUPLES; + break; + } + + /* ---------------- + * ok, now test the skip qualification + * ---------------- + */ + compareResult = MergeCompare(mergeclauses, + outerSkipQual, + econtext); + + MJ_DEBUG_MERGE_COMPARE(outerSkipQual, compareResult); + + /* ---------------- + * compareResult is true as long as we should + * continue skipping tuples. + * ---------------- + */ + if (compareResult) { + + outerTupleSlot = ExecProcNode(outerPlan, (Plan*)node); + MJ_DEBUG_PROC_NODE(outerTupleSlot); + econtext->ecxt_outertuple = outerTupleSlot; + + /* ---------------- + * if the outer tuple is null then we know + * we are done with the join + * ---------------- + */ + if (TupIsNull(outerTupleSlot)) { + MJ_printf("ExecMergeJoin: **** outerTuple is nil ****\n"); + return NULL; + } + /* ---------------- + * otherwise test the new tuple against the skip qual. + * (we remain in the EXEC_MJ_SKIPOUTER state) + * ---------------- + */ + break; + } + + /* ---------------- + * now check the inner skip qual to see if we + * should now skip inner tuples... if we fail the + * inner skip qual, then we know we have a new pair + * of matching tuples. + * ---------------- + */ + compareResult = MergeCompare(mergeclauses, + innerSkipQual, + econtext); + + MJ_DEBUG_MERGE_COMPARE(innerSkipQual, compareResult); + + if (compareResult) + { + mergestate->mj_JoinState = EXEC_MJ_SKIPINNER; + } + else + { + mergestate->mj_JoinState = EXEC_MJ_JOINMARK; + } + break; + + /* ******************************** + * EXEC_MJ_SKIPINNER means skip over tuples in the inner plan + * until we find an inner tuple > current outer tuple. + * + * For example: + * + * outer inner + * 5 5 + * 5 5 + * outer tuple - 12 8 - inner tuple + * 14 10 + * 17 12 + * + * we have to advance the inner scan + * until we find the inner 12. + * + * ******************************** + */ + case EXEC_MJ_SKIPINNER: + MJ_printf("ExecMergeJoin: EXEC_MJ_SKIPINNER\n"); + /* ---------------- + * before we advance, make sure the current tuples + * do not satisify the mergeclauses. If they do, then + * we update the marked tuple and go join them. 
+ * ---------------- + */ + qualResult = ExecQual((List*)mergeclauses, econtext); + MJ_DEBUG_QUAL(mergeclauses, qualResult); + + if (qualResult) { + ExecMarkPos(innerPlan); + innerTupleSlot = econtext->ecxt_innertuple; + + MarkInnerTuple(innerTupleSlot, mergestate); + + mergestate->mj_JoinState = EXEC_MJ_JOINTUPLES; + break; + } + + /* ---------------- + * ok, now test the skip qualification + * ---------------- + */ + compareResult = MergeCompare(mergeclauses, + innerSkipQual, + econtext); + + MJ_DEBUG_MERGE_COMPARE(innerSkipQual, compareResult); + + /* ---------------- + * compareResult is true as long as we should + * continue skipping tuples. + * ---------------- + */ + if (compareResult) { + /* ---------------- + * now try and get a new inner tuple + * ---------------- + */ + innerTupleSlot = ExecProcNode(innerPlan, (Plan*)node); + MJ_DEBUG_PROC_NODE(innerTupleSlot); + econtext->ecxt_innertuple = innerTupleSlot; + + /* ---------------- + * if the inner tuple is null then we know + * we have to restore the inner scan + * and advance to the next outer tuple + * ---------------- + */ + if (TupIsNull(innerTupleSlot)) { + /* ---------------- + * this is an interesting case.. all our + * inner tuples are smaller then our outer + * tuples so we never found an inner tuple + * to mark. + * + * outer inner + * outer tuple - 5 4 + * 5 4 + * 6 nil - inner tuple + * 7 + * + * This means the join should end. + * ---------------- + */ + MJ_printf("ExecMergeJoin: **** wierd case 2 ****\n"); + return NULL; + } + + /* ---------------- + * otherwise test the new tuple against the skip qual. + * (we remain in the EXEC_MJ_SKIPINNER state) + * ---------------- + */ + break; + } + + /* ---------------- + * compare finally failed and we have stopped skipping + * inner tuples so now check the outer skip qual + * to see if we should now skip outer tuples... + * ---------------- + */ + compareResult = MergeCompare(mergeclauses, + outerSkipQual, + econtext); + + MJ_DEBUG_MERGE_COMPARE(outerSkipQual, compareResult); + + if (compareResult) + { + mergestate->mj_JoinState = EXEC_MJ_SKIPOUTER; + } + else + { + mergestate->mj_JoinState = EXEC_MJ_JOINMARK; + } + + break; + + /* ******************************** + * if we get here it means our code is fucked up and + * so we just end the join prematurely. + * ******************************** + */ + default: + elog(NOTICE, "ExecMergeJoin: invalid join state. aborting"); + return NULL; + } + } +} + +/* ---------------------------------------------------------------- + * ExecInitMergeJoin + * + * old comments + * Creates the run-time state information for the node and + * sets the relation id to contain relevant decriptors. 
+ * ---------------------------------------------------------------- + */ +bool +ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent) +{ + MergeJoinState *mergestate; + List *joinclauses; + RegProcedure rightsortop; + RegProcedure leftsortop; + RegProcedure sortop; + + List *OSortopI; + List *ISortopO; + + MJ1_printf("ExecInitMergeJoin: %s\n", + "initializing node"); + + /* ---------------- + * assign the node's execution state and + * get the range table and direction from it + * ---------------- + */ + node->join.state = estate; + + /* ---------------- + * create new merge state for node + * ---------------- + */ + mergestate = makeNode(MergeJoinState); + mergestate->mj_OSortopI = NIL; + mergestate->mj_ISortopO = NIL; + mergestate->mj_JoinState = 0; + mergestate->mj_MarkedTupleSlot = NULL; + node->mergestate = mergestate; + + /* ---------------- + * Miscellanious initialization + * + * + assign node's base_id + * + assign debugging hooks and + * + create expression context for node + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &mergestate->jstate, parent); + ExecAssignExprContext(estate, &mergestate->jstate); + +#define MERGEJOIN_NSLOTS 2 + /* ---------------- + * tuple table initialization + * ---------------- + */ + ExecInitResultTupleSlot(estate, &mergestate->jstate); + ExecInitMarkedTupleSlot(estate, mergestate); + + /* ---------------- + * get merge sort operators. + * + * XXX for now we assume all quals in the joinclauses were + * sorted with the same operator in both the inner and + * outer relations. -cim 11/2/89 + * ---------------- + */ + joinclauses = node->mergeclauses; + + rightsortop = get_opcode(node->mergerightorder[0]); + leftsortop = get_opcode(node->mergeleftorder[0]); + + if (leftsortop != rightsortop) + elog(NOTICE, "ExecInitMergeJoin: %s", + "left and right sortop's are unequal!"); + + sortop = rightsortop; + + /* ---------------- + * form merge skip qualifications + * + * XXX MJform routines need to be extended + * to take a list of sortops.. -cim 11/2/89 + * ---------------- + */ + OSortopI = MJFormOSortopI(joinclauses, sortop); + ISortopO = MJFormISortopO(joinclauses, sortop); + mergestate->mj_OSortopI = OSortopI; + mergestate->mj_ISortopO = ISortopO; + + MJ_printf("\nExecInitMergeJoin: OSortopI is "); + MJ_nodeDisplay(OSortopI); + MJ_printf("\nExecInitMergeJoin: ISortopO is "); + MJ_nodeDisplay(ISortopO); + MJ_printf("\n"); + + /* ---------------- + * initialize join state + * ---------------- + */ + mergestate->mj_JoinState = EXEC_MJ_INITIALIZE; + + /* ---------------- + * initialize subplans + * ---------------- + */ + ExecInitNode(outerPlan((Plan *) node), estate, (Plan *) node); + ExecInitNode(innerPlan((Plan *) node), estate, (Plan *) node); + + /* ---------------- + * initialize tuple type and projection info + * ---------------- + */ + ExecAssignResultTypeFromTL((Plan *) node, &mergestate->jstate); + ExecAssignProjectionInfo((Plan *) node, &mergestate->jstate); + + mergestate->jstate.cs_TupFromTlist = false; + /* ---------------- + * initialization successful + * ---------------- + */ + MJ1_printf("ExecInitMergeJoin: %s\n", + "node initialized"); + + return TRUE; +} + +int +ExecCountSlotsMergeJoin(MergeJoin *node) +{ + return ExecCountSlotsNode(outerPlan((Plan *)node)) + + ExecCountSlotsNode(innerPlan((Plan *)node)) + + MERGEJOIN_NSLOTS; +} + +/* ---------------------------------------------------------------- + * ExecEndMergeJoin + * + * old comments + * frees storage allocated through C routines. 
+ * ---------------------------------------------------------------- + */ +void +ExecEndMergeJoin(MergeJoin *node) +{ + MergeJoinState *mergestate; + + MJ1_printf("ExecEndMergeJoin: %s\n", + "ending node processing"); + + /* ---------------- + * get state information from the node + * ---------------- + */ + mergestate = node->mergestate; + + /* ---------------- + * Free the projection info and the scan attribute info + * + * Note: we don't ExecFreeResultType(mergestate) + * because the rule manager depends on the tupType + * returned by ExecMain(). So for now, this + * is freed at end-transaction time. -cim 6/2/91 + * ---------------- + */ + ExecFreeProjectionInfo(&mergestate->jstate); + + /* ---------------- + * shut down the subplans + * ---------------- + */ + ExecEndNode((Plan*) innerPlan((Plan *) node), (Plan*)node); + ExecEndNode((Plan*) outerPlan((Plan *) node), (Plan*)node); + + /* ---------------- + * clean out the tuple table so that we don't try and + * pfree the marked tuples.. see HACK ALERT at the top of + * this file. + * ---------------- + */ + ExecClearTuple(mergestate->jstate.cs_ResultTupleSlot); + ExecClearTuple(mergestate->mj_MarkedTupleSlot); + + MJ1_printf("ExecEndMergeJoin: %s\n", + "node processing ended"); +} + diff --git a/src/backend/executor/nodeMergejoin.h b/src/backend/executor/nodeMergejoin.h new file mode 100644 index 00000000000..ebdca08e32f --- /dev/null +++ b/src/backend/executor/nodeMergejoin.h @@ -0,0 +1,40 @@ +/*------------------------------------------------------------------------- + * + * nodeMergejoin.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeMergejoin.h,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEMERGEJOIN_H +#define NODEMERGEJOIN_H + +#if 0 /* aren't these static? 
*/ +extern List MJFormOSortopI(List qualList, Oid sortOp); +extern List MJFormISortopO(List qualList, Oid sortOp); +#endif +extern bool MergeCompare(List *eqQual, List *compareQual, ExprContext *econtext); + +extern void ExecMergeTupleDumpInner(ExprContext *econtext); + +extern void ExecMergeTupleDumpOuter(ExprContext *econtext); + +extern void ExecMergeTupleDumpMarked(ExprContext *econtext, + MergeJoinState *mergestate); + +extern void ExecMergeTupleDump(ExprContext *econtext, + MergeJoinState *mergestate); + +extern TupleTableSlot *ExecMergeJoin(MergeJoin *node); + +extern bool ExecInitMergeJoin(MergeJoin *node, EState *estate, Plan *parent); + +extern int ExecCountSlotsMergeJoin(MergeJoin *node); + +extern void ExecEndMergeJoin(MergeJoin *node); + +#endif /* NODEMERGEJOIN_H; */ diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c new file mode 100644 index 00000000000..1ef30ce431f --- /dev/null +++ b/src/backend/executor/nodeNestloop.c @@ -0,0 +1,370 @@ +/*------------------------------------------------------------------------- + * + * nodeNestloop.c-- + * routines to support nest-loop joins + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeNestloop.c,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecNestLoop - process a nestloop join of two plans + * ExecInitNestLoop - initialize the join + * ExecEndNestLoop - shut down the join + */ +#include "executor/executor.h" +#include "executor/nodeNestloop.h" +#include "executor/nodeIndexscan.h" + +/* ---------------------------------------------------------------- + * ExecNestLoop(node) + * + * old comments + * Returns the tuple joined from inner and outer tuples which + * satisfies the qualification clause. + * + * It scans the inner relation to join with current outer tuple. + * + * If none is found, next tuple form the outer relation is retrieved + * and the inner relation is scanned from the beginning again to join + * with the outer tuple. + * + * Nil is returned if all the remaining outer tuples are tried and + * all fail to join with the inner tuples. + * + * Nil is also returned if there is no tuple from inner realtion. + * + * Conditions: + * -- outerTuple contains current tuple from outer relation and + * the right son(inner realtion) maintains "cursor" at the tuple + * returned previously. + * This is achieved by maintaining a scan position on the outer + * relation. + * + * Initial States: + * -- the outer child and the inner child + * are prepared to return the first tuple. 
+ * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecNestLoop(NestLoop *node, Plan* parent) +{ + NestLoopState *nlstate; + Plan *innerPlan; + Plan *outerPlan; + bool needNewOuterTuple; + + TupleTableSlot *outerTupleSlot; + TupleTableSlot *innerTupleSlot; + + List *qual; + bool qualResult; + ExprContext *econtext; + + /* ---------------- + * get information from the node + * ---------------- + */ + ENL1_printf("getting info from node"); + + nlstate = node->nlstate; + qual = node->join.qual; + outerPlan = outerPlan(&node->join); + innerPlan = innerPlan(&node->join); + + /* ---------------- + * initialize expression context + * ---------------- + */ + econtext = nlstate->jstate.cs_ExprContext; + + /* ---------------- * get the current outer tuple + * ---------------- + */ + outerTupleSlot = nlstate->jstate.cs_OuterTupleSlot; + econtext->ecxt_outertuple = outerTupleSlot; + + /* ---------------- + * Ok, everything is setup for the join so now loop until + * we return a qualifying join tuple.. + * ---------------- + */ + + if (nlstate->jstate.cs_TupFromTlist) { + TupleTableSlot *result; + bool isDone; + + result = ExecProject(nlstate->jstate.cs_ProjInfo, &isDone); + if (!isDone) + return result; + } + + ENL1_printf("entering main loop"); + for(;;) { + /* ---------------- + * The essential idea now is to get the next inner tuple + * and join it with the current outer tuple. + * ---------------- + */ + needNewOuterTuple = false; + + /* ---------------- + * If outer tuple is not null then that means + * we are in the middle of a scan and we should + * restore our previously saved scan position. + * ---------------- + */ + if (! TupIsNull(outerTupleSlot)) { + ENL1_printf("have outer tuple, restoring outer plan"); + ExecRestrPos(outerPlan); + } else { + ENL1_printf("outer tuple is nil, need new outer tuple"); + needNewOuterTuple = true; + } + + /* ---------------- + * if we have an outerTuple, try to get the next inner tuple. + * ---------------- + */ + if (!needNewOuterTuple) { + ENL1_printf("getting new inner tuple"); + + innerTupleSlot = ExecProcNode(innerPlan, (Plan*)node); + econtext->ecxt_innertuple = innerTupleSlot; + + if (TupIsNull(innerTupleSlot)) { + ENL1_printf("no inner tuple, need new outer tuple"); + needNewOuterTuple = true; + } + } + + /* ---------------- + * loop until we have a new outer tuple and a new + * inner tuple. + * ---------------- + */ + while (needNewOuterTuple) { + /* ---------------- + * now try to get the next outer tuple + * ---------------- + */ + ENL1_printf("getting new outer tuple"); + outerTupleSlot = ExecProcNode(outerPlan, (Plan*)node); + econtext->ecxt_outertuple = outerTupleSlot; + + /* ---------------- + * if there are no more outer tuples, then the join + * is complete.. + * ---------------- + */ + if (TupIsNull(outerTupleSlot)) { + ENL1_printf("no outer tuple, ending join"); + return NULL; + } + + /* ---------------- + * we have a new outer tuple so we mark our position + * in the outer scan and save the outer tuple in the + * NestLoop state + * ---------------- + */ + ENL1_printf("saving new outer tuple information"); + ExecMarkPos(outerPlan); + nlstate->jstate.cs_OuterTupleSlot = outerTupleSlot; + + /* ---------------- + * now rescan the inner plan and get a new inner tuple + * ---------------- + */ + + ENL1_printf("rescanning inner plan"); + /* + * The scan key of the inner plan might depend on the current + * outer tuple (e.g. in index scans), that's why we pass our + * expr context. 
+ */ + ExecReScan(innerPlan, econtext, parent); + + ENL1_printf("getting new inner tuple"); + + innerTupleSlot = ExecProcNode(innerPlan, (Plan*)node); + econtext->ecxt_innertuple = innerTupleSlot; + + if (TupIsNull(innerTupleSlot)) { + ENL1_printf("couldn't get inner tuple - need new outer tuple"); + } else { + ENL1_printf("got inner and outer tuples"); + needNewOuterTuple = false; + } + } /* while (needNewOuterTuple) */ + + /* ---------------- + * at this point we have a new pair of inner and outer + * tuples so we test the inner and outer tuples to see + * if they satisify the node's qualification. + * ---------------- + */ + ENL1_printf("testing qualification"); + qualResult = ExecQual((List*)qual, econtext); + + if (qualResult) { + /* ---------------- + * qualification was satisified so we project and + * return the slot containing the result tuple + * using ExecProject(). + * ---------------- + */ + ProjectionInfo *projInfo; + TupleTableSlot *result; + bool isDone; + + ENL1_printf("qualification succeeded, projecting tuple"); + + projInfo = nlstate->jstate.cs_ProjInfo; + result = ExecProject(projInfo, &isDone); + nlstate->jstate.cs_TupFromTlist = !isDone; + return result; + } + + /* ---------------- + * qualification failed so we have to try again.. + * ---------------- + */ + ENL1_printf("qualification failed, looping"); + } +} + +/* ---------------------------------------------------------------- + * ExecInitNestLoop + * + * Creates the run-time state information for the nestloop node + * produced by the planner and initailizes inner and outer relations + * (child nodes). + * ---------------------------------------------------------------- + */ +bool +ExecInitNestLoop(NestLoop *node, EState *estate, Plan *parent) +{ + NestLoopState *nlstate; + + NL1_printf("ExecInitNestLoop: %s\n", + "initializing node"); + + /* ---------------- + * assign execution state to node + * ---------------- + */ + node->join.state = estate; + + /* ---------------- + * create new nest loop state + * ---------------- + */ + nlstate = makeNode(NestLoopState); + nlstate->nl_PortalFlag = false; + node->nlstate = nlstate; + + /* ---------------- + * Miscellanious initialization + * + * + assign node's base_id + * + assign debugging hooks and + * + create expression context for node + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &nlstate->jstate, parent); + ExecAssignExprContext(estate, &nlstate->jstate); + +#define NESTLOOP_NSLOTS 1 + /* ---------------- + * tuple table initialization + * ---------------- + */ + ExecInitResultTupleSlot(estate, &nlstate->jstate); + + /* ---------------- + * now initialize children + * ---------------- + */ + ExecInitNode(outerPlan((Plan*)node), estate, (Plan*)node); + ExecInitNode(innerPlan((Plan*)node), estate, (Plan*)node); + + /* ---------------- + * initialize tuple type and projection info + * ---------------- + */ + ExecAssignResultTypeFromTL((Plan *) node, &nlstate->jstate); + ExecAssignProjectionInfo((Plan *) node, &nlstate->jstate); + + /* ---------------- + * finally, wipe the current outer tuple clean. 
+ * ---------------- + */ + nlstate->jstate.cs_OuterTupleSlot = NULL; + nlstate->jstate.cs_TupFromTlist = false; + + NL1_printf("ExecInitNestLoop: %s\n", + "node initialized"); + return TRUE; +} + +int +ExecCountSlotsNestLoop(NestLoop *node) +{ + return ExecCountSlotsNode(outerPlan(node)) + + ExecCountSlotsNode(innerPlan(node)) + + NESTLOOP_NSLOTS; +} + +/* ---------------------------------------------------------------- + * ExecEndNestLoop + * + * closes down scans and frees allocated storage + * ---------------------------------------------------------------- + */ +void +ExecEndNestLoop(NestLoop *node) +{ + NestLoopState *nlstate; + + NL1_printf("ExecEndNestLoop: %s\n", + "ending node processing"); + + /* ---------------- + * get info from the node + * ---------------- + */ + nlstate = node->nlstate; + + /* ---------------- + * Free the projection info + * + * Note: we don't ExecFreeResultType(nlstate) + * because the rule manager depends on the tupType + * returned by ExecMain(). So for now, this + * is freed at end-transaction time. -cim 6/2/91 + * ---------------- + */ + ExecFreeProjectionInfo(&nlstate->jstate); + + /* ---------------- + * close down subplans + * ---------------- + */ + ExecEndNode(outerPlan((Plan *) node), (Plan*)node); + ExecEndNode(innerPlan((Plan *) node), (Plan*)node); + + /* ---------------- + * clean out the tuple table + * ---------------- + */ + ExecClearTuple(nlstate->jstate.cs_ResultTupleSlot); + + NL1_printf("ExecEndNestLoop: %s\n", + "node processing ended"); +} diff --git a/src/backend/executor/nodeNestloop.h b/src/backend/executor/nodeNestloop.h new file mode 100644 index 00000000000..c227c90a735 --- /dev/null +++ b/src/backend/executor/nodeNestloop.h @@ -0,0 +1,21 @@ +/*------------------------------------------------------------------------- + * + * nodeNestloop.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeNestloop.h,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODENESTLOOP_H +#define NODENESTLOOP_H + +extern TupleTableSlot *ExecNestLoop(NestLoop *node, Plan *parent); +extern bool ExecInitNestLoop(NestLoop *node, EState *estate, Plan *parent); +extern int ExecCountSlotsNestLoop(NestLoop *node); +extern void ExecEndNestLoop(NestLoop *node); + +#endif /* NODENESTLOOP_H */ diff --git a/src/backend/executor/nodeResult.c b/src/backend/executor/nodeResult.c new file mode 100644 index 00000000000..793119244db --- /dev/null +++ b/src/backend/executor/nodeResult.c @@ -0,0 +1,288 @@ +/*------------------------------------------------------------------------- + * + * nodeResult.c-- + * support for constant nodes needing special code. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * DESCRIPTION + * + * Example: in constant queries where no relations are scanned, + * the planner generates result nodes. 
Examples of such queries are: + * + * retrieve (x = 1) + * and + * append emp (name = "mike", salary = 15000) + * + * Result nodes are also used to optimise queries + * with tautological qualifications like: + * + * retrieve (emp.all) where 2 > 1 + * + * In this case, the plan generated is + * + * Result (with 2 > 1 qual) + * / + * SeqScan (emp.all) + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeResult.c,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +#include "executor/executor.h" +#include "executor/nodeResult.h" + +/* ---------------------------------------------------------------- + * ExecResult(node) + * + * returns the tuples from the outer plan which satisify the + * qualification clause. Since result nodes with right + * subtrees are never planned, we ignore the right subtree + * entirely (for now).. -cim 10/7/89 + * + * The qualification containing only constant clauses are + * checked first before any processing is done. It always returns + * 'nil' if the constant qualification is not satisfied. + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecResult(Result *node) +{ + ResultState *resstate; + TupleTableSlot *outerTupleSlot; + TupleTableSlot *resultSlot; + Plan *outerPlan; + ExprContext *econtext; + Node *qual; + bool qualResult; + bool isDone; + ProjectionInfo *projInfo; + + /* ---------------- + * initialize the result node's state + * ---------------- + */ + resstate = node->resstate; + + /* ---------------- + * get the expression context + * ---------------- + */ + econtext = resstate->cstate.cs_ExprContext; + + /* ---------------- + * check tautological qualifications like (2 > 1) + * ---------------- + */ + qual = node->resconstantqual; + if (qual != NULL) { + qualResult = ExecQual((List*)qual, econtext); + /* ---------------- + * if we failed the constant qual, then there + * is no need to continue processing because regardless of + * what happens, the constant qual will be false.. + * ---------------- + */ + if (qualResult == false) + return NULL; + + /* ---------------- + * our constant qualification succeeded so now we + * throw away the qual because we know it will always + * succeed. + * ---------------- + */ + node->resconstantqual = NULL; + } + + if (resstate->cstate.cs_TupFromTlist) { + ProjectionInfo *projInfo; + + projInfo = resstate->cstate.cs_ProjInfo; + resultSlot = ExecProject(projInfo, &isDone); + if (!isDone) + return resultSlot; + } + + /* ---------------- + * retrieve a tuple that satisfy the qual from the outer plan until + * there are no more. + * + * if rs_done is 1 then it means that we were asked to return + * a constant tuple and we alread did the last time ExecResult() + * was called, so now we are through. + * ---------------- + */ + outerPlan = outerPlan(node); + + while (!resstate->rs_done) { + + /* ---------------- + * get next outer tuple if necessary. + * ---------------- + */ + if (outerPlan != NULL) { + outerTupleSlot = ExecProcNode(outerPlan, (Plan*)node); + + if (TupIsNull(outerTupleSlot)) + return NULL; + + resstate->cstate.cs_OuterTupleSlot = outerTupleSlot; + } else { + + /* ---------------- + * if we don't have an outer plan, then it's probably + * the case that we are doing a retrieve or an append + * with a constant target list, so we should only return + * the constant tuple once or never if we fail the qual. 
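As the Result-node comments above explain, the constant qualification is evaluated once (and discarded on success), and when there is no outer plan the constant projection is handed back exactly once, guarded by rs_done. A small sketch of that one-shot gate, using made-up field names rather than the real ResultState:

    #include <stdbool.h>

    typedef struct OneShotResult {
        bool const_qual;   /* outcome of the constant qualification */
        bool checked;      /* has the constant qual been evaluated yet? */
        bool done;         /* analogue of rs_done */
        int  value;        /* stand-in for the projected constant tuple */
    } OneShotResult;

    /* Returns true and yields the value exactly once; afterwards, or when the
     * constant qual fails, it always returns false. */
    static bool one_shot_next(OneShotResult *rs, int *out)
    {
        if (!rs->checked) {
            rs->checked = true;            /* evaluate the constant qual only once */
            if (!rs->const_qual)
                rs->done = true;           /* qual failed: never return a tuple */
        }
        if (rs->done)
            return false;
        rs->done = true;                   /* no outer plan: return the tuple once */
        *out = rs->value;
        return true;
    }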
+ * ---------------- + */ + resstate->rs_done = 1; + } + + /* ---------------- + * get the information to place into the expr context + * ---------------- + */ + resstate = node->resstate; + + outerTupleSlot = resstate->cstate.cs_OuterTupleSlot; + + /* ---------------- + * fill in the information in the expression context + * XXX gross hack. use outer tuple as scan tuple + * ---------------- + */ + econtext->ecxt_outertuple = outerTupleSlot; + econtext->ecxt_scantuple = outerTupleSlot; + + /* ---------------- + * form the result tuple and pass it back using ExecProject() + * ---------------- + */ + projInfo = resstate->cstate.cs_ProjInfo; + resultSlot = ExecProject(projInfo, &isDone); + resstate->cstate.cs_TupFromTlist = !isDone; + return resultSlot; + } + + return NULL; +} + +/* ---------------------------------------------------------------- + * ExecInitResult + * + * Creates the run-time state information for the result node + * produced by the planner and initailizes outer relations + * (child nodes). + * ---------------------------------------------------------------- + */ +bool +ExecInitResult(Result *node, EState *estate, Plan *parent) +{ + ResultState *resstate; + + /* ---------------- + * assign execution state to node + * ---------------- + */ + node->plan.state = estate; + + /* ---------------- + * create new ResultState for node + * ---------------- + */ + resstate = makeNode(ResultState); + resstate->rs_done = 0; + node->resstate = resstate; + + /* ---------------- + * Miscellanious initialization + * + * + assign node's base_id + * + assign debugging hooks and + * + create expression context for node + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &resstate->cstate, parent); + ExecAssignExprContext(estate, &resstate->cstate); + +#define RESULT_NSLOTS 1 + /* ---------------- + * tuple table initialization + * ---------------- + */ + ExecInitResultTupleSlot(estate, &resstate->cstate); + + /* ---------------- + * then initialize children + * ---------------- + */ + ExecInitNode(outerPlan(node), estate, (Plan*)node); + + /* + * we don't use inner plan + */ + Assert(innerPlan(node)==NULL); + + /* ---------------- + * initialize tuple type and projection info + * ---------------- + */ + ExecAssignResultTypeFromTL((Plan*)node, &resstate->cstate); + ExecAssignProjectionInfo((Plan*)node, &resstate->cstate); + + /* ---------------- + * set "are we done yet" to false + * ---------------- + */ + resstate->rs_done = 0; + + return TRUE; +} + +int +ExecCountSlotsResult(Result *node) +{ + return ExecCountSlotsNode(outerPlan(node)) + RESULT_NSLOTS; +} + +/* ---------------------------------------------------------------- + * ExecEndResult + * + * fees up storage allocated through C routines + * ---------------------------------------------------------------- + */ +void +ExecEndResult(Result *node) +{ + ResultState *resstate; + + resstate = node->resstate; + + /* ---------------- + * Free the projection info + * + * Note: we don't ExecFreeResultType(resstate) + * because the rule manager depends on the tupType + * returned by ExecMain(). So for now, this + * is freed at end-transaction time. 
-cim 6/2/91 + * ---------------- + */ + ExecFreeProjectionInfo(&resstate->cstate); + + /* ---------------- + * shut down subplans + * ---------------- + */ + ExecEndNode(outerPlan(node), (Plan*)node); + + /* ---------------- + * clean out the tuple table + * ---------------- + */ + ExecClearTuple(resstate->cstate.cs_ResultTupleSlot); +} diff --git a/src/backend/executor/nodeResult.h b/src/backend/executor/nodeResult.h new file mode 100644 index 00000000000..a2ab286c089 --- /dev/null +++ b/src/backend/executor/nodeResult.h @@ -0,0 +1,21 @@ +/*------------------------------------------------------------------------- + * + * nodeResult.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeResult.h,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODERESULT_H +#define NODERESULT_H + +extern TupleTableSlot *ExecResult(Result *node); +extern bool ExecInitResult(Result *node, EState *estate, Plan *parent); +extern int ExecCountSlotsResult(Result *node); +extern void ExecEndResult(Result *node); + +#endif /* NODERESULT_H */ diff --git a/src/backend/executor/nodeSeqscan.c b/src/backend/executor/nodeSeqscan.c new file mode 100644 index 00000000000..a0e434c4bb0 --- /dev/null +++ b/src/backend/executor/nodeSeqscan.c @@ -0,0 +1,449 @@ +/*------------------------------------------------------------------------- + * + * nodeSeqscan.c-- + * Support routines for sequential scans of relations. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeSeqscan.c,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecSeqScan sequentially scans a relation. + * ExecSeqNext retrieve next tuple in sequential order. + * ExecInitSeqScan creates and initializes a seqscan node. + * ExecEndSeqScan releases any storage allocated. + * ExecSeqReScan rescans the relation + * ExecMarkPos marks scan position + * ExecRestrPos restores scan position + * + */ +#include "executor/executor.h" +#include "executor/nodeSeqscan.h" +#include "parser/parsetree.h" + +/* ---------------------------------------------------------------- + * Scan Support + * ---------------------------------------------------------------- + */ +/* ---------------------------------------------------------------- + * SeqNext + * + * This is a workhorse for ExecSeqScan + * ---------------------------------------------------------------- + */ +TupleTableSlot * +SeqNext(SeqScan *node) +{ + HeapTuple tuple; + HeapScanDesc scandesc; + CommonScanState *scanstate; + EState *estate; + ScanDirection direction; + TupleTableSlot *slot; + Buffer buffer; + + /* ---------------- + * get information from the estate and scan state + * ---------------- + */ + estate = node->plan.state; + scanstate = node->scanstate; + scandesc = scanstate->css_currentScanDesc; + direction = estate->es_direction; + + /* ---------------- + * get the next tuple from the access methods + * ---------------- + */ + tuple = heap_getnext(scandesc, /* scan desc */ + ScanDirectionIsBackward(direction), /*backward flag*/ + &buffer); /* return: buffer */ + + /* ---------------- + * save the tuple and the buffer returned to us by the access methods + * in our scan tuple slot and return the slot. 
Note: we pass 'false' + * because tuples returned by heap_getnext() are pointers onto + * disk pages and were not created with palloc() and so should not + * be pfree()'d. + * ---------------- + */ + slot = scanstate->css_ScanTupleSlot; + + slot = ExecStoreTuple(tuple, /* tuple to store */ + slot, /* slot to store in */ + buffer, /* buffer associated with this tuple */ + false); /* don't pfree this pointer */ + + /* ---------------- + * XXX -- mao says: The sequential scan for heap relations will + * automatically unpin the buffer this tuple is on when we cross + * a page boundary. The clearslot code also does this. We bump + * the pin count on the page here, since we actually have two + * pointers to it -- one in the scan desc and one in the tuple + * table slot. --mar 20 91 + * ---------------- + */ + ExecIncrSlotBufferRefcnt(slot); + + return slot; +} + +/* ---------------------------------------------------------------- + * ExecSeqScan(node) + * + * Scans the relation sequentially and returns the next qualifying + * tuple. + * It calls the ExecScan() routine and passes it the access method + * which retrieve tuples sequentially. + * + */ + +TupleTableSlot * +ExecSeqScan(SeqScan *node) +{ + TupleTableSlot *slot; + Plan *outerPlan; + +S_printf("ExecSeqScan: scanning node: "); S_nodeDisplay(node); + + /* ---------------- + * if there is an outer subplan, get a tuple from it + * else, scan the relation + * ---------------- + */ + outerPlan = outerPlan((Plan *) node); + if (outerPlan) { + slot = ExecProcNode(outerPlan, (Plan*) node); + } else { + slot = ExecScan(node, SeqNext); + } + +S1_printf("ExecSeqScan: returned tuple slot: %d\n", slot); + + return slot; +} + +/* ---------------------------------------------------------------- + * InitScanRelation + * + * This does the initialization for scan relations and + * subplans of scans. + * ---------------------------------------------------------------- + */ +Oid +InitScanRelation(SeqScan *node, EState *estate, + CommonScanState *scanstate, Plan *outerPlan) +{ + Index relid; + List *rangeTable; + RangeTblEntry *rtentry; + Oid reloid; + TimeQual timeQual; + ScanDirection direction; + Relation currentRelation; + HeapScanDesc currentScanDesc; + RelationInfo *resultRelationInfo; + + if (outerPlan == NULL) { + /* ---------------- + * if the outer node is nil then we are doing a simple + * sequential scan of a relation... + * + * get the relation object id from the relid'th entry + * in the range table, open that relation and initialize + * the scan state... 
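ExecSeqScan above just hands an access-method callback (SeqNext) to the generic scan driver, which keeps fetching until a tuple satisfies the qual or the input runs out. The callback-plus-driver shape can be sketched in isolation like this (hypothetical fetch and qual signatures, not ExecScan's real interface):

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct Tuple { int a; } Tuple;

    /* Access-method callback: returns the next tuple or NULL at end of scan. */
    typedef Tuple *(*FetchFunc)(void *scan_state);

    /* Generic driver: keep calling the access method until a tuple passes the
     * qual or the input is exhausted -- the shape of ExecScan(node, SeqNext). */
    static Tuple *generic_scan(void *scan_state, FetchFunc next,
                               bool (*qual)(const Tuple *))
    {
        Tuple *tup;

        while ((tup = next(scan_state)) != NULL)
            if (qual == NULL || qual(tup))
                return tup;

        return NULL;
    }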
+ * ---------------- + */ + relid = node->scanrelid; + rangeTable = estate->es_range_table; + rtentry = rt_fetch(relid, rangeTable); + reloid = rtentry->relid; + timeQual = rtentry->timeQual; + direction = estate->es_direction; + resultRelationInfo = estate->es_result_relation_info; + + ExecOpenScanR(reloid, /* relation */ + 0, /* nkeys */ + NULL, /* scan key */ + 0, /* is index */ + direction, /* scan direction */ + timeQual, /* time qual */ + &currentRelation, /* return: rel desc */ + (Pointer *) &currentScanDesc); /* return: scan desc */ + + scanstate->css_currentRelation = currentRelation; + scanstate->css_currentScanDesc = currentScanDesc; + + ExecAssignScanType(scanstate, + RelationGetTupleDescriptor(currentRelation)); + } else { + /* ---------------- + * otherwise we are scanning tuples from the + * outer subplan so we initialize the outer plan + * and nullify + * ---------------- + */ + ExecInitNode(outerPlan, estate, (Plan*)node); + + node->scanrelid = 0; + scanstate->css_currentRelation = NULL; + scanstate->css_currentScanDesc = NULL; + ExecAssignScanType(scanstate, NULL); + reloid = InvalidOid; + } + + /* ---------------- + * return the relation + * ---------------- + */ + return reloid; +} + + +/* ---------------------------------------------------------------- + * ExecInitSeqScan + * + * old comments + * Creates the run-time state information for the seqscan node + * and sets the relation id to contain relevant descriptors. + * + * If there is an outer subtree (sort), the outer subtree + * is initialized and the relation id is set to the descriptors + * returned by the subtree. + * ---------------------------------------------------------------- + */ +bool +ExecInitSeqScan(SeqScan *node, EState *estate, Plan *parent) +{ + CommonScanState *scanstate; + Plan *outerPlan; + Oid reloid; + HeapScanDesc scandesc; + + /* ---------------- + * assign the node's execution state + * ---------------- + */ + node->plan.state = estate; + + /* ---------------- + * create new CommonScanState for node + * ---------------- + */ + scanstate = makeNode(CommonScanState); + node->scanstate = scanstate; + + /* ---------------- + * Miscellaneous initialization + * + * + assign node's base_id + * + create expression context for node + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &scanstate->cstate, parent); + ExecAssignExprContext(estate, &scanstate->cstate); + +#define SEQSCAN_NSLOTS 3 + /* ---------------- + * tuple table initialization + * ---------------- + */ + ExecInitResultTupleSlot(estate, &scanstate->cstate); + ExecInitScanTupleSlot(estate, scanstate); + + /* ---------------- + * initialize scan relation or outer subplan + * ---------------- + */ + outerPlan = outerPlan((Plan *)node); + + reloid = InitScanRelation(node, estate, scanstate, outerPlan); + + scandesc = scanstate->css_currentScanDesc; + scanstate->cstate.cs_TupFromTlist = false; + + /* ---------------- + * initialize tuple type + * ---------------- + */ + ExecAssignResultTypeFromTL((Plan*)node, &scanstate->cstate); + ExecAssignProjectionInfo((Plan*)node, &scanstate->cstate); + + return TRUE; +} + +int +ExecCountSlotsSeqScan(SeqScan *node) +{ + return ExecCountSlotsNode(outerPlan(node)) + + ExecCountSlotsNode(innerPlan(node)) + + SEQSCAN_NSLOTS; +} + +/* ---------------------------------------------------------------- + * ExecEndSeqScan + * + * frees any storage allocated through C routines.
+ *| ...and also closes relations and/or shuts down outer subplan + *| -cim 8/14/89 + * ---------------------------------------------------------------- + */ +void +ExecEndSeqScan(SeqScan *node) +{ + CommonScanState *scanstate; + Plan *outerPlan; + + /* ---------------- + * get information from node + * ---------------- + */ + scanstate = node->scanstate; + + /* ---------------- + * Free the projection info and the scan attribute info + * + * Note: we don't ExecFreeResultType(scanstate) + * because the rule manager depends on the tupType + * returned by ExecMain(). So for now, this + * is freed at end-transaction time. -cim 6/2/91 + * ---------------- + */ + ExecFreeProjectionInfo(&scanstate->cstate); + + /* ---------------- + * close scan relation + * ---------------- + */ + ExecCloseR((Plan*) node); + + /* ---------------- + * clean up outer subtree (does nothing if there is no outerPlan) + * ---------------- + */ + outerPlan = outerPlan((Plan *)node); + ExecEndNode(outerPlan, (Plan*)node); + + /* ---------------- + * clean out the tuple table + * ---------------- + */ + ExecClearTuple(scanstate->cstate.cs_ResultTupleSlot); + ExecClearTuple(scanstate->css_ScanTupleSlot); +} + +/* ---------------------------------------------------------------- + * Join Support + * ---------------------------------------------------------------- + */ +/* ---------------------------------------------------------------- + * ExecSeqReScan + * + * Rescans the relation. + * ---------------------------------------------------------------- + */ +void +ExecSeqReScan(SeqScan *node, ExprContext *exprCtxt, Plan* parent) +{ + CommonScanState *scanstate; + EState *estate; + Plan *outerPlan; + Relation rdesc; + HeapScanDesc sdesc; + ScanDirection direction; + + scanstate = node->scanstate; + estate = node->plan.state; + + outerPlan = outerPlan((Plan*)node); + if (outerPlan) { + /* we are scanning a subplan */ + outerPlan = outerPlan((Plan *)node); + ExecReScan(outerPlan, exprCtxt, parent); + } else { + /* otherwise, we are scanning a relation */ + rdesc = scanstate->css_currentRelation; + sdesc = scanstate->css_currentScanDesc; + direction = estate->es_direction; + sdesc = ExecReScanR(rdesc, sdesc, direction, 0, NULL); + scanstate->css_currentScanDesc = sdesc; + } +} + +/* ---------------------------------------------------------------- + * ExecSeqMarkPos(node) + * + * Marks scan position. + * ---------------------------------------------------------------- + */ +void +ExecSeqMarkPos(SeqScan *node) +{ + CommonScanState *scanstate; + Plan *outerPlan; + HeapScanDesc sdesc; + + scanstate = node->scanstate; + + /* ---------------- + * if we are scanning a subplan then propagate + * the ExecMarkPos() request to the subplan + * ---------------- + */ + outerPlan = outerPlan((Plan*)node); + if (outerPlan) { + ExecMarkPos(outerPlan); + return; + } + + /* ---------------- + * otherwise we are scanning a relation so mark the + * position using the access methods.. + * + * ---------------- + */ + sdesc = scanstate->css_currentScanDesc; + heap_markpos(sdesc); + + return; +} + +/* ---------------------------------------------------------------- + * ExecSeqRestrPos + * + * Restores scan position. 
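ExecSeqMarkPos above and ExecSeqRestrPos just below either forward the request to an outer subplan or record/reinstate the heap scan's position. Stripped of the access-method calls, the protocol reduces to saving and restoring a cursor (illustrative struct only, not the real scan descriptor):

    typedef struct SimpleScan SimpleScan;
    struct SimpleScan {
        SimpleScan *subplan;  /* non-NULL when we scan a subplan, not a relation */
        long        pos;      /* current position in the underlying relation */
        long        mark;     /* last marked position */
    };

    /* Shape of ExecSeqMarkPos: delegate to the subplan if there is one,
     * otherwise remember the current position (heap_markpos analogue). */
    static void scan_markpos(SimpleScan *s)
    {
        if (s->subplan) {
            scan_markpos(s->subplan);
            return;
        }
        s->mark = s->pos;
    }

    /* Shape of ExecSeqRestrPos (heap_restrpos analogue). */
    static void scan_restrpos(SimpleScan *s)
    {
        if (s->subplan) {
            scan_restrpos(s->subplan);
            return;
        }
        s->pos = s->mark;
    }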
+ * ---------------------------------------------------------------- + */ +void +ExecSeqRestrPos(SeqScan *node) +{ + CommonScanState *scanstate; + Plan *outerPlan; + HeapScanDesc sdesc; + + scanstate = node->scanstate; + + /* ---------------- + * if we are scanning a subplan then propagate + * the ExecRestrPos() request to the subplan + * ---------------- + */ + outerPlan = outerPlan((Plan*)node); + if (outerPlan) { + ExecRestrPos(outerPlan); + return; + } + + /* ---------------- + * otherwise we are scanning a relation so restore the + * position using the access methods.. + * ---------------- + */ + sdesc = scanstate->css_currentScanDesc; + heap_restrpos(sdesc); +} diff --git a/src/backend/executor/nodeSeqscan.h b/src/backend/executor/nodeSeqscan.h new file mode 100644 index 00000000000..cce029d40b7 --- /dev/null +++ b/src/backend/executor/nodeSeqscan.h @@ -0,0 +1,27 @@ +/*------------------------------------------------------------------------- + * + * nodeSeqscan.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeSeqscan.h,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODESEQSCAN_H +#define NODESEQSCAN_H + +extern TupleTableSlot *SeqNext(SeqScan *node); +extern TupleTableSlot *ExecSeqScan(SeqScan *node); +extern Oid InitScanRelation(SeqScan *node, EState *estate, + CommonScanState *scanstate, Plan *outerPlan); +extern bool ExecInitSeqScan(SeqScan *node, EState *estate, Plan *parent); +extern int ExecCountSlotsSeqScan(SeqScan *node); +extern void ExecEndSeqScan(SeqScan *node); +extern void ExecSeqReScan(SeqScan *node, ExprContext *exprCtxt, Plan* parent); +extern void ExecSeqMarkPos(SeqScan *node); +extern void ExecSeqRestrPos(SeqScan *node); + +#endif /* NODESEQSCAN_H */ diff --git a/src/backend/executor/nodeSort.c b/src/backend/executor/nodeSort.c new file mode 100644 index 00000000000..9a7f4f9456f --- /dev/null +++ b/src/backend/executor/nodeSort.c @@ -0,0 +1,523 @@ +/*------------------------------------------------------------------------- + * + * nodeSort.c-- + * Routines to handle sorting of relations into temporaries. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeSort.c,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "executor/executor.h" +#include "executor/nodeSort.h" +#include "utils/palloc.h" +#include "utils/psort.h" +#include "catalog/catalog.h" +#include "storage/bufmgr.h" +#include "optimizer/internal.h" /* for _TEMP_RELATION_ID_ */ + +/* ---------------------------------------------------------------- + * FormSortKeys(node) + * + * Forms the structure containing information used to sort the relation. + * + * Returns an array of ScanKeyData. 
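FormSortKeys, described above and defined just below, walks the target list and drops an entry into slot reskey-1 of a freshly allocated key array whenever a result domain carries a non-zero sort-key position. The indexing scheme, with simplified stand-in types in place of the catalog structures:

    #include <stdlib.h>

    typedef struct TargetCol {
        int attno;     /* column number in the result */
        int reskey;    /* 1-based sort-key position, 0 = not a sort column */
        int reskeyop;  /* comparison operator for this key */
    } TargetCol;

    typedef struct SortKey { int attno; int op; } SortKey;

    /* Allocate keycount keys and fill slot reskey-1 for each sort column. */
    static SortKey *form_sort_keys(const TargetCol *tlist, int ntargets, int keycount)
    {
        SortKey *keys;

        if (keycount <= 0)
            return NULL;                         /* mirrors the elog(WARN) guard */
        keys = calloc((size_t) keycount, sizeof(SortKey));
        if (keys == NULL)
            return NULL;
        for (int i = 0; i < ntargets; i++)
            if (tlist[i].reskey > 0 && tlist[i].reskey <= keycount) {
                keys[tlist[i].reskey - 1].attno = tlist[i].attno;
                keys[tlist[i].reskey - 1].op    = tlist[i].reskeyop;
            }
        return keys;
    }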
+ * ---------------------------------------------------------------- + */ +static ScanKey +FormSortKeys(Sort *sortnode) +{ + ScanKey sortkeys; + List *targetList; + List *tl; + int keycount; + Resdom *resdom; + AttrNumber resno; + Index reskey; + Oid reskeyop; + + /* ---------------- + * get information from the node + * ---------------- + */ + targetList = sortnode->plan.targetlist; + keycount = sortnode->keycount; + + /* ---------------- + * first allocate space for scan keys + * ---------------- + */ + if (keycount <= 0) + elog(WARN, "FormSortKeys: keycount <= 0"); + sortkeys = (ScanKey) palloc(keycount * sizeof(ScanKeyData)); + + /* ---------------- + * form each scan key from the resdom info in the target list + * ---------------- + */ + foreach(tl, targetList) { + TargetEntry *target = (TargetEntry *)lfirst(tl); + resdom = target->resdom; + resno = resdom->resno; + reskey = resdom->reskey; + reskeyop = resdom->reskeyop; + + if (reskey > 0) { + ScanKeyEntryInitialize(&sortkeys[reskey-1], + 0, + resno, + (RegProcedure) DatumGetInt32(reskeyop), + (Datum) 0); + } + } + + return sortkeys; +} + +/* ---------------------------------------------------------------- + * ExecSort + * + * old comments + * Retrieves tuples fron the outer subtree and insert them into a + * temporary relation. The temporary relation is then sorted and + * the sorted relation is stored in the relation whose ID is indicated + * in the 'tempid' field of this node. + * Assumes that heap access method is used. + * + * Conditions: + * -- none. + * + * Initial States: + * -- the outer child is prepared to return the first tuple. + * ---------------------------------------------------------------- + */ +TupleTableSlot * +ExecSort(Sort *node) +{ + EState *estate; + SortState *sortstate; + Plan *outerNode; + ScanDirection dir; + int keycount; + ScanKey sortkeys; + Relation tempRelation; + Relation currentRelation; + HeapScanDesc currentScanDesc; + HeapTuple heapTuple; + TupleTableSlot *slot; + Buffer buffer; + int tupCount = 0; + + /* ---------------- + * get state info from node + * ---------------- + */ + SO1_printf("ExecSort: %s\n", + "entering routine"); + + sortstate = node->sortstate; + estate = node->plan.state; + dir = estate->es_direction; + + /* ---------------- + * the first time we call this, we retrieve all tuples + * from the subplan into a temporary relation and then + * we sort the relation. Subsequent calls return tuples + * from the temporary relation. + * ---------------- + */ + + if (sortstate->sort_Flag == false) { + SO1_printf("ExecSort: %s\n", + "sortstate == false -> sorting subplan"); + /* ---------------- + * set all relations to be scanned in the forward direction + * while creating the temporary relation. + * ---------------- + */ + estate->es_direction = EXEC_FRWD; + + /* ---------------- + * if we couldn't create the temp or current relations then + * we print a warning and return NULL. + * ---------------- + */ + tempRelation = sortstate->sort_TempRelation; + if (tempRelation == NULL) { + elog(DEBUG, "ExecSort: temp relation is NULL! aborting..."); + return NULL; + } + + currentRelation = sortstate->csstate.css_currentRelation; + if (currentRelation == NULL) { + elog(DEBUG, "ExecSort: current relation is NULL! 
aborting..."); + return NULL; + } + + /* ---------------- + * retrieve tuples from the subplan and + * insert them in the temporary relation + * ---------------- + */ + outerNode = outerPlan((Plan *) node); + SO1_printf("ExecSort: %s\n", + "inserting tuples into tempRelation"); + + for (;;) { + slot = ExecProcNode(outerNode, (Plan*)node); + + if (TupIsNull(slot)) + break; + + tupCount++; + + heapTuple = slot->val; + + heap_insert(tempRelation, /* relation desc */ + heapTuple); /* heap tuple to insert */ + + ExecClearTuple(slot); + } + + /* ---------------- + * now sort the tuples in our temporary relation + * into a new sorted relation using psort() + * + * psort() seems to require that the relations + * are created and opened in advance. + * -cim 1/25/90 + * ---------------- + */ + keycount = node->keycount; + sortkeys = (ScanKey)sortstate->sort_Keys; + SO1_printf("ExecSort: %s\n", + "calling psort"); + + /* + * If no tuples were fetched from the proc node return NULL now + * psort dumps it if 0 tuples are in the relation and I don't want + * to try to debug *that* routine!! + */ + if (tupCount == 0) + return NULL; + + psort(tempRelation, /* old relation */ + currentRelation, /* new relation */ + keycount, /* number keys */ + sortkeys); /* keys */ + + if (currentRelation == NULL) { + elog(DEBUG, "ExecSort: sorted relation is NULL! aborting..."); + return NULL; + } + + /* ---------------- + * restore to user specified direction + * ---------------- + */ + estate->es_direction = dir; + + /* ---------------- + * now initialize the scan descriptor to scan the + * sorted relation and update the sortstate information + * ---------------- + */ + currentScanDesc = heap_beginscan(currentRelation, /* relation */ + ScanDirectionIsBackward(dir), + /* bkwd flag */ + NowTimeQual, /* time qual */ + 0, /* num scan keys */ + NULL); /* scan keys */ + + sortstate->csstate.css_currentRelation = currentRelation; + sortstate->csstate.css_currentScanDesc = currentScanDesc; + + /* ---------------- + * make sure the tuple descriptor is up to date + * ---------------- + */ + slot = sortstate->csstate.css_ScanTupleSlot; + + slot->ttc_tupleDescriptor = + RelationGetTupleDescriptor(currentRelation); + + /* ---------------- + * finally set the sorted flag to true + * ---------------- + */ + sortstate->sort_Flag = true; + } + else { + slot = sortstate->csstate.css_ScanTupleSlot; + } + + SO1_printf("ExecSort: %s\n", + "retrieveing tuple from sorted relation"); + + /* ---------------- + * at this point we know we have a sorted relation so + * we preform a simple scan on it with amgetnext().. + * ---------------- + */ + currentScanDesc = sortstate->csstate.css_currentScanDesc; + + heapTuple = heap_getnext(currentScanDesc, /* scan desc */ + ScanDirectionIsBackward(dir), + /* bkwd flag */ + &buffer); /* return: buffer */ + + /* Increase the pin count on the buffer page, because the tuple stored in + the slot also points to it (as well as the scan descriptor). If we + don't, ExecStoreTuple will decrease the pin count on the next iteration. + - 01/09/93 */ + + if (buffer != InvalidBuffer) + IncrBufferRefCount(buffer); + + return ExecStoreTuple(heapTuple, /* tuple to store */ + slot, /* slot to store in */ + buffer, /* this tuple's buffer */ + false); /* don't free stuff from amgetnext */ +} + +/* ---------------------------------------------------------------- + * ExecInitSort + * + * old comments + * Creates the run-time state information for the sort node + * produced by the planner and initailizes its outer subtree. 
+ * ---------------------------------------------------------------- + */ +bool +ExecInitSort(Sort *node, EState *estate, Plan *parent) +{ + SortState *sortstate; + Plan *outerPlan; + ScanKey sortkeys; + TupleDesc tupType; + Oid tempOid; + Oid sortOid; + Relation tempDesc; + Relation sortedDesc; + + SO1_printf("ExecInitSort: %s\n", + "initializing sort node"); + + /* ---------------- + * assign the node's execution state + * ---------------- + */ + node->plan.state = estate; + + /* ---------------- + * create state structure + * ---------------- + */ + sortstate = makeNode(SortState); + sortstate->sort_Flag = 0; + sortstate->sort_Keys = NULL; + sortstate->sort_TempRelation = NULL; + + node->sortstate = sortstate; + + /* ---------------- + * Miscellanious initialization + * + * + assign node's base_id + * + assign debugging hooks + * + * Sort nodes don't initialize their ExprContexts because + * they never call ExecQual or ExecTargetList. + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &sortstate->csstate.cstate, parent); + +#define SORT_NSLOTS 1 + /* ---------------- + * tuple table initialization + * + * sort nodes only return scan tuples from their sorted + * relation. + * ---------------- + */ + ExecInitScanTupleSlot(estate, &sortstate->csstate); + ExecInitResultTupleSlot(estate, &sortstate->csstate.cstate); + + /* ---------------- + * initializes child nodes + * ---------------- + */ + outerPlan = outerPlan((Plan *) node); + ExecInitNode(outerPlan, estate, (Plan *) node); + + /* ---------------- + * initialize sortstate information + * ---------------- + */ + sortkeys = FormSortKeys(node); + sortstate->sort_Keys = sortkeys; + sortstate->sort_Flag = false; + + /* ---------------- + * initialize tuple type. no need to initialize projection + * info because this node doesn't do projections. + * ---------------- + */ + ExecAssignScanTypeFromOuterPlan((Plan *) node, &sortstate->csstate); + sortstate->csstate.cstate.cs_ProjInfo = NULL; + + /* ---------------- + * get type information needed for ExecCreatR + * ---------------- + */ + tupType = ExecGetScanType(&sortstate->csstate); + + /* ---------------- + * ExecCreatR wants its second argument to be an object id of + * a relation in the range table or _TEMP_RELATION_ID_ + * indicating that the relation is not in the range table. + * + * In the second case ExecCreatR creates a temp relation. + * (currently this is the only case we support -cim 10/16/89) + * ---------------- + */ + tempOid = node->tempid; + sortOid = _TEMP_RELATION_ID_; + + /* ---------------- + * create the temporary relations + * ---------------- + */ +/* len = ExecTargetListLength(node->plan.targetlist); */ + tempDesc = ExecCreatR(tupType, tempOid); + sortedDesc = ExecCreatR(tupType, sortOid); + + /* ---------------- + * save the relation descriptor in the sortstate + * ---------------- + */ + sortstate->sort_TempRelation = tempDesc; + sortstate->csstate.css_currentRelation = sortedDesc; + SO1_printf("ExecInitSort: %s\n", + "sort node initialized"); + + /* ---------------- + * return relation oid of temporary sort relation in a list + * (someday -- for now we return LispTrue... cim 10/12/89) + * ---------------- + */ + return TRUE; +} + +int +ExecCountSlotsSort(Sort *node) +{ + return ExecCountSlotsNode(outerPlan((Plan *)node)) + + ExecCountSlotsNode(innerPlan((Plan *)node)) + + SORT_NSLOTS; +} + +/* ---------------------------------------------------------------- + * ExecEndSort(node) + * + * old comments + * destroys the temporary relation. 
+ * ---------------------------------------------------------------- + */ +void +ExecEndSort(Sort *node) +{ + SortState *sortstate; + Relation tempRelation; + Relation sortedRelation; + Plan *outerPlan; + + /* ---------------- + * get info from the sort state + * ---------------- + */ + SO1_printf("ExecEndSort: %s\n", + "shutting down sort node"); + + sortstate = node->sortstate; + tempRelation = sortstate->sort_TempRelation; + sortedRelation = sortstate->csstate.css_currentRelation; + + heap_destroyr(tempRelation); + heap_destroyr(sortedRelation); + + + /* ---------------- + * close the sorted relation and shut down the scan. + * ---------------- + */ + ExecCloseR((Plan *) node); + + /* ---------------- + * shut down the subplan + * ---------------- + */ + outerPlan = outerPlan((Plan *) node); + ExecEndNode(outerPlan, (Plan*)node); + + /* ---------------- + * clean out the tuple table + * ---------------- + */ + ExecClearTuple(sortstate->csstate.css_ScanTupleSlot); + + SO1_printf("ExecEndSort: %s\n", + "sort node shutdown"); +} + +/* ---------------------------------------------------------------- + * ExecSortMarkPos + * ---------------------------------------------------------------- + */ +void +ExecSortMarkPos(Sort *node) +{ + SortState *sortstate; + HeapScanDesc sdesc; + + /* ---------------- + * if we haven't sorted yet, just return + * ---------------- + */ + sortstate = node->sortstate; + if (sortstate->sort_Flag == false) + return; + + sdesc = sortstate->csstate.css_currentScanDesc; + heap_markpos(sdesc); + return; +} + +/* ---------------------------------------------------------------- + * ExecSortRestrPos + * ---------------------------------------------------------------- + */ +void +ExecSortRestrPos(Sort *node) +{ + SortState *sortstate; + HeapScanDesc sdesc; + + /* ---------------- + * if we haven't sorted yet, just return. + * ---------------- + */ + sortstate = node->sortstate; + if (sortstate->sort_Flag == false) + return; + + /* ---------------- + * restore the scan to the previously marked position + * ---------------- + */ + sdesc = sortstate->csstate.css_currentScanDesc; + heap_restrpos(sdesc); +} diff --git a/src/backend/executor/nodeSort.h b/src/backend/executor/nodeSort.h new file mode 100644 index 00000000000..504b8a1f19e --- /dev/null +++ b/src/backend/executor/nodeSort.h @@ -0,0 +1,23 @@ +/*------------------------------------------------------------------------- + * + * nodeSort.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeSort.h,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODESORT_H +#define NODESORT_H + +extern TupleTableSlot *ExecSort(Sort *node); +extern bool ExecInitSort(Sort *node, EState *estate, Plan *parent); +extern int ExecCountSlotsSort(Sort *node); +extern void ExecEndSort(Sort *node); +extern void ExecSortMarkPos(Sort *node); +extern void ExecSortRestrPos(Sort *node); + +#endif /* NODESORT_H */ diff --git a/src/backend/executor/nodeTee.c b/src/backend/executor/nodeTee.c new file mode 100644 index 00000000000..5be700b9ae7 --- /dev/null +++ b/src/backend/executor/nodeTee.c @@ -0,0 +1,503 @@ +/*------------------------------------------------------------------------- + * + * nodeTee.c-- + * + * + * Copyright (c) 1994, Regents of the University of California + * + * DESCRIPTION + * This code provides support for a tee node, which allows multiple + * parent in a megaplan. 
+ * + * INTERFACE ROUTINES + * ExecTee + * ExecInitTee + * ExecEndTee + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/Attic/nodeTee.c,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +#include <stdio.h> /* XXX assumed: angle-bracket header name lost in formatting */ +#include "utils/palloc.h" +#include "utils/relcache.h" +#include "storage/bufmgr.h" /* for IncrBufferRefCount */ +#include "optimizer/internal.h" +#include "executor/executor.h" +#include "executor/nodeTee.h" +#include "catalog/catalog.h" +#include "tcop/pquery.h" + +/* ------------------------------------------------------------------ + * ExecInitTee + * + * Create tee state + * + * ------------------------------------------------------------------ + */ +bool +ExecInitTee(Tee* node, EState *currentEstate, Plan * parent) +{ + TeeState *teeState; + Plan *outerPlan; + int len; + Relation bufferRel; + TupleDesc tupType; + EState *estate; + + /* it is possible that the Tee has already been initialized + since it can be reached by multiple parents. + If it is already initialized, simply return and do + not initialize the children nodes again + */ + if (node->plan.state) + return TRUE; + + /* ---------------- + * assign the node's execution state + * ---------------- + */ + /* make a new executor state, because we have a different + es_range_table */ + +/* node->plan.state = estate;*/ + + estate = CreateExecutorState(); + estate->es_direction = currentEstate->es_direction; + estate->es_BaseId = currentEstate->es_BaseId; + estate->es_BaseId = currentEstate->es_BaseId; + estate->es_tupleTable = currentEstate->es_tupleTable; + estate->es_refcount = currentEstate->es_refcount; + estate->es_junkFilter = currentEstate->es_junkFilter; + + /* use the range table for Tee subplan since the range tables + for the two parents may be different */ + if (node->rtentries) + estate->es_range_table = node->rtentries; + else + estate->es_range_table = currentEstate->es_range_table; + + node->plan.state = estate; + + + /* ---------------- + * create teeState structure + * ---------------- + */ + teeState = makeNode(TeeState); + teeState->tee_leftPlace = 0; + teeState->tee_rightPlace = 0; + teeState->tee_lastPlace = 0; + teeState->tee_bufferRel = NULL; + teeState->tee_leftScanDesc = NULL; + teeState->tee_rightScanDesc = NULL; + + + node->teestate = teeState; + + /* ---------------- + * Miscellaneous initialization + * + * + assign node's base_id + * + assign debugging hooks and + * + create expression context for node + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, &(teeState->cstate), parent); + ExecAssignExprContext(estate, &(teeState->cstate)); + +#define TEE_NSLOTS 2 + /* ---------------- + * initialize tuple slots + * ---------------- + */ + ExecInitResultTupleSlot(estate, &(teeState->cstate)); + + /* initialize child nodes */ + outerPlan = outerPlan((Plan*) node); + ExecInitNode(outerPlan, estate, (Plan*) node); + + /* ---------------- + * the tuple type info is from the outer plan of this node + * the result type is also the same as the outerplan + */ + ExecAssignResultTypeFromOuterPlan((Plan*) node, &(teeState->cstate)); + ExecAssignProjectionInfo((Plan*)node, &teeState->cstate); + + /* --------------------------------------- + initialize temporary relation to buffer tuples + */ + tupType = ExecGetResultType(&(teeState->cstate)); + len = ExecTargetListLength(((Plan*)node)->targetlist); + +/* bufferRel = ExecCreatR(len, tupType, _TEMP_RELATION_ID_); */ + + /* create a catalogued relation even though
this is a temporary relation */ + /* cleanup of catalogued relations is easier to do */ + + if (node->teeTableName[0] != '\0') { + Relation r; + + teeState->tee_bufferRelname = pstrdup(node->teeTableName); + + /* we are given an tee table name, + if a relation by that name exists, then we open it, + else we create it and then open it */ + r = RelationNameGetRelation(teeState->tee_bufferRelname); + + if (RelationIsValid(r)) + bufferRel = heap_openr(teeState->tee_bufferRelname); + else + bufferRel = heap_open(heap_create(teeState->tee_bufferRelname, +/*FIX */ NULL, + 'n', + DEFAULT_SMGR, + tupType)); + } + else { + sprintf(teeState->tee_bufferRelname, + "ttemp_%d", /* 'ttemp' for 'tee' temporary*/ + newoid()); +/* bufferRel = ExecCreatR(len, tupType, _TEMP_RELATION_ID); */ + bufferRel = heap_open(heap_create(teeState->tee_bufferRelname, + NULL, /*XXX */ + 'n', + DEFAULT_SMGR, + tupType)); + } + + teeState->tee_bufferRel = bufferRel; + + /*initialize a memory context for allocating thing like scan descriptors */ + /* we do this so that on cleanup of the tee, we can free things. + if we didn't have our own memory context, we would be in the memory + context of the portal that we happen to be using at the moment */ + + teeState->tee_mcxt = (MemoryContext)CreateGlobalMemory(teeState->tee_bufferRelname); + + /* don't initialize the scan descriptors here + because it's not good to initialize scan descriptors on empty + rels. Wait until the scan descriptors are needed + before initializing them. */ + + teeState->tee_leftScanDesc = NULL; + teeState->tee_rightScanDesc = NULL; + + return TRUE; +} + +int +ExecCountSlotsTee(Tee *node) +{ + /* Tee nodes can't have innerPlans */ + return ExecCountSlotsNode(outerPlan(node)) + TEE_NSLOTS; +} + +/* ---------------------------------------------------------------- + initTeeScanDescs + initializes the left and right scandescs on the temporary + relation of a Tee node + + must open two separate scan descriptors, + because the left and right scans may be at different points +* ---------------------------------------------------------------- +*/ +void +initTeeScanDescs(Tee* node) +{ + TeeState *teeState; + Relation bufferRel; + ScanDirection dir; + MemoryContext orig; + + teeState = node->teestate; + if (teeState->tee_leftScanDesc && teeState->tee_rightScanDesc) + return; + + orig = CurrentMemoryContext; + MemoryContextSwitchTo(teeState->tee_mcxt); + + bufferRel = teeState->tee_bufferRel; + dir = ((Plan*)node)->state->es_direction; /* backwards not handled yet XXX */ + + if (teeState->tee_leftScanDesc == NULL) + { + teeState->tee_leftScanDesc = heap_beginscan(bufferRel, + ScanDirectionIsBackward(dir), + NowTimeQual, /* time qual */ + 0, /* num scan keys */ + NULL /* scan keys */ + ); + } + if (teeState->tee_rightScanDesc == NULL) + { + teeState->tee_rightScanDesc = heap_beginscan(bufferRel, + ScanDirectionIsBackward(dir), + NowTimeQual, /* time qual */ + 0, /* num scan keys */ + NULL /* scan keys */ + ); + } + + MemoryContextSwitchTo(orig); +} + +/* ---------------------------------------------------------------- + * ExecTee(node) + * + * + * A Tee serves to connect a subplan to multiple parents. + * the subplan is always the outplan of the Tee node. + * + * The Tee gets requests from either leftParent or rightParent, + * fetches the result tuple from the child, and then + * stored the result into a temporary relation (serving as a queue). + * leftPlace and rightPlace keep track of where the left and rightParents + * are. 
+ * If a parent requests a tuple and that parent is not at the end + * of the temporary relation, then the request is satisfied from + * the queue instead of by executing the child plan + * + * ---------------------------------------------------------------- + */ + +TupleTableSlot* +ExecTee(Tee *node, Plan *parent) +{ + EState *estate; + TeeState *teeState; + int leftPlace, rightPlace, lastPlace; + int branch; + TupleTableSlot* result; + TupleTableSlot* slot; + Plan *childNode; + ScanDirection dir; + HeapTuple heapTuple; + Relation bufferRel; + HeapScanDesc scanDesc; + Buffer buffer; + + estate = ((Plan*)node)->state; + teeState = node->teestate; + leftPlace = teeState->tee_leftPlace; + rightPlace = teeState->tee_rightPlace; + lastPlace = teeState->tee_lastPlace; + bufferRel = teeState->tee_bufferRel; + + childNode = outerPlan(node); + + dir = estate->es_direction; + + /* XXX doesn't handle backwards direction yet */ + + if (parent == node->leftParent) { + branch = leftPlace; + } + else + if ( (parent == node->rightParent) || (parent == (Plan*) node)) + /* the tee node could be the root node of the plan, + in which case, we treat it like a right-parent pull*/ + { + branch = rightPlace; + } + else + { + elog(WARN,"A Tee node can only be executed from its left or right parent\n"); + return NULL; + } + + if (branch == lastPlace) + { /* we're at the end of the queue already, + - get a new tuple from the child plan, + - store it in the queue, + - increment lastPlace, + - increment leftPlace or rightPlace as appropriate, + - and return result + */ + slot = ExecProcNode(childNode, (Plan*)node); + if (!TupIsNull(slot)) + { + heapTuple = slot->val; + + /* insert into temporary relation */ + heap_insert(bufferRel, heapTuple); + + /* once there is data in the temporary relation, + ensure that the left and right scandescs are initialized */ + initTeeScanDescs(node); + + scanDesc = (parent == node->leftParent) ? + teeState->tee_leftScanDesc : teeState->tee_rightScanDesc; + + { + /* move the scandesc forward so we don't re-read this tuple later */ + HeapTuple throwAway; + /* Buffer buffer;*/ + throwAway = heap_getnext(scanDesc, + ScanDirectionIsBackward(dir), + /* &buffer */ + (Buffer*)NULL); + } + + /* set the shouldFree field of the child's slot so that + when the child's slot is free'd, this tuple isn't free'd also */ + /* does this mean this tuple has to be garbage collected later??*/ + slot->ttc_shouldFree = false; + + teeState->tee_lastPlace = lastPlace + 1; + } + result = slot; + } + else + {/* the desired data already exists in the temporary relation */ + scanDesc = (parent == node->leftParent) ? + teeState->tee_leftScanDesc : teeState->tee_rightScanDesc; + + heapTuple = heap_getnext(scanDesc, + ScanDirectionIsBackward(dir), + &buffer); + + /* Increase the pin count on the buffer page, because the + tuple stored in the slot also points to it (as well as + the scan descriptor). If we don't, ExecStoreTuple will + decrease the pin count on the next iteration. 
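The Tee logic above amounts to a shared append-only queue with one read cursor per parent: a parent positioned at the end pulls a fresh tuple from the child and appends it, while a parent that has fallen behind is served from the buffered tuples. A compact sketch with an in-memory buffer standing in for the temporary relation:

    #include <stdbool.h>
    #include <stdlib.h>

    typedef struct TeeSketch {
        int   *buf;                /* buffered child output (the "temporary relation") */
        size_t len;                /* lastPlace: how many tuples have been buffered     */
        size_t place[2];           /* leftPlace / rightPlace: each parent's cursor      */
        bool (*child)(int *out);   /* pulls the next tuple from the subplan             */
    } TeeSketch;

    /* parent is 0 (left) or 1 (right); returns false when the child is exhausted. */
    static bool tee_next(TeeSketch *t, int parent, int *out)
    {
        if (t->place[parent] == t->len) {        /* at the end: pull and append */
            int v;
            int *grown;

            if (!t->child(&v))
                return false;
            grown = realloc(t->buf, (t->len + 1) * sizeof(int));
            if (grown == NULL)
                return false;                    /* out of memory: give up */
            t->buf = grown;
            t->buf[t->len++] = v;
        }
        *out = t->buf[t->place[parent]++];       /* serve from the buffer */
        return true;
    }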
*/ + + if (buffer != InvalidBuffer) + IncrBufferRefCount(buffer); + + slot = teeState->cstate.cs_ResultTupleSlot; + slot->ttc_tupleDescriptor = RelationGetTupleDescriptor(bufferRel); + + result = ExecStoreTuple(heapTuple,/* tuple to store */ + slot, /* slot to store in */ + buffer,/* this tuple's buffer */ + false); /* don't free stuff from heap_getnext */ + + } + + if (parent == node->leftParent) + { + teeState->tee_leftPlace = leftPlace+1; + } + else + { + teeState->tee_rightPlace = rightPlace+1; + } + + return result; +} + +/* ---------------------------------------------------------------- + * ExecTeeReScan(node) + * + * Rescans the relation. + * ---------------------------------------------------------------- + */ +void +ExecTeeReScan(Tee *node, ExprContext *exprCtxt, Plan *parent) +{ + + EState *estate; + TeeState *teeState; + ScanDirection dir; + + estate = ((Plan*)node)->state; + teeState = node->teestate; + + dir = estate->es_direction; + + /* XXX doesn't handle backwards direction yet */ + + if (parent == node->leftParent) { + if (teeState->tee_leftScanDesc) + { + heap_rescan(teeState->tee_leftScanDesc, + ScanDirectionIsBackward(dir), + NULL); + teeState->tee_leftPlace = 0; + } + } + else + { + if (teeState->tee_rightScanDesc) + { + heap_rescan(teeState->tee_leftScanDesc, + ScanDirectionIsBackward(dir), + NULL); + teeState->tee_rightPlace = 0; + } + } +} + + +/* --------------------------------------------------------------------- + * ExecEndTee + * + * End the Tee node, and free up any storage + * since a Tee node can be downstream of multiple parent nodes, + * only free when both parents are done + * -------------------------------------------------------------------- + */ + +void +ExecEndTee(Tee* node, Plan* parent) +{ + EState *estate; + TeeState *teeState; + int leftPlace, rightPlace, lastPlace; + Relation bufferRel; + MemoryContext orig; + + estate = ((Plan*)node)->state; + teeState = node->teestate; + leftPlace = teeState->tee_leftPlace; + rightPlace = teeState->tee_rightPlace; + lastPlace = teeState->tee_lastPlace; + + if (!node->leftParent || parent == node->leftParent) + leftPlace = -1; + + if (!node->rightParent || parent == node->rightParent) + rightPlace = -1; + + if (parent == (Plan*)node) + rightPlace = leftPlace = -1; + + teeState->tee_leftPlace = leftPlace; + teeState->tee_rightPlace = rightPlace; + if ( (leftPlace == -1) && (rightPlace == -1) ) + { + /* remove the temporary relations */ + /* and close the scan descriptors */ + + bufferRel = teeState->tee_bufferRel; + if (bufferRel) { + heap_destroyr(bufferRel); + teeState->tee_bufferRel = NULL; + if (teeState->tee_mcxt) { + orig = CurrentMemoryContext; + MemoryContextSwitchTo(teeState->tee_mcxt); + } + + if (teeState->tee_leftScanDesc) + { + heap_endscan(teeState->tee_leftScanDesc); + teeState->tee_leftScanDesc = NULL; + } + if (teeState->tee_rightScanDesc) + { + heap_endscan(teeState->tee_rightScanDesc); + teeState->tee_rightScanDesc = NULL; + } + + if (teeState->tee_mcxt) { + MemoryContextSwitchTo(orig); + teeState->tee_mcxt = NULL; + } + } + } + +} + diff --git a/src/backend/executor/nodeTee.h b/src/backend/executor/nodeTee.h new file mode 100644 index 00000000000..aa50efdead4 --- /dev/null +++ b/src/backend/executor/nodeTee.h @@ -0,0 +1,22 @@ +/*------------------------------------------------------------------------- + * + * nodeTee.h-- + * support functions for a Tee executor node + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeTee.h,v 1.1.1.1 1996/07/09 06:21:27 
scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +#ifndef NODETEE_H +#define NODETEE_H + +extern TupleTableSlot* ExecTee(Tee* node, Plan* parent); +extern bool ExecInitTee(Tee* node, EState* estate, Plan* parent); +extern void ExecTeeReScan(Tee *node, ExprContext *exprCtxt, Plan *parent); +extern void ExecEndTee(Tee* node, Plan* parent); +extern int ExecCountSlotsTee(Tee* node); + +#endif /* NODETEE_H */ diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c new file mode 100644 index 00000000000..8be0bd8497c --- /dev/null +++ b/src/backend/executor/nodeUnique.c @@ -0,0 +1,316 @@ +/*------------------------------------------------------------------------- + * + * nodeUnique.c-- + * Routines to handle unique'ing of queries where appropriate + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/executor/nodeUnique.c,v 1.1.1.1 1996/07/09 06:21:27 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * ExecUnique - generate a unique'd temporary relation + * ExecInitUnique - initialize node and subnodes.. + * ExecEndUnique - shutdown node and subnodes + * + * NOTES + * Assumes tuples returned from subplan arrive in + * sorted order. + * + */ +#include "executor/executor.h" +#include "executor/nodeUnique.h" +#include "optimizer/clauses.h" +#include "access/printtup.h" /* for typtoout() */ +#include "utils/builtins.h" /* for namecpy()*/ + +/* ---------------------------------------------------------------- + * ExecIdenticalTuples + * + * This is a hack function used by ExecUnique to see if + * two tuples are identical. This should be provided + * by the heap tuple code but isn't. The real problem + * is that we assume we can byte compare tuples to determine + * if they are "equal". In fact, if we have user defined + * types there may be problems because it's possible that + * an ADT may have multiple representations with the + * same ADT value. -cim + * ---------------------------------------------------------------- + */ +static bool /* true if tuples are identical, false otherwise */ +ExecIdenticalTuples(TupleTableSlot *t1, TupleTableSlot *t2) +{ + HeapTuple h1; + HeapTuple h2; + char *d1; + char *d2; + int len; + + h1 = t1->val; + h2 = t2->val; + + /* ---------------- + * if tuples aren't the same length then they are + * obviously different (one may have null attributes). + * ---------------- + */ + if (h1->t_len != h2->t_len) + return false; + + /* ---------------- + * if the tuples have different header offsets then + * they are different. This will prevent us from returning + * true when comparing tuples of one attribute where one of + * two we're looking at is null (t_len - t_hoff == 0). + * THE t_len FIELDS CAN BE THE SAME IN THIS CASE!! + * ---------------- + */ + if (h1->t_hoff != h2->t_hoff) + return false; + + /* ---------------- + * ok, now get the pointers to the data and the + * size of the attribute portion of the tuple. + * ---------------- + */ + d1 = (char *) GETSTRUCT(h1); + d2 = (char *) GETSTRUCT(h2); + len = (int) h1->t_len - (int) h1->t_hoff; + + /* ---------------- + * byte compare the data areas and return the result. 
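ExecIdenticalTuples above byte-compares two tuples, and ExecUnique (further below) uses that test to drop consecutive duplicates from an already-sorted stream. Over a plain sorted array the same filtering loop looks like this, with memcmp standing in for the tuple comparison:

    #include <stddef.h>
    #include <string.h>

    /* Copy the distinct elements of a sorted array of fixed-size records into
     * dst (which must be at least as large as src); returns how many were kept. */
    static size_t unique_sorted(const void *src, size_t nrec, size_t recsize, void *dst)
    {
        const char *in  = src;
        char       *out = dst;
        size_t      kept = 0;

        for (size_t i = 0; i < nrec; i++) {
            const char *cur = in + i * recsize;

            /* keep the record unless it is byte-identical to the last one kept */
            if (kept == 0 || memcmp(out + (kept - 1) * recsize, cur, recsize) != 0) {
                memcpy(out + kept * recsize, cur, recsize);
                kept++;
            }
        }
        return kept;
    }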
+ * ---------------- + */ + if (memcmp(d1, d2, len) != 0) + return false; + + return true; +} + +/* ---------------------------------------------------------------- + * ExecUnique + * + * This is a very simple node which filters out duplicate + * tuples from a stream of sorted tuples from a subplan. + * + * XXX see comments below regarding freeing tuples. + * ---------------------------------------------------------------- + */ +TupleTableSlot * /* return: a tuple or NULL */ +ExecUnique(Unique *node) +{ + UniqueState *uniquestate; + TupleTableSlot *resultTupleSlot; + TupleTableSlot *slot; + Plan *outerPlan; + char *uniqueAttr; + AttrNumber uniqueAttrNum; + TupleDesc tupDesc; + Oid typoutput; + + /* ---------------- + * get information from the node + * ---------------- + */ + uniquestate = node->uniquestate; + outerPlan = outerPlan((Plan *) node); + resultTupleSlot = uniquestate->cs_ResultTupleSlot; + uniqueAttr = node->uniqueAttr; + uniqueAttrNum = node->uniqueAttrNum; + + if (uniqueAttr) { + tupDesc = ExecGetResultType(uniquestate); + typoutput = typtoout((Oid)tupDesc->attrs[uniqueAttrNum]->atttypid); + } + + /* ---------------- + * now loop, returning only non-duplicate tuples. + * We assume that the tuples arrive in sorted order + * so we can detect duplicates easily. + * ---------------- + */ + for (;;) { + /* ---------------- + * fetch a tuple from the outer subplan + * ---------------- + */ + slot = ExecProcNode(outerPlan, (Plan*)node); + if (TupIsNull(slot)) + return NULL; + + /* ---------------- + * we use the result tuple slot to hold our saved tuples. + * if we haven't a saved tuple to compare our new tuple with, + * then we exit the loop. This new tuple as the saved tuple + * the next time we get here. + * ---------------- + */ + if (TupIsNull(resultTupleSlot)) + break; + + /* ---------------- + * now test if the new tuple and the previous + * tuple match. If so then we loop back and fetch + * another new tuple from the subplan. + * ---------------- + */ + + if (uniqueAttr) { + /* to check equality, we check to see if the typoutput + of the attributes are equal */ + bool isNull1,isNull2; + char *attr1, *attr2; + char *val1, *val2; + + attr1 = heap_getattr(slot->val, InvalidBuffer, + uniqueAttrNum, tupDesc,&isNull1); + attr2 = heap_getattr(resultTupleSlot->val, InvalidBuffer, + uniqueAttrNum, tupDesc,&isNull2); + + if (isNull1 == isNull2) { + if (isNull1) /* both are null, they are equal */ + continue; + val1 = fmgr(typoutput, attr1, gettypelem(tupDesc->attrs[uniqueAttrNum]->atttypid)); + val2 = fmgr(typoutput, attr2, gettypelem(tupDesc->attrs[uniqueAttrNum]->atttypid)); + /* now, val1 and val2 are ascii representations so we can + use strcmp for comparison */ + if (strcmp(val1,val2) == 0) /* they are equal */ + continue; + else + break; + } + else /* one is null and the other isn't, they aren't equal */ + break; + + } + else { + if (! ExecIdenticalTuples(slot, resultTupleSlot)) + break; + } + + } + + /* ---------------- + * we have a new tuple different from the previous saved tuple + * so we save it in the saved tuple slot. We copy the tuple + * so we don't increment the buffer ref count. + * ---------------- + */ + ExecStoreTuple(heap_copytuple(slot->val), + resultTupleSlot, + InvalidBuffer, + true); + + return resultTupleSlot; +} + +/* ---------------------------------------------------------------- + * ExecInitUnique + * + * This initializes the unique node state structures and + * the node's subplan. 
+ * ---------------------------------------------------------------- + */ +bool /* return: initialization status */ +ExecInitUnique(Unique *node, EState *estate, Plan *parent) +{ + UniqueState *uniquestate; + Plan *outerPlan; + char *uniqueAttr; + + /* ---------------- + * assign execution state to node + * ---------------- + */ + node->plan.state = estate; + + /* ---------------- + * create new UniqueState for node + * ---------------- + */ + uniquestate = makeNode(UniqueState); + node->uniquestate = uniquestate; + uniqueAttr = node->uniqueAttr; + + /* ---------------- + * Miscellanious initialization + * + * + assign node's base_id + * + assign debugging hooks and + * + * Unique nodes have no ExprContext initialization because + * they never call ExecQual or ExecTargetList. + * ---------------- + */ + ExecAssignNodeBaseInfo(estate, uniquestate, parent); + +#define UNIQUE_NSLOTS 1 + /* ------------ + * Tuple table initialization + * ------------ + */ + ExecInitResultTupleSlot(estate, uniquestate); + + /* ---------------- + * then initialize outer plan + * ---------------- + */ + outerPlan = outerPlan((Plan *) node); + ExecInitNode(outerPlan, estate, (Plan *) node); + + /* ---------------- + * unique nodes do no projections, so initialize + * projection info for this node appropriately + * ---------------- + */ + ExecAssignResultTypeFromOuterPlan((Plan *)node,uniquestate); + uniquestate->cs_ProjInfo = NULL; + + if (uniqueAttr) { + TupleDesc tupDesc; + int i = 0; + + tupDesc = ExecGetResultType(uniquestate); + /* the parser should have ensured that uniqueAttr is a legal attribute name*/ + while ( strcmp((tupDesc->attrs[i]->attname).data, uniqueAttr) != 0) + i++; + node->uniqueAttrNum = i+1; /* attribute numbers start from 1 */ + } + else + node->uniqueAttrNum = InvalidAttrNumber; + + /* ---------------- + * all done. + * ---------------- + */ + return TRUE; +} + +int +ExecCountSlotsUnique(Unique *node) +{ + return ExecCountSlotsNode(outerPlan(node)) + + ExecCountSlotsNode(innerPlan(node)) + + UNIQUE_NSLOTS; +} + +/* ---------------------------------------------------------------- + * ExecEndUnique + * + * This shuts down the subplan and frees resources allocated + * to this node. 
+ * ---------------------------------------------------------------- + */ +void +ExecEndUnique(Unique *node) +{ + UniqueState *uniquestate; + + uniquestate = node->uniquestate; + ExecEndNode(outerPlan((Plan *) node), (Plan*)node); + ExecClearTuple(uniquestate->cs_ResultTupleSlot); +} diff --git a/src/backend/executor/nodeUnique.h b/src/backend/executor/nodeUnique.h new file mode 100644 index 00000000000..a8dfc9bd6b9 --- /dev/null +++ b/src/backend/executor/nodeUnique.h @@ -0,0 +1,21 @@ +/*------------------------------------------------------------------------- + * + * nodeUnique.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeUnique.h,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEUNIQUE_H +#define NODEUNIQUE_H + +extern TupleTableSlot *ExecUnique(Unique *node); +extern bool ExecInitUnique(Unique *node, EState *estate, Plan *parent); +extern int ExecCountSlotsUnique(Unique *node); +extern void ExecEndUnique(Unique *node); + +#endif /* NODEUNIQUE_H */ diff --git a/src/backend/executor/tuptable.h b/src/backend/executor/tuptable.h new file mode 100644 index 00000000000..33f7de33589 --- /dev/null +++ b/src/backend/executor/tuptable.h @@ -0,0 +1,72 @@ +/*------------------------------------------------------------------------- + * + * tuptable.h-- + * tuple table support stuff + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: tuptable.h,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ + * + * NOTES + * The tuple table interface is getting pretty ugly. + * It should be redesigned soon. + * + *------------------------------------------------------------------------- + */ +#ifndef TUPTABLE_H +#define TUPTABLE_H + +/* ---------------- + * Note: the executor tuple table is managed and manipulated by special + * code and macros in executor/execTuples.c and tupTable.h + * + * TupleTableSlot information + * + * shouldFree boolean - should we call pfree() on tuple + * descIsNew boolean - true when tupleDescriptor changes + * tupleDescriptor type information kept regarding the tuple data + * buffer the buffer for tuples pointing to disk pages + * + * The executor stores pointers to tuples in a ``tuple table'' + * which is composed of TupleTableSlot's. Some of the tuples + * are pointers to buffer pages and others are pointers to + * palloc'ed memory and the shouldFree variable tells us when + * we may call pfree() on a tuple. -cim 9/23/90 + * + * In the implementation of nested-dot queries such as + * "retrieve (EMP.hobbies.all)", a single scan may return tuples + * of many types, so now we return pointers to tuple descriptors + * along with tuples returned via the tuple table. 
-cim 1/18/90 + * ---------------- + */ +typedef struct TupleTableSlot { + NodeTag type; + HeapTuple val; + bool ttc_shouldFree; + bool ttc_descIsNew; + TupleDesc ttc_tupleDescriptor; + Buffer ttc_buffer; + int ttc_whichplan; +} TupleTableSlot; + +/* ---------------- + * tuple table data structure + * ---------------- + */ +typedef struct TupleTableData { + int size; /* size of the table */ + int next; /* next available slot number */ + TupleTableSlot *array; /* array of TupleTableSlot's */ +} TupleTableData; + +typedef TupleTableData *TupleTable; + +/* + tuple table macros are all excised from the system now + see executor.h for decls of functions defined in execTuples.c + + - jolly +*/ + +#endif /* TUPTABLE_H */ diff --git a/src/backend/include/Makefile.inc b/src/backend/include/Makefile.inc new file mode 100644 index 00000000000..b27b9fb009d --- /dev/null +++ b/src/backend/include/Makefile.inc @@ -0,0 +1,16 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# this makefile is only use for collecting HEADERS +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/include/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ +# +#------------------------------------------------------------------------- + +VPATH:= $(VPATH):$(CURDIR)/include + +HEADERS+= c.h libpq-fe.h miscadmin.h postgres.h diff --git a/src/backend/include/c.h b/src/backend/include/c.h new file mode 100644 index 00000000000..3b4ea8b207f --- /dev/null +++ b/src/backend/include/c.h @@ -0,0 +1,768 @@ +/*------------------------------------------------------------------------- + * + * c.h-- + * Fundamental C definitions. This is included by every .c file in + * postgres. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: c.h,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * TABLE OF CONTENTS + * + * When adding stuff to this file, please try and put stuff + * into the relevant section, or add new sections as appropriate. + * + * section description + * ------- ------------------------------------------------ + * 1) bool, true, false, TRUE, FALSE + * 2) __STDC__, non-ansi C definitions: + * Pointer typedef, NULL + * cpp magic macros + * type prefixes: const, signed, volatile, inline + * 3) standard system types + * 4) datum type + * 5) IsValid macros for system types + * 6) offsetof, lengthof, endof + * 7) exception handling definitions, Assert, Trap, etc macros + * 8) Min, Max, Abs macros + * 9) externs + * 10) Berkeley-specific defs + * 11) system-specific hacks + * + * NOTES + * + * This file is MACHINE AND COMPILER dependent!!! (For now.) + * + * ---------------------------------------------------------------- + */ +#ifndef C_H +#define C_H + +/* ---------------------------------------------------------------- + * Section 1: bool, true, false, TRUE, FALSE + * ---------------------------------------------------------------- + */ +/* + * bool -- + * Boolean value, either true or false. 
+ * + */ +#define false ((char) 0) +#define true ((char) 1) +typedef char bool; +typedef bool *BoolPtr; + +#ifndef TRUE +#define TRUE 1 +#endif /* TRUE */ + +#ifndef FALSE +#define FALSE 0 +#endif /* FALSE */ + +/* ---------------------------------------------------------------- + * Section 2: __STDC__, non-ansi C definitions: + * + * cpp magic macros + * Pointer typedef, NULL + * type prefixes: const, signed, volatile, inline + * ---------------------------------------------------------------- + */ + +#ifdef __STDC__ /* ANSI C */ + +/* + * Pointer -- + * Variable holding address of any memory resident object. + */ + +/* + * XXX Pointer arithmetic is done with this, so it can't be void * + * under "true" ANSI compilers. + */ +typedef char *Pointer; + +#ifndef NULL +/* + * NULL -- + * Null pointer. + */ +#define NULL ((void *) 0) +#endif /* !defined(NULL) */ + +#define HAVE_ANSI_CPP /* all ANSI C compilers must have this! */ +#if defined(NEED_STD_HDRS) +#undef NEED_STD_HDRS /* all ANSI systems must have stddef/stdlib */ +#endif /* NEED_STD_HDRS */ + +#else /* !defined(__STDC__) */ /* NOT ANSI C */ + +/* + * Pointer -- + * Variable containing address of any memory resident object. + */ +typedef char *Pointer; + +#ifndef NULL +/* + * NULL -- + * Null pointer. + */ +#define NULL 0 +#endif /* !defined(NULL) */ + +/* + * const -- + * Type modifier. Identifies read only variables. + * + * Example: + * extern const Version RomVersion; + */ +#define const /* const */ + +/* + * signed -- + * Type modifier. Identifies signed integral types. + */ +#define signed /* signed */ + +/* + * volatile -- + * Type modifier. Identifies variables which may change in ways not + * noticeable by the compiler, e.g. via asynchronous interrupts. + * + * Example: + * extern volatile unsigned int NumberOfInterrupts; + */ +#define volatile /* volatile */ + +#endif /* !defined(__STDC__) */ /* NOT ANSI C */ + +/* + * CppAsString -- + * Convert the argument to a string, using the C preprocessor. + * CppConcat -- + * Concatenate two arguments together, using the C preprocessor. + */ +#if defined(HAVE_ANSI_CPP) + +#define CppAsString(identifier) #identifier +#define CppConcat(x, y) x##y +#define CppConcat0(x, y) x##y +#define CppConcat1(x, y) x##y +#define CppConcat2(x, y) x##y +#define CppConcat3(x, y) x##y +#define CppConcat4(x, y) x##y + +#else /* !HAVE_ANSI_CPP */ + +#define CppAsString(identifier) "identifier" + +/* + * CppIdentity -- On Reiser based cpp's this is used to concatenate + * two tokens. That is + * CppIdentity(A)B ==> AB + * We renamed it to _private_CppIdentity because it should not + * be referenced outside this file. On other cpp's it + * produces A B. + */ +#define _priv_CppIdentity(x)x +#define CppConcat(x, y) _priv_CppIdentity(x)y +#define CppConcat0(x, y) _priv_CppIdentity(x)y +#define CppConcat1(x, y) _priv_CppIdentity(x)y +#define CppConcat2(x, y) _priv_CppIdentity(x)y +#define CppConcat3(x, y) _priv_CppIdentity(x)y +#define CppConcat4(x, y) _priv_CppIdentity(x)y + +#endif /* !HAVE_ANSI_CPP */ + +#ifndef __GNUC__ /* GNU cc */ +# define inline +#endif + +#if defined(NEED_STD_HDRS) +/* + * You're doomed. We've removed almost all of our own C library + * extern declarations because they conflict on the different + * systems. You'll have to write your own stdlib.h. 
+ */ +#include "stdlib.h" +#else /* NEED_STD_HDRS */ +#include +#include +#endif /* NEED_STD_HDRS */ + +/* ---------------------------------------------------------------- + * Section 3: standard system types + * ---------------------------------------------------------------- + */ + +/* + * intN -- + * Signed integer, AT LEAST N BITS IN SIZE, + * used for numerical computations. + */ +typedef signed char int8; /* >= 8 bits */ +typedef signed short int16; /* >= 16 bits */ +typedef signed int int32; /* >= 32 bits */ + +/* + * uintN -- + * Unsigned integer, AT LEAST N BITS IN SIZE, + * used for numerical computations. + */ +typedef unsigned char uint8; /* >= 8 bits */ +typedef unsigned short uint16; /* >= 16 bits */ +typedef unsigned int uint32; /* >= 32 bits */ + +/* + * floatN -- + * Floating point number, AT LEAST N BITS IN SIZE, + * used for numerical computations. + * + * Since sizeof(floatN) may be > sizeof(char *), always pass + * floatN by reference. + */ +typedef float float32data; +typedef double float64data; +typedef float *float32; +typedef double *float64; + +/* + * boolN -- + * Boolean value, AT LEAST N BITS IN SIZE. + */ +typedef uint8 bool8; /* >= 8 bits */ +typedef uint16 bool16; /* >= 16 bits */ +typedef uint32 bool32; /* >= 32 bits */ + +/* + * bitsN -- + * Unit of bitwise operation, AT LEAST N BITS IN SIZE. + */ +typedef uint8 bits8; /* >= 8 bits */ +typedef uint16 bits16; /* >= 16 bits */ +typedef uint32 bits32; /* >= 32 bits */ + +/* + * wordN -- + * Unit of storage, AT LEAST N BITS IN SIZE, + * used to fetch/store data. + */ +typedef uint8 word8; /* >= 8 bits */ +typedef uint16 word16; /* >= 16 bits */ +typedef uint32 word32; /* >= 32 bits */ + +/* + * Size -- + * Size of any memory resident object, as returned by sizeof. + */ +typedef unsigned int Size; + +/* + * Index -- + * Index into any memory resident array. + * + * Note: + * Indices are non negative. + */ +typedef unsigned int Index; + +#define MAXDIM 6 +typedef struct { + int indx[MAXDIM]; +} IntArray; + +/* + * Offset -- + * Offset into any memory resident array. + * + * Note: + * This differs from an Index in that an Index is always + * non negative, whereas Offset may be negative. + */ +typedef signed int Offset; + +/* ---------------------------------------------------------------- + * Section 4: datum type + support macros + * ---------------------------------------------------------------- + */ +/* + * datum.h -- + * POSTGRES abstract data type datum representation definitions. + * + * Note: + * + * Port Notes: + * Postgres makes the following assumption about machines: + * + * sizeof(Datum) == sizeof(long) >= sizeof(void *) >= 4 + * + * Postgres also assumes that + * + * sizeof(char) == 1 + * + * and that + * + * sizeof(short) == 2 + * + * If your machine meets these requirements, Datums should also be checked + * to see if the positioning is correct. + * + * This file is MACHINE AND COMPILER dependent!!! + */ + +typedef unsigned long Datum; /* XXX sizeof(long) >= sizeof(void *) */ +typedef Datum * DatumPtr; + +#define GET_1_BYTE(datum) (((Datum) (datum)) & 0x000000ff) +#define GET_2_BYTES(datum) (((Datum) (datum)) & 0x0000ffff) +#define GET_4_BYTES(datum) (((Datum) (datum)) & 0xffffffff) +#define SET_1_BYTE(value) (((Datum) (value)) & 0x000000ff) +#define SET_2_BYTES(value) (((Datum) (value)) & 0x0000ffff) +#define SET_4_BYTES(value) (((Datum) (value)) & 0xffffffff) + +/* + * DatumGetChar -- + * Returns character value of a datum. 
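+ * A minimal round-trip sketch using the macros defined just below
+ * (pass-by-value datum types such as char fit entirely in the Datum):
+ *
+ * Datum d = CharGetDatum('x');
+ * char c = DatumGetChar(d); -- c is 'x' again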
+ */ + +#define DatumGetChar(X) ((char) GET_1_BYTE(X)) + +/* + * CharGetDatum -- + * Returns datum representation for a character. + */ + +#define CharGetDatum(X) ((Datum) SET_1_BYTE(X)) + +/* + * Int8GetDatum -- + * Returns datum representation for an 8-bit integer. + */ + +#define Int8GetDatum(X) ((Datum) SET_1_BYTE(X)) + +/* + * DatumGetUInt8 -- + * Returns 8-bit unsigned integer value of a datum. + */ + +#define DatumGetUInt8(X) ((uint8) GET_1_BYTE(X)) + +/* + * UInt8GetDatum -- + * Returns datum representation for an 8-bit unsigned integer. + */ + +#define UInt8GetDatum(X) ((Datum) SET_1_BYTE(X)) + +/* + * DatumGetInt16 -- + * Returns 16-bit integer value of a datum. + */ + +#define DatumGetInt16(X) ((int16) GET_2_BYTES(X)) + +/* + * Int16GetDatum -- + * Returns datum representation for a 16-bit integer. + */ + +#define Int16GetDatum(X) ((Datum) SET_2_BYTES(X)) + +/* + * DatumGetUInt16 -- + * Returns 16-bit unsigned integer value of a datum. + */ + +#define DatumGetUInt16(X) ((uint16) GET_2_BYTES(X)) + +/* + * UInt16GetDatum -- + * Returns datum representation for a 16-bit unsigned integer. + */ + +#define UInt16GetDatum(X) ((Datum) SET_2_BYTES(X)) + +/* + * DatumGetInt32 -- + * Returns 32-bit integer value of a datum. + */ + +#define DatumGetInt32(X) ((int32) GET_4_BYTES(X)) + +/* + * Int32GetDatum -- + * Returns datum representation for a 32-bit integer. + */ + +#define Int32GetDatum(X) ((Datum) SET_4_BYTES(X)) + +/* + * DatumGetUInt32 -- + * Returns 32-bit unsigned integer value of a datum. + */ + +#define DatumGetUInt32(X) ((uint32) GET_4_BYTES(X)) + +/* + * UInt32GetDatum -- + * Returns datum representation for a 32-bit unsigned integer. + */ + +#define UInt32GetDatum(X) ((Datum) SET_4_BYTES(X)) + +/* + * DatumGetObjectId -- + * Returns object identifier value of a datum. + */ + +#define DatumGetObjectId(X) ((Oid) GET_4_BYTES(X)) + +/* + * ObjectIdGetDatum -- + * Returns datum representation for an object identifier. + */ + +#define ObjectIdGetDatum(X) ((Datum) SET_4_BYTES(X)) + +/* + * DatumGetPointer -- + * Returns pointer value of a datum. + */ + +#define DatumGetPointer(X) ((Pointer) X) + +/* + * PointerGetDatum -- + * Returns datum representation for a pointer. + */ + +#define PointerGetDatum(X) ((Datum) X) + +/* + * DatumGetName -- + * Returns name value of a datum. + */ + +#define DatumGetName(X) ((Name) DatumGetPointer((Datum) X)) + +/* + * NameGetDatum -- + * Returns datum representation for a name. + */ + +#define NameGetDatum(X) PointerGetDatum((Pointer) X) + + +/* + * DatumGetFloat32 -- + * Returns 32-bit floating point value of a datum. + * This is really a pointer, of course. + */ + +#define DatumGetFloat32(X) ((float32) DatumGetPointer((Datum) X)) + +/* + * Float32GetDatum -- + * Returns datum representation for a 32-bit floating point number. + * This is really a pointer, of course. + */ + +#define Float32GetDatum(X) PointerGetDatum((Pointer) X) + +/* + * DatumGetFloat64 -- + * Returns 64-bit floating point value of a datum. + * This is really a pointer, of course. + */ + +#define DatumGetFloat64(X) ((float64) DatumGetPointer(X)) + +/* + * Float64GetDatum -- + * Returns datum representation for a 64-bit floating point number. + * This is really a pointer, of course. 
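+ * A sketch of the pass-by-reference convention (palloc is assumed here
+ * as the allocator, but any storage with a suitable lifetime will do):
+ *
+ * float64 x = (float64) palloc(sizeof(float64data));
+ * *x = 3.14159;
+ * Datum d = Float64GetDatum(x); -- d carries the pointer, not the double's bits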
+ */ + +#define Float64GetDatum(X) PointerGetDatum((Pointer) X) + +/* ---------------------------------------------------------------- + * Section 5: IsValid macros for system types + * ---------------------------------------------------------------- + */ +/* + * BoolIsValid -- + * True iff bool is valid. + */ +#define BoolIsValid(boolean) ((boolean) == false || (boolean) == true) + +/* + * PointerIsValid -- + * True iff pointer is valid. + */ +#define PointerIsValid(pointer) (bool)((void*)(pointer) != NULL) + +/* + * PointerIsInBounds -- + * True iff pointer is within given bounds. + * + * Note: + * Assumes the bounded interval to be [min,max), + * i.e. closed on the left and open on the right. + */ +#define PointerIsInBounds(pointer, min, max) \ + ((min) <= (pointer) && (pointer) < (max)) + +/* + * PointerIsAligned -- + * True iff pointer is properly aligned to point to the given type. + */ +#define PointerIsAligned(pointer, type) \ + (((long)(pointer) % (sizeof (type))) == 0) + +/* ---------------------------------------------------------------- + * Section 6: offsetof, lengthof, endof + * ---------------------------------------------------------------- + */ +/* + * offsetof -- + * Offset of a structure/union field within that structure/union. + * + * XXX This is supposed to be part of stddef.h, but isn't on + * some systems (like SunOS 4). + */ +#ifndef offsetof +#define offsetof(type, field) ((long) &((type *)0)->field) +#endif /* offsetof */ + +/* + * lengthof -- + * Number of elements in an array. + */ +#define lengthof(array) (sizeof (array) / sizeof ((array)[0])) + +/* + * endof -- + * Address of the element one past the last in an array. + */ +#define endof(array) (&array[lengthof(array)]) + +/* ---------------------------------------------------------------- + * Section 7: exception handling definitions + * Assert, Trap, etc macros + * ---------------------------------------------------------------- + */ +/* + * Exception Handling definitions + */ + +typedef char *ExcMessage; +typedef struct Exception { + ExcMessage message; +} Exception; + +/* + * NO_ASSERT_CHECKING, if defined, turns off all the assertions. + * - plai 9/5/90 + * + * It should _NOT_ be undef'ed in releases or in benchmark copies + * + * #undef NO_ASSERT_CHECKING + */ + +/* + * Trap -- + * Generates an exception if the given condition is true. + * + */ +#define Trap(condition, exception) \ + { if (condition) \ + ExceptionalCondition(CppAsString(condition), &(exception), \ + (char*)NULL, __FILE__, __LINE__); } + +/* + * TrapMacro is the same as Trap but it's intended for use in macros: + * + * #define foo(x) (AssertM(x != 0) && bar(x)) + * + * Isn't CPP fun? + */ +#define TrapMacro(condition, exception) \ + ((bool) ((! condition) || \ + (ExceptionalCondition(CppAsString(condition), \ + &(exception), \ + (char*) NULL, __FILE__, __LINE__)))) + +#ifdef NO_ASSERT_CHECKING +#define Assert(condition) +#define AssertMacro(condition) true +#define AssertArg(condition) +#define AssertState(condition) +#else +#define Assert(condition) \ + Trap(!(condition), FailedAssertion) + +#define AssertMacro(condition) \ + TrapMacro(!(condition), FailedAssertion) + +#define AssertArg(condition) \ + Trap(!(condition), BadArg) + +#define AssertState(condition) \ + Trap(!(condition), BadState) + +#endif /* NO_ASSERT_CHECKING */ + +/* + * LogTrap -- + * Generates an exception with a message if the given condition is true. 
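+ * Since the macro body reads "form printArgs", printArgs is written as a
+ * parenthesized argument list for form(). A hypothetical use of the
+ * LogAssert() wrapper defined below:
+ *
+ * LogAssert(nkeys > 0, ("bogus nkeys: %d", nkeys));
+ *
+ * which calls form("bogus nkeys: %d", nkeys) if the assertion fails.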
+ * + */ +#define LogTrap(condition, exception, printArgs) \ + { if (condition) \ + ExceptionalCondition(CppAsString(condition), &(exception), \ + form printArgs, __FILE__, __LINE__); } + +/* + * LogTrapMacro is the same as LogTrap but it's intended for use in macros: + * + * #define foo(x) (LogAssertMacro(x != 0, "yow!") && bar(x)) + */ +#define LogTrapMacro(condition, exception, printArgs) \ + ((bool) ((! condition) || \ + (ExceptionalCondition(CppAsString(condition), \ + &(exception), \ + form printArgs, __FILE__, __LINE__)))) + +#ifdef NO_ASSERT_CHECKING +#define LogAssert(condition, printArgs) +#define LogAssertMacro(condition, printArgs) true +#define LogAssertArg(condition, printArgs) +#define LogAssertState(condition, printArgs) +#else +#define LogAssert(condition, printArgs) \ + LogTrap(!(condition), FailedAssertion, printArgs) + +#define LogAssertMacro(condition, printArgs) \ + LogTrapMacro(!(condition), FailedAssertion, printArgs) + +#define LogAssertArg(condition, printArgs) \ + LogTrap(!(condition), BadArg, printArgs) + +#define LogAssertState(condition, printArgs) \ + LogTrap(!(condition), BadState, printArgs) + +#endif /* NO_ASSERT_CHECKING */ + +/* ---------------------------------------------------------------- + * Section 8: Min, Max, Abs macros + * ---------------------------------------------------------------- + */ +/* + * Max -- + * Return the maximum of two numbers. + */ +#define Max(x, y) ((x) > (y) ? (x) : (y)) + +/* + * Min -- + * Return the minimum of two numbers. + */ +#define Min(x, y) ((x) < (y) ? (x) : (y)) + +/* + * Abs -- + * Return the absolute value of the argument. + */ +#define Abs(x) ((x) >= 0 ? (x) : -(x)) + +/* ---------------------------------------------------------------- + * Section 9: externs + * ---------------------------------------------------------------- + */ + +extern Exception FailedAssertion; +extern Exception BadArg; +extern Exception BadState; + +/* in utils/error/assert.c */ +extern int ExceptionalCondition(char *conditionName, + Exception *exceptionP, char *details, + char *fileName, int lineNumber); + + +/* ---------------- + * form is used by assert and the exception handling stuff + * ---------------- + */ +extern char *form(char *fmt, ...); + + + +/* ---------------------------------------------------------------- + * Section 10: berkeley-specific configuration + * + * this section contains settings which are only relevant to the UC Berkeley + * sites. Other sites can ignore this + * ---------------------------------------------------------------- + */ + +/* ---------------- + * storage managers + * + * These are experimental and are not supported in the code that + * we distribute to other sites. + * ---------------- + */ +#ifdef SEQUOIA +#define MAIN_MEMORY +#endif + + + +/* ---------------------------------------------------------------- + * Section 11: system-specific hacks + * + * This should be limited to things that absolutely have to be + * included in every source file. The changes should be factored + * into a separate file so that changes to one port don't require + * changes to c.h (and everyone recompiling their whole system). 
+ * ---------------------------------------------------------------- + */ + +#if defined(PORTNAME_hpux) +#include "port/hpux/fixade.h" /* for 8.07 unaligned access fixup */ +#endif /* PORTNAME_hpux */ + +#if defined(PORTNAME_sparc) +#define memmove(d, s, l) bcopy(s, d, l) +#endif + +/* These are for things that are one way on Unix and another on NT */ +#ifndef WIN32 +#define NULL_DEV "/dev/null" +#define COPY_CMD "cp" +#define SEP_CHAR '/' +#else +#define NULL_DEV "NUL" +#define COPY_CMD "copy" +#define SEP_CHAR '\\' +#endif /* WIN32 */ + +#if defined(WIN32) +#include "port/win32/nt.h" +#include "port/win32/machine.h" +#endif /* WIN32 */ + +/* ---------------- + * end of c.h + * ---------------- + */ +#endif /* C_H */ diff --git a/src/backend/include/miscadmin.h b/src/backend/include/miscadmin.h new file mode 100644 index 00000000000..85c5699205e --- /dev/null +++ b/src/backend/include/miscadmin.h @@ -0,0 +1,193 @@ +/*------------------------------------------------------------------------- + * + * miscadmin.h-- + * this file contains general postgres administration and initialization + * stuff that used to be spread out between the following files: + * globals.h global variables + * magic.h PG_RELEASE, PG_VERSION, etc defines + * pdir.h directory path crud + * pinit.h postgres initialization + * pmod.h processing modes + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: miscadmin.h,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ + * + * NOTES + * some of the information in this file will be moved to + * other files. + * + *------------------------------------------------------------------------- + */ +#ifndef MISCADMIN_H +#define MISCADMIN_H + +/* ---------------- + * note: was in unix.h This should be moved + * to the .c files. + * ---------------- + */ +#include + +#include "postgres.h" +#include "storage/backendid.h" + +/***************************************************************************** + * globals.h -- * + *****************************************************************************/ + +/* #include "storage/sinval.h" */ + +/* + * from postmaster/postmaster.c + */ +extern int PostmasterMain(int argc, char* argv[]); + +/* + * from utils/init/globals.c + */ +extern int Portfd; +extern int Noversion; /* moved from magic.c */ +extern int MasterPid; /* declared and defined in utils/initglobals.c */ +extern int Quiet; +extern char *DataDir; + +extern char OutputFileName[]; +extern void InitGlobals(); + +/* + * done in storage/backendid.h for now. + * + * extern BackendId MyBackendId; + * extern BackendTag MyBackendTag; + */ +extern bool MyDatabaseIdIsInitialized; +extern Oid MyDatabaseId; +extern bool TransactionInitWasProcessed; + +extern bool IsUnderPostmaster; +extern bool IsPostmaster; + +extern short DebugLvl; + +extern Oid LastOidProcessed; /* for query rewrite */ + +#define MAX_PARSE_BUFFER 8192 + +/* + * default number of buffers in buffer pool + * + */ +#define NDBUFS 64 + +/***************************************************************************** + * magic.h - definitions of the indexes of the magic numbers * + *****************************************************************************/ + +#define PG_RELEASE 5 +#define PG_VERSION 1 +#define PG_VERFILE "PG_VERSION" + +/***************************************************************************** + * pdir.h -- * + * POSTGRES directory path definitions. 
* + *****************************************************************************/ + +/* now in utils/init/miscinit.c */ +extern char *GetDatabasePath(void); +extern char *GetDatabaseName(void); +extern void SetDatabaseName(char *name); +extern void SetDatabasePath(char *path); +extern char *GetPgUserName(void); +extern void SetPgUserName(void); +extern Oid GetUserId(void); +extern void SetUserId(void); +extern char *GetPGHome(void); +extern char *GetPGData(void); +extern int ValidateBackend(char *path); +extern int FindBackend(char *backend, char *argv0); +extern int CheckPathAccess(char *path, char *name, int open_mode); + + +/***************************************************************************** + * pmod.h -- * + * POSTGRES processing mode definitions. * + *****************************************************************************/ +/* + * Description: + * There are four processing modes in POSTGRES. They are NoProcessing + * or "none," BootstrapProcessing or "bootstrap," InitProcessing or + * "initialization," and NormalProcessing or "normal." + * + * If a POSTGRES binary is in normal mode, then all code may be executed + * normally. In the none mode, only bookkeeping code may be called. In + * particular, access method calls may not occur in this mode since the + * execution state is outside a transaction. + * + * The final two processing modes are used during special times. When the + * system state indicates bootstrap processing, transactions are all given + * transaction id "one" and are consequently guarenteed to commit. This mode + * is used during the initial generation of template databases. + * + * Finally, the execution state is in initialization mode until all normal + * initialization is complete. Some code behaves differently when executed in + * this mode to enable system bootstrapping. + */ + +typedef enum ProcessingMode { + NoProcessing, /* "nothing" can be done */ + BootstrapProcessing, /* bootstrap creation of template database */ + InitProcessing, /* initializing system */ + NormalProcessing /* normal processing */ +} ProcessingMode; + + +/***************************************************************************** + * pinit.h -- * + * POSTGRES initialization and cleanup definitions. * + *****************************************************************************/ +/* + * Note: + * XXX AddExitHandler not defined yet. + */ + +typedef int16 ExitStatus; + +#define NormalExitStatus (0) +#define FatalExitStatus (127) +/* XXX are there any other meaningful exit codes? 
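+ * (For illustration only: a backend finishing normally would presumably
+ * report NormalExitStatus, e.g. ExitPostgres(NormalExitStatus), while a
+ * fatal failure path would use FatalExitStatus.)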
*/ + +/* in utils/init/postinit.c */ +extern void InitMyDatabaseId(void); +extern void DoChdirAndInitDatabaseNameAndPath(char *name, char *path); +extern void InitUserid(void); +extern void InitCommunication(void); +extern void InitStdio(void); + +extern bool PostgresIsInitialized; + +extern void InitPostgres(char *name); + +/* in miscinit.c */ +extern void ExitPostgres(ExitStatus status); +extern void AbortPostgres(void); +extern void StatusBackendExit(int status); +extern void StatusPostmasterExit(int status); + +extern bool IsNoProcessingMode(void); +extern bool IsBootstrapProcessingMode(void); +extern bool IsInitProcessingMode(void); +extern bool IsNormalProcessingMode(void); +extern void SetProcessingMode(ProcessingMode mode); +extern ProcessingMode GetProcessingMode(void); + + +/* + * Prototypes for utils/init/magic.c + */ +extern int DatabaseMetaGunkIsConsistent(char database[], char path[]); +extern int ValidPgVersion(char path []); +extern void SetPgVersion(char path []); + +#endif /* MISCADMIN_H */ diff --git a/src/backend/include/postgres.h b/src/backend/include/postgres.h new file mode 100644 index 00000000000..429a25e4ff4 --- /dev/null +++ b/src/backend/include/postgres.h @@ -0,0 +1,224 @@ +/*------------------------------------------------------------------------- + * + * postgres.h-- + * definition of (and support for) postgres system types. + * this file is included by almost every .c in the system + * + * Copyright (c) 1995, Regents of the University of California + * + * $Id: postgres.h,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * NOTES + * this file will eventually contain the definitions for the + * following (and perhaps other) system types: + * + * int2 int4 float4 float8 + * Oid regproc RegProcedure + * aclitem + * struct varlena + * char8 char16 int28 oid8 + * bytea text + * NameData Name + * oidint4 oidint2 oidname + * + * TABLE OF CONTENTS + * 1) simple type definitions + * 2) varlena and array types + * 3) TransactionId and CommandId + * 4) genbki macros used by catalog/pg_xxx.h files + * 5) random SIGNBIT, MAXPGPATH, STATUS macros + * + * ---------------------------------------------------------------- + */ +#ifndef POSTGRES_H +#define POSTGRES_H + +#include "c.h" + +/* ---------------------------------------------------------------- + * Section 1: simple type definitions + * ---------------------------------------------------------------- + */ + +typedef int16 int2; +typedef int32 int4; +typedef float float4; +typedef double float8; + +typedef int4 aclitem; + + +typedef uint32 Oid; +#define InvalidOid 0 +#define OidIsValid(objectId) ((bool) (objectId != InvalidOid)) + +/* unfortunately, both regproc and RegProcedure are used */ +typedef Oid regproc; +typedef Oid RegProcedure; + +/* ptr to func returning (char *) */ +typedef char * ((*func_ptr)()); + + +#define RegProcedureIsValid(p) OidIsValid(p) + +/* ---------------------------------------------------------------- + * Section 2: variable length and array types + * ---------------------------------------------------------------- + */ +/* ---------------- + * struct varlena + * ---------------- + */ +struct varlena { + int32 vl_len; + char vl_dat[1]; +}; + +#define VARSIZE(PTR) (((struct varlena *)(PTR))->vl_len) +#define VARDATA(PTR) (((struct varlena *)(PTR))->vl_dat) +#define VARHDRSZ sizeof(int32) + +typedef struct varlena bytea; +typedef struct varlena text; + +typedef struct char8 { + char data[8]; +} char8; + +/* 
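+ * A sketch of how a variable-length value such as a text is typically
+ * built with the varlena macros above (palloc assumed as the allocator,
+ * s some NUL-terminated source string):
+ *
+ * int len = strlen(s);
+ * text *t = (text *) palloc(VARHDRSZ + len);
+ * VARSIZE(t) = VARHDRSZ + len; -- the length word counts itself
+ * memcpy(VARDATA(t), s, len);
+ *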
---------------- + * char16 + * ---------------- + */ +typedef struct char16 { + char data[16]; +} char16; + +typedef char16 *Char16; + +typedef int2 int28[8]; +typedef Oid oid8[8]; + +/* char16 is distinct from Name. + now, you can truly change the max length of system names + by altering the NAMEDATALEN define below. + don't set the value too high because tuples are still constrained + to be less than 8K +*/ + + /* NAMEDATALEN is the maximum string length (counting terminating null) + of a Name */ +/* defined in Makefile.global */ +/* if you change the value of NAMEDATALEN, you may need to change the + alignment of the 'name' type in pg_type.h */ +#ifndef NAMEDATALEN +#define NAMEDATALEN 16 +#endif /* NAMEDATALEN */ +/* OIDNAMELEN should be NAMEDATALEN + sizeof(Oid) */ +#ifndef OIDNAMELEN +#define OIDNAMELEN 20 +#endif /* OIDNAMELEN */ + +typedef struct nameData { + char data[NAMEDATALEN]; +} NameData; +typedef NameData *Name; + +/* ---------------- + * oidint4 + * + * this is a new system type used by the file interface. + * ---------------- + */ +typedef struct OidInt4Data { + Oid oi_oid; + int32 oi_int4; +} OidInt4Data; + +typedef struct OidInt4Data *OidInt4; + +/* ---------------- + * oidint2 + * + * this is a new system type used to define indices on two attrs. + * ---------------- + */ +typedef struct OidInt2Data { + Oid oi_oid; + int16 oi_int2; +} OidInt2Data; + +typedef struct OidInt2Data *OidInt2; + +/* ---------------- + * oidname + * + * this is a new system type used to define indices on two attrs. + * ---------------- + */ +typedef struct OidNameData { + Oid id; + NameData name; +} OidNameData; + +typedef struct OidNameData *OidName; + +/* ---------------------------------------------------------------- + * Section 3: TransactionId and CommandId + * ---------------------------------------------------------------- + */ + +typedef uint32 TransactionId; +#define InvalidTransactionId 0 +typedef uint16 CommandId; +#define FirstCommandId 0 + +/* ---------------------------------------------------------------- + * Section 4: genbki macros used by the + * catalog/pg_xxx.h files + * ---------------------------------------------------------------- + */ +#define CATALOG(x) \ + typedef struct CppConcat(FormData_,x) + +#define DATA(x) extern int errno +#define DECLARE_INDEX(x) extern int errno + +#define BUILD_INDICES +#define BOOTSTRAP + +#define BKI_BEGIN +#define BKI_END + +/* ---------------------------------------------------------------- + * Section 5: random stuff + * SIGNBIT, MAXPGPATH, STATUS... + * ---------------------------------------------------------------- + */ + +/* msb for int/unsigned */ +#define SIGNBIT (0x8000) + +/* msb for char */ +#define CSIGNBIT (1 << 7) + +/* ---------------- + * global variables which should probably go someplace else. 
+ * ---------------- + */ +#define MAXPGPATH 128 + +#define STATUS_OK (0) +#define STATUS_ERROR (-1) +#define STATUS_NOT_FOUND (-2) +#define STATUS_INVALID (-3) +#define STATUS_UNCATALOGUED (-4) +#define STATUS_REPLACED (-5) +#define STATUS_NOT_DONE (-6) +#define STATUS_BAD_PACKET (-7) +#define STATUS_FOUND (1) + +#endif /* POSTGRES_H */ diff --git a/src/backend/lib/Makefile.inc b/src/backend/lib/Makefile.inc new file mode 100644 index 00000000000..c3a46d546ce --- /dev/null +++ b/src/backend/lib/Makefile.inc @@ -0,0 +1,20 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the lib module (miscellaneous stuff) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/lib/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ +# +#------------------------------------------------------------------------- + +VPATH:=$(VPATH):$(CURDIR)/lib + + +SRCS_LIB= bit.c fstack.c hasht.c lispsort.c qsort.c stringinfo.c dllist.c + +HEADERS+= fstack.h hasht.h lispsort.h qsort.h stringinfo.h dllist.h + diff --git a/src/backend/lib/bit.c b/src/backend/lib/bit.c new file mode 100644 index 00000000000..9aa12b48502 --- /dev/null +++ b/src/backend/lib/bit.c @@ -0,0 +1,45 @@ +/*------------------------------------------------------------------------- + * + * bit.c-- + * Standard bit array code. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/lib/Attic/bit.c,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +/* + * utils/memutils.h contains declarations of the functions in this file + */ +#include "utils/memutils.h" + +void +BitArraySetBit(BitArray bitArray, BitIndex bitIndex) +{ + bitArray[bitIndex/BitsPerByte] + |= (1 << (BitsPerByte - (bitIndex % BitsPerByte) - 1)); + return; +} + +void +BitArrayClearBit(BitArray bitArray, BitIndex bitIndex) +{ + bitArray[bitIndex/BitsPerByte] + &= ~(1 << (BitsPerByte - (bitIndex % BitsPerByte) - 1)); + return; +} + +bool +BitArrayBitIsSet(BitArray bitArray, BitIndex bitIndex) +{ + return( (bool) (((bitArray[bitIndex / BitsPerByte] & + (1 << (BitsPerByte - (bitIndex % BitsPerByte) + - 1) + ) + ) != 0 ) ? 
1 : 0) ); +} + diff --git a/src/backend/lib/dllist.c b/src/backend/lib/dllist.c new file mode 100644 index 00000000000..92526632c9f --- /dev/null +++ b/src/backend/lib/dllist.c @@ -0,0 +1,204 @@ +/*------------------------------------------------------------------------- + * + * dllist.c-- + * this is a simple doubly linked list implementation + * replaces the old simplelists stuff + * the elements of the lists are void* + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/lib/dllist.c,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +#include "c.h" +#include "lib/dllist.h" + +Dllist* +DLNewList() +{ + Dllist* l; + + l = malloc(sizeof(Dllist)); + l->dll_head = 0; + l->dll_tail = 0; + + return l; +} + + /* free up a list and all the nodes in it*/ +void +DLFreeList(Dllist* l) +{ + Dlelem* curr; + + while ( (curr = DLRemHead(l)) != 0) + free(curr); + + free(l); +} + +Dlelem* +DLNewElem(void* val) +{ + Dlelem* e; + e = malloc(sizeof(Dlelem)); + e->dle_next = 0; + e->dle_prev = 0; + e->dle_val = val; + e->dle_list = 0; + return e; +} + +void +DLFreeElem(Dlelem* e) +{ + free(e); +} + +Dlelem* +DLGetHead(Dllist* l) +{ + return (l ? l->dll_head : 0); +} + +/* get the value stored in the first element */ +void* +DLGetHeadVal(Dllist* l) +{ + Dlelem* e = DLGetHead(l); + + return (e ? e->dle_val : 0); +} + +Dlelem* +DLGetTail(Dllist* l) +{ + return (l ? l->dll_tail : 0); +} + +/* get the value stored in the first element */ +void* +DLGetTailVal(Dllist* l) +{ + Dlelem* e = DLGetTail(l); + + return (e ? e->dle_val : 0); +} + + +Dlelem* +DLGetPred(Dlelem* e) /* get predecessor */ +{ + return (e ? e->dle_prev : 0); +} + +Dlelem* +DLGetSucc(Dlelem* e) /* get successor */ +{ + return (e ? 
e->dle_next : 0); +} + +void +DLRemove(Dlelem* e) +{ + Dllist* l; + + if (e->dle_prev) + e->dle_prev->dle_next = e->dle_next; + if (e->dle_next) + e->dle_next->dle_prev = e->dle_prev; + + /* check to see if we're removing the head or tail */ + l = e->dle_list; + if (e == l->dll_head) + DLRemHead(l); + if (e == l->dll_tail) + DLRemTail(l); + +} + +void +DLAddHead(Dllist* l, Dlelem* e) +{ + e->dle_list = l; + + if (l->dll_head) { + l->dll_head->dle_prev = e; + e->dle_next = l->dll_head; + } + e->dle_prev = 0; + l->dll_head = e; + + if (l->dll_tail == 0) /* if this is first element added */ + l->dll_tail = l->dll_head; +} + +void +DLAddTail(Dllist* l, Dlelem* e) +{ + e->dle_list = l; + + if (l->dll_tail) { + l->dll_tail->dle_next = e; + e->dle_prev = l->dll_tail; + } + e->dle_next = 0; + l->dll_tail = e; + + if (l->dll_head == 0) /* if this is first element added */ + l->dll_head = l->dll_tail; +} + +Dlelem* +DLRemHead(Dllist* l) +{ + /* remove and return the head */ + Dlelem* result; + + if (l->dll_head == 0) + return 0; + + result = l->dll_head; + if (l->dll_head->dle_next) { + l->dll_head->dle_next->dle_prev = 0; + } + + l->dll_head = l->dll_head->dle_next; + + result->dle_next = 0; + result->dle_list = 0; + + if (result == l->dll_tail) /* if the head is also the tail */ + l->dll_tail = 0; + + return result; +} + +Dlelem* +DLRemTail(Dllist* l) +{ + /* remove and return the tail */ + Dlelem* result; + + if (l->dll_tail == 0 ) + return 0; + + result = l->dll_tail; + if (l->dll_tail->dle_prev) { + l->dll_tail->dle_prev->dle_next = 0; + } + l->dll_tail = l->dll_tail->dle_prev; + + result->dle_prev = 0; + result->dle_list = 0; + + if (result == l->dll_head) /* if the tail is also the head */ + l->dll_head = 0; + + return result; +} + diff --git a/src/backend/lib/dllist.h b/src/backend/lib/dllist.h new file mode 100644 index 00000000000..cd9ac42a12f --- /dev/null +++ b/src/backend/lib/dllist.h @@ -0,0 +1,72 @@ +/*------------------------------------------------------------------------- + * + * dllist.h-- + * simple doubly linked list primitives + * the elements of the list are void* so the lists can contain + * anything + * Dlelem can only be in one list at a time + * + * + * Here's a small example of how to use Dllist's : + * + * Dllist *lst; + * Dlelem *elt; + * void *in_stuff; -- stuff to stick in the list + * void *out_stuff + * + * lst = DLNewList(); -- make a new dllist + * DLAddHead(lst, DLNewElem(in_stuff)); -- add a new element to the list + * with in_stuff as the value + * ... 
+ * elt = DLGetHead(lst); -- retrieve the head element + * out_stuff = (void*)DLE_VAL(elt); -- get the stuff out + * DLRemove(elt); -- removes the element from its list + * DLFreeElem(elt); -- free the element since we don't + * use it anymore + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: dllist.h,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +#ifndef DLLIST_H +#define DLLIST_H + +#include "c.h" + +struct Dllist; +struct Dlelem; + +typedef struct Dlelem { + struct Dlelem *dle_next; /* next element */ + struct Dlelem *dle_prev; /* previous element */ + void *dle_val; /* value of the element */ + struct Dllist *dle_list; /* what list this element is in */ +} Dlelem; + +typedef struct Dllist { + Dlelem *dll_head; + Dlelem *dll_tail; +} Dllist; + +extern Dllist* DLNewList(); /* initialize a new list */ +extern void DLFreeList(Dllist*); /* free up a list and all the nodes in it*/ +extern Dlelem* DLNewElem(void* val); +extern void DLFreeElem(Dlelem*); +extern Dlelem* DLGetHead(Dllist*); +extern Dlelem* DLGetTail(Dllist*); +extern void* DLGetHeadVal(Dllist*); +extern void* DLGetTailVal(Dllist*); +extern Dlelem* DLGetPred(Dlelem*); /* get predecessor */ +extern Dlelem* DLGetSucc(Dlelem*); /* get successor */ +extern void DLRemove(Dlelem*); /* removes node from list*/ +extern void DLAddHead(Dllist* list, Dlelem* node); +extern void DLAddTail(Dllist* list, Dlelem* node); +extern Dlelem* DLRemHead(Dllist* list); /* remove and return the head */ +extern Dlelem* DLRemTail(Dllist* list); /* remove and return the tail */ + +#define DLE_VAL(x) (x->dle_val) + +#endif /* DLLIST_H */ diff --git a/src/backend/lib/fstack.c b/src/backend/lib/fstack.c new file mode 100644 index 00000000000..541767183d8 --- /dev/null +++ b/src/backend/lib/fstack.c @@ -0,0 +1,153 @@ +/*------------------------------------------------------------------------- + * + * fstack.c-- + * Fixed format stack definitions. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/lib/Attic/fstack.c,v 1.1.1.1 1996/07/09 06:21:28 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "c.h" +#include "lib/fstack.h" + +/* + * Internal function definitions + */ + +/* + * FixedItemIsValid -- + * True iff item is valid. + */ +#define FixedItemIsValid(item) PointerIsValid(item) + +/* + * FixedStackGetItemBase -- + * Returns base of enclosing structure. + */ +#define FixedStackGetItemBase(stack, item) \ + ((Pointer)((char *)(item) - (stack)->offset)) + +/* + * FixedStackGetItem -- + * Returns item of given pointer to enclosing structure. + */ +#define FixedStackGetItem(stack, pointer) \ + ((FixedItem)((char *)(pointer) + (stack)->offset)) + +/* + * External functions + */ + +/* + * FixedStackIsValid -- + * True iff stack is valid. 
+ */ +static bool +FixedStackIsValid(FixedStack stack) +{ + return ((bool)PointerIsValid(stack)); +} + + +void +FixedStackInit(FixedStack stack, Offset offset) +{ + AssertArg(PointerIsValid(stack)); + + stack->top = NULL; + stack->offset = offset; +} + +Pointer +FixedStackPop(FixedStack stack) +{ + Pointer pointer; + + AssertArg(FixedStackIsValid(stack)); + + if (!PointerIsValid(stack->top)) { + return (NULL); + } + + pointer = FixedStackGetItemBase(stack, stack->top); + stack->top = stack->top->next; + + return (pointer); +} + +void +FixedStackPush(FixedStack stack, Pointer pointer) +{ + FixedItem item = FixedStackGetItem(stack, pointer); + + AssertArg(FixedStackIsValid(stack)); + AssertArg(PointerIsValid(pointer)); + + item->next = stack->top; + stack->top = item; +} + + +/* + * FixedStackContains -- + * True iff ordered stack contains given element. + * + * Note: + * This is inefficient. It is intended for debugging use only. + * + * Exceptions: + * BadArg if stack is invalid. + * BadArg if pointer is invalid. + */ +static bool +FixedStackContains(FixedStack stack, Pointer pointer) +{ + FixedItem next; + FixedItem item; + + AssertArg(FixedStackIsValid(stack)); + AssertArg(PointerIsValid(pointer)); + + item = FixedStackGetItem(stack, pointer); + + for (next = stack->top; FixedItemIsValid(next); next = next->next) { + if (next == item) { + return (true); + } + } + return (false); +} + +Pointer +FixedStackGetTop(FixedStack stack) +{ + AssertArg(FixedStackIsValid(stack)); + + if (!PointerIsValid(stack->top)) { + return (NULL); + } + + return (FixedStackGetItemBase(stack, stack->top)); +} + +Pointer +FixedStackGetNext(FixedStack stack, Pointer pointer) +{ + FixedItem item; + + /* AssertArg(FixedStackIsValid(stack)); */ + /* AssertArg(PointerIsValid(pointer)); */ + AssertArg(FixedStackContains(stack, pointer)); + + item = FixedStackGetItem(stack, pointer)->next; + + if (!PointerIsValid(item)) { + return (NULL); + } + + return(FixedStackGetItemBase(stack, item)); +} diff --git a/src/backend/lib/fstack.h b/src/backend/lib/fstack.h new file mode 100644 index 00000000000..b0b1df00d83 --- /dev/null +++ b/src/backend/lib/fstack.h @@ -0,0 +1,113 @@ +/*------------------------------------------------------------------------- + * + * fstack.h-- + * Fixed format stack definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: fstack.h,v 1.1.1.1 1996/07/09 06:21:29 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * Note: + * Fixed format stacks assist in the construction of FIFO stacks of + * fixed format structures. Structures which are to be stackable + * should contain a FixedItemData component. A stack is initilized + * with the offset of the FixedItemData component of the structure + * it will hold. By doing so, push and pop operations are simplified + * for the callers. All references to stackable items are pointers + * to the base of the structure instead of pointers to the + * FixedItemData component. + * + */ +#ifndef FSTACK_H +#define FSTACK_H + +#include "c.h" + +/* + * FixedItem -- + * Fixed format stackable item chain component. + * + * Note: + * Structures must contain one FixedItemData component per stack in + * which it will be an item. + */ +typedef struct FixedItemData FixedItemData; +typedef FixedItemData *FixedItem; + +struct FixedItemData { + FixedItem next; /* next item or NULL */ +}; + +/* + * FixedStack -- + * Fixed format stack. 
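+ * A small usage sketch (MyNodeData and its fields are hypothetical; the
+ * real interface is just the functions declared in this file):
+ *
+ * typedef struct MyNodeData {
+ * int value;
+ * FixedItemData stackLink; -- embedded chain component
+ * } MyNodeData;
+ *
+ * FixedStackData stack;
+ * MyNodeData node;
+ *
+ * FixedStackInit(&stack, offsetof(MyNodeData, stackLink));
+ * FixedStackPush(&stack, (Pointer) &node);
+ * ... (MyNodeData *) FixedStackPop(&stack) -- returns &node
+ *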
+ */ +typedef struct FixedStackData { + FixedItem top; /* Top item on the stack or NULL */ + Offset offset; /* Offset from struct base to item */ + /* this could be signed short int! */ +} FixedStackData; + +typedef FixedStackData *FixedStack; + +/* + * FixedStackInit -- + * Iniitializes stack for structures with given fixed component offset. + * + * Exceptions: + * BadArg if stack is invalid pointer. + */ +extern void FixedStackInit(FixedStack stack, Offset offset); + +/* + * FixedStackPop -- + * Returns pointer to top structure on stack or NULL if empty stack. + * + * Exceptions: + * BadArg if stack is invalid. + */ +Pointer FixedStackPop(FixedStack stack); + +/* + * FixedStackPush -- + * Places structure associated with pointer onto top of stack. + * + * Exceptions: + * BadArg if stack is invalid. + * BadArg if pointer is invalid. + */ +extern void FixedStackPush(FixedStack stack, Pointer pointer); + +/* + * FixedStackGetTop -- + * Returns pointer to top structure of a stack. This item is not poped. + * + * Note: + * This is not part of the normal stack interface. It is intended for + * debugging use only. + * + * Exceptions: + * BadArg if stack is invalid. + */ +extern Pointer FixedStackGetTop(FixedStack stack); + +/* + * FixedStackGetNext -- + * Returns pointer to next structure after pointer of a stack. + * + * Note: + * This is not part of the normal stack interface. It is intended for + * debugging use only. + * + * Exceptions: + * BadArg if stack is invalid. + * BadArg if pointer is invalid. + * BadArg if stack does not contain pointer. + */ +extern Pointer FixedStackGetNext(FixedStack stack, Pointer pointer); + +#endif /* FSTACK_H */ diff --git a/src/backend/lib/hasht.c b/src/backend/lib/hasht.c new file mode 100644 index 00000000000..5487bed44bc --- /dev/null +++ b/src/backend/lib/hasht.c @@ -0,0 +1,47 @@ +/*------------------------------------------------------------------------- + * + * hasht.c-- + * hash table related functions that are not directly supported + * by the hashing packages under utils/hash. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/lib/Attic/hasht.c,v 1.1.1.1 1996/07/09 06:21:29 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "c.h" +#include "utils/memutils.h" +#include "utils/elog.h" +#include "utils/hsearch.h" +#include "lib/hasht.h" + +/* ----------------------------------- + * HashTableWalk + * + * call function on every element in hashtable + * one extra argument, arg may be supplied + * ----------------------------------- + */ +void +HashTableWalk(HTAB *hashtable, HashtFunc function, int arg) +{ + long *hashent; + long *data; + int keysize; + + keysize = hashtable->hctl->keysize; + (void)hash_seq((HTAB *)NULL); + while ((hashent = hash_seq(hashtable)) != (long *) TRUE) { + if (hashent == NULL) + elog(FATAL, "error in HashTableWalk."); + /* + * XXX the corresponding hash table insertion does NOT + * LONGALIGN -- make sure the keysize is ok + */ + data = (long *) LONGALIGN((char*) hashent + keysize); + (*function)(data, arg); + } +} diff --git a/src/backend/lib/hasht.h b/src/backend/lib/hasht.h new file mode 100644 index 00000000000..543c8c95d84 --- /dev/null +++ b/src/backend/lib/hasht.h @@ -0,0 +1,23 @@ +/*------------------------------------------------------------------------- + * + * hasht.h-- + * hash table related functions that are not directly supported + * under utils/hash. 
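+ * For example, HashTableWalk() applies a caller-supplied function to every
+ * entry in an HTAB; a hypothetical walker (names are illustrative):
+ *
+ * void MyWalker(long *data, int arg) { ... inspect one entry ... }
+ *
+ * HashTableWalk(hashtable, (HashtFunc) MyWalker, 0);
+ *
+ * (data points just past the hash key of each entry -- see hasht.c)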
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: hasht.h,v 1.1.1.1 1996/07/09 06:21:29 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef HASHT_H +#define HASHT_H + +#include "utils/hsearch.h" + +typedef void (*HashtFunc)(); + +extern void HashTableWalk(HTAB *hashtable, HashtFunc function, int arg); + +#endif /* HASHT_H */ diff --git a/src/backend/lib/lispsort.c b/src/backend/lib/lispsort.c new file mode 100644 index 00000000000..0cf49bf0c0b --- /dev/null +++ b/src/backend/lib/lispsort.c @@ -0,0 +1,56 @@ +/*------------------------------------------------------------------------- + * + * lispsort.c-- + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/lib/Attic/lispsort.c,v 1.1.1.1 1996/07/09 06:21:29 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "nodes/plannodes.h" +#include "nodes/relation.h" +#include "lib/lispsort.h" +#include "utils/palloc.h" +#include "lib/qsort.h" + +/* +** lisp_qsort: Takes a lisp list as input, copies it into an array of lisp +** nodes which it sorts via qsort() with the comparison function +** as passed into lisp_qsort(), and returns a new list with +** the nodes sorted. The old list is *not* freed or modified (?) +*/ +List *lisp_qsort(List *the_list, /* the list to be sorted */ + int (*compare)()) /* function to compare two nodes */ +{ + int i; + size_t num; + List **nodearray; + List *tmp, *output; + + /* find size of list */ + num = length(the_list); + if (num < 2) + return(copyObject(the_list)); + + /* copy elements of the list into an array */ + nodearray = (List **) palloc(num * sizeof(List *)); + + for (tmp = the_list, i = 0; tmp != NIL; tmp = lnext(tmp), i++) + nodearray[i] = copyObject(lfirst(tmp)); + + /* sort the array */ + pg_qsort(nodearray, num, sizeof(List *), compare); + + /* lcons together the array elements */ + output = NIL; + for (i = num - 1; i >= 0; i--) + output = lcons(nodearray[i], output); + + return(output); +} diff --git a/src/backend/lib/lispsort.h b/src/backend/lib/lispsort.h new file mode 100644 index 00000000000..e49ee543622 --- /dev/null +++ b/src/backend/lib/lispsort.h @@ -0,0 +1,18 @@ +/*------------------------------------------------------------------------- + * + * lispsort.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: lispsort.h,v 1.1.1.1 1996/07/09 06:21:29 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef LISPSORT_H +#define LISPSORT_H + +extern List *lisp_qsort(List *the_list, int (*compare)()); + +#endif /* LISPSORT_H */ diff --git a/src/backend/lib/qsort.c b/src/backend/lib/qsort.c new file mode 100644 index 00000000000..5af64bf8006 --- /dev/null +++ b/src/backend/lib/qsort.c @@ -0,0 +1,281 @@ +/*------------------------------------------------------------------------- + * + * qsort.c-- + * + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/lib/Attic/qsort.c,v 1.1.1.1 1996/07/09 06:21:29 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/*- + * Copyright (c) 1980, 1983, 1990 The Regents of the University of California. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +static char sccsid[] = "@(#)qsort.c 5.9 (Berkeley) 2/23/91"; +#endif /* LIBC_SCCS and not lint */ + +#include "postgres.h" +#include "lib/qsort.h" + +/* + * MTHRESH is the smallest partition for which we compare for a median + * value instead of using the middle value. + */ +#define MTHRESH 6 + +/* + * THRESH is the minimum number of entries in a partition for continued + * partitioning. + */ +#define THRESH 4 + +static void insertion_sort(char* bot, int nmemb, int size, int (*compar)()); +static void quick_sort(char* bot, int nmemb, int size, int (*compar)()); + +void pg_qsort(void *bot, + size_t nmemb, + size_t size, + int (*compar)(void *, void *)) +{ + + if (nmemb <= 1) + return; + + if (nmemb >= THRESH) + quick_sort(bot, nmemb, size, compar); + else + insertion_sort(bot, nmemb, size, compar); +} + +/* + * Swap two areas of size number of bytes. Although qsort(3) permits random + * blocks of memory to be sorted, sorting pointers is almost certainly the + * common case (and, were it not, could easily be made so). Regardless, it + * isn't worth optimizing; the SWAP's get sped up by the cache, and pointer + * arithmetic gets lost in the time required for comparison function calls. + */ +#define SWAP(a, b) { \ + cnt = size; \ + do { \ + ch = *a; \ + *a++ = *b; \ + *b++ = ch; \ + } while (--cnt); \ +} + +/* + * Knuth, Vol. 3, page 116, Algorithm Q, step b, argues that a single pass + * of straight insertion sort after partitioning is complete is better than + * sorting each small partition as it is created. This isn't correct in this + * implementation because comparisons require at least one (and often two) + * function calls and are likely to be the dominating expense of the sort. 
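As a caller-side complement to the pg_qsort() entry point above, here is a small usage sketch against the declaration in lib/qsort.h; the array contents and function names are illustrative. The comparator receives untyped pointers to two elements and casts internally, which is exactly the comparison-function cost the surrounding comments are worrying about.

#include "lib/qsort.h"

/* comparison routine: receives the addresses of two array elements */
static int
int_compare(void *a, void *b)
{
    return *(int *) a - *(int *) b;
}

void
qsort_example(void)
{
    int data[] = { 42, 7, 19, 7, 3 };

    pg_qsort(data, sizeof(data) / sizeof(data[0]), sizeof(int), int_compare);
    /* data is now { 3, 7, 7, 19, 42 } */
}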
+ * Doing a final insertion sort does more comparisons than are necessary + * because it compares the "edges" and medians of the partitions which are + * known to be already sorted. + * + * This is also the reasoning behind selecting a small THRESH value (see + * Knuth, page 122, equation 26), since the quicksort algorithm does less + * comparisons than the insertion sort. + */ +#define SORT(bot, n) { \ + if (n > 1) \ + if (n == 2) { \ + t1 = bot + size; \ + if (compar(t1, bot) < 0) \ + SWAP(t1, bot); \ + } else \ + insertion_sort(bot, n, size, compar); \ +} + +static void +quick_sort(char* bot, int nmemb, int size, int (*compar)()) +{ + register int cnt; + register u_char ch; + register char *top, *mid, *t1, *t2; + register int n1, n2; + char *bsv; + + /* bot and nmemb must already be set. */ +partition: + + /* find mid and top elements */ + mid = bot + size * (nmemb >> 1); + top = bot + (nmemb - 1) * size; + + /* + * Find the median of the first, last and middle element (see Knuth, + * Vol. 3, page 123, Eq. 28). This test order gets the equalities + * right. + */ + if (nmemb >= MTHRESH) { + n1 = compar(bot, mid); + n2 = compar(mid, top); + if (n1 < 0 && n2 > 0) + t1 = compar(bot, top) < 0 ? top : bot; + else if (n1 > 0 && n2 < 0) + t1 = compar(bot, top) > 0 ? top : bot; + else + t1 = mid; + + /* if mid element not selected, swap selection there */ + if (t1 != mid) { + SWAP(t1, mid); + mid -= size; + } + } + + /* Standard quicksort, Knuth, Vol. 3, page 116, Algorithm Q. */ +#define didswap n1 +#define newbot t1 +#define replace t2 + didswap = 0; + for (bsv = bot;;) { + for (; bot < mid && compar(bot, mid) <= 0; bot += size); + while (top > mid) { + if (compar(mid, top) <= 0) { + top -= size; + continue; + } + newbot = bot + size; /* value of bot after swap */ + if (bot == mid) /* top <-> mid, mid == top */ + replace = mid = top; + else { /* bot <-> top */ + replace = top; + top -= size; + } + goto swap; + } + if (bot == mid) + break; + + /* bot <-> mid, mid == bot */ + replace = mid; + newbot = mid = bot; /* value of bot after swap */ + top -= size; + +swap: SWAP(bot, replace); + bot = newbot; + didswap = 1; + } + + /* + * Quicksort behaves badly in the presence of data which is already + * sorted (see Knuth, Vol. 3, page 119) going from O N lg N to O N^2. + * To avoid this worst case behavior, if a re-partitioning occurs + * without swapping any elements, it is not further partitioned and + * is insert sorted. This wins big with almost sorted data sets and + * only loses if the data set is very strangely partitioned. A fix + * for those data sets would be to return prematurely if the insertion + * sort routine is forced to make an excessive number of swaps, and + * continue the partitioning. + */ + if (!didswap) { + insertion_sort(bsv, nmemb, size, compar); + return; + } + + /* + * Re-partition or sort as necessary. Note that the mid element + * itself is correctly positioned and can be ignored. + */ +#define nlower n1 +#define nupper n2 + bot = bsv; + nlower = (mid - bot) / size; /* size of lower partition */ + mid += size; + nupper = nmemb - nlower - 1; /* size of upper partition */ + + /* + * If must call recursively, do it on the smaller partition; this + * bounds the stack to lg N entries. 
+ */ + if (nlower > nupper) { + if (nupper >= THRESH) + quick_sort(mid, nupper, size, compar); + else { + SORT(mid, nupper); + if (nlower < THRESH) { + SORT(bot, nlower); + return; + } + } + nmemb = nlower; + } else { + if (nlower >= THRESH) + quick_sort(bot, nlower, size, compar); + else { + SORT(bot, nlower); + if (nupper < THRESH) { + SORT(mid, nupper); + return; + } + } + bot = mid; + nmemb = nupper; + } + goto partition; +} + +static void +insertion_sort(char* bot, int nmemb, int size, int (*compar)()) +{ + register int cnt; + register u_char ch; + register char *s1, *s2, *t1, *t2, *top; + + /* + * A simple insertion sort (see Knuth, Vol. 3, page 81, Algorithm + * S). Insertion sort has the same worst case as most simple sorts + * (O N^2). It gets used here because it is (O N) in the case of + * sorted data. + */ + top = bot + nmemb * size; + for (t1 = bot + size; t1 < top;) { + for (t2 = t1; (t2 -= size) >= bot && compar(t1, t2) < 0;); + if (t1 != (t2 += size)) { + /* Bubble bytes up through each element. */ + for (cnt = size; cnt--; ++t1) { + ch = *t1; + for (s1 = s2 = t1; (s2 -= size) >= t2; s1 = s2) + *s1 = *s2; + *s1 = ch; + } + } else + t1 += size; + } +} diff --git a/src/backend/lib/qsort.h b/src/backend/lib/qsort.h new file mode 100644 index 00000000000..d81d4e2e070 --- /dev/null +++ b/src/backend/lib/qsort.h @@ -0,0 +1,24 @@ +/*------------------------------------------------------------------------- + * + * qsort.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: qsort.h,v 1.1.1.1 1996/07/09 06:21:29 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef QSORT_H +#define QSORT_H + +#include + +extern void pg_qsort(void *bot, + size_t nmemb, + size_t size, + int (*compar)(void *, void *)); + +#endif /* QSORT_H */ + diff --git a/src/backend/lib/stringinfo.c b/src/backend/lib/stringinfo.c new file mode 100644 index 00000000000..7167ffcf65c --- /dev/null +++ b/src/backend/lib/stringinfo.c @@ -0,0 +1,116 @@ +/*------------------------------------------------------------------------- + * + * stringinfo.c-- + * These are routines that can be used to write informations to a string, + * without having to worry about string lengths, space allocation etc. + * Ideally the interface should look like the file i/o interface, + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/lib/stringinfo.c,v 1.1.1.1 1996/07/09 06:21:29 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" +#include "nodes/pg_list.h" +#include "lib/stringinfo.h" +#include "utils/elog.h" +#include "utils/palloc.h" + +/*--------------------------------------------------------------------- + * makeStringInfo + * + * Create a StringInfoData & return a pointer to it. + * + *--------------------------------------------------------------------- + */ +StringInfo +makeStringInfo() +{ + StringInfo res; + long size; + + res = (StringInfo) palloc(sizeof(StringInfoData)); + if (res == NULL) { + elog(WARN, "makeStringInfo: Out of memory!"); + } + + size = 100; + res->data = palloc(size); + if (res->data == NULL) { + elog(WARN, + "makeStringInfo: Out of memory! 
(%ld bytes requested)", size); + } + res->maxlen = size; + res->len = 0; + /* + * NOTE: we must initialize `res->data' to the empty string because + * we use 'strcat' in 'appendStringInfo', which of course it always + * expects a null terminated string. + */ + res->data[0] = '\0'; + + return(res); +} + +/*--------------------------------------------------------------------- + * appendStringInfo + * + * append to the current 'StringInfo' a new string. + * If there is not enough space in the current 'data', then reallocate + * some more... + * + * NOTE: if we reallocate space, we pfree the old one! + *--------------------------------------------------------------------- + */ +void +appendStringInfo(StringInfo str, char *buffer) +{ + int buflen, newlen; + char *s; + + Assert((str!=NULL)); + + /* + * do we have enough space to append the new string? + * (don't forget to count the null string terminating char!) + * If no, then reallocate some more. + */ + buflen = strlen(buffer); + if (buflen + str->len >= str->maxlen-1) { + /* + * how much more space to allocate ? + * Let's say double the current space... + * However we must check if this is enough! + */ + newlen = 2 * str->len; + while (buflen + str->len >= newlen-1) { + newlen = 2 * newlen; + } + /* + * allocate enough space. + */ + s = palloc(newlen); + if (s==NULL) { + elog(WARN, + "appendStringInfo: Out of memory (%d bytes requested)", + newlen); + } + memmove(s, str->data, str->len+1); + pfree(str->data); + str->maxlen = newlen; + str->data = s; + } + + /* + * OK, we have enough space now, append 'buffer' at the + * end of the string & update the string length. + * NOTE: this is a text string (i.e. printable characters) + * so 'strcat' will do the job (no need to use 'bcopy' et all...) + */ + (void) strcat(str->data, buffer); + str->len += buflen; +} diff --git a/src/backend/lib/stringinfo.h b/src/backend/lib/stringinfo.h new file mode 100644 index 00000000000..717f2ad5985 --- /dev/null +++ b/src/backend/lib/stringinfo.h @@ -0,0 +1,47 @@ +/*------------------------------------------------------------------------- + * + * stringinfo.h-- + * Declarations/definitons for "string" functions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: stringinfo.h,v 1.1.1.1 1996/07/09 06:21:29 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef STRINGINFO_H +#define STRINGINFO_H + +/*#include "c.h" */ /* for 'String' */ + +/*------------------------- + * StringInfoData holds information about a string. + * 'data' is the string. + * 'len' is the current string length (as returned by 'strlen') + * 'maxlen' is the size in bytes of 'data', i.e. the maximum string + * size (includeing the terminating '\0' char) that we can + * currently store in 'data' without having to reallocate + * more space. + */ +typedef struct StringInfoData { + char *data; + int maxlen; + int len; +} StringInfoData; + +typedef StringInfoData *StringInfo; + +/*------------------------ + * makeStringInfo + * create a 'StringInfoData' & return a pointer to it. + */ +extern StringInfo makeStringInfo(void); + +/*------------------------ + * appendStringInfo + * similar to 'strcat' but reallocates more space if necessary... 
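A hedged usage sketch of the two routines declared in this header, as backend code might call them; the wrapper function and message text are illustrative. makeStringInfo() hands back an empty, palloc'd buffer and appendStringInfo() grows it (by doubling) as needed, so the caller never tracks capacity itself.

#include "postgres.h"
#include "lib/stringinfo.h"
#include "utils/elog.h"

void
stringinfo_example(void)
{
    StringInfo buf = makeStringInfo();

    appendStringInfo(buf, "tuples processed: ");
    appendStringInfo(buf, "42");

    /* buf->data stays null-terminated; buf->len excludes the '\0' */
    elog(NOTICE, "%s", buf->data);
}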
+ */ +extern void appendStringInfo(StringInfo str, char *buffer); + +#endif /* STRINGINFO_H */ diff --git a/src/backend/libpq/Makefile.inc b/src/backend/libpq/Makefile.inc new file mode 100644 index 00000000000..67052518d38 --- /dev/null +++ b/src/backend/libpq/Makefile.inc @@ -0,0 +1,26 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the (backend side) libpq module +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/libpq/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ +# +#------------------------------------------------------------------------- + +# +# The frontend libpq interfaces to the backend through these files. +# +VPATH:= $(VPATH):$(CURDIR)/libpq + +SRCS_LIBPQ= be-dumpdata.c be-fsstubs.c be-pqexec.c + +# +# These files are shared with the frontend library. +# +SRCS_LIBPQ+= auth.c pqcomm.c portal.c portalbuf.c pqpacket.c pqsignal.c + +HEADERS+= auth.h be-fsstubs.h libpq-be.h libpq-fs.h libpq.h pqcomm.h pqsignal.h diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c new file mode 100644 index 00000000000..7b4437736f4 --- /dev/null +++ b/src/backend/libpq/auth.c @@ -0,0 +1,668 @@ +/*------------------------------------------------------------------------- + * + * auth.c-- + * Routines to handle network authentication + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * + * backend (postmaster) routines: + * be_recvauth receive authentication information + * be_setauthsvc do/do not permit an authentication service + * be_getauthsvc is an authentication service permitted? + * + * NOTES + * To add a new authentication system: + * 0. If you can't do your authentication over an existing socket, + * you lose -- get ready to hack around this framework instead of + * using it. Otherwise, you can assume you have an initialized + * and empty connection to work with. (Please don't leave leftover + * gunk in the connection after the authentication transactions, or + * the POSTGRES routines that follow will be very unhappy.) + * 1. Write a set of routines that: + * let a client figure out what user/principal name to use + * send authentication information (client side) + * receive authentication information (server side) + * You can include both routines in this file, using #ifdef FRONTEND + * to separate them. + * 2. Edit libpq/pqcomm.h and assign a MsgType for your protocol. + * 3. Edit the static "struct authsvc" array and the generic + * {be,fe}_{get,set}auth{name,svc} routines in this file to reflect + * the new service. You may have to change the arguments of these + * routines; they basically just reflect what Kerberos v4 needs. + * 4. Hack on src/{,bin}/Makefile.global and src/{backend,libpq}/Makefile + * to add library and CFLAGS hooks -- basically, grep the Makefile + * hierarchy for KRBVERS to see where you need to add things. + * + * Send mail to post_hackers@postgres.Berkeley.EDU if you have to make + * any changes to arguments, etc. Context diffs would be nice, too. + * + * Someday, this cruft will go away and magically be replaced by a + * nice interface based on the GSS API or something. 
For now, though, + * there's no (stable) UNIX security API to work with... + * + */ +#include +#include +#include /* for MAX{HOSTNAME,PATH}LEN, NOFILE */ +#include +#include /* isspace() declaration */ + +#include +#include +#include "libpq/auth.h" +#include "libpq/libpq.h" +#include "libpq/pqcomm.h" +#include "libpq/libpq-be.h" + +/*---------------------------------------------------------------- + * common definitions for generic fe/be routines + *---------------------------------------------------------------- + */ + +struct authsvc { + char name[16]; /* service nickname (for command line) */ + MsgType msgtype; /* startup packet header type */ + int allowed; /* initially allowed (before command line + * option parsing)? + */ +}; + +/* + * Command-line parsing routines use this structure to map nicknames + * onto service types (and the startup packets to use with them). + * + * Programs receiving an authentication request use this structure to + * decide which authentication service types are currently permitted. + * By default, all authentication systems compiled into the system are + * allowed. Unauthenticated connections are disallowed unless there + * isn't any authentication system. + */ +static struct authsvc authsvcs[] = { +#ifdef KRB4 + { "krb4", STARTUP_KRB4_MSG, 1 }, + { "kerberos", STARTUP_KRB4_MSG, 1 }, +#endif /* KRB4 */ +#ifdef KRB5 + { "krb5", STARTUP_KRB5_MSG, 1 }, + { "kerberos", STARTUP_KRB5_MSG, 1 }, +#endif /* KRB5 */ + { UNAUTHNAME, STARTUP_MSG, +#if defined(KRB4) || defined(KRB5) + 0 +#else /* !(KRB4 || KRB5) */ + 1 +#endif /* !(KRB4 || KRB5) */ + } +}; + +static n_authsvcs = sizeof(authsvcs) / sizeof(struct authsvc); + +#ifdef KRB4 +/*---------------------------------------------------------------- + * MIT Kerberos authentication system - protocol version 4 + *---------------------------------------------------------------- + */ + +#include "krb.h" + +#ifdef FRONTEND +/* moves to src/libpq/fe-auth.c */ +#else /* !FRONTEND */ + +/* + * pg_krb4_recvauth -- server routine to receive authentication information + * from the client + * + * Nothing unusual here, except that we compare the username obtained from + * the client's setup packet to the authenticated name. (We have to retain + * the name in the setup packet since we have to retain the ability to handle + * unauthenticated connections.) 
+ */ +static int +pg_krb4_recvauth(int sock, + struct sockaddr_in *laddr, + struct sockaddr_in *raddr, + char *username) +{ + long krbopts = 0; /* one-way authentication */ + KTEXT_ST clttkt; + char instance[INST_SZ]; + AUTH_DAT auth_data; + Key_schedule key_sched; + char version[KRB_SENDAUTH_VLEN]; + int status; + + strcpy(instance, "*"); /* don't care, but arg gets expanded anyway */ + status = krb_recvauth(krbopts, + sock, + &clttkt, + PG_KRB_SRVNAM, + instance, + raddr, + laddr, + &auth_data, + PG_KRB_SRVTAB, + key_sched, + version); + if (status != KSUCCESS) { + (void) sprintf(PQerrormsg, + "pg_krb4_recvauth: kerberos error: %s\n", + krb_err_txt[status]); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + if (strncmp(version, PG_KRB4_VERSION, KRB_SENDAUTH_VLEN)) { + (void) sprintf(PQerrormsg, + "pg_krb4_recvauth: protocol version != \"%s\"\n", + PG_KRB4_VERSION); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + if (username && *username && + strncmp(username, auth_data.pname, NAMEDATALEN)) { + (void) sprintf(PQerrormsg, + "pg_krb4_recvauth: name \"%s\" != \"%s\"\n", + username, + auth_data.pname); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + return(STATUS_OK); +} + +#endif /* !FRONTEND */ + +#endif /* KRB4 */ + +#ifdef KRB5 +/*---------------------------------------------------------------- + * MIT Kerberos authentication system - protocol version 5 + *---------------------------------------------------------------- + */ + +#include "krb5/krb5.h" + +/* + * pg_an_to_ln -- return the local name corresponding to an authentication + * name + * + * XXX Assumes that the first aname component is the user name. This is NOT + * necessarily so, since an aname can actually be something out of your + * worst X.400 nightmare, like + * ORGANIZATION=U. C. Berkeley/NAME=Paul M. Aoki@CS.BERKELEY.EDU + * Note that the MIT an_to_ln code does the same thing if you don't + * provide an aname mapping database...it may be a better idea to use + * krb5_an_to_ln, except that it punts if multiple components are found, + * and we can't afford to punt. + */ +static char * +pg_an_to_ln(char *aname) +{ + char *p; + + if ((p = strchr(aname, '/')) || (p = strchr(aname, '@'))) + *p = '\0'; + return(aname); +} + +#ifdef FRONTEND +/* moves to src/libpq/fe-auth.c */ +#else /* !FRONTEND */ + +/* + * pg_krb4_recvauth -- server routine to receive authentication information + * from the client + * + * We still need to compare the username obtained from the client's setup + * packet to the authenticated name, as described in pg_krb4_recvauth. This + * is a bit more problematic in v5, as described above in pg_an_to_ln. + * + * In addition, as described above in pg_krb5_sendauth, we still need to + * canonicalize the server name v4-style before constructing a principal + * from it. Again, this is kind of iffy. + * + * Finally, we need to tangle with the fact that v5 doesn't let you explicitly + * set server keytab file names -- you have to feed lower-level routines a + * function to retrieve the contents of a keytab, along with a single argument + * that allows them to open the keytab. We assume that a server keytab is + * always a real file so we can allow people to specify their own filenames. 
+ * (This is important because the POSTGRES keytab needs to be readable by + * non-root users/groups; the v4 tools used to force you do dump a whole + * host's worth of keys into a file, effectively forcing you to use one file, + * but kdb5_edit allows you to select which principals to dump. Yay!) + */ +static int +pg_krb5_recvauth(int sock, + struct sockaddr_in *laddr, + struct sockaddr_in *raddr, + char *username) +{ + char servbuf[MAXHOSTNAMELEN + 1 + + sizeof(PG_KRB_SRVNAM)]; + char *hostp, *kusername = (char *) NULL; + krb5_error_code code; + krb5_principal client, server; + krb5_address sender_addr; + krb5_rdreq_key_proc keyproc = (krb5_rdreq_key_proc) NULL; + krb5_pointer keyprocarg = (krb5_pointer) NULL; + + /* + * Set up server side -- since we have no ticket file to make this + * easy, we construct our own name and parse it. See note on + * canonicalization above. + */ + (void) strcpy(servbuf, PG_KRB_SRVNAM); + *(hostp = servbuf + (sizeof(PG_KRB_SRVNAM) - 1)) = '/'; + if (gethostname(++hostp, MAXHOSTNAMELEN) < 0) + (void) strcpy(hostp, "localhost"); + if (hostp = strchr(hostp, '.')) + *hostp = '\0'; + if (code = krb5_parse_name(servbuf, &server)) { + (void) sprintf(PQerrormsg, + "pg_krb5_recvauth: Kerberos error %d in krb5_parse_name\n", + code); + com_err("pg_krb5_recvauth", code, "in krb5_parse_name"); + return(STATUS_ERROR); + } + + /* + * krb5_sendauth needs this to verify the address in the client + * authenticator. + */ + sender_addr.addrtype = raddr->sin_family; + sender_addr.length = sizeof(raddr->sin_addr); + sender_addr.contents = (krb5_octet *) &(raddr->sin_addr); + + if (strcmp(PG_KRB_SRVTAB, "")) { + keyproc = krb5_kt_read_service_key; + keyprocarg = PG_KRB_SRVTAB; + } + + if (code = krb5_recvauth((krb5_pointer) &sock, + PG_KRB5_VERSION, + server, + &sender_addr, + (krb5_pointer) NULL, + keyproc, + keyprocarg, + (char *) NULL, + (krb5_int32 *) NULL, + &client, + (krb5_ticket **) NULL, + (krb5_authenticator **) NULL)) { + (void) sprintf(PQerrormsg, + "pg_krb5_recvauth: Kerberos error %d in krb5_recvauth\n", + code); + com_err("pg_krb5_recvauth", code, "in krb5_recvauth"); + krb5_free_principal(server); + return(STATUS_ERROR); + } + krb5_free_principal(server); + + /* + * The "client" structure comes out of the ticket and is therefore + * authenticated. Use it to check the username obtained from the + * postmaster startup packet. 
+ */ + if ((code = krb5_unparse_name(client, &kusername))) { + (void) sprintf(PQerrormsg, + "pg_krb5_recvauth: Kerberos error %d in krb5_unparse_name\n", + code); + com_err("pg_krb5_recvauth", code, "in krb5_unparse_name"); + krb5_free_principal(client); + return(STATUS_ERROR); + } + krb5_free_principal(client); + if (!kusername) { + (void) sprintf(PQerrormsg, + "pg_krb5_recvauth: could not decode username\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + kusername = pg_an_to_ln(kusername); + if (username && strncmp(username, kusername, NAMEDATALEN)) { + (void) sprintf(PQerrormsg, + "pg_krb5_recvauth: name \"%s\" != \"%s\"\n", + username, kusername); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + free(kusername); + return(STATUS_ERROR); + } + free(kusername); + return(STATUS_OK); +} + +#endif /* !FRONTEND */ + +#endif /* KRB5 */ + + +/*---------------------------------------------------------------- + * host based authentication + *---------------------------------------------------------------- + * based on the securelib package originally written by William + * LeFebvre, EECS Department, Northwestern University + * (phil@eecs.nwu.edu) - orginal configuration file code handling + * by Sam Horrocks (sam@ics.uci.edu) + * + * modified and adapted for use with Postgres95 by Paul Fisher + * (pnfisher@unity.ncsu.edu) + */ + +#define CONF_FILE "pg_hba" /* Name of the config file */ + +#define MAX_LINES 255 /* Maximum number of config lines * + * that can apply to one database */ + +#define ALL_NAME "all" /* Name used in config file for * + * lines that apply to all databases */ + +#define MAX_TOKEN 80 /* Maximum size of one token in the * + * configuration file */ + +struct conf_line { /* Info about config file line */ + u_long adr, mask; +}; + +static int next_token(FILE *, char *, int); + +/* hba_recvauth */ +/* check for host-based authentication */ +/* + * hba_recvauth - check the sockaddr_in "addr" to see if it corresponds + * to an acceptable host for the database that's being + * connected to. Return STATUS_OK if acceptable, + * otherwise return STATUS_ERROR. + */ + +static int +hba_recvauth(struct sockaddr_in *addr, PacketBuf *pbuf, StartupInfo *sp) +{ + u_long ip_addr; + static struct conf_line conf[MAX_LINES]; + static int nconf; + int i; + + char buf[MAX_TOKEN]; + FILE *file; + + char *conf_file; + + /* put together the full pathname to the config file */ + conf_file = (char *) malloc((strlen(GetPGData())+strlen(CONF_FILE)+2)*sizeof(char)); + strcpy(conf_file, GetPGData()); + strcat(conf_file, "/"); + strcat(conf_file, CONF_FILE); + + + /* Open the config file. */ + file = fopen(conf_file, "r"); + if (file) + { + free(conf_file); + nconf = 0; + + /* Grab the "name" */ + while ((i = next_token(file, buf, sizeof(buf))) != EOF) + { + /* If only token on the line, ignore */ + if (i == '\n') continue; + + /* Comment -- read until end of line then next line */ + if (buf[0] == '#') + { + while (next_token(file, buf, sizeof(buf)) == 0) ; + continue; + } + + /* + * Check to make sure this says "all" or that it matches + * the database name. 
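The acceptance test hba_recvauth applies at the end (turn off the bits given by the mask, then compare the result with the address) can be shown with a self-contained fragment; the addresses and mask below are made up for illustration and would normally come from a pg_hba line via inet_addr().

#include <stdio.h>
#include <arpa/inet.h>

int
main(void)
{
    /* illustrative pg_hba-style entry: <dbname> 192.168.1.0 0.0.0.255 */
    unsigned long adr  = inet_addr("192.168.1.0");   /* network address */
    unsigned long mask = inet_addr("0.0.0.255");     /* bits to ignore  */
    unsigned long ip   = inet_addr("192.168.1.42");  /* connecting host */

    if ((ip & ~mask) == adr)
        printf("host accepted\n");    /* taken for this sample host */
    else
        printf("host rejected\n");
    return 0;
}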
+ */ + + if (strcmp(buf, ALL_NAME) == 0 || (strcmp(buf, sp->database) == 0)) + { + /* Get next token, if last on line, ignore */ + if (next_token(file, buf, sizeof(buf)) != 0) + continue; + + /* Got address */ + conf[nconf].adr = inet_addr(buf); + + /* Get next token (mask) */ + i = next_token(file, buf, sizeof(buf)); + + /* Only ignore if we got no text at all */ + if (i != EOF) + { + /* Add to list, quit if array is full */ + conf[nconf++].mask = inet_addr(buf); + if (nconf == MAX_LINES) break; + } + + /* If not at end-of-line, keep reading til we are */ + while (i == 0) + i = next_token(file, buf, sizeof(buf)); + } + } + fclose(file); + } + else + { (void) sprintf(PQerrormsg, + "hba_recvauth: config file does not exist or permissions are not setup correctly!\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + free(conf_file); + return(STATUS_ERROR); + } + + + /* Config lines now in memory so start checking address */ + /* grab just the address */ + ip_addr = addr->sin_addr.s_addr; + + /* + * Go through the conf array, turn off the bits given by the mask + * and then compare the result with the address. A match means + * that this address is ok. + */ + for (i = 0; i < nconf; ++i) + if ((ip_addr & ~conf[i].mask) == conf[i].adr) return(STATUS_OK); + + /* no match, so we can't approve the address */ + return(STATUS_ERROR); +} + +/* + * Grab one token out of fp. Defined as the next string of non-whitespace + * in the file. After we get the token, continue reading until EOF, end of + * line or the next token. If it's the last token on the line, return '\n' + * for the value. If we get EOF before reading a token, return EOF. In all + * other cases return 0. + */ +static int +next_token(FILE *fp, char *buf, int bufsz) +{ + int c; + char *eb = buf+(bufsz-1); + + /* Discard inital whitespace */ + while (isspace(c = getc(fp))) ; + + /* EOF seen before any token so return EOF */ + if (c == EOF) return -1; + + /* Form a token in buf */ + do { + if (buf < eb) *buf++ = c; + c = getc(fp); + } while (!isspace(c) && c != EOF); + *buf = '\0'; + + /* Discard trailing tabs and spaces */ + while (c == ' ' || c == '\t') c = getc(fp); + + /* Put back the char that was non-whitespace (putting back EOF is ok) */ + (void) ungetc(c, fp); + + /* If we ended with a newline, return that, otherwise return 0 */ + return (c == '\n' ? 
'\n' : 0); +} + +/* + * be_recvauth -- server demux routine for incoming authentication information + */ +int +be_recvauth(MsgType msgtype, Port *port, char *username, StartupInfo* sp) +{ + if (!username) { + (void) sprintf(PQerrormsg, + "be_recvauth: no user name passed\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + if (!port) { + (void) sprintf(PQerrormsg, + "be_recvauth: no port structure passed\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + + switch (msgtype) { +#ifdef KRB4 + case STARTUP_KRB4_MSG: + if (!be_getauthsvc(msgtype)) { + (void) sprintf(PQerrormsg, + "be_recvauth: krb4 authentication disallowed\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + if (pg_krb4_recvauth(port->sock, &port->laddr, &port->raddr, + username) != STATUS_OK) { + (void) sprintf(PQerrormsg, + "be_recvauth: krb4 authentication failed\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + break; +#endif +#ifdef KRB5 + case STARTUP_KRB5_MSG: + if (!be_getauthsvc(msgtype)) { + (void) sprintf(PQerrormsg, + "be_recvauth: krb5 authentication disallowed\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + if (pg_krb5_recvauth(port->sock, &port->laddr, &port->raddr, + username) != STATUS_OK) { + (void) sprintf(PQerrormsg, + "be_recvauth: krb5 authentication failed\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + break; +#endif + case STARTUP_MSG: + if (!be_getauthsvc(msgtype)) { + (void) sprintf(PQerrormsg, + "be_recvauth: unauthenticated connections disallowed failed\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + break; + case STARTUP_HBA_MSG: + if (hba_recvauth(&port->raddr, &port->buf, sp) != STATUS_OK) { + (void) sprintf(PQerrormsg, + "be_recvauth: host-based authentication failed\n"); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + break; + default: + (void) sprintf(PQerrormsg, + "be_recvauth: unrecognized message type: %d\n", + msgtype); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + return(STATUS_OK); +} + +/* + * be_setauthsvc -- enable/disable the authentication services currently + * selected for use by the backend + * be_getauthsvc -- returns whether a particular authentication system + * (indicated by its message type) is permitted by the + * current selections + * + * be_setauthsvc encodes the command-line syntax that + * -a "" + * enables a service, whereas + * -a "no" + * disables it. 
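A hedged sketch of that command-line convention as backend code would exercise it through be_setauthsvc() and be_getauthsvc(); the wrapper function is hypothetical, while the "unauth" nickname and STARTUP_MSG come from the authsvcs[] table and pqcomm.h.

#include "libpq/auth.h"

void
authsvc_example(void)
{
    be_setauthsvc("unauth");     /* as if started with -a "unauth"   */
    be_setauthsvc("nounauth");   /* as if started with -a "nounauth" */

    if (!be_getauthsvc(STARTUP_MSG))
    {
        /* unauthenticated connections are currently disallowed */
    }
}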
+ */ +void +be_setauthsvc(char *name) +{ + int i, j; + int turnon = 1; + + if (!name) + return; + if (!strncmp("no", name, 2)) { + turnon = 0; + name += 2; + } + if (name[0] == '\0') + return; + for (i = 0; i < n_authsvcs; ++i) + if (!strcmp(name, authsvcs[i].name)) { + for (j = 0; j < n_authsvcs; ++j) + if (authsvcs[j].msgtype == authsvcs[i].msgtype) + authsvcs[j].allowed = turnon; + break; + } + if (i == n_authsvcs) { + (void) sprintf(PQerrormsg, + "be_setauthsvc: invalid name %s, ignoring...\n", + name); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + } + return; +} + +int +be_getauthsvc(MsgType msgtype) +{ + int i; + + for (i = 0; i < n_authsvcs; ++i) + if (msgtype == authsvcs[i].msgtype) + return(authsvcs[i].allowed); + return(0); +} diff --git a/src/backend/libpq/auth.h b/src/backend/libpq/auth.h new file mode 100644 index 00000000000..adda8dc13c4 --- /dev/null +++ b/src/backend/libpq/auth.h @@ -0,0 +1,49 @@ +/*------------------------------------------------------------------------- + * + * auth.h-- + * Definitions for network authentication routines + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: auth.h,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef AUTH_H +#define AUTH_H + +#include "c.h" +#include "libpq/pqcomm.h" + +/*---------------------------------------------------------------- + * Common routines and definitions + *---------------------------------------------------------------- + */ + +/* what we call "no authentication system" */ +#define UNAUTHNAME "unauth" + +/* what a frontend uses by default */ +#if !defined(KRB4) && !defined(KRB5) +#define DEFAULT_CLIENT_AUTHSVC UNAUTHNAME +#else /* KRB4 || KRB5 */ +#define DEFAULT_CLIENT_AUTHSVC "kerberos" +#endif /* KRB4 || KRB5 */ + +extern int fe_sendauth(MsgType msgtype, Port *port, char *hostname); +extern void fe_setauthsvc(char *name); +extern MsgType fe_getauthsvc(); +extern char *fe_getauthname(void); +extern int be_recvauth(MsgType msgtype, Port *port, char *username, StartupInfo* sp); +extern void be_setauthsvc(char *name); +extern int be_getauthsvc(MsgType msgtype); + +/* the value that matches any dbName value when doing + host based authentication*/ +#define ALL_DBNAME "*" + +#define PG_KRB4_VERSION "PGVER4.1" /* at most KRB_SENDAUTH_VLEN chars */ +#define PG_KRB5_VERSION "PGVER5.1" + +#endif /* AUTH_H */ diff --git a/src/backend/libpq/be-dumpdata.c b/src/backend/libpq/be-dumpdata.c new file mode 100644 index 00000000000..fb6b90c1495 --- /dev/null +++ b/src/backend/libpq/be-dumpdata.c @@ -0,0 +1,323 @@ +/*------------------------------------------------------------------------- + * + * be-dumpdata.c-- + * support for collection of returned tuples from an internal + * PQ call into a backend buffer. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/libpq/Attic/be-dumpdata.c,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * be_portalinit - initialize backend portal administration + * be_portalpush - add a portal to the top of the portal stack + * be_portalpop - remove portal on the top of the stack & return it + * be_currentportal - return the top portal on the portal stack + * be_newportal - return a new portal. + * be_portalinit - initialize backend portal expected to hold results. 
+ * be_printtup - add a tuple to a backend portal + * + * NOTES + * Since backend user-defined operators can call queries + * which in turn call user-defined operators can call queries... + * we have to keep track of portals on a stack. BeginCommand() + * puts portals on the stack and the PQ functions remove them. + * + */ +#include "postgres.h" + +#include "lib/dllist.h" +#include "libpq/libpq-be.h" + +#include "access/heapam.h" +#include "access/htup.h" +#include "storage/buf.h" +#include "utils/memutils.h" +#include "utils/palloc.h" +#include "fmgr.h" +#include "utils/mcxt.h" +#include "utils/elog.h" +#include "utils/exc.h" + +#include "utils/syscache.h" +#include "catalog/pg_type.h" +#include "catalog/catalog.h" +#include "access/printtup.h" + +/* ---------------- + * backend portal stack for recursive PQexec calls + * ---------------- + */ +static Dllist *be_portalstack; + +/* ---------------- + * be_portalinit - initialize backend portal administration + * + * This is called once from InitPostgres() to initialize + * the portal stack. + * ---------------- + */ +void +be_portalinit() +{ + be_portalstack = DLNewList(); +} + +/* ---------------- + * be_portalpush - add a portal to the top of the portal stack + * + * used by BeginCommand() + * ---------------- + */ +void +be_portalpush(PortalEntry *entry) +{ + DLAddTail(be_portalstack, DLNewElem(entry)); +} + +/* ---------------- + * be_portalpop - remove the portal on the top of the stack & return it + * + * used by PQexec() + * ---------------- + */ +PortalEntry * +be_portalpop() +{ + PortalEntry *p; + Dlelem* elt; + elt = DLRemTail(be_portalstack); + + p = (elt ? (PortalEntry*)DLE_VAL(elt) : NULL); + DLFreeElem(elt); + return p; + + +} + +/* ---------------- + * be_currentportal - return the top portal on the portal stack + * + * used by be_printtup() + * ---------------- + */ +PortalEntry * +be_currentportal() +{ + Dlelem* elt; + elt = DLGetTail(be_portalstack); + return (elt ? (PortalEntry*)DLE_VAL(elt) : NULL); +} + +/* ---------------- + * be_newportal - return a new portal. + * + * If the user-defined function does not specify a portal name, + * we generate a unique one. Names are generated from a combination + * of a postgres oid and an integer counter which is incremented + * every time we ask for a local portal. + * + * used by BeginCommand() + * ---------------- + */ + +static Oid be_portaloid; +static u_int be_portalcnt = 0; + +PortalEntry * +be_newportal() +{ + PortalEntry *entry; + char buf[PortalNameLength]; + + /* ---------------- + * generate a new name + * ---------------- + */ + if (be_portalcnt == 0) + be_portaloid = newoid(); + be_portalcnt++; + sprintf(buf, "be_%d_%d", be_portaloid, be_portalcnt); + + /* ---------------- + * initialize the new portal entry and keep track + * of the current memory context for be_printtup(). + * This is important - otherwise whatever we allocate + * will go away and the contents of the portal after + * PQexec() returns will be meaningless. + * ---------------- + */ + entry = pbuf_setup(buf); + entry->portalcxt = (Pointer) CurrentMemoryContext; + + return entry; +} + +/* ---------------- + * be_typeinit - initialize backend portal expected to hold + * query results. 
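A hedged sketch of the push/pop protocol the portal routines above implement, seen from the BeginCommand()/PQexec() side described in the notes; the wrapper function is hypothetical and error handling is omitted.

#include "libpq/libpq-be.h"

/*
 * BeginCommand() builds a portal and pushes it; PQexec() later pops it
 * and reads whatever be_printtup() accumulated in the meantime.
 */
void
portal_stack_example(void)
{
    PortalEntry *entry;

    entry = be_newportal();      /* unique "be_<oid>_<n>" name; portalcxt saved */
    be_portalpush(entry);

    /* ... the query runs; be_printtup() fills the current portal ... */

    entry = be_portalpop();
    /* entry->portal now holds the returned tuples, if any */
}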
+ * + * used by BeginCommand() + * ---------------- + */ +void +be_typeinit(PortalEntry *entry, + TupleDesc tupDesc, + int natts) +{ + PortalBuffer *portal; + GroupBuffer *group; + int i; + AttributeTupleForm *attrs = tupDesc->attrs; + + /* ---------------- + * add a new portal group to the portal + * ---------------- + */ + portal = entry->portal; + portal->no_groups++; + portal->groups = group = pbuf_addGroup(portal); + group->no_fields = natts; + + /* ---------------- + * initialize portal group type info + * ---------------- + */ + if (natts > 0) { + group->types = pbuf_addTypes(natts); + for (i = 0; i < natts; ++i) { + strncpy(group->types[i].name, attrs[i]->attname.data, NAMEDATALEN); + group->types[i].adtid = attrs[i]->atttypid; + group->types[i].adtsize = attrs[i]->attlen; + } + } +} + +/* ---------------- + * be_printtup - add a tuple to a backend portal + * + * used indirectly by ExecRetrieve() + * + * This code is pretty much copied from printtup(), dump_type() + * and dump_data(). -cim 2/12/91 + * ---------------- + */ +void +be_printtup(HeapTuple tuple, TupleDesc typeinfo) +{ + int i; + char *attr; + bool isnull; + Oid typoutput; + + PortalEntry *entry = NULL; + PortalBuffer *portal = NULL; + GroupBuffer *group = NULL ; + TupleBlock *tuples = NULL; + char **values; + int *lengths; + + MemoryContext savecxt; + + /* ---------------- + * get the current portal and group + * ---------------- + */ + entry = be_currentportal(); + portal = entry->portal; + group = portal->groups; + + /* ---------------- + * switch to the portal's memory context so that + * the tuples we allocate are returned to the user. + * ---------------- + */ + savecxt = MemoryContextSwitchTo((MemoryContext)entry->portalcxt); + + /* ---------------- + * If no tuple block yet, allocate one. + * If the current block is full, allocate another one. + * ---------------- + */ + if (group->tuples == NULL) { + tuples = group->tuples = pbuf_addTuples(); + tuples->tuple_index = 0; + } else { + tuples = group->tuples; + /* walk to the end of the linked list of TupleBlocks */ + while (tuples->next) + tuples = tuples->next; + /* now, tuples is the last TupleBlock, check to see if it is full. + If so, allocate a new TupleBlock and add it to the end of + the chain */ + + if (tuples->tuple_index == TupleBlockSize) { + tuples->next = pbuf_addTuples(); + tuples = tuples->next; + tuples->tuple_index = 0; + } + } + + /* ---------------- + * Allocate space for a tuple. + * ---------------- + */ + tuples->values[tuples->tuple_index] = pbuf_addTuple(tuple->t_natts); + tuples->lengths[tuples->tuple_index] = pbuf_addTupleValueLengths(tuple->t_natts); + /* ---------------- + * copy printable representations of the tuple's attributes + * to the portal. + * + * This seems silly, because the user's function which is calling + * PQexec() or PQfn() will probably just convert this back into the + * internal form anyways, but the point here is to provide a uniform + * libpq interface and this is how the fe libpq interface currently + * works. Pretty soon we'll have to add code to let the fe or be + * select the desired data representation and then deal with that. 
+ * This should not be too hard, as there already exist typrecieve() + * and typsend() procedures for user-defined types (see pg_type.h) + * -cim 2/11/91 + * ---------------- + */ + + values = tuples->values[tuples->tuple_index]; + lengths = tuples->lengths[tuples->tuple_index]; + + for (i = 0; i < tuple->t_natts; i++) { + attr = heap_getattr(tuple, InvalidBuffer, i+1, typeinfo, &isnull); + typoutput = typtoout((Oid) typeinfo->attrs[i]->atttypid); + + lengths[i] = typeinfo->attrs[i]->attlen; + + if (lengths[i] == -1) /* variable length attribute */ + if (!isnull) + lengths[i] = VARSIZE(attr)-VARHDRSZ; + else + lengths[i] = 0; + + if (!isnull && OidIsValid(typoutput)) { + values[i] = fmgr(typoutput, attr, gettypelem(typeinfo->attrs[i]->atttypid)); + } else + values[i] = NULL; + + } + + /* ---------------- + * increment tuple group counters + * ---------------- + */ + portal->no_tuples++; + group->no_tuples++; + tuples->tuple_index++; + + /* ---------------- + * return to the original memory context + * ---------------- + */ + MemoryContextSwitchTo(savecxt); +} diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c new file mode 100644 index 00000000000..e32cd3b474b --- /dev/null +++ b/src/backend/libpq/be-fsstubs.c @@ -0,0 +1,351 @@ +/*------------------------------------------------------------------------- + * + * be-fsstubs.c-- + * support for filesystem operations on large objects + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/libpq/be-fsstubs.c,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ + * + * NOTES + * This should be moved to a more appropriate place. It is here + * for lack of a better place. + * + * Builtin functions for open/close/read/write operations on large objects. + * + * These functions operate in the current portal variable context, which + * means the large object descriptors hang around between transactions and + * are not deallocated until explicitly closed, or until the portal is + * closed. + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "lib/dllist.h" +#include "libpq/libpq.h" +#include "libpq/libpq-fs.h" +#include "utils/mcxt.h" +#include "utils/palloc.h" + +#include "storage/fd.h" /* for O_ */ +#include "storage/large_object.h" + +#include "utils/elog.h" +#include "libpq/be-fsstubs.h" + +/*#define FSDB 1*/ +#define MAX_LOBJ_FDS 256 + +static LargeObjectDesc *cookies[MAX_LOBJ_FDS]; + +static GlobalMemory fscxt = NULL; + + +static int newLOfd(LargeObjectDesc *lobjCookie); +static void deleteLOfd(int fd); + + +/***************************************************************************** + * File Interfaces for Large Objects + *****************************************************************************/ + +int +lo_open(Oid lobjId, int mode) +{ + LargeObjectDesc *lobjDesc; + int fd; + MemoryContext currentContext; + +#if FSDB + elog(NOTICE,"LOopen(%d,%d)",lobjId,mode); +#endif + + if (fscxt == NULL) { + fscxt = CreateGlobalMemory("Filesystem"); + } + currentContext = MemoryContextSwitchTo((MemoryContext)fscxt); + + lobjDesc = inv_open(lobjId, mode); + + if (lobjDesc == NULL) { /* lookup failed */ + MemoryContextSwitchTo(currentContext); +#if FSDB + elog(NOTICE,"cannot open large object %d", lobjId); +#endif + return -1; + } + + fd = newLOfd(lobjDesc); + + /* switch context back to orig. 
*/ + MemoryContextSwitchTo(currentContext); + + return fd; +} + +int +lo_close(int fd) +{ + MemoryContext currentContext; + + if (fd >= MAX_LOBJ_FDS) { + elog(WARN,"lo_close: large obj descriptor (%d) out of range", fd); + return -2; + } + if (cookies[fd] == NULL) { + elog(WARN,"lo_close: invalid large obj descriptor (%d)", fd); + return -3; + } +#if FSDB + elog(NOTICE,"LOclose(%d)",fd); +#endif + + Assert(fscxt != NULL); + currentContext = MemoryContextSwitchTo((MemoryContext)fscxt); + + inv_close(cookies[fd]); + + MemoryContextSwitchTo(currentContext); + + deleteLOfd(fd); + return 0; +} + +/* + * We assume the large object supports byte oriented reads and seeks so + * that our work is easier. + */ +int +lo_read(int fd, char *buf, int len) +{ + Assert(cookies[fd]!=NULL); + return inv_read(cookies[fd], buf, len); +} + +int +lo_write(int fd, char *buf, int len) +{ + Assert(cookies[fd]!=NULL); + return inv_write(cookies[fd], buf, len); +} + + +int +lo_lseek(int fd, int offset, int whence) +{ + if (fd >= MAX_LOBJ_FDS) { + elog(WARN,"lo_seek: large obj descriptor (%d) out of range", fd); + return -2; + } + return inv_seek(cookies[fd], offset, whence); +} + +Oid +lo_creat(int mode) +{ + LargeObjectDesc *lobjDesc; + MemoryContext currentContext; + Oid lobjId; + + if (fscxt == NULL) { + fscxt = CreateGlobalMemory("Filesystem"); + } + + currentContext = MemoryContextSwitchTo((MemoryContext)fscxt); + + lobjDesc = inv_create(mode); + + if (lobjDesc == NULL) { + MemoryContextSwitchTo(currentContext); + return InvalidOid; + } + + lobjId = lobjDesc->heap_r->rd_id; + + inv_close(lobjDesc); + + /* switch context back to original memory context */ + MemoryContextSwitchTo(currentContext); + + return lobjId; +} + +int +lo_tell(int fd) +{ + if (fd >= MAX_LOBJ_FDS) { + elog(WARN,"lo_tell: large object descriptor (%d) out of range",fd); + return -2; + } + if (cookies[fd] == NULL) { + elog(WARN,"lo_tell: invalid large object descriptor (%d)",fd); + return -3; + } + return inv_tell(cookies[fd]); +} + +int +lo_unlink(Oid lobjId) +{ + return (inv_destroy(lobjId)); +} + +/***************************************************************************** + * Read/Write using varlena + *****************************************************************************/ + +struct varlena * +LOread(int fd, int len) +{ + struct varlena *retval; + int totalread = 0; + + retval = (struct varlena *)palloc(sizeof(int32) + len); + totalread = lo_read(fd, VARDATA(retval), len); + VARSIZE(retval) = totalread + sizeof(int32); + + return retval; +} + +int LOwrite(int fd, struct varlena *wbuf) +{ + int totalwritten; + int bytestowrite; + + bytestowrite = VARSIZE(wbuf) - sizeof(int32); + totalwritten = lo_write(fd, VARDATA(wbuf), bytestowrite); + return totalwritten; +} + +/***************************************************************************** + * Import/Export of Large Object + *****************************************************************************/ + +/* + * lo_import - + * imports a file as an (inversion) large object. 
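A hedged sketch of the backend-side calling sequence for these stubs, roughly mirroring what lo_import() below does by hand; the wrapper function and buffer contents are illustrative, error handling is omitted, and the INV_READ/INV_WRITE flags are assumed to come from storage/large_object.h as in this file.

#include "postgres.h"
#include "libpq/be-fsstubs.h"
#include "storage/large_object.h"    /* assumed home of INV_READ/INV_WRITE */

void
large_object_example(void)
{
    Oid  lobjId;
    int  fd;
    char buf[] = "hello, large object";

    lobjId = lo_creat(INV_READ | INV_WRITE);   /* new inversion object */

    fd = lo_open(lobjId, INV_WRITE);
    lo_write(fd, buf, sizeof(buf));
    lo_close(fd);

    fd = lo_open(lobjId, INV_READ);
    lo_read(fd, buf, sizeof(buf));
    lo_close(fd);
}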
+ */ +Oid +lo_import(text *filename) +{ + int fd; + int nbytes, tmp; +#define BUFSIZE 1024 + char buf[BUFSIZE]; + LargeObjectDesc *lobj; + Oid lobjOid; + + /* + * open the file to be read in + */ + fd = open(VARDATA(filename), O_RDONLY, 0666); + if (fd < 0) { /* error */ + elog(WARN, "lo_import: can't open unix file\"%s\"\n", filename); + } + + /* + * create an inversion "object" + */ + lobj = inv_create(INV_READ|INV_WRITE); + if (lobj == NULL) { + elog(WARN, "lo_import: can't create inv object for \"%s\"", + VARDATA(filename)); + } + + /* + * the oid for the large object is just the oid of the relation + * XInv??? which contains the data. + */ + lobjOid = lobj->heap_r->rd_id; + + /* + * read in from the Unix file and write to the inversion file + */ + while ((nbytes = read(fd, buf, BUFSIZE)) > 0) { + tmp = inv_write(lobj, buf, nbytes); + if (tmp < nbytes) { + elog(WARN, "lo_import: error while reading \"%s\"", + VARDATA(filename)); + } + } + + (void) close(fd); + (void) inv_close(lobj); + + return lobjOid; +} + +/* + * lo_export - + * exports an (inversion) large object. + */ +int4 +lo_export(Oid lobjId, text *filename) +{ + int fd; + int nbytes, tmp; +#define BUFSIZE 1024 + char buf[BUFSIZE]; + LargeObjectDesc *lobj; + + /* + * create an inversion "object" + */ + lobj = inv_open(lobjId, INV_READ); + if (lobj == NULL) { + elog(WARN, "lo_export: can't open inv object %d", + lobjId); + } + + /* + * open the file to be written to + */ + fd = open(VARDATA(filename), O_CREAT|O_WRONLY, 0666); + if (fd < 0) { /* error */ + elog(WARN, "lo_export: can't open unix file\"%s\"", + VARDATA(filename)); + } + + /* + * read in from the Unix file and write to the inversion file + */ + while ((nbytes = inv_read(lobj, buf, BUFSIZE)) > 0) { + tmp = write(fd, buf, nbytes); + if (tmp < nbytes) { + elog(WARN, "lo_export: error while writing \"%s\"", + VARDATA(filename)); + } + } + + (void) inv_close(lobj); + (void) close(fd); + + return 1; +} + + +/***************************************************************************** + * Support routines for this file + *****************************************************************************/ + +static int +newLOfd(LargeObjectDesc *lobjCookie) +{ + int i; + + for (i = 0; i < MAX_LOBJ_FDS; i++) { + + if (cookies[i] == NULL) { + cookies[i] = lobjCookie; + return i; + } + } + return -1; +} + +static void +deleteLOfd(int fd) +{ + cookies[fd] = NULL; +} diff --git a/src/backend/libpq/be-fsstubs.h b/src/backend/libpq/be-fsstubs.h new file mode 100644 index 00000000000..3929f42a69a --- /dev/null +++ b/src/backend/libpq/be-fsstubs.h @@ -0,0 +1,32 @@ +/*------------------------------------------------------------------------- + * + * be-fsstubs.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: be-fsstubs.h,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef BE_FSSTUBS_H +#define BE_FSSTUBS_H + +extern Oid lo_import(text *filename); +extern int4 lo_export(Oid lobjId, text *filename); + +extern Oid lo_creat(int mode); + +extern int lo_open(Oid lobjId, int mode); +extern int lo_close(int fd); +extern int lo_read(int fd, char *buf, int len); +extern int lo_write(int fd, char *buf, int len); +extern int lo_lseek(int fd, int offset, int whence); +extern int lo_tell(int fd); +extern int lo_unlink(Oid lobjId); + +extern struct varlena *LOread(int fd, int len); +extern int LOwrite(int fd, struct varlena *wbuf); + +#endif /* BE_FSSTUBS_H */ diff --git 
a/src/backend/libpq/be-pqexec.c b/src/backend/libpq/be-pqexec.c new file mode 100644 index 00000000000..1b1738d4fc4 --- /dev/null +++ b/src/backend/libpq/be-pqexec.c @@ -0,0 +1,382 @@ +/*------------------------------------------------------------------------- + * + * be-pqexec.c-- + * support for executing POSTGRES commands and functions from a + * user-defined function in a backend. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/libpq/Attic/be-pqexec.c,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * PQfn - call a POSTGRES function + * PQexec - execute a POSTGRES query + * + * NOTES + * These routines are compiled into the postgres backend. + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "tcop/dest.h" +#include "tcop/fastpath.h" +#include "tcop/tcopprot.h" +#include "lib/dllist.h" +#include "libpq/libpq-be.h" +#include "fmgr.h" +#include "utils/exc.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/palloc.h" + +/* ---------------------------------------------------------------- + * PQ interface routines + * ---------------------------------------------------------------- + */ + +/* ---------------- + * PQfn - Send a function call to the POSTGRES backend. + * + * fnid : function id + * result_buf : pointer to result buffer (&int if integer) + * result_len : length of return value. + * result_is_int : If the result is an integer, this must be non-zero + * args : pointer to a NULL terminated arg array. + * (length, if integer, and result-pointer) + * nargs : # of arguments in args array. + * + * This code scavanged from HandleFunctionRequest() in tcop/fastpath.h + * ---------------- + */ +char * +PQfn(int fnid, + int *result_buf, /* can't use void, dec compiler barfs */ + int result_len, + int result_is_int, + PQArgBlock *args, + int nargs) +{ + char *retval; /* XXX - should be datum, maybe ? */ + char *arg[8]; + int i; + + /* ---------------- + * fill args[] array + * ---------------- + */ + for (i = 0; i < nargs; i++) { + if (args[i].len == VAR_LENGTH_ARG) { + arg[i] = (char*) args[i].u.ptr; + } else if (args[i].len > 4) { + elog(WARN,"arg_length of argument %d too long",i); + } else { + arg[i] = (char*)args[i].u.integer; + } + } + + /* ---------------- + * call the postgres function manager + * ---------------- + */ + retval = (char *) + fmgr(fnid, arg[0], arg[1], arg[2], arg[3], + arg[4], arg[5], arg[6], arg[7]); + + /* ---------------- + * put the result in the buffer the user specified and + * return the proper code. + * ---------------- + */ + if (retval == (char *) NULL) /* void retval */ + return "0"; + + if (result_is_int) { + *result_buf = (int) retval; + } else { + memmove(result_buf, retval, result_len); + } + return "G"; +} + +/* ---------------- + * PQexec - Send a query to the POSTGRES backend + * + * The return value is a string. + * If 0 or more tuples fetched from the backend, return "P portal-name". + * If a query is does not return tuples, return "C query-command". + * If there is an error: return "E error-message". + * + * Note: if we get a serious error or an elog(WARN), then PQexec never + * returns because the system longjmp's back to the main loop. 
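A hedged sketch of a backend-side PQfn() call for a function of one integer argument that returns an integer; the function oid and wrapper are hypothetical, and the header set is abbreviated from what be-pqexec.c itself includes. Integer arguments travel with their length in len and their value in u.integer, exactly as the argument-filling loop above expects, and a "G" status means the result buffer was filled.

#include "postgres.h"
#include "libpq/libpq-be.h"

void
pqfn_example(void)
{
    PQArgBlock arg[1];
    int        result = 0;
    int        fnid = 1234;          /* hypothetical function oid */
    char      *status;

    arg[0].len = sizeof(int32);      /* four bytes, so passed by value */
    arg[0].u.integer = 42;

    status = PQfn(fnid, &result, sizeof(int32), 1 /* result_is_int */, arg, 1);
    if (status[0] == 'G')
    {
        /* result now holds the function's integer return value */
    }
}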
+ * ---------------- + */ +char * +PQexec(char *query) +{ + PortalEntry *entry = NULL; + char *result = NULL; + + /* ---------------- + * create a new portal and put it on top of the portal stack. + * ---------------- + */ + entry = (PortalEntry *) be_newportal(); + be_portalpush(entry); + + /* ---------------- + * pg_eval_dest will put the query results in a portal which will + * end up on the top of the portal stack. + * ---------------- + */ + pg_eval_dest(query, (char **) NULL, (Oid *) NULL, 0, Local); + + /* ---------------- + * pop the portal off the portal stack and return the + * result. Note if result is null, we return C. + * ---------------- + */ + entry = (PortalEntry *) be_portalpop(); + result = entry->result; + if (result == NULL) { + char *PQE = "Cnull PQexec result"; + result = pstrdup(PQE); + } + + if (result[0] != 'P') + { + /* some successful command was executed, + but it's not one where we return the portal name so + here we should be sure to clear out the portal + (since the caller has no handle on it) + */ + pbuf_close(entry->name); + + } + return result; +} + +/* ---------------------------------------------------------------- + * pqtest support + * ---------------------------------------------------------------- + */ + +/* ---------------- + * pqtest_PQexec takes a text query and returns the number of + * tuples it returns. Note: there is no need to PQclear() + * here - the memory will go away at end transaction. + * ---------------- + */ +int +pqtest_PQexec(char *q) +{ + PortalBuffer *a; + char *res; + int t; + + /* ---------------- + * execute the postgres query + * ---------------- + */ + res = PQexec(q); + + /* ---------------- + * return number of tuples in portal or 0 if command returns no tuples. + * ---------------- + */ + t = 0; + switch(res[0]) { + case 'P': + a = PQparray(&res[1]); + if (a == NULL) + elog(WARN, "pqtest_PQexec: PQparray could not find portal %s", + res); + + t = PQntuples(a); + break; + case 'C': + break; + default: + elog(NOTICE, "pqtest_PQexec: PQexec(%s) returns %s", q, res); + break; + } + + return t; +} + +/* ---------------- + * utilities for pqtest_PQfn() + * ---------------- + */ +char * +strmake(char *str, int len) +{ + char *newstr; + if (str == NULL) return NULL; + if (len <= 0) len = strlen(str); + + newstr = (char *) palloc((unsigned) len+1); + (void) strncpy(newstr, str, len); + newstr[len] = (char) 0; + return newstr; +} + +#define SKIP 0 +#define SCAN 1 + +static char spacestr[] = " "; + +static int +strparse(char *s, char **fields, int *offsets, int maxfields) +{ + int len = strlen(s); + char *cp = s, *end = cp + len, *ep; + int parsed = 0; + int mode = SKIP, i = 0; + + if (*(end - 1) == '\n') end--; + + for (i=0; i maxfields) + parsed = 1; + + } + } + return i; +} + +/* ---------------- + * pqtest_PQfn converts it's string into a PQArgBlock and + * calls the specified function, which is assumed to return + * an integer value. 
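+ *
+ *	The PQArgBlock convention it relies on (see PQfn above) is roughly:
+ *	a by-value argument uses len <= 4 with the value in u.integer, and
+ *	a by-reference argument uses len == VAR_LENGTH_ARG with the pointer
+ *	in u.ptr.  For instance (illustrative values; "t" is a hypothetical
+ *	pointer the caller already owns):
+ *
+ *		PQArgBlock a;
+ *		a.len = sizeof(int);		-- pass the integer 42 by value
+ *		a.u.integer = 42;
+ *
+ *		a.len = VAR_LENGTH_ARG;		-- pass t by reference
+ *		a.u.ptr = (int *) t;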
+ * ---------------- + */ +int +pqtest_PQfn(char *q) +{ + int k, j, i, v, f, offsets; + char *fields[8]; + PQArgBlock pqargs[7]; + int res; + char *pqres; + + /* ---------------- + * parse q into fields + * ---------------- + */ + i = strparse(q, fields, &offsets, 8); + printf("pqtest_PQfn: strparse returns %d fields\n", i); /* debug */ + if (i == 0) + return -1; + + /* ---------------- + * get the function id + * ---------------- + */ + f = atoi(fields[0]); + printf("pqtest_PQfn: func is %d\n", f); /* debug */ + if (f == 0) + return -1; + + /* ---------------- + * build a PQArgBlock + * ---------------- + */ + for (j=1; j +#include + +#include /* for O_ on some */ +#ifndef WIN32 +#include /* for SEEK_ on most */ +#endif /* WIN32 */ +#ifndef SEEK_SET +#include /* for SEEK_ on others */ +#endif /* SEEK_SET */ + +/* UNIX compatibility junk. This should be in all systems' include files, + but this is not always the case. */ + +#ifndef MAXNAMLEN +#define MAXNAMLEN 255 +#endif /* MAXNAMLEN */ + +struct pgdirent { + unsigned long d_ino; + unsigned short d_namlen; + char d_name[MAXNAMLEN+1]; +}; + +/* + * SysV struct dirent doesn't have d_namlen. + * This counts on d_name being last, which is moderately safe (ha) since + * it's the variable-length part of the structure. + */ +#ifdef SYSV_DIRENT +#define D_NAMLEN(dp) \ + ((dp)->d_reclen - offsetof(struct dirent, d_name[0])) +#else /* SYSV_DIRENT */ +#define D_NAMLEN(dp) \ + ((dp)->d_namlen) +#endif /* SYSV_DIRENT */ + +/* for stat(2) */ +#ifndef S_IRUSR +/* file modes */ + +#define S_IRWXU 00700 /* read, write, execute: owner */ +#define S_IRUSR 00400 /* read permission: owner */ +#define S_IWUSR 00200 /* write permission: owner */ +#define S_IXUSR 00100 /* execute permission: owner */ + +#define S_IRWXG 00070 /* read, write, execute: group */ +#define S_IRGRP 00040 /* read permission: group */ +#define S_IWGRP 00020 /* write permission: group */ +#define S_IXGRP 00010 /* execute permission: group */ + +#define S_IRWXO 00007 /* read, write, execute: other */ +#define S_IROTH 00004 /* read permission: other */ +#define S_IWOTH 00002 /* write permission: other */ +#define S_IXOTH 00001 /* execute permission: other */ + +#define _S_IFMT 0170000 /* type of file; sync with S_IFMT */ +#define _S_IFBLK 0060000 /* block special; sync with S_IFBLK */ +#define _S_IFCHR 0020000 /* character special sync with S_IFCHR */ +#define _S_IFDIR 0040000 /* directory; sync with S_IFDIR */ +#define _S_IFIFO 0010000 /* FIFO - named pipe; sync with S_IFIFO */ +#define _S_IFREG 0100000 /* regular; sync with S_IFREG */ + +#define S_IFDIR _S_IFDIR +#define S_IFREG _S_IFREG + +#define S_ISDIR( mode ) (((mode) & _S_IFMT) == _S_IFDIR) + +#endif /* S_IRUSR */ + +/* + * Inversion doesn't have links. + */ +#ifndef S_ISLNK +#define S_ISLNK(x) 0 +#endif + +/* + * Flags for inversion file system large objects. Normally, creat() + * takes mode arguments, but we don't use them in inversion, since + * you get postgres protections. Instead, we use the low sixteen bits + * of the integer mode argument to store the number of the storage + * manager to be used, and the high sixteen bits for flags. 
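+ *
+ *	For example (illustrative; storage manager number 0 is assumed to
+ *	be the default one here):
+ *
+ *		mode = INV_READ | INV_WRITE | 0;	-- read/write object, smgr #0
+ *		smgr = mode & INV_SMGRMASK;		-- recover the smgr number
+ *
+ *	which matches the way lo_import() above creates its inversion
+ *	object with just (INV_READ|INV_WRITE).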
+ */ + +#define INV_SMGRMASK 0x0000ffff +#define INV_ARCHIVE 0x00010000 +#define INV_WRITE 0x00020000 +#define INV_READ 0x00040000 + +/* Error values for p_errno */ +#define PEPERM 1 /* Not owner */ +#define PENOENT 2 /* No such file or directory */ +#define PEACCES 13 /* Permission denied */ +#define PEEXIST 17 /* File exists */ +#define PENOTDIR 20 /* Not a directory*/ +#define PEISDIR 21 /* Is a directory */ +#define PEINVAL 22 /* Invalid argument */ +#define PENAMETOOLONG 63 /* File name too long */ +#define PENOTEMPTY 66 /* Directory not empty */ +#define PEPGIO 99 /* postgres backend had problems */ + +#endif /* LIBPQ_FS_H */ diff --git a/src/backend/libpq/libpq.h b/src/backend/libpq/libpq.h new file mode 100644 index 00000000000..5fafbb148d6 --- /dev/null +++ b/src/backend/libpq/libpq.h @@ -0,0 +1,261 @@ +/*------------------------------------------------------------------------- + * + * libpq.h-- + * POSTGRES LIBPQ buffer structure definitions. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: libpq.h,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ + * + * NOTES + * This file contains definitions for structures and + * externs for functions used by both frontend applications + * and the POSTGRES backend. See the files libpq-fe.h and + * libpq-be.h for frontend/backend specific information + * + *------------------------------------------------------------------------- + */ +#ifndef LIBPQ_H +#define LIBPQ_H + +#include +#include +#include +#ifdef WIN32 +#include +#else +#include +#endif /* WIN32 */ + +#include "lib/dllist.h" +#include "utils/exc.h" +#include "postgres.h" + +#include "libpq/pqcomm.h" + +/* ---------------- + * PQArgBlock -- + * Information (pointer to array of this structure) required + * for the PQfn() call. + * ---------------- + */ +typedef struct { + int len; + int isint; + union { + int *ptr; /* can't use void (dec compiler barfs) */ + int integer; + } u; +} PQArgBlock; + +/* ---------------- + * TypeBlock -- + * Information about an attribute. + * ---------------- + */ +#define NameLength 16 + +typedef struct TypeBlock { + char name[NAMEDATALEN]; /* name of the attribute */ + int adtid; /* adtid of the type */ + int adtsize; /* adtsize of the type */ +} TypeBlock; + +/* ---------------- + * TupleBlock -- + * Data of a tuple. + * ---------------- + */ +#define TupleBlockSize 100 + +typedef struct TupleBlock { + char **values[TupleBlockSize]; /* an array of tuples */ + int *lengths[TupleBlockSize]; /* an array of length vec. foreach + tuple */ + struct TupleBlock *next; /* next tuple block */ + int tuple_index; /* current tuple index */ +} TupleBlock; + +/* ---------------- + * GroupBuffer -- + * A group of tuples with the same attributes. + * ---------------- + */ +typedef struct GroupBuffer { + int no_tuples; /* number of tuples in this group */ + int no_fields; /* number of attributes */ + TypeBlock *types; /* types of the attributes */ + TupleBlock *tuples; /* tuples in this group */ + struct GroupBuffer *next; /* next group */ +} GroupBuffer; + +/* ---------------- + * PortalBuffer -- + * Data structure of a portal buffer. + * ---------------- + */ +typedef struct PortalBuffer { + int rule_p; /* 1 if this is an asynchronized portal. 
*/ + int no_tuples; /* number of tuples in this portal buffer */ + int no_groups; /* number of tuple groups */ + GroupBuffer *groups; /* linked list of tuple groups */ +} PortalBuffer; + +/* ---------------- + * PortalEntry -- + * an entry in the global portal table + * + * Note: the portalcxt is only meaningful for PQcalls made from + * within a postgres backend. frontend apps should ignore it. + * ---------------- + */ +#define PortalNameLength 32 + +typedef struct PortalEntry { + char name[PortalNameLength]; /* name of this portal */ + PortalBuffer *portal; /* tuples contained in this portal */ + Pointer portalcxt; /* memory context (for backend) */ + Pointer result; /* result for PQexec */ +} PortalEntry; + +#define PORTALS_INITIAL_SIZE 32 +#define PORTALS_GROW_BY 32 + +/* in portalbuf.c */ +extern PortalEntry** portals; +extern size_t portals_array_size; + +/* + * Asynchronous notification + */ +typedef struct PQNotifyList { + char relname[NAMEDATALEN]; /* name of relation containing data */ + int be_pid; /* process id of backend */ + int valid; /* has this already been handled by user. */ +/* SLNode Node; */ +} PQNotifyList; + +/* + * Exceptions. + */ + +#define libpq_raise(X, Y) ExcRaise((Exception *)(X), (ExcDetail) (Y),\ + (ExcData)0, (ExcMessage) 0) + +/* in portal.c */ +extern Exception MemoryError, PortalError, PostquelError, ProtocolError; + +/* + * POSTGRES backend dependent Constants. + */ + +/* ERROR_MSG_LENGTH should really be the same as ELOG_MAXLEN in utils/elog.h*/ +#define ERROR_MSG_LENGTH 4096 +#define COMMAND_LENGTH 20 +#define REMARK_LENGTH 80 + +extern char PQerrormsg[ERROR_MSG_LENGTH]; /* in portal.c */ + +/* + * External functions. + */ + +/* + * prototypes for functions in portal.c + */ +extern void pqdebug(char *target, char *msg); +extern void pqdebug2(char *target, char *msg1, char *msg2); +extern void PQtrace(void); +extern void PQuntrace(void); +extern int PQnportals(int rule_p); +extern void PQpnames(char **pnames, int rule_p); +extern PortalBuffer *PQparray(char *pname); +extern int PQrulep(PortalBuffer *portal); +extern int PQntuples(PortalBuffer *portal); +extern int PQninstances(PortalBuffer *portal); +extern int PQngroups(PortalBuffer *portal); +extern int PQntuplesGroup(PortalBuffer *portal, int group_index); +extern int PQninstancesGroup(PortalBuffer *portal, int group_index); +extern int PQnfieldsGroup(PortalBuffer *portal, int group_index); +extern int PQfnumberGroup(PortalBuffer *portal, int group_index, char *field_name); +extern char *PQfnameGroup(PortalBuffer *portal, int group_index, int field_number); +extern int PQftypeGroup(PortalBuffer *portal, int group_index, + int field_number); +extern int PQfsizeGroup(PortalBuffer *portal, int group_index, + int field_number); +extern GroupBuffer *PQgroup(PortalBuffer *portal, int tuple_index); +extern int PQgetgroup(PortalBuffer *portal, int tuple_index); +extern int PQnfields(PortalBuffer *portal, int tuple_index); +extern int PQfnumber(PortalBuffer *portal, int tuple_index, char *field_name); + extern char *PQfname(PortalBuffer *portal, int tuple_index, int field_number); +extern int PQftype(PortalBuffer *portal, int tuple_index, int field_number); +extern int PQfsize(PortalBuffer *portal, int tuple_index, int field_number); +extern int PQsametype(PortalBuffer *portal, int tuple_index1, int tuple_index2); +extern char *PQgetvalue(PortalBuffer *portal, int tuple_index, int field_number); +extern char *PQgetAttr(PortalBuffer *portal, int tuple_index, int field_number); +extern int 
PQgetlength(PortalBuffer *portal, int tuple_index, int field_number); +extern void PQclear(char *pname); +extern void PQcleanNotify(void); +extern void PQnotifies_init(void); +extern PQNotifyList *PQnotifies(void); +extern void PQremoveNotify(PQNotifyList *nPtr); +extern void PQappendNotify(char *relname, int pid); +/* + * prototypes for functions in portalbuf.c + */ +extern caddr_t pbuf_alloc(size_t size); +extern void pbuf_free(caddr_t pointer); +extern PortalBuffer *pbuf_addPortal(void); +extern GroupBuffer *pbuf_addGroup(PortalBuffer *portal); +extern TypeBlock *pbuf_addTypes(int n); +extern TupleBlock *pbuf_addTuples(void); +extern char **pbuf_addTuple(int n); +extern int *pbuf_addTupleValueLengths(int n); +extern char *pbuf_addValues(int n); +extern PortalEntry *pbuf_addEntry(void); +extern void pbuf_freeEntry(int i); +extern void pbuf_freeTypes(TypeBlock *types); +extern void pbuf_freeTuples(TupleBlock *tuples, int no_tuples, int no_fields); +extern void pbuf_freeGroup(GroupBuffer *group); +extern void pbuf_freePortal(PortalBuffer *portal); +extern int pbuf_getIndex(char *pname); +extern void pbuf_setportalinfo(PortalEntry *entry, char *pname); +extern PortalEntry *pbuf_setup(char *pname); +extern void pbuf_close(char *pname); +extern GroupBuffer *pbuf_findGroup(PortalBuffer *portal, int group_index); +extern int pbuf_findFnumber(GroupBuffer *group, char *field_name); +extern void pbuf_checkFnumber(GroupBuffer *group, int field_number); +extern char *pbuf_findFname(GroupBuffer *group, int field_number); + +/* + * prototypes for functions in pqcomm.c + */ +extern void pq_init(int fd); +extern void pq_gettty(char *tp); +extern int pq_getport(void); +extern void pq_close(void); +extern void pq_flush(void); +extern int pq_getstr(char *s, int maxlen); +extern int PQgetline(char *s, int maxlen); +extern int PQputline(char *s); +extern int pq_getnchar(char *s, int off, int maxlen); +extern int pq_getint(int b); +extern void pq_putstr(char *s); +extern void pq_putnchar(char *s, int n); +extern void pq_putint(int i, int b); +extern int pq_sendoob(char *msg, int len); +extern int pq_recvoob(char *msgPtr, int *lenPtr); +extern int pq_getinaddr(struct sockaddr_in *sin, char *host, int port); +extern int pq_getinserv(struct sockaddr_in *sin, char *host, char *serv); +extern int pq_connect(char *dbname, char *user, char *args, char *hostName, + char *debugTty, char *execFile, short portName); +extern int StreamOpen(char *hostName, short portName, Port *port); +extern void pq_regoob(void (*fptr)()); +extern void pq_unregoob(void); +extern void pq_async_notify(void); +extern int StreamServerPort(char *hostName, short portName, int *fdP); +extern int StreamConnection(int server_fd, Port *port); +extern void StreamClose(int sock); + +#endif /* LIBPQ_H */ diff --git a/src/backend/libpq/portal.c b/src/backend/libpq/portal.c new file mode 100644 index 00000000000..ca27fd83089 --- /dev/null +++ b/src/backend/libpq/portal.c @@ -0,0 +1,783 @@ +/*------------------------------------------------------------------------- + * + * portal.c-- + * generalized portal support routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/libpq/Attic/portal.c,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * UTILITY ROUTINES + * pqdebug - send a string to the debugging output port + * pqdebug2 - send two strings to stdout + * PQtrace - turn on 
pqdebug() tracing + * PQuntrace - turn off pqdebug() tracing + * + * INTERFACE ROUTINES + * PQnportals - Return the number of open portals. + * PQpnames - Return all the portal names + * PQparray - Return the portal buffer given a portal name + * PQrulep - Return 1 if an asynchronous portal + * PQntuples - Return the number of tuples in a portal buffer + * PQninstances - same as PQntuples using object terminology + * PQngroups - Return the number of tuple groups in a portal buffer + * PQntuplesGroup - Return the number of tuples in a tuple group + * PQninstancesGroup - same as PQntuplesGroup using object terminology + * PQnfieldsGroup - Return the number of fields in a tuple group + * PQfnumberGroup - Return field number given (group index, field name) + * PQftypeGroup - Return field type given (group index, field index) + * PQfsizeGroup - Return field size given (group index, field index) + * PQfnameGroup - Return field name given (group index, field index) + * PQgroup - Return the tuple group that a particular tuple is in + * PQgetgroup - Return the index of the group that a tuple is in + * PQnfields - Return the number of fields in a tuple + * PQfnumber - Return the field index of a field name in a tuple + * PQfname - Return the name of a field + * PQftype - Return the type of a field + * PQfsize - Return the size of a field + * PQftype - Return the type of a field + * PQsametype - Return 1 if the two tuples have the same type + * PQgetvalue - Return an attribute (field) value + * PQgetlength - Return an attribute (field) length + * PQclear - free storage claimed by named portal + * PQnotifies - Return a list of relations on which notification + * has occurred. + * PQremoveNotify - Remove this notification from the list. + * + * NOTES + * These functions may be used by both frontend routines which + * communicate with a backend or by user-defined functions which + * are compiled or dynamically loaded into a backend. + * + * the portals[] array should be organized as a hash table for + * quick portal-by-name lookup. + * + * Do not confuse "PortalEntry" (or "PortalBuffer") with "Portal" + * see utils/mmgr/portalmem.c for why. 
-cim 2/22/91 + * + */ +#include /* for sprintf() */ +#include + +#include "c.h" +#include "lib/dllist.h" +#include "libpq/libpq.h" /* where the declarations go */ +#include "utils/exc.h" +#include "utils/palloc.h" + +/* ---------------- + * exceptions + * ---------------- + */ +Exception MemoryError = {"Memory Allocation Error"}; +Exception PortalError = {"Invalid arguments to portal functions"}; +Exception PostquelError = {"Sql Error"}; +Exception ProtocolError = {"Protocol Error"}; +char PQerrormsg[ERROR_MSG_LENGTH]; + +int PQtracep = 0; /* 1 to print out debugging messages */ +FILE *debug_port = (FILE *) NULL; + +static int +in_range(char *msg, int value, int min, int max) +{ + if (value < min || value >= max) { + (void) sprintf(PQerrormsg, "FATAL: %s, %d is not in range [%d,%d)\n", + msg, value, min, max); + pqdebug("%s", PQerrormsg); + fputs(PQerrormsg, stderr); + return(0); + } + return(1); +} + +static int +valid_pointer(char *msg, void *ptr) +{ + if (!ptr) { + (void) sprintf(PQerrormsg, "FATAL: %s\n", msg); + pqdebug("%s", PQerrormsg); + fputs(PQerrormsg, stderr); + return(0); + } + return(1); +} + +/* ---------------------------------------------------------------- + * PQ utility routines + * ---------------------------------------------------------------- + */ +void +pqdebug(char *target, char *msg) +{ + if (!target) + return; + + if (PQtracep) { + /* + * if nothing else was suggested default to stdout + */ + if (!debug_port) + debug_port = stdout; + fprintf(debug_port, target, msg); + fprintf(debug_port, "\n"); + } +} + +void +pqdebug2(char *target, char *msg1, char *msg2) +{ + if (!target) + return; + + if (PQtracep) { + /* + * if nothing else was suggested default to stdout + */ + if (!debug_port) + debug_port = stdout; + fprintf(debug_port, target, msg1, msg2); + fprintf(debug_port, "\n"); + } +} + +/* -------------------------------- + * PQtrace() / PQuntrace() + * -------------------------------- + */ +void +PQtrace() +{ + PQtracep = 1; +} + +void +PQuntrace() +{ + PQtracep = 0; +} + +/* ---------------------------------------------------------------- + * PQ portal interface routines + * ---------------------------------------------------------------- + */ + +/* -------------------------------- + * PQnportals - Return the number of open portals. + * If rule_p, only return asynchronous portals. + * -------------------------------- + */ +int +PQnportals(int rule_p) +{ + int i, n = 0; + + for (i = 0; i < portals_array_size; ++i) { + if (portals[i] && portals[i]->portal) { + if (!rule_p || portals[i]->portal->rule_p) { + ++n; + } + } + } + return(n); +} + +/* -------------------------------- + * PQpnames - Return all the portal names + * If rule_p, only return asynchronous portals. + * + * the caller must have allocated sufficient memory for char** pnames + * (an array of PQnportals strings of length PortalNameLength). + * + * notice that this assumes that the user is calling PQnportals and + * PQpnames with the same rule_p argument, and with no intervening + * portal closures. if not, you can get in heap big trouble.. 
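+ *
+ *	a minimal calling sequence might look like this (palloc is just one
+ *	possible allocator; any storage the caller owns will do):
+ *
+ *		int i, n = PQnportals(0);
+ *		char **names = (char **) palloc(n * sizeof(char *));
+ *		for (i = 0; i < n; i++)
+ *			names[i] = (char *) palloc(PortalNameLength);
+ *		PQpnames(names, 0);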
+ * -------------------------------- + */ +void +PQpnames(char **pnames, int rule_p) +{ + int i, cur_pname = 0; + + if (!valid_pointer("PQpnames: invalid name buffer", pnames)) + return; + + for (i = 0; i < portals_array_size; ++i) { + if (portals[i] && portals[i]->portal) { + if (!rule_p || portals[i]->portal->rule_p) { + (void) strncpy(pnames[cur_pname], portals[i]->name, PortalNameLength); + ++cur_pname; + } + } + } +} + +/* -------------------------------- + * PQparray - Return the portal buffer given a portal name + * -------------------------------- + */ +PortalBuffer * +PQparray(char *pname) +{ + int i; + + if (!valid_pointer("PQparray: invalid name buffer", pname)) + return NULL; + + if ((i = pbuf_getIndex(pname)) < 0) + return((PortalBuffer *) NULL); + return(portals[i]->portal); +} + +/* -------------------------------- + * PQrulep - Return 1 if an asynchronous portal + * -------------------------------- + */ +int +PQrulep(PortalBuffer *portal) +{ + if (!valid_pointer("PQrulep: invalid portal pointer", portal)) + return(-1); + + return(portal->rule_p); +} + +/* -------------------------------- + * PQntuples - Return the number of tuples in a portal buffer + * -------------------------------- + */ +int +PQntuples(PortalBuffer *portal) +{ + if (!valid_pointer("PQntuples: invalid portal pointer", portal)) + return(-1); + + return(portal->no_tuples); +} + +int +PQninstances(PortalBuffer *portal) +{ + return(PQntuples(portal)); +} + +/* -------------------------------- + * PQngroups - Return the number of tuple groups in a portal buffer + * -------------------------------- + */ +int +PQngroups(PortalBuffer *portal) +{ + if (!valid_pointer("PQngroups: invalid portal pointer", portal)) + return(-1); + + return(portal->no_groups); +} + +/* -------------------------------- + * PQntuplesGroup - Return the number of tuples in a tuple group + * -------------------------------- + */ +int +PQntuplesGroup(PortalBuffer *portal, int group_index) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQntuplesGroup: invalid portal pointer", portal) || + !in_range("PQntuplesGroup: group index", + group_index, 0, portal->no_groups)) + return(-1); + + gbp = pbuf_findGroup(portal, group_index); + if (gbp) + return(gbp->no_tuples); + return(-1); +} + +int +PQninstancesGroup(PortalBuffer *portal, int group_index) +{ + return(PQntuplesGroup(portal, group_index)); +} + +/* -------------------------------- + * PQnfieldsGroup - Return the number of fields in a tuple group + * -------------------------------- + */ +int +PQnfieldsGroup(PortalBuffer *portal, int group_index) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQnfieldsGroup: invalid portal pointer", portal) || + !in_range("PQnfieldsGroup: group index", + group_index, 0, portal->no_groups)) + return(-1); + gbp = pbuf_findGroup(portal, group_index); + if (gbp) + return(gbp->no_fields); + return(-1); +} + +/* -------------------------------- + * PQfnumberGroup - Return the field number (index) given + * the group index and the field name + * -------------------------------- + */ +int +PQfnumberGroup(PortalBuffer *portal, int group_index, char *field_name) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQfnumberGroup: invalid portal pointer", portal) || + !valid_pointer("PQfnumberGroup: invalid field name pointer", + field_name) || + !in_range("PQfnumberGroup: group index", + group_index, 0, portal->no_groups)) + return(-1); + gbp = pbuf_findGroup(portal, group_index); + if (gbp) + return(pbuf_findFnumber(gbp, field_name)); + return(-1); +} + +/* 
-------------------------------- + * PQfnameGroup - Return the field (attribute) name given + * the group index and field index. + * -------------------------------- + */ +char * +PQfnameGroup(PortalBuffer *portal, int group_index, int field_number) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQfnameGroup: invalid portal pointer", portal) || + !in_range("PQfnameGroup: group index", + group_index, 0, portal->no_groups)) + return((char *) NULL); + + if ((gbp = pbuf_findGroup(portal, group_index)) && + in_range("PQfnameGroup: field number", + field_number, 0, gbp->no_fields)) + return(pbuf_findFname(gbp, field_number)); + return((char *) NULL); +} + +/* -------------------------------- + * PQftypeGroup - Return the type of a field given + * the group index and field index + * -------------------------------- + */ +int +PQftypeGroup(PortalBuffer *portal, int group_index, int field_number) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQftypeGroup: invalid portal pointer", portal) || + !in_range("PQftypeGroup: group index", + group_index, 0, portal->no_groups)) + return(-1); + + if ((gbp = pbuf_findGroup(portal, group_index)) && + in_range("PQftypeGroup: field number", field_number, 0, gbp->no_fields)) + return(gbp->types[field_number].adtid); + return(-1); +} + +/* -------------------------------- + * PQfsizeGroup - Return the size of a field given + * the group index and field index + * -------------------------------- + */ +int +PQfsizeGroup(PortalBuffer *portal, int group_index, int field_number) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQfsizeGroup: invalid portal pointer", portal) || + !in_range("PQfsizeGroup: tuple index", + group_index, 0, portal->no_groups)) + return(-1); + + if ((gbp = pbuf_findGroup(portal, group_index)) && + in_range("PQfsizeGroup: field number", field_number, 0, gbp->no_fields)) + return(gbp->types[field_number].adtsize); + return(-1); +} + + +/* -------------------------------- + * PQgroup - Return the tuple group that a particular tuple is in + * -------------------------------- + */ +GroupBuffer * +PQgroup(PortalBuffer *portal, int tuple_index) +{ + GroupBuffer *gbp; + int tuple_count = 0; + + if (!valid_pointer("PQgroup: invalid portal pointer", portal) || + !in_range("PQgroup: tuple index", + tuple_index, 0, portal->no_tuples)) + return((GroupBuffer *) NULL); + + for (gbp = portal->groups; + gbp && tuple_index >= (tuple_count += gbp->no_tuples); + gbp = gbp->next) + ; + if (!in_range("PQgroup: tuple not found: tuple index", + tuple_index, 0, tuple_count)) + return((GroupBuffer *) NULL); + return(gbp); +} + +/* -------------------------------- + * PQgetgroup - Return the index of the group that a + * particular tuple is in + * -------------------------------- + */ +int +PQgetgroup(PortalBuffer *portal, int tuple_index) +{ + GroupBuffer *gbp; + int tuple_count = 0, group_count = 0; + + if (!valid_pointer("PQgetgroup: invalid portal pointer", portal) || + !in_range("PQgetgroup: tuple index", + tuple_index, 0, portal->no_tuples)) + return(-1); + + for (gbp = portal->groups; + gbp && tuple_index >= (tuple_count += gbp->no_tuples); + gbp = gbp->next) + ++group_count; + if (!gbp || !in_range("PQgetgroup: tuple not found: tuple index", + tuple_index, 0, tuple_count)) + return(-1); + return(group_count); +} + +/* -------------------------------- + * PQnfields - Return the number of fields in a tuple + * -------------------------------- + */ +int +PQnfields(PortalBuffer *portal, int tuple_index) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQnfields: invalid 
portal pointer", portal) || + !in_range("PQnfields: tuple index", + tuple_index, 0, portal->no_tuples)) + return(-1); + gbp = PQgroup(portal, tuple_index); + if (gbp) + return(gbp->no_fields); + return(-1); +} + +/* -------------------------------- + * PQfnumber - Return the field index of a given + * field name within a tuple. + * -------------------------------- + */ +int +PQfnumber(PortalBuffer *portal, int tuple_index, char *field_name) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQfnumber: invalid portal pointer", portal) || + !valid_pointer("PQfnumber: invalid field name pointer", field_name) || + !in_range("PQfnumber: tuple index", + tuple_index, 0, portal->no_tuples)) + return(-1); + gbp = PQgroup(portal, tuple_index); + if (gbp) + return(pbuf_findFnumber(gbp, field_name)); + return(-1); +} + +/* -------------------------------- + * PQfname - Return the name of a field + * -------------------------------- + */ +char * +PQfname(PortalBuffer *portal, int tuple_index, int field_number) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQfname: invalid portal pointer", portal) || + !in_range("PQfname: tuple index", + tuple_index, 0, portal->no_tuples)) + return((char *) NULL); + + if ((gbp = PQgroup(portal, tuple_index)) && + in_range("PQfname: field number", + field_number, 0, gbp->no_fields)) + return(pbuf_findFname(gbp, field_number)); + return((char *) NULL); +} + +/* -------------------------------- + * PQftype - Return the type of a field + * -------------------------------- + */ +int +PQftype(PortalBuffer *portal, int tuple_index, int field_number) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQftype: invalid portal pointer", portal) || + !in_range("PQfname: tuple index", + tuple_index, 0, portal->no_tuples)) + return(-1); + + if ((gbp = PQgroup(portal, tuple_index)) && + in_range("PQftype: field number", field_number, 0, gbp->no_fields)) + return(gbp->types[field_number].adtid); + return(-1); +} + +/* -------------------------------- + * PQfsize - Return the size of a field + * -------------------------------- + */ +int +PQfsize(PortalBuffer *portal, int tuple_index, int field_number) +{ + GroupBuffer *gbp; + + if (!valid_pointer("PQfsize: invalid portal pointer", portal) || + !in_range("PQfsize: tuple index", + tuple_index, 0, portal->no_tuples)) + return(-1); + + if ((gbp = PQgroup(portal, tuple_index)) && + in_range("PQfsize: field number", field_number, 0, gbp->no_fields)) + return(gbp->types[field_number].adtsize); + return(-1); +} + + + +/* -------------------------------- + * PQsametype - Return 1 if the two tuples have the same type + * (in the same group) + * -------------------------------- + */ +int +PQsametype(PortalBuffer *portal, int tuple_index1, int tuple_index2) +{ + GroupBuffer *gbp1, *gbp2; + + if (!valid_pointer("PQsametype: invalid portal pointer", portal) || + !in_range("PQsametype: tuple index 1", + tuple_index1, 0, portal->no_tuples) || + !in_range("PQsametype: tuple index 2", + tuple_index2, 0, portal->no_tuples)) + return(-1); + + gbp1 = PQgroup(portal, tuple_index1); + gbp2 = PQgroup(portal, tuple_index2); + if (gbp1 && gbp2) + return(gbp1 == gbp2); + return(-1); +} + +static TupleBlock * +PQGetTupleBlock(PortalBuffer *portal, + int tuple_index, + int *tuple_offset) +{ + GroupBuffer *gbp; + TupleBlock *tbp; + int tuple_count = 0; + + if (!valid_pointer("PQGetTupleBlock: invalid portal pointer", portal) || + !valid_pointer("PQGetTupleBlock: invalid offset pointer", + tuple_offset) || + !in_range("PQGetTupleBlock: tuple index", + tuple_index, 0, 
portal->no_tuples)) + return((TupleBlock *) NULL); + + for (gbp = portal->groups; + gbp && tuple_index >= (tuple_count += gbp->no_tuples); + gbp = gbp->next) + ; + if (!gbp || + !in_range("PQGetTupleBlock: tuple not found: tuple index", + tuple_index, 0, tuple_count)) + return((TupleBlock *) NULL); + tuple_count -= gbp->no_tuples; + for (tbp = gbp->tuples; + tbp && tuple_index >= (tuple_count += TupleBlockSize); + tbp = tbp->next) + ; + if (!tbp || + !in_range("PQGetTupleBlock: tuple not found: tuple index", + tuple_index, 0, tuple_count)) + return((TupleBlock *) NULL); + tuple_count -= TupleBlockSize; + + *tuple_offset = tuple_index - tuple_count; + return(tbp); +} + +/* -------------------------------- + * PQgetvalue - Return an attribute (field) value + * -------------------------------- + */ +char * +PQgetvalue(PortalBuffer *portal, + int tuple_index, + int field_number) +{ + TupleBlock *tbp; + int tuple_offset; + + tbp = PQGetTupleBlock(portal, tuple_index, &tuple_offset); + if (tbp) + return(tbp->values[tuple_offset][field_number]); + return((char *) NULL); +} + +/* -------------------------------- + * PQgetAttr - Return an attribute (field) value + * this differs from PQgetvalue in that the value returned is + * a copy. The CALLER is responsible for free'ing the data returned. + * -------------------------------- + */ +char * +PQgetAttr(PortalBuffer *portal, + int tuple_index, + int field_number) +{ + TupleBlock *tbp; + int tuple_offset; + int len; + char* result = NULL; + + tbp = PQGetTupleBlock(portal, tuple_index, &tuple_offset); + if (tbp) { + len = tbp->lengths[tuple_offset][field_number]; + result = malloc(len + 1); + memcpy(result, + tbp->values[tuple_offset][field_number], + len); + result[len] = '\0'; + } + return result; +} + + +/* -------------------------------- + * PQgetlength - Return an attribute (field) length + * -------------------------------- + */ +int +PQgetlength(PortalBuffer *portal, + int tuple_index, + int field_number) +{ + TupleBlock *tbp; + int tuple_offset; + + tbp = PQGetTupleBlock(portal, tuple_index, &tuple_offset); + if (tbp) + return(tbp->lengths[tuple_offset][field_number]); + return(-1); +} + +/* ---------------- + * PQclear - free storage claimed by named portal + * ---------------- + */ +void +PQclear(char *pname) +{ + if (!valid_pointer("PQclear: invalid portal name pointer", pname)) + return; + pbuf_close(pname); +} + +/* + * async notification. + * This is going away with pending rewrite of comm. code... + */ +/* static SLList pqNotifyList;*/ +static Dllist *pqNotifyList = NULL; + +/* remove invalid notifies before returning */ +void +PQcleanNotify() +{ + Dlelem *e, *next; + PQNotifyList *p; + + e = DLGetHead(pqNotifyList); + + while (e) { + next = DLGetSucc(e); + p = (PQNotifyList*)DLE_VAL(e); + if (p->valid == 0) { + DLRemove(e); + DLFreeElem(e); + pfree(p); + } + e = next; + } +} + +void +PQnotifies_init() +{ + Dlelem *e; + PQNotifyList *p; + + if (pqNotifyList == NULL) { + pqNotifyList = DLNewList(); + } + else { + /* clean all notifies */ + for (e = DLGetHead(pqNotifyList); e != NULL; e = DLGetSucc(e)) { + p = (PQNotifyList*)DLE_VAL(e); + p->valid = 0; + } + PQcleanNotify(); + } +} + +PQNotifyList * +PQnotifies() +{ + Dlelem *e; + PQcleanNotify(); + e = DLGetHead(pqNotifyList); + return (e ? 
(PQNotifyList*)DLE_VAL(e) : NULL); +} + +void +PQremoveNotify(PQNotifyList *nPtr) +{ + nPtr->valid = 0; /* remove later */ +} + +void +PQappendNotify(char *relname, int pid) +{ + PQNotifyList *p; + + if (pqNotifyList == NULL) + pqNotifyList = DLNewList(); + + p = (PQNotifyList*)pbuf_alloc(sizeof(PQNotifyList)); + strncpy(p->relname, relname, NAMEDATALEN); + p->be_pid = pid; + p->valid = 1; + DLAddTail(pqNotifyList, DLNewElem(p)); +} diff --git a/src/backend/libpq/portalbuf.c b/src/backend/libpq/portalbuf.c new file mode 100644 index 00000000000..f927e268edf --- /dev/null +++ b/src/backend/libpq/portalbuf.c @@ -0,0 +1,511 @@ +/*------------------------------------------------------------------------- + * + * portalbuf.c-- + * portal buffer support routines for src/libpq/portal.c + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/libpq/Attic/portalbuf.c,v 1.1.1.1 1996/07/09 06:21:30 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * pbuf_alloc - allocate memory for libpq routines + * pbuf_free - free memory for libpq routines + * pbuf_addPortal - Allocate a new portal buffer + * pbuf_addGroup - Add a new tuple group to the portal + * pbuf_addTypes - Allocate n type blocks + * pbuf_addTuples - Allocate a tuple block + * pbuf_addTuple - Allocate a tuple of n fields (attributes) + * pbuf_addValues - Allocate n bytes for a value + * pbuf_addEntry - Allocate a portal entry + * pbuf_freeEntry - Free a portal entry in the portal table + * pbuf_freeTypes - Free up the space used by a portal + * pbuf_freeTuples - free space used by tuple block + * pbuf_freeGroup - free space used by group, types and tuples + * pbuf_freePortal - free space used by portal and portal's group + * pbuf_getIndex - Return the index of the portal entry + * pbuf_setup - Set up a portal for dumping data + * pbuf_close - Close a portal, remove it from the portal table + * pbuf_findGroup - Return group given the group_index + * pbuf_findFnumber - Return field index of a given field within a group + * pbuf_findFname - Find the field name given the field index + * pbuf_checkFnumber - signal an error if field number is out of bounds + * + * NOTES + * These functions may be used by both frontend routines which + * communicate with a backend or by user-defined functions which + * are compiled or dynamically loaded into a backend. + * + * the portals[] array should be organized as a hash table for + * quick portal-by-name lookup. + * + * Do not confuse "PortalEntry" (or "PortalBuffer") with "Portal" + * see utils/mmgr/portalmem.c for why. 
-cim 2/22/91 + * + */ +#include +#include "c.h" + +#include "libpq/libpq.h" /* where the declarations go */ +#include "utils/exc.h" +#include "utils/palloc.h" + +PortalEntry** portals = (PortalEntry**) NULL; +size_t portals_array_size = 0; + +/* portals array memory is malloc'd instead of using MemoryContexts */ +/* since it will be used by both front and backend programs*/ +/* GlobalMemory portals_mmcxt = (GlobalMemory) NULL; */ + +/* ------------------------------- + * portals_realloc -- + * grow the size of the portals array by size + * + * also ensures that elements are initially NULL + */ + +static void +portals_realloc(size_t size) +{ + size_t oldsize; + int i; + PortalEntry** newp; + + oldsize = portals_array_size; + + portals_array_size += size; + if (portals) + newp= (PortalEntry**)realloc(portals, + portals_array_size*sizeof(PortalEntry*)); + else + newp= (PortalEntry**)malloc(portals_array_size*sizeof(PortalEntry*)); + + if (newp) + portals = newp; + else + libpq_raise(&PortalError, + form("Cannot alloc more memory in portals_realloc")); + + for (i=oldsize;irule_p = 0; + portal->no_tuples = 0; + portal->no_groups = 0; + portal->groups = NULL; + + return (portal); +} + +/* -------------------------------- + * pbuf_addGroup - Add a new tuple group to the portal + * -------------------------------- + */ +GroupBuffer * +pbuf_addGroup(PortalBuffer *portal) +{ + GroupBuffer *group, *group1; + + group = (GroupBuffer *) + pbuf_alloc(sizeof (GroupBuffer)); + + /* Initialize the new group buffer. */ + group->no_tuples = 0; + group->no_fields = 0; + group->types = NULL; + group->tuples = NULL; + group->next = NULL; + + if ((group1 = portal->groups) == NULL) + portal->groups = group; + else { + while (group1->next != NULL) + group1 = group1->next; + group1->next = group; + } + + return (group); +} + +/* -------------------------------- + * pbuf_addTypes - Allocate n type blocks + * -------------------------------- + */ +TypeBlock * +pbuf_addTypes(int n) +{ + TypeBlock *types; + + types = (TypeBlock *) + pbuf_alloc(n * sizeof (TypeBlock)); + + return (types); +} + +/* -------------------------------- + * pbuf_addTuples - Allocate a tuple block + * -------------------------------- + */ +TupleBlock * +pbuf_addTuples() +{ + TupleBlock *tuples; + + tuples = (TupleBlock *) + pbuf_alloc(sizeof (TupleBlock)); + + tuples->next = NULL; + tuples->tuple_index = 0; + + return (tuples); +} + +/* -------------------------------- + * pbuf_addTuple - Allocate a tuple of n fields (attributes) + * -------------------------------- + */ +char ** +pbuf_addTuple(int n) +{ + return (char **) + pbuf_alloc(n * sizeof (char *)); +} + +/* -------------------------------- + * pbuf_addTupleValueLengths - Allocate a tuple of n lengths (attributes) + * -------------------------------- + */ +int * +pbuf_addTupleValueLengths(int n) +{ + return (int *) + pbuf_alloc(n * sizeof(int)); +} + +/* -------------------------------- + * pbuf_addValues - Allocate n bytes for a value + * -------------------------------- + */ +char * +pbuf_addValues(int n) +{ + return + pbuf_alloc(n); +} + +/* -------------------------------- + * pbuf_addEntry - Allocate a portal entry + * -------------------------------- + */ +PortalEntry *pbuf_addEntry() +{ + return (PortalEntry *) + pbuf_alloc (sizeof (PortalEntry)); +} + +/* -------------------------------- + * pbuf_freeEntry - Free a portal entry in the portal table + * the portal is freed separately. 
+ * -------------------------------- + */ +void +pbuf_freeEntry(int i) +{ + if (portals) + { + pbuf_free ((caddr_t)portals[i]); + portals[i] = NULL; + } +} + + +/* -------------------------------- + * pbuf_freeTypes - Free up the space used by a portal + * -------------------------------- + */ +void +pbuf_freeTypes(TypeBlock *types) +{ + pbuf_free((caddr_t)types); +} + +/* -------------------------------- + * pbuf_freeTuples - free space used by tuple block + * -------------------------------- + */ +void +pbuf_freeTuples(TupleBlock *tuples, + int no_tuples, + int no_fields) +{ + int i, j; + + if (no_tuples > TupleBlockSize) { + pbuf_freeTuples (tuples->next, no_tuples - TupleBlockSize, no_fields); + no_tuples = TupleBlockSize; + } + + /* For each tuple, free all its attribute values. */ + for (i = 0; i < no_tuples; i++) { + for (j = 0; j < no_fields; j++) + if (tuples->values[i][j] != NULL) + pbuf_free((caddr_t)tuples->values[i][j]); + if (tuples->lengths[i]) + pbuf_free((caddr_t)tuples->lengths[i]); + if (tuples->values[i]) + pbuf_free((caddr_t)tuples->values[i]); + } + + pbuf_free((caddr_t)tuples); +} + +/* -------------------------------- + * pbuf_freeGroup - free space used by group, types and tuples + * -------------------------------- + */ +void +pbuf_freeGroup(GroupBuffer *group) +{ + if (group->next != NULL) + pbuf_freeGroup(group->next); + + if (group->types != NULL) + pbuf_freeTypes(group->types); + + if (group->tuples != NULL) + pbuf_freeTuples(group->tuples, group->no_tuples,group->no_fields); + + pbuf_free((caddr_t)group); +} + +/* -------------------------------- + * pbuf_freePortal - free space used by portal and portal's group + * -------------------------------- + */ +void +pbuf_freePortal(PortalBuffer *portal) +{ + if (portal->groups != NULL) + pbuf_freeGroup(portal->groups); + + pbuf_free((caddr_t)portal); +} + +/* -------------------------------- + * pbuf_getIndex - Return the index of the portal entry + * note: portals[] maps portal names to portal buffers. + * -------------------------------- + */ +int +pbuf_getIndex(char *pname) +{ + int i; + + if (portals) { + for (i = 0; i < portals_array_size; i++) + if (portals[i] != NULL && + strncmp(portals[i]->name, pname, PortalNameLength) == 0) + return i; + } + + return (-1); +} + +/* -------------------------------- + * pbuf_setportalname - assign a user given name to a portal + * -------------------------------- + */ +void +pbuf_setportalinfo(PortalEntry *entry, char *pname) +{ + if (entry) + strncpy(entry->name, pname, PortalNameLength-1); + entry->name[PortalNameLength-1] = '\0'; +} + +/* -------------------------------- + * pbuf_setup - Set up a portal for dumping data + * -------------------------------- + */ +PortalEntry * +pbuf_setup(char *pname) +{ + int i; + + if (!portals) /* the portals array has not been allocated yet */ + { + /* allocate portals[] array here */ + portals_realloc(PORTALS_INITIAL_SIZE); + } + + /* If a portal with the same name already exists, close it. */ + /* else look for an empty entry in the portal table. 
*/ + if ((i = pbuf_getIndex(pname)) != -1) + pbuf_freePortal(portals[i]->portal); + else { + for (i = 0; i < portals_array_size; i++) + if (portals[i] == NULL) + break; + + /* If the portal table is full, enlarge it */ + if (i >= portals_array_size) + portals_realloc(PORTALS_GROW_BY); + + portals[i] = pbuf_addEntry(); + strncpy(portals[i]->name, pname, PortalNameLength); + } + portals[i]->portal = pbuf_addPortal(); + portals[i]->portalcxt = NULL; + portals[i]->result = NULL; + + return portals[i]; +} + +/* -------------------------------- + * pbuf_close - Close a portal, remove it from the portal table + * and free up the space + * -------------------------------- + */ +void +pbuf_close(char *pname) +{ + int i; + + if ((i = pbuf_getIndex(pname)) == -1) + libpq_raise(&PortalError, form("Portal %s does not exist.", pname)); + + pbuf_freePortal(portals[i]->portal); + pbuf_freeEntry(i); +} + +/* -------------------------------- + * pbuf_findGroup - Return the group given the group_index + * -------------------------------- + */ +GroupBuffer * +pbuf_findGroup(PortalBuffer *portal, + int group_index) +{ + GroupBuffer *group; + + group = portal->groups; + while (group_index > 0 && group != NULL) { + group = group->next; + group_index--; + } + + if (group == NULL) + libpq_raise(&PortalError, + form("Group index %d out of bound.", group_index)); + + return (group); +} + +/* -------------------------------- + * pbuf_findFnumber - Return the field index of a given field within a group + * -------------------------------- + */ +int +pbuf_findFnumber(GroupBuffer *group, + char *field_name) +{ + TypeBlock *types; + int i; + + types = group->types; + + for (i = 0; i < group->no_fields; i++) + if (strncmp(types[i].name, field_name, NAMEDATALEN) == 0) + return (i); + + libpq_raise(&PortalError, + form("Field-name %s does not exist.", field_name)); + + /* not reached, here to make compiler happy */ + return 0; + +} + +/* -------------------------------- + * pbuf_checkFnumber - signal an error if field number is out of bounds + * -------------------------------- + */ +void +pbuf_checkFnumber(GroupBuffer *group, + int field_number) +{ + if (field_number < 0 || field_number >= group->no_fields) + libpq_raise(&PortalError, + form("Field number %d out of bound.", field_number)); +} + +/* -------------------------------- + * pbuf_findFname - Find the field name given the field index + * -------------------------------- + */ +char * +pbuf_findFname(GroupBuffer *group, + int field_number) +{ + pbuf_checkFnumber(group, field_number); + return + (group->types[field_number]).name; +} + diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c new file mode 100644 index 00000000000..7fc4a85f200 --- /dev/null +++ b/src/backend/libpq/pqcomm.c @@ -0,0 +1,724 @@ +/*------------------------------------------------------------------------- + * + * pqcomm.c-- + * Communication functions between the Frontend and the Backend + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/libpq/pqcomm.c,v 1.1.1.1 1996/07/09 06:21:31 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * INTERFACE ROUTINES + * pq_gettty - return the name of the tty in the given buffer + * pq_getport - return the PGPORT setting + * pq_close - close input / output connections + * pq_flush - flush pending output + * pq_getstr - get a null terminated string from connection + * pq_getnchar - get n characters from 
connection + * pq_getint - get an integer from connection + * pq_putstr - send a null terminated string to connection + * pq_putnchar - send n characters to connection + * pq_putint - send an integer to connection + * pq_getinaddr - initialize address from host and port number + * pq_getinserv - initialize address from host and service name + * pq_connect - create remote input / output connection + * pq_accept - accept remote input / output connection + * pq_async_notify - receive notification from backend. + * + * NOTES + * These functions are used by both frontend applications and + * the postgres backend. + * + */ +#include "libpq/pqsignal.h" /* substitute for */ +#include +#include +#ifndef WIN32 +#include /* for ttyname() */ +#include +#include +#include +#include +#else +#include +#endif /* WIN32 */ +#include +#include + +#ifdef PORTNAME_linux +#ifndef SOMAXCONN +#define SOMAXCONN 5 /* from Linux listen(2) man page */ +#endif /* SOMAXCONN */ +#endif /* PORTNAME_linux */ + +#include "c.h" +#include "libpq/auth.h" +#include "libpq/libpq.h" /* where the declarations go */ +#include "libpq/pqcomm.h" +#include "utils/elog.h" + +/* ---------------- + * declarations + * ---------------- + */ +FILE *Pfout, *Pfin; +FILE *Pfdebug; /* debugging libpq */ +int PQAsyncNotifyWaiting; /* for async. notification */ + +/* -------------------------------- + * pq_init - open portal file descriptors + * -------------------------------- + */ +void +pq_init(int fd) +{ +#ifdef WIN32 + int in, out; + + in = _open_osfhandle(fd, _O_RDONLY); + out = _open_osfhandle(fd, _O_APPEND); + Pfin = fdopen(in, "rb"); + Pfout = fdopen(out, "wb"); +#else + Pfin = fdopen(fd, "r"); + Pfout = fdopen(dup(fd), "w"); +#endif /* WIN32 */ + if (!Pfin || !Pfout) + elog(FATAL, "pq_init: Couldn't initialize socket connection"); + PQnotifies_init(); + if (getenv("LIBPQ_DEBUG")) { + Pfdebug = stderr; + }else { + Pfdebug = NULL; + } +} + +/* ------------------------- + * pq_getc(File* fin) + * + * get a character from the input file, + * + * if Pfdebug is set, also echo the character fetched into Pfdebug + * + * used for debugging libpq + */ +static int +pq_getc(FILE* fin) +{ + int c; + + c = getc(fin); + if (Pfdebug && c != EOF) + putc(c,Pfdebug); + return c; +} + +/* -------------------------------- + * pq_gettty - return the name of the tty in the given buffer + * -------------------------------- + */ +void +pq_gettty(char *tp) +{ + (void) strncpy(tp, ttyname(0), 19); +} + +/* -------------------------------- + * pq_getport - return the PGPORT setting + * -------------------------------- + */ +int +pq_getport() +{ + char *envport = getenv("PGPORT"); + + if (envport) + return(atoi(envport)); + return(atoi(POSTPORT)); +} + +/* -------------------------------- + * pq_close - close input / output connections + * -------------------------------- + */ +void +pq_close() +{ + if (Pfin) { + fclose(Pfin); + Pfin = NULL; + } + if (Pfout) { + fclose(Pfout); + Pfout = NULL; + } + PQAsyncNotifyWaiting = 0; + PQnotifies_init(); + pq_unregoob(); +} + +/* -------------------------------- + * pq_flush - flush pending output + * -------------------------------- + */ +void +pq_flush() +{ + if (Pfout) + fflush(Pfout); +} + +/* -------------------------------- + * pq_getstr - get a null terminated string from connection + * -------------------------------- + */ +int +pq_getstr(char *s, int maxlen) +{ + int c; + + if (Pfin == (FILE *) NULL) { +/* elog(DEBUG, "Input descriptor is null"); */ + return(EOF); + } + + while (maxlen-- && (c = pq_getc(Pfin)) != 
EOF && c) + *s++ = c; + *s = '\0'; + + /* ----------------- + * If EOF reached let caller know. + * (This will only happen if we hit EOF before the string + * delimiter is reached.) + * ----------------- + */ + if (c == EOF) + return(EOF); + return(!EOF); +} + +/* + * USER FUNCTION - gets a newline-terminated string from the backend. + * + * Chiefly here so that applications can use "COPY to stdout" + * and read the output string. Returns a null-terminated string in s. + * + * PQgetline reads up to maxlen-1 characters (like fgets(3)) but strips + * the terminating \n (like gets(3)). + * + * RETURNS: + * EOF if it is detected or invalid arguments are given + * 0 if EOL is reached (i.e., \n has been read) + * (this is required for backward-compatibility -- this + * routine used to always return EOF or 0, assuming that + * the line ended within maxlen bytes.) + * 1 in other cases + */ +int +PQgetline(char *s, int maxlen) +{ + int c = '\0'; + + if (!Pfin || !s || maxlen <= 1) + return(EOF); + + for (; maxlen > 1 && (c = pq_getc(Pfin)) != '\n' && c != EOF; --maxlen) { + *s++ = c; + } + *s = '\0'; + + if (c == EOF) { + return(EOF); /* error -- reached EOF before \n */ + } else if (c == '\n') { + return(0); /* done with this line */ + } + return(1); /* returning a full buffer */ +} + +/* + * USER FUNCTION - sends a string to the backend. + * + * Chiefly here so that applications can use "COPY from stdin". + * + * RETURNS: + * 0 in all cases. + */ +int +PQputline(char *s) +{ + if (Pfout) { + (void) fputs(s, Pfout); + fflush(Pfout); + } + return(0); +} + +/* -------------------------------- + * pq_getnchar - get n characters from connection + * -------------------------------- + */ +int +pq_getnchar(char *s, int off, int maxlen) +{ + int c; + + if (Pfin == (FILE *) NULL) { +/* elog(DEBUG, "Input descriptor is null"); */ + return(EOF); + } + + s += off; + while (maxlen-- && (c = pq_getc(Pfin)) != EOF) + *s++ = c; + + /* ----------------- + * If EOF reached let caller know + * ----------------- + */ + if (c == EOF) + return(EOF); + return(!EOF); +} + +/* -------------------------------- + * pq_getint - get an integer from connection + * we receive an integer a byte at a type and reconstruct it so that + * machines with different ENDIAN representations can talk to each + * other + * -------------------------------- + */ +int +pq_getint(int b) +{ + int n, c, p; + + if (Pfin == (FILE *) NULL) { +/* elog(DEBUG, "pq_getint: Input descriptor is null"); */ + return(EOF); + } + + n = p = 0; + while (b-- && (c = pq_getc(Pfin)) != EOF && p < 32) { + n |= (c & 0xff) << p; + p += 8; + } + + return(n); +} + +/* -------------------------------- + * pq_putstr - send a null terminated string to connection + * -------------------------------- + */ +void +pq_putstr(char *s) +{ + int status; + + if (Pfout) { + status = fputs(s, Pfout); + if (status == EOF) { + (void) sprintf(PQerrormsg, + "FATAL: pq_putstr: fputs() failed: errno=%d\n", + errno); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + } + status = fputc('\0', Pfout); + if (status == EOF) { + (void) sprintf(PQerrormsg, + "FATAL: pq_putstr: fputc() failed: errno=%d\n", + errno); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + } + } +} + +/* -------------------------------- + * pq_putnchar - send n characters to connection + * -------------------------------- + */ +void +pq_putnchar(char *s, int n) +{ + int status; + + if (Pfout) { + while (n--) { + status = fputc(*s++, Pfout); + if (status == EOF) { + (void) sprintf(PQerrormsg, + "FATAL: 
pq_putnchar: fputc() failed: errno=%d\n", + errno); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + } + } + } +} + +/* -------------------------------- + * pq_putint - send an integer to connection + * we chop an integer into bytes and send individual bytes + * machines with different ENDIAN representations can still talk to each + * other + * -------------------------------- + */ +void +pq_putint(int i, int b) +{ + int status; + + if (b > 4) + b = 4; + + if (Pfout) { + while (b--) { + status = fputc(i & 0xff, Pfout); + i >>= 8; + if (status == EOF) { + (void) sprintf(PQerrormsg, + "FATAL: pq_putint: fputc() failed: errno=%d\n", + errno); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + } + } + } +} + +/* --- + * pq_sendoob - send a string over the out-of-band channel + * pq_recvoob - receive a string over the oob channel + * NB: Fortunately, the out-of-band channel doesn't conflict with + * buffered I/O because it is separate from regular com. channel. + * --- + */ +int +pq_sendoob(char *msg, int len) +{ + int fd = fileno(Pfout); + + return(send(fd,msg,len,MSG_OOB)); +} + +int +pq_recvoob(char *msgPtr, int *lenPtr) +{ + int fd = fileno(Pfout); + int len = 0; + + len = recv(fd,msgPtr+len,*lenPtr,MSG_OOB); + *lenPtr = len; + return(len); +} + +/* -------------------------------- + * pq_getinaddr - initialize address from host and port number + * -------------------------------- + */ +int +pq_getinaddr(struct sockaddr_in *sin, + char *host, + int port) +{ + struct hostent *hs; + + memset((char *) sin, 0, sizeof(*sin)); + + if (host) { + if (*host >= '0' && *host <= '9') + sin->sin_addr.s_addr = inet_addr(host); + else { + if (!(hs = gethostbyname(host))) { + perror(host); + return(1); + } + if (hs->h_addrtype != AF_INET) { + (void) sprintf(PQerrormsg, + "FATAL: pq_getinaddr: %s not on Internet\n", + host); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(1); + } + memmove((char *) &sin->sin_addr, + hs->h_addr, + hs->h_length); + } + } + sin->sin_family = AF_INET; + sin->sin_port = htons(port); + return(0); +} + +/* -------------------------------- + * pq_getinserv - initialize address from host and servive name + * -------------------------------- + */ +int +pq_getinserv(struct sockaddr_in *sin, char *host, char *serv) +{ + struct servent *ss; + + if (*serv >= '0' && *serv <= '9') + return(pq_getinaddr(sin, host, atoi(serv))); + if (!(ss = getservbyname(serv, NULL))) { + (void) sprintf(PQerrormsg, + "FATAL: pq_getinserv: unknown service: %s\n", + serv); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(1); + } + return(pq_getinaddr(sin, host, ntohs(ss->s_port))); +} + +/* + * register an out-of-band listener proc--at most one allowed. + * This is used for receiving async. notification from the backend. + */ +void +pq_regoob(void (*fptr)()) +{ +#ifdef WIN32 + /* Who knows what to do here? 
*/ + return; +#else + int fd = fileno(Pfout); +#ifdef PORTNAME_hpux + ioctl(fd, FIOSSAIOOWN, getpid()); +#else /* PORTNAME_hpux */ + fcntl(fd, F_SETOWN, getpid()); +#endif /* PORTNAME_hpux */ + (void) signal(SIGURG,fptr); +#endif /* WIN32 */ +} + +void +pq_unregoob() +{ +#ifndef WIN32 + signal(SIGURG,SIG_DFL); +#endif /* WIN32 */ +} + + +void +pq_async_notify() +{ + char msg[20]; + /* int len = sizeof(msg);*/ + int len = 20; + + if (pq_recvoob(msg,&len) >= 0) { + /* debugging */ + printf("received notification: %s\n",msg); + PQAsyncNotifyWaiting = 1; + /* PQappendNotify(msg+1);*/ + } else { + extern int errno; + printf("SIGURG but no data: len = %d, err=%d\n",len,errno); + } +} + +/* + * Streams -- wrapper around Unix socket system calls + * + * + * Stream functions are used for vanilla TCP connection protocol. + */ + +/* + * StreamServerPort -- open a sock stream "listening" port. + * + * This initializes the Postmaster's connection + * accepting port. + * + * ASSUME: that this doesn't need to be non-blocking because + * the Postmaster uses select() to tell when the socket + * is ready. + * + * RETURNS: STATUS_OK or STATUS_ERROR + */ +int +StreamServerPort(char *hostName, short portName, int *fdP) +{ + struct sockaddr_in sin; + int fd; + +#ifdef WIN32 + /* This is necessary to make it possible for a backend to use + ** stdio to read from the socket. + */ + int optionvalue = SO_SYNCHRONOUS_NONALERT; + + setsockopt(INVALID_SOCKET, SOL_SOCKET, SO_OPENTYPE, (char *)&optionvalue, + sizeof(optionvalue)); +#endif /* WIN32 */ + + if (! hostName) + hostName = "localhost"; + + memset((char *)&sin, 0, sizeof sin); + + if ((fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + (void) sprintf(PQerrormsg, + "FATAL: StreamServerPort: socket() failed: errno=%d\n", + errno); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + + sin.sin_family = AF_INET; + sin.sin_port = htons(portName); + + if (bind(fd, (struct sockaddr *)&sin, sizeof sin) < 0) { + (void) sprintf(PQerrormsg, + "FATAL: StreamServerPort: bind() failed: errno=%d\n", + errno); + pqdebug("%s", PQerrormsg); + (void) strcat(PQerrormsg, "\tIs another postmaster already running on that port?\n"); + (void) strcat(PQerrormsg, "\tIf not, wait a few seconds and retry.\n"); + fputs(PQerrormsg, stderr); + return(STATUS_ERROR); + } + + listen(fd, SOMAXCONN); + + /* MS: I took this code from Dillon's version. It makes the + * listening port non-blocking. That is not necessary (and + * may tickle kernel bugs). + + (void) fcntl(fd, F_SETFD, 1); + (void) fcntl(fd, F_SETFL, FNDELAY); + */ + + *fdP = fd; + return(STATUS_OK); +} + +/* + * StreamConnection -- create a new connection with client using + * server port. + * + * This one should be non-blocking. 
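The pq_putint/pq_getint pair above moves an integer one byte at a time, low-order byte first, so big- and little-endian machines can exchange values without sharing a native representation. Here is a minimal, self-contained sketch of that byte-at-a-time encoding; the helper names are made up, and a plain byte buffer stands in for the stdio connection stream:

    #include <stdio.h>

    /* Chop an integer into 'b' bytes, low-order byte first, the same way
     * pq_putint feeds successive bytes to fputc() (b is capped at 4 there). */
    static void
    encode_int(unsigned char *buf, int i, int b)
    {
        while (b--) {
            *buf++ = i & 0xff;
            i >>= 8;
        }
    }

    /* Reassemble the integer by shifting each byte into place, 8 bits at a
     * time, mirroring the loop in pq_getint. */
    static int
    decode_int(const unsigned char *buf, int b)
    {
        int n = 0, p = 0;

        while (b-- && p < 32) {
            n |= (*buf++ & 0xff) << p;
            p += 8;
        }
        return n;
    }

    int
    main(void)
    {
        unsigned char buf[4];

        encode_int(buf, 0x12345678, 4);
        printf("%x\n", decode_int(buf, 4));     /* prints 12345678 */
        return 0;
    }

Since both sides agree on the wire order rather than on a host order, no htonl()/ntohl() call is needed for values sent this way; those conversions only matter for data shipped as raw structs, such as the PacketBuf header in pqcomm.h below.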
+ * + * RETURNS: STATUS_OK or STATUS_ERROR + */ +int +StreamConnection(int server_fd, Port *port) +{ + int addrlen; + + /* accept connection (and fill in the client (remote) address) */ + addrlen = sizeof(struct sockaddr_in); + if ((port->sock = accept(server_fd, + (struct sockaddr *) &port->raddr, + &addrlen)) < 0) { + elog(WARN, "postmaster: StreamConnection: accept: %m"); + return(STATUS_ERROR); + } + + /* fill in the server (local) address */ + addrlen = sizeof(struct sockaddr_in); + if (getsockname(port->sock, (struct sockaddr *) &port->laddr, + &addrlen) < 0) { + elog(WARN, "postmaster: StreamConnection: getsockname: %m"); + return(STATUS_ERROR); + } + + port->mask = 1 << port->sock; + +#ifndef WIN32 + /* reset to non-blocking */ + fcntl(port->sock, F_SETFL, 1); +#endif /* WIN32 */ + + return(STATUS_OK); +} + +/* + * StreamClose -- close a client/backend connection + */ +void +StreamClose(int sock) +{ + (void) close(sock); +} + +/* --------------------------- + * StreamOpen -- From client, initiate a connection with the + * server (Postmaster). + * + * RETURNS: STATUS_OK or STATUS_ERROR + * + * NOTE: connection is NOT established just because this + * routine exits. Local state is ok, but we haven't + * spoken to the postmaster yet. + * --------------------------- + */ +int +StreamOpen(char *hostName, short portName, Port *port) +{ + struct hostent *hp; + int laddrlen = sizeof(struct sockaddr_in); + extern int errno; + + if (!hostName) + hostName = "localhost"; + + /* set up the server (remote) address */ + if (!(hp = gethostbyname(hostName)) || hp->h_addrtype != AF_INET) { + (void) sprintf(PQerrormsg, + "FATAL: StreamOpen: unknown hostname: %s\n", + hostName); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + memset((char *) &port->raddr, 0, sizeof(port->raddr)); + memmove((char *) &(port->raddr.sin_addr), + (char *) hp->h_addr, + hp->h_length); + port->raddr.sin_family = AF_INET; + port->raddr.sin_port = htons(portName); + + /* connect to the server */ + if ((port->sock = socket(AF_INET, SOCK_STREAM, 0)) < 0) { + (void) sprintf(PQerrormsg, + "FATAL: StreamOpen: socket() failed: errno=%d\n", + errno); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + if (connect(port->sock, (struct sockaddr *)&port->raddr, + sizeof(port->raddr)) < 0) { + (void) sprintf(PQerrormsg, + "FATAL: StreamOpen: connect() failed: errno=%d\n", + errno); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + + /* fill in the client address */ + if (getsockname(port->sock, (struct sockaddr *) &port->laddr, + &laddrlen) < 0) { + (void) sprintf(PQerrormsg, + "FATAL: StreamOpen: getsockname() failed: errno=%d\n", + errno); + fputs(PQerrormsg, stderr); + pqdebug("%s", PQerrormsg); + return(STATUS_ERROR); + } + + return(STATUS_OK); +} diff --git a/src/backend/libpq/pqcomm.h b/src/backend/libpq/pqcomm.h new file mode 100644 index 00000000000..a7870871ea8 --- /dev/null +++ b/src/backend/libpq/pqcomm.h @@ -0,0 +1,124 @@ +/*------------------------------------------------------------------------- + * + * pqcomm.h-- + * Parameters for the communication module + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pqcomm.h,v 1.1.1.1 1996/07/09 06:21:31 scrappy Exp $ + * + * NOTES + * Some of this should move to libpq.h + * + *------------------------------------------------------------------------- + */ +#ifndef PQCOMM_H +#define PQCOMM_H + +#include +#ifdef WIN32 +#include +#else 
+#include +#endif /* WIN32 */ + +#include "postgres.h" + +/* + * startup msg parameters: path length, argument string length + */ +#define PATH_SIZE 64 +#define ARGV_SIZE 64 + + +typedef enum _MsgType { + ACK_MSG = 0, /* acknowledge a message */ + ERROR_MSG=1, /* error response to client from server */ + RESET_MSG=2, /* client must reset connection */ + PRINT_MSG=3, /* tuples for client from server */ + NET_ERROR=4, /* error in net system call */ + FUNCTION_MSG=5, /* fastpath call (unused) */ + QUERY_MSG=6, /* client query to server */ + STARTUP_MSG=7, /* initialize a connection with a backend */ + DUPLICATE_MSG=8, /* duplicate msg arrived (errors msg only) */ + INVALID_MSG=9, /* for some control functions */ + STARTUP_KRB4_MSG=10, /* krb4 session follows startup packet */ + STARTUP_KRB5_MSG=11, /* krb5 session follows startup packet */ + STARTUP_HBA_MSG=12 /* use host-based authentication */ + /* insert new values here -- DO NOT REORDER OR DELETE ENTRIES */ +} MsgType; + +typedef char *Addr; +typedef int PacketLen; /* packet length */ + + +typedef struct StartupInfo { +/* PacketHdr hdr; */ + char database[PATH_SIZE]; /* database name */ + char user[NAMEDATALEN]; /* user name */ + char options[ARGV_SIZE]; /* possible additional args */ + char execFile[ARGV_SIZE]; /* possible backend to use */ + char tty[PATH_SIZE]; /* possible tty for debug output*/ +} StartupInfo; + +/* amount of available data in a packet buffer */ +#define MESSAGE_SIZE sizeof(StartupInfo) + 5 + +/* I/O can be blocking or non-blocking */ +#define BLOCKING (FALSE) +#define NON_BLOCKING (TRUE) + +/* a PacketBuf gets shipped from client to server so be careful + of differences in representation. + Be sure to use htonl() and ntohl() on the len and msgtype fields! */ +typedef struct PacketBuf { + int len; + MsgType msgtype; + char data[MESSAGE_SIZE]; +} PacketBuf; + +/* update the conversion routines + StartupInfo2PacketBuf() and PacketBuf2StartupInfo() (decl. 
below) + if StartupInfo or PacketBuf structs ever change */ + +/* + * socket descriptor port + * we need addresses of both sides to do authentication calls + */ +typedef struct Port { + int sock; /* file descriptor */ + int mask; /* select mask */ + int nBytes; /* nBytes read in so far */ + struct sockaddr_in laddr; /* local addr (us) */ + struct sockaddr_in raddr; /* remote addr (them) */ +/* PacketBufId id;*/ /* id of packet buf currently in use */ + PacketBuf buf; /* stream implementation (curr pack buf) */ +} Port; + +/* invalid socket descriptor */ +#define INVALID_SOCK (-1) + +#define INVALID_ID (-1) +#define MAX_CONNECTIONS 10 +#define N_PACK_BUFS 20 + +/* no multi-packet messages yet */ +#define MAX_PACKET_BACKLOG 1 + +#define DEFAULT_STRING "" + +extern FILE *Pfout, *Pfin; +extern int PQAsyncNotifyWaiting; + +/* + * prototypes for functions in pqpacket.c + */ +extern int PacketReceive(Port *port, PacketBuf *buf, bool nonBlocking); +extern int PacketSend(Port *port, PacketBuf *buf, + PacketLen len, bool nonBlocking); +/* extern PacketBuf* StartupInfo2PacketBuf(StartupInfo*); */ +/* extern StartupInfo* PacketBuf2StartupInfo(PacketBuf*); */ + + +#endif /* PQCOMM_H */ diff --git a/src/backend/libpq/pqpacket.c b/src/backend/libpq/pqpacket.c new file mode 100644 index 00000000000..edf373d1afb --- /dev/null +++ b/src/backend/libpq/pqpacket.c @@ -0,0 +1,283 @@ +/*------------------------------------------------------------------------- + * + * pqpacket.c-- + * routines for reading and writing data packets sent/received by + * POSTGRES clients and servers + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/libpq/Attic/pqpacket.c,v 1.1.1.1 1996/07/09 06:21:31 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* NOTES + * This is the module that understands the lowest-level part + * of the communication protocol. All of the trickiness in + * this module is for making sure that non-blocking I/O in + * the Postmaster works correctly. Check the notes in PacketRecv + * on non-blocking I/O. + * + * Data Structures: + * Port has two important functions. (1) It records the + * sock/addr used in communication. (2) It holds partially + * read in messages. This is especially important when + * we haven't seen enough to construct a complete packet + * header. + * + * PacketBuf -- None of the clients of this module should know + * what goes into a packet hdr (although they know how big + * it is). This routine is in charge of host to net order + * conversion for headers. Data conversion is someone elses + * responsibility. + * + * IMPORTANT: these routines are called by backends, clients, and + * the Postmaster. + * + */ +#include +#include +#ifndef WIN32 +#include +#include +#include +#else +#include +#endif /*WIN32 */ +#include +#include + +#include "postgres.h" +#include "miscadmin.h" +#include "utils/elog.h" +#include "storage/ipc.h" +#include "libpq/pqcomm.h" /* where the declarations go */ +#include "libpq/libpq.h" + +/* + * PacketReceive -- receive a packet on a port. + * + * RETURNS: connection id of the packet sender, if one + * is available. 
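The StartupInfo/PacketBuf declarations above travel between client and server as raw bytes, so, as the header warns, only the len and msgtype fields need htonl()/ntohl() conversion while the fixed-width character arrays are copied verbatim. The sketch below packs a startup packet in that spirit; the local struct and size definitions merely mirror pqcomm.h, the msgtype value would be STARTUP_MSG from the enum above, and the value of NAMEDATALEN (really supplied by postgres.h) is an assumption here:

    #include <string.h>
    #include <arpa/inet.h>          /* htonl() */

    #define PATH_SIZE   64
    #define ARGV_SIZE   64
    #define NAMEDATALEN 16          /* assumed; the real value comes from postgres.h */

    typedef struct StartupInfo {
        char database[PATH_SIZE];
        char user[NAMEDATALEN];
        char options[ARGV_SIZE];
        char execFile[ARGV_SIZE];
        char tty[PATH_SIZE];
    } StartupInfo;

    typedef struct PacketBuf {
        int  len;                   /* total length, network byte order */
        int  msgtype;               /* MsgType, network byte order */
        char data[sizeof(StartupInfo) + 5];
    } PacketBuf;

    /* Pack a StartupInfo into a PacketBuf: convert the two header words to
     * network order, copy the fixed-width strings unchanged. */
    static void
    pack_startup(PacketBuf *buf, const StartupInfo *s, int msgtype)
    {
        char *p;

        memset(buf, 0, sizeof(*buf));
        buf->len = htonl(sizeof(PacketBuf));
        buf->msgtype = htonl(msgtype);

        p = buf->data;
        memcpy(p, s->database, sizeof(s->database));  p += sizeof(s->database);
        memcpy(p, s->user,     sizeof(s->user));      p += sizeof(s->user);
        memcpy(p, s->options,  sizeof(s->options));   p += sizeof(s->options);
        memcpy(p, s->execFile, sizeof(s->execFile));  p += sizeof(s->execFile);
        memcpy(p, s->tty,      sizeof(s->tty));
    }

A client would hand the filled buffer to PacketSend(); the receiving side runs len back through ntohl() before trusting it, which is exactly what PacketReceive does below.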
+ * + */ +int +PacketReceive(Port *port, /* receive port */ + PacketBuf *buf, /* MAX_PACKET_SIZE-worth of buffer space */ + bool nonBlocking) /* NON_BLOCKING or BLOCKING i/o */ +{ + PacketLen max_size = sizeof(PacketBuf); + PacketLen cc; /* character count -- bytes recvd */ + PacketLen packetLen; /* remaining packet chars to read */ + Addr tmp; /* curr recv buf pointer */ + int addrLen = sizeof(struct sockaddr_in); + int hdrLen; + int flag; + int decr; + + hdrLen = sizeof(buf->len); + + if (nonBlocking == NON_BLOCKING) { + flag = MSG_PEEK; + decr = 0; + } else { + flag = 0; + decr = hdrLen; + } + /* + * Assume port->nBytes is zero unless we were interrupted during + * non-blocking I/O. This first recvfrom() is to get the hdr + * information so we know how many bytes to read. Life would + * be very complicated if we read too much data (buffering). + */ + tmp = ((Addr)buf) + port->nBytes; + + if (port->nBytes >= hdrLen) { + packetLen = ntohl(buf->len) - port->nBytes; + } + else { + /* peeking into the incoming message */ + cc = recvfrom(port->sock, (char *)&(buf->len), hdrLen, flag, + (struct sockaddr*) &(port->raddr), &addrLen); + if (cc < hdrLen) { + /* if cc is negative, the system call failed */ + if (cc < 0) { + return(STATUS_ERROR); + } + /* + * cc == 0 means the connection was broken at the + * other end. + */ + else if (! cc) { + return(STATUS_INVALID); + + } else { + /* + * Worst case. We didn't even read in enough data to + * get the header length. + * since we are using a data stream, + * this happens only if the client is mallicious. + * + * Don't save the number of bytes we've read so far. + * Since we only peeked at the incoming message, the + * kernel is going to keep it for us. + */ + return(STATUS_NOT_DONE); + } + } else { + /* + * great. got the header. now get the true length (including + * header size). + */ + packetLen = ntohl(buf->len); + /* + * if someone is sending us junk, close the connection + */ + if (packetLen > max_size) { + port->nBytes = packetLen; + return(STATUS_BAD_PACKET); + } + packetLen -= decr; + tmp += decr - port->nBytes; + } + } + + /* + * Now that we know how big it is, read the packet. We read + * the entire packet, since the last call was just a peek. + */ + while (packetLen) { + cc = recvfrom(port->sock, tmp, packetLen, 0, + (struct sockaddr*) &(port->raddr), &addrLen); + if (cc < 0) + return(STATUS_ERROR); + /* + * cc == 0 means the connection was broken at the + * other end. + */ + else if (! cc) + return(STATUS_INVALID); + +/* + fprintf(stderr,"expected packet of %d bytes, got %d bytes\n", + packetLen, cc); +*/ + tmp += cc; + packetLen -= cc; + + /* if non-blocking, we're done. */ + if (nonBlocking && packetLen) { + port->nBytes += cc; + return(STATUS_NOT_DONE); + } + } + + port->nBytes = 0; + return(STATUS_OK); +} + +/* + * PacketSend -- send a single-packet message. + * + * RETURNS: STATUS_ERROR if the write fails, STATUS_OK otherwise. + * SIDE_EFFECTS: may block. + * NOTES: Non-blocking writes would significantly complicate + * buffer management. For now, we're not going to do it. 
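PacketReceive above peeks at the length word with MSG_PEEK, so nothing is consumed from the socket until the full packet length is known; it then reads header and body in one pass, remembering partial non-blocking reads in port->nBytes. The same peek-then-read idiom in isolation, on a connected stream socket and with invented names (where the real code would return STATUS_NOT_DONE for a half-arrived header, this sketch simply gives up):

    #include <sys/types.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>

    /*
     * Read one length-prefixed packet from 'sock' into 'buf' (capacity
     * 'bufsize').  The first 4 bytes hold the total packet length, header
     * included, in network byte order.  Returns the packet length, 0 if
     * the peer closed the connection, or -1 on error.
     */
    static int
    recv_packet(int sock, char *buf, int bufsize)
    {
        int len;
        int cc, total = 0;

        /* Peek at the header without consuming it. */
        cc = recv(sock, (char *) &len, sizeof(len), MSG_PEEK);
        if (cc <= 0)
            return cc;                  /* 0: EOF, -1: error */
        if (cc < (int) sizeof(len))
            return -1;                  /* header not fully here yet */

        len = ntohl(len);
        if (len < (int) sizeof(len) || len > bufsize)
            return -1;                  /* junk length from the sender */

        /* Read the whole packet, header included, possibly in pieces. */
        while (total < len) {
            cc = recv(sock, buf + total, len - total, 0);
            if (cc <= 0)
                return -1;
            total += cc;
        }
        return len;
    }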
+ * + */ +int +PacketSend(Port *port, + PacketBuf *buf, + PacketLen len, + bool nonBlocking) +{ + PacketLen totalLen; + int addrLen = sizeof(struct sockaddr_in); + + Assert(!nonBlocking); + Assert(buf); + + totalLen = len; + + len = sendto(port->sock, (Addr) buf, totalLen, /* flags */ 0, + (struct sockaddr *)&(port->raddr), addrLen); + + if (len < totalLen) { + (void) sprintf(PQerrormsg, + "FATAL: PacketSend: couldn't send complete packet: errno=%d\n", + errno); + fputs(PQerrormsg, stderr); + return(STATUS_ERROR); + } + + return(STATUS_OK); +} + +/* + * StartupInfo2PacketBuf - + * convert the fields of the StartupInfo to a PacketBuf + * + */ +/* moved to src/libpq/fe-connect.c */ +/* +PacketBuf* +StartupInfo2PacketBuf(StartupInfo* s) +{ + PacketBuf* res; + char* tmp; + + res = (PacketBuf*)malloc(sizeof(PacketBuf)); + res->len = htonl(sizeof(PacketBuf)); + res->data[0] = '\0'; + + tmp= res->data; + + strncpy(tmp, s->database, sizeof(s->database)); + tmp += sizeof(s->database); + strncpy(tmp, s->user, sizeof(s->user)); + tmp += sizeof(s->user); + strncpy(tmp, s->options, sizeof(s->options)); + tmp += sizeof(s->options); + strncpy(tmp, s->execFile, sizeof(s->execFile)); + tmp += sizeof(s->execFile); + strncpy(tmp, s->tty, sizeof(s->execFile)); + + return res; +} +*/ + +/* + * PacketBuf2StartupInfo - + * convert the fields of the StartupInfo to a PacketBuf + * + */ +/* moved to postmaster.c +StartupInfo* +PacketBuf2StartupInfo(PacketBuf* p) +{ + StartupInfo* res; + char* tmp; + + res = (StartupInfo*)malloc(sizeof(StartupInfo)); + + res->database[0]='\0'; + res->user[0]='\0'; + res->options[0]='\0'; + res->execFile[0]='\0'; + res->tty[0]='\0'; + + tmp= p->data; + strncpy(res->database,tmp,sizeof(res->database)); + tmp += sizeof(res->database); + strncpy(res->user,tmp, sizeof(res->user)); + tmp += sizeof(res->user); + strncpy(res->options,tmp, sizeof(res->options)); + tmp += sizeof(res->options); + strncpy(res->execFile,tmp, sizeof(res->execFile)); + tmp += sizeof(res->execFile); + strncpy(res->tty,tmp, sizeof(res->tty)); + + return res; +} +*/ diff --git a/src/backend/libpq/pqsignal.c b/src/backend/libpq/pqsignal.c new file mode 100644 index 00000000000..b60a5659ccb --- /dev/null +++ b/src/backend/libpq/pqsignal.c @@ -0,0 +1,40 @@ +/*------------------------------------------------------------------------- + * + * pqsignal.c-- + * reliable BSD-style signal(2) routine stolen from RWW who stole it + * from Stevens... + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/libpq/pqsignal.c,v 1.1.1.1 1996/07/09 06:21:31 scrappy Exp $ + * + * NOTES + * This shouldn't be in libpq, but the monitor and some other + * things need it... 
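pqsignal() below is the usual Stevens-style replacement for signal(): it installs the handler with sigaction() so it stays in place across deliveries, and sets SA_RESTART for every signal except SIGALRM so that a timeout can still interrupt a blocking call. A usage sketch, assuming a build with USE_POSIX_SIGNALS and the backend include paths (pqsignal.h also redefines signal() to route through this wrapper):

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    #include "libpq/pqsignal.h"         /* pqsignal(), pqsigfunc */

    static volatile sig_atomic_t got_urg = 0;

    /* Because sigaction() is used underneath, the handler is not reset to
     * SIG_DFL after the first delivery; no re-installation is needed. */
    static void
    handle_urg(int signo)
    {
        (void) signo;                   /* unused */
        got_urg = 1;
    }

    int
    main(void)
    {
        if (pqsignal(SIGURG, handle_urg) == SIG_ERR) {
            perror("pqsignal");
            return 1;
        }

        /* System calls interrupted by SIGURG restart (SA_RESTART), so the
         * main loop only needs to test the flag now and then. */
        for (;;) {
            if (got_urg) {
                got_urg = 0;
                printf("out-of-band data pending\n");
            }
            sleep(1);
        }
    }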
+ * + *------------------------------------------------------------------------- + */ +#include "libpq/pqsignal.h" + +pqsigfunc +pqsignal(int signo, pqsigfunc func) +{ +#if defined(USE_POSIX_SIGNALS) + struct sigaction act, oact; + + act.sa_handler = func; + sigemptyset(&act.sa_mask); + act.sa_flags = 0; + if (signo != SIGALRM) { + act.sa_flags |= SA_RESTART; + } + if (sigaction(signo, &act, &oact) < 0) + return(SIG_ERR); + return(oact.sa_handler); +#else /* !USE_POSIX_SIGNALS */ + Assert(0); + return 0; +#endif /* !USE_POSIX_SIGNALS */ +} diff --git a/src/backend/libpq/pqsignal.h b/src/backend/libpq/pqsignal.h new file mode 100644 index 00000000000..44f10882f26 --- /dev/null +++ b/src/backend/libpq/pqsignal.h @@ -0,0 +1,32 @@ +/*------------------------------------------------------------------------- + * + * pqsignal.h-- + * prototypes for the reliable BSD-style signal(2) routine. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pqsignal.h,v 1.1.1.1 1996/07/09 06:21:31 scrappy Exp $ + * + * NOTES + * This shouldn't be in libpq, but the monitor and some other + * things need it... + * + *------------------------------------------------------------------------- + */ +#ifndef PQSIGNAL_H +#define PQSIGNAL_H + +#include + +#include "c.h" + +typedef void (*pqsigfunc)(int); + +extern pqsigfunc pqsignal(int signo, pqsigfunc func); + +#if defined(USE_POSIX_SIGNALS) +#define signal(signo, handler) pqsignal(signo, (pqsigfunc)(handler)) +#endif /* USE_POSIX_SIGNALS */ + +#endif /* PQSIGNAL_H */ diff --git a/src/backend/main/Makefile.inc b/src/backend/main/Makefile.inc new file mode 100644 index 00000000000..1e66ad03cac --- /dev/null +++ b/src/backend/main/Makefile.inc @@ -0,0 +1,16 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the main() of the postgres backend +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/main/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:31 scrappy Exp $ +# +#------------------------------------------------------------------------- + +VPATH:= $(VPATH):$(CURDIR)/main + +SRCS_MAIN= main.c diff --git a/src/backend/main/main.c b/src/backend/main/main.c new file mode 100644 index 00000000000..0c8dbc478d6 --- /dev/null +++ b/src/backend/main/main.c @@ -0,0 +1,45 @@ +/*------------------------------------------------------------------------- + * + * main.c-- + * Stub main() routine for the postgres backend. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/main/main.c,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "c.h" +#include "miscadmin.h" +#include "bootstrap/bootstrap.h" /* for BootstrapMain() */ +#include "tcop/tcopprot.h" /* for PostgresMain() */ +#include "port-protos.h" /* for init_address_fixup() */ + +int +main(int argc, char *argv[]) +{ + int len; +#if defined(NOFIXADE) || defined(NOPRINTADE) + /* + * Must be first so that the bootstrap code calls it, too. + * (Only needed on some RISC architectures.) + */ + init_address_fixup(); +#endif /* NOFIXADE || NOPRINTADE */ + + /* use one executable for both postgres and postmaster, + invoke one or the other depending on the name of the executable */ + len = strlen(argv[0]); + if(len >= 10 && ! 
strcmp(argv[0] + len - 10, "postmaster")) + exit(PostmasterMain(argc, argv)); + + /* if the first argument is "-boot", then invoke the backend in + bootstrap mode */ + if (argc > 1 && strcmp(argv[1], "-boot") == 0) + exit(BootstrapMain(argc-1, argv+1)); /* remove the -boot arg from the command line */ + else + exit(PostgresMain(argc, argv)); +} diff --git a/src/backend/makeID b/src/backend/makeID new file mode 100644 index 00000000000..d1c97c9488f --- /dev/null +++ b/src/backend/makeID @@ -0,0 +1,17 @@ +#!/bin/sh +# +# $Header: /cvsroot/pgsql/src/backend/Attic/makeID,v 1.1.1.1 1996/07/09 06:21:08 scrappy Exp $ +# + + +if test $# -ne 1; then + echo "usage: $0 PORTNAME" + exit 1 +fi +PORTNAME=$1 + +gfind . port/$PORTNAME -type f -name '*.[chly]' -print | mkid -S.gen=C - 2>&1 | grep -v 'No scanner for language' + + + +exit 0 diff --git a/src/backend/nodes/Makefile.inc b/src/backend/nodes/Makefile.inc new file mode 100644 index 00000000000..a32fdbad6d1 --- /dev/null +++ b/src/backend/nodes/Makefile.inc @@ -0,0 +1,33 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the nodes module +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/nodes/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ +# +# NOTES +# Originally, the nodes module is a home-brew, C++ like inheritance +# system. However, the automatically generated tags, accessor functions +# and the header files themselves are difficult to maintain. We need +# real language support. Emulation doesn't quite do it... +# +# See nodes/README for an explanation of the new no-frills nodes +# structures. +# - ay 11/5/94 +# +#------------------------------------------------------------------------- + +VPATH:= $(VPATH):$(CURDIR)/nodes + + +SRCS_NODES= nodeFuncs.c nodes.c list.c \ + copyfuncs.c equalfuncs.c makefuncs.c outfuncs.c readfuncs.c \ + print.c read.c + +HEADERS+= execnodes.h makefuncs.h memnodes.h nodeFuncs.h nodes.h \ + params.h parsenodes.h pg_list.h plannodes.h primnodes.h relation.h + diff --git a/src/backend/nodes/README b/src/backend/nodes/README new file mode 100644 index 00000000000..59db30ceae7 --- /dev/null +++ b/src/backend/nodes/README @@ -0,0 +1,65 @@ +******************************************************************************* +* * +* EXPLANATION OF THE NODE STRUCTURES * +* - Andrew Yu (11/94) * +* * +* Copyright (c) 1994, Regents of the University of California * +* * +* $Id: README,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ +* * +******************************************************************************* + +INTRODUCTION + +The current node structures are plain old C structures. "Inheritance" is +achieved by convention. No additional functions will be generated. Functions +that manipulate node structures reside in this directory. + + +FILES IN THIS DIRECTORY + + Node manipulation functions: + copyfuncs.c - copying a node + equalfuncs.c - comparing a node + outfuncs.c - convert a node to ascii representation + readfuncs.c - convert ascii representation back to a node + makefuncs.c - creator functions for primitive nodes + + Node definitions: + nodes.h - define node tags (NodeTag) + pg_list.h - generic list + primnodes.h - primitive nodes + parsenodes.h - parse tree nodes + plannodes.h - plan tree nodes + relation.h - inner plan tree nodes + execnodes.h - executor nodes + memnodes.h - memory nodes + + +STEPS TO ADD A NODE + +Suppose you wana define a node Foo: + +1. 
add a tag (T_Foo) to the enum NodeTag in nodes.h (You may have to + recompile the whole tree after doing this.) +2. add the structure definition to the appropriate ???nodes.h file. If you + intend to inherit from, say a Plan node, put Plan as the first field of + you definition. +3. if you intend to use copyObject, equal, nodeToString or stringToNode, + add an appropriate function to copyfuncs.c, equalfuncs.c, outfuncs.c + and readfuncs.c accordingly. (Except for frequently used nodes, don't + bother writing a creator function in makefuncs.c) + + +HISTORICAL NOTE + +Prior to the current simple C structure definitions, the Node structures +uses a pseudo-inheritance system which automatically generates creator and +accessor functions. Since every node inherits from LispValue, the whole thing +is a mess. Here's a little anecdote: + + LispValue definition -- class used to support lisp structures + in C. This is here because we did not want to totally rewrite + planner and executor code which depended on lisp structures when + we ported postgres V1 from lisp to C. -cim 4/23/90 + diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c new file mode 100644 index 00000000000..c04bf3627e6 --- /dev/null +++ b/src/backend/nodes/copyfuncs.c @@ -0,0 +1,1675 @@ +/*------------------------------------------------------------------------- + * + * copyfuncs.c-- + * Copy functions for Postgres tree nodes. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/copyfuncs.c,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include + +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/execnodes.h" +#include "nodes/plannodes.h" +#include "nodes/parsenodes.h" +#include "nodes/primnodes.h" +#include "nodes/relation.h" + +#include "utils/syscache.h" +#include "utils/builtins.h" /* for namecpy */ +#include "utils/elog.h" +#include "utils/palloc.h" +#include "catalog/pg_type.h" +#include "storage/lmgr.h" + +/* + * listCopy-- + * this copy function only copies the "lcons-cells" of the list but not + * its contents. (good for list of pointers as well as list of integers). + */ +List * +listCopy(List *list) +{ + List *newlist=NIL; + List *l, *nl; + + foreach(l, list) { + if (newlist==NIL) { + newlist = nl = lcons(lfirst(l),NIL); + }else { + lnext(nl) = lcons(lfirst(l),NIL); + nl = lnext(nl); + } + } + return newlist; +} + +/* + * Node_Copy-- + * a macro to simplify calling of copyObject on the specified field + */ +#define Node_Copy(from, newnode, field) \ + newnode->field = copyObject(from->field) + +/* **************************************************************** + * plannodes.h copy functions + * **************************************************************** + */ + +/* ---------------- + * CopyPlanFields + * + * This function copies the fields of the Plan node. It is used by + * all the copy functions for classes which inherit from Plan. 
+ * ---------------- + */ +static void +CopyPlanFields(Plan *from, Plan *newnode) +{ + newnode->cost = from->cost; + newnode->plan_size = from->plan_size; + newnode->plan_width = from->plan_width; + newnode->state = from->state; + newnode->targetlist = copyObject(from->targetlist); + newnode->qual = copyObject(from->qual); + newnode->lefttree = copyObject(from->lefttree); + newnode->righttree = copyObject(from->righttree); +} + +/* ---------------- + * _copyPlan + * ---------------- + */ +static Plan * +_copyPlan(Plan *from) +{ + Plan *newnode = makeNode(Plan); + + /* ---------------- + * copy the node superclass fields + * ---------------- + */ + CopyPlanFields(from, newnode); + + return newnode; +} + + +/* ---------------- + * _copyExistential + * ---------------- + */ +static Existential * +_copyExistential(Existential *from) +{ + Existential *newnode = makeNode(Existential); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields(from, newnode); + + return newnode; +} + +/* ---------------- + * _copyResult + * ---------------- + */ +static Result * +_copyResult(Result *from) +{ + Result *newnode = makeNode(Result); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, resconstantqual); + Node_Copy(from, newnode, resstate); + + return newnode; +} + +/* ---------------- + * _copyAppend + * ---------------- + */ +static Append * +_copyAppend(Append *from) +{ + Append *newnode = makeNode(Append); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, unionplans); + newnode->unionrelid = from->unionrelid; + Node_Copy(from, newnode, unionrtentries); + Node_Copy(from, newnode, unionstate); + + return newnode; +} + + +/* ---------------- + * CopyScanFields + * + * This function copies the fields of the Scan node. It is used by + * all the copy functions for classes which inherit from Scan. 
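The copy routines above all follow one pattern: allocate with makeNode(), let a CopyXxxFields() helper duplicate the fields shared with the superclass, then copy the node's own scalars by assignment and its node-valued fields with Node_Copy(). Applied to the hypothetical Foo node from the README's "steps to add a node" (the field names below are invented purely for illustration):

    /* README step 2: "inherit" from Plan by embedding it as the first field. */
    typedef struct Foo {
        Plan   plan;            /* superclass must come first */
        int    foocount;        /* plain scalar: copied by assignment */
        List  *fooclauses;      /* node-valued: copied via copyObject */
    } Foo;

    /* README step 3: the matching routine for this file. */
    static Foo *
    _copyFoo(Foo *from)
    {
        Foo *newnode = makeNode(Foo);

        /* copy the node superclass fields */
        CopyPlanFields((Plan *) from, (Plan *) newnode);

        /* copy remainder of node */
        newnode->foocount = from->foocount;
        Node_Copy(from, newnode, fooclauses);

        return newnode;
    }

Adding the T_Foo tag (README step 1) and a "case T_Foo: retval = _copyFoo(from); break;" arm to the switch in copyObject() at the end of this file completes the job.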
+ * ---------------- + */ +static void +CopyScanFields(Scan *from, Scan *newnode) +{ + newnode->scanrelid = from->scanrelid; + Node_Copy(from, newnode, scanstate); + return; +} + +/* ---------------- + * _copyScan + * ---------------- + */ +static Scan * +_copyScan(Scan *from) +{ + Scan *newnode = makeNode(Scan); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyScanFields(from, newnode); + + return newnode; +} + +/* ---------------- + * _copySeqScan + * ---------------- + */ +static SeqScan * +_copySeqScan(SeqScan *from) +{ + SeqScan *newnode = makeNode(SeqScan); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyScanFields((Scan*)from, (Scan*)newnode); + + return newnode; +} + +/* ---------------- + * _copyIndexScan + * ---------------- + */ +static IndexScan * +_copyIndexScan(IndexScan *from) +{ + IndexScan *newnode = makeNode(IndexScan); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyScanFields((Scan*)from, (Scan*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->indxid = listCopy(from->indxid); + Node_Copy(from, newnode, indxqual); + Node_Copy(from, newnode, indxstate); + + return newnode; +} + +/* ---------------- + * CopyJoinFields + * + * This function copies the fields of the Join node. It is used by + * all the copy functions for classes which inherit from Join. + * ---------------- + */ +static void +CopyJoinFields(Join *from, Join *newnode) +{ + /* nothing extra */ + return; +} + + +/* ---------------- + * _copyJoin + * ---------------- + */ +static Join * +_copyJoin(Join *from) +{ + Join *newnode = makeNode(Join); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyJoinFields(from, newnode); + + return newnode; +} + + +/* ---------------- + * _copyNestLoop + * ---------------- + */ +static NestLoop * +_copyNestLoop(NestLoop *from) +{ + NestLoop *newnode = makeNode(NestLoop); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyJoinFields((Join*)from, (Join*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, nlstate); + + return newnode; +} + + +/* ---------------- + * _copyMergeJoin + * ---------------- + */ +static MergeJoin * +_copyMergeJoin(MergeJoin *from) +{ + MergeJoin *newnode = makeNode(MergeJoin); + List *newlist; + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyJoinFields((Join*)from, (Join*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, mergeclauses); + + newnode->mergesortop = from->mergesortop; + newlist = NIL; + + newnode->mergerightorder = (Oid *)palloc(sizeof(Oid)*2); + newnode->mergerightorder[0] = from->mergerightorder[0]; + newnode->mergerightorder[1] = 0; + + newnode->mergeleftorder = (Oid *)palloc(sizeof(Oid)*2); + newnode->mergeleftorder[0] = from->mergeleftorder[0]; + newnode->mergeleftorder[1] = 0; + + Node_Copy(from, newnode, mergestate); + + return newnode; +} + +/* ---------------- + * _copyHashJoin + * ---------------- + */ +static HashJoin * +_copyHashJoin(HashJoin *from) +{ 
+ HashJoin *newnode = makeNode(HashJoin); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyJoinFields((Join*)from, (Join*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, hashclauses); + + newnode->hashjoinop = from->hashjoinop; + + Node_Copy(from, newnode, hashjoinstate); + + newnode->hashjointable = from->hashjointable; + newnode->hashjointablekey = from->hashjointablekey; + newnode->hashjointablesize = from->hashjointablesize; + newnode->hashdone = from->hashdone; + + return newnode; +} + + +/* ---------------- + * CopyTempFields + * + * This function copies the fields of the Temp node. It is used by + * all the copy functions for classes which inherit from Temp. + * ---------------- + */ +static void +CopyTempFields(Temp *from, Temp *newnode) +{ + newnode->tempid = from->tempid; + newnode->keycount = from->keycount; + return; +} + + +/* ---------------- + * _copyTemp + * ---------------- + */ +static Temp * +_copyTemp(Temp *from) +{ + Temp *newnode = makeNode(Temp); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyTempFields(from, newnode); + + return newnode; +} + +/* ---------------- + * _copyMaterial + * ---------------- + */ +static Material * +_copyMaterial(Material *from) +{ + Material *newnode = makeNode(Material); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyTempFields((Temp*)from, (Temp*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, matstate); + + return newnode; +} + + +/* ---------------- + * _copySort + * ---------------- + */ +static Sort * +_copySort(Sort *from) +{ + Sort *newnode = makeNode(Sort); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyTempFields((Temp*)from, (Temp*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, sortstate); + + return newnode; +} + +/* --------------- + * _copyAgg + * -------------- + */ +static Agg * +_copyAgg(Agg *from) +{ + Agg *newnode = makeNode(Agg); + int i; + + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyTempFields((Temp*)from, (Temp*)newnode); + + newnode->numAgg = from->numAgg; + newnode->aggs = malloc(sizeof(Aggreg *)); + for(i=0; i < from->numAgg; i++) { + newnode->aggs[i] = copyObject(from->aggs[i]); + } + + Node_Copy(from, newnode, aggstate); + + return newnode; +} + + +/* ---------------- + * _copyUnique + * ---------------- + */ +static Unique * +_copyUnique(Unique *from) +{ + Unique *newnode = makeNode(Unique); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + CopyTempFields((Temp*)from, (Temp*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, uniquestate); + + return newnode; +} + + +/* ---------------- + * _copyHash + * ---------------- + */ +static Hash * +_copyHash(Hash *from) +{ + Hash *newnode = makeNode(Hash); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyPlanFields((Plan*)from, (Plan*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, 
newnode, hashkey); + Node_Copy(from, newnode, hashstate); + + newnode->hashtable = from->hashtable; + newnode->hashtablekey = from->hashtablekey; + newnode->hashtablesize = from->hashtablesize; + + return newnode; +} + +/* **************************************************************** + * primnodes.h copy functions + * **************************************************************** + */ + +/* ---------------- + * _copyResdom + * ---------------- + */ +static Resdom * +_copyResdom(Resdom *from) +{ + Resdom *newnode = makeNode(Resdom); + + newnode->resno = from->resno; + newnode->restype = from->restype; + newnode->reslen = from->reslen; + + if (from->resname != NULL) { + newnode->resname = palloc(strlen(from->resname)+1); + strcpy(newnode->resname, from->resname); + } else + newnode->resname = (char*) NULL; + + newnode->reskey = from->reskey; + newnode->reskeyop = from->reskeyop; + newnode->resjunk = from->resjunk; + + return newnode; +} + +static Fjoin * +_copyFjoin(Fjoin *from) +{ + Fjoin *newnode; + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + + newnode->fj_initialized = from->fj_initialized; + newnode->fj_nNodes = from->fj_nNodes; + + Node_Copy(from, newnode, fj_innerNode); + + newnode->fj_results = (DatumPtr) + palloc((from->fj_nNodes)*sizeof(Datum)); + + newnode->fj_alwaysDone = (BoolPtr) + palloc((from->fj_nNodes)*sizeof(bool)); + + memmove(from->fj_results, + newnode->fj_results, + (from->fj_nNodes)*sizeof(Datum)); + + memmove(from->fj_alwaysDone, + newnode->fj_alwaysDone, + (from->fj_nNodes)*sizeof(bool)); + + + return newnode; +} + +/* ---------------- + * _copyExpr + * ---------------- + */ +static Expr * +_copyExpr(Expr *from) +{ + Expr *newnode = makeNode(Expr); + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + newnode->typeOid = from->typeOid; + newnode->opType = from->opType; + + Node_Copy(from, newnode, oper); + Node_Copy(from, newnode, args); + + return newnode; +} + +/* ---------------- + * _copyVar + * ---------------- + */ +static Var * +_copyVar(Var *from) +{ + Var *newnode = makeNode(Var); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->varno = from->varno; + newnode->varattno = from->varattno; + newnode->vartype = from->vartype; + + newnode->varnoold = from->varnoold; + newnode->varoattno = from->varoattno; + + return newnode; +} + +/* ---------------- + * _copyOper + * ---------------- + */ +static Oper * +_copyOper(Oper *from) +{ + Oper *newnode = makeNode(Oper); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->opno = from->opno; + newnode->opid = from->opid; + newnode->opresulttype = from->opresulttype; + newnode->opsize = from->opsize; + + /* + * NOTE: shall we copy the cache structure or just the pointer ? + * Alternatively we can set 'op_fcache' to NULL, in which + * case the executor will initialize it when it needs it... + */ + newnode->op_fcache = from->op_fcache; + + return newnode; +} + +/* ---------------- + * _copyConst + * ---------------- + */ +static Const * +_copyConst(Const *from) +{ + static Oid cached_type; + static bool cached_typbyval; + + Const *newnode = makeNode(Const); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->consttype = from->consttype; + newnode->constlen = from->constlen; + + /* ---------------- + * XXX super cheesy hack until parser/planner + * puts in the right values here. 
+ * ---------------- + */ + if (cached_type != from->consttype) { + HeapTuple typeTuple; + TypeTupleForm typeStruct; + + /* ---------------- + * get the type tuple corresponding to the paramList->type, + * If this fails, returnValue has been pre-initialized + * to "null" so we just return it. + * ---------------- + */ + typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(from->consttype), + 0,0,0); + + /* ---------------- + * get the type length and by-value from the type tuple and + * save the information in our one element cache. + * ---------------- + */ + Assert(PointerIsValid(typeTuple)); + + typeStruct = (TypeTupleForm) GETSTRUCT(typeTuple); + cached_typbyval = (typeStruct)->typbyval ? true : false ; + cached_type = from->consttype; + } + + from->constbyval = cached_typbyval; + + if (!from->constisnull) { + /* ---------------- + * copying the Datum in a const node is a bit trickier + * because it might be a pointer and it might also be of + * variable length... + * ---------------- + */ + if (from->constbyval == true) { + /* ---------------- + * passed by value so just copy the datum. + * ---------------- + */ + newnode->constvalue = from->constvalue; + } else { + /* ---------------- + * not passed by value. datum contains a pointer. + * ---------------- + */ + if (from->constlen != -1) { + /* ---------------- + * fixed length structure + * ---------------- + */ + newnode->constvalue = PointerGetDatum(palloc(from->constlen)); + memmove((char*)newnode->constvalue, + (char*)from->constvalue, from->constlen); + } else { + /* ---------------- + * variable length structure. here the length is stored + * in the first int pointed to by the constval. + * ---------------- + */ + int length; + length = *((int *) from->constvalue); + newnode->constvalue = PointerGetDatum(palloc(length)); + memmove((char*)newnode->constvalue, + (char*)from->constvalue, length); + } + } + } + else { + newnode->constvalue = from->constvalue; + } + newnode->constisnull = from->constisnull; + newnode->constbyval = from->constbyval; + + return newnode; +} + +/* ---------------- + * _copyParam + * ---------------- + */ +static Param * +_copyParam(Param *from) +{ + Param *newnode = makeNode(Param); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->paramkind = from->paramkind; + newnode->paramid = from->paramid; + + if (from->paramname != NULL) { + newnode->paramname = pstrdup(from->paramname); + } else + newnode->paramname = (char*)NULL; + + newnode->paramtype = from->paramtype; + Node_Copy(from, newnode, param_tlist); + + return newnode; +} + +/* ---------------- + * _copyFunc + * ---------------- + */ +static Func * +_copyFunc(Func *from) +{ + Func *newnode = makeNode(Func); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->funcid = from->funcid; + newnode->functype = from->functype; + newnode->funcisindex = from->funcisindex; + newnode->funcsize = from->funcsize; + newnode->func_fcache = from->func_fcache; + Node_Copy(from, newnode, func_tlist); + Node_Copy(from, newnode, func_planlist); + + return newnode; +} + +/* ---------------- + * _copyAggreg + * ---------------- + */ +static Aggreg * +_copyAggreg(Aggreg *from) +{ + Aggreg *newnode = makeNode(Aggreg); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->aggname = pstrdup(from->aggname); + newnode->basetype = from->basetype; + newnode->aggtype = from->aggtype; + + Node_Copy(from, newnode, target); + + newnode->aggno = from->aggno; + + return 
newnode; +} + +static Array * +_copyArray(Array *from) +{ + Array *newnode = makeNode(Array); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->arrayelemtype = from->arrayelemtype; + newnode->arrayelemlength = from->arrayelemlength; + newnode->arrayelembyval = from->arrayelembyval; + newnode->arrayndim = from->arrayndim; + newnode->arraylow = from->arraylow; + newnode->arrayhigh = from->arrayhigh; + newnode->arraylen = from->arraylen; + + return newnode; +} + +static ArrayRef * +_copyArrayRef(ArrayRef *from) +{ + ArrayRef *newnode = makeNode(ArrayRef); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->refelemtype = from->refelemtype; + newnode->refattrlength = from->refattrlength; + newnode->refelemlength = from->refelemlength; + newnode->refelembyval = from->refelembyval; + + Node_Copy(from,newnode,refupperindexpr); + Node_Copy(from,newnode,reflowerindexpr); + Node_Copy(from,newnode,refexpr); + Node_Copy(from,newnode,refassgnexpr); + + return newnode; +} + +/* **************************************************************** + * relation.h copy functions + * **************************************************************** + */ + +/* ---------------- + * _copyRel + * ---------------- + */ +/* + ** when you change this, also make sure to fix up xfunc_copyRel in + ** planner/path/xfunc.c accordingly!!! + ** -- JMH, 8/2/93 + */ +static Rel * +_copyRel(Rel *from) +{ + Rel *newnode = makeNode(Rel); + int i, len; + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->relids = listCopy(from->relids); + + newnode->indexed = from->indexed; + newnode->pages = from->pages; + newnode->tuples = from->tuples; + newnode->size = from->size; + newnode->width = from->width; + newnode->indproc = from->indproc; + + Node_Copy(from, newnode, targetlist); + Node_Copy(from, newnode, pathlist); + Node_Copy(from, newnode, unorderedpath); + Node_Copy(from, newnode, cheapestpath); + newnode->pruneable = from->pruneable; + newnode->relam = from->relam; + + if (from->classlist) { + for(len=0; from->classlist[len]!=0; len++) + ; + newnode->classlist = (Oid *)palloc(sizeof(Oid) * (len+1)); + for(i=0; i < len; i++) { + newnode->classlist[i] = from->classlist[i]; + } + newnode->classlist[len] = 0; + } + + if (from->indexkeys) { + for(len=0; from->indexkeys[len]!=0; len++) + ; + newnode->indexkeys = (int *)palloc(sizeof(int) * (len+1)); + for(i=0; i < len; i++) { + newnode->indexkeys[i] = from->indexkeys[i]; + } + newnode->indexkeys[len] = 0; + } + + if (from->ordering) { + for(len=0; from->ordering[len]!=0; len++) + ; + newnode->ordering = (Oid *)palloc(sizeof(Oid) * (len+1)); + for(i=0; i < len; i++) { + newnode->ordering[i] = from->ordering[i]; + } + newnode->ordering[len] = 0; + } + + Node_Copy(from, newnode, clauseinfo); + Node_Copy(from, newnode, joininfo); + Node_Copy(from, newnode, innerjoin); + Node_Copy(from, newnode, superrels); + + return newnode; +} + +/* ---------------- + * CopyPathFields + * + * This function copies the fields of the Path node. It is used by + * all the copy functions for classes which inherit from Path. + * ---------------- + */ +static void +CopyPathFields(Path *from, Path *newnode) +{ + newnode->pathtype = from->pathtype; + /* Modify the next line, since it causes the copying to cycle + (i.e. the parent points right back here! + -- JMH, 7/7/92. 
+ Old version: + Node_Copy(from, newnode, parent); + */ + newnode->parent = from->parent; + + newnode->path_cost = from->path_cost; + + newnode->p_ordering.ordtype = from->p_ordering.ordtype; + if (from->p_ordering.ordtype == SORTOP_ORDER) { + int len, i; + Oid *ordering = from->p_ordering.ord.sortop; + + if (ordering) { + for(len=0; ordering[len]!=0; len++) + ; + newnode->p_ordering.ord.sortop = + (Oid *)palloc(sizeof(Oid) * (len+1)); + for(i=0; i < len; i++) { + newnode->p_ordering.ord.sortop[i] = ordering[i]; + } + newnode->p_ordering.ord.sortop[len] = 0; + } else { + newnode->p_ordering.ord.sortop = NULL; + } + } else { + Node_Copy(from, newnode, p_ordering.ord.merge); + } + + Node_Copy(from, newnode, keys); + + newnode->outerjoincost = from->outerjoincost; + + newnode->joinid = listCopy(from->joinid); + Node_Copy(from, newnode, locclauseinfo); +} + +/* ---------------- + * _copyPath + * ---------------- + */ +static Path * +_copyPath(Path *from) +{ + Path *newnode = makeNode(Path); + + CopyPathFields(from, newnode); + + return newnode; +} + +/* ---------------- + * _copyIndexPath + * ---------------- + */ +static IndexPath * +_copyIndexPath(IndexPath *from) +{ + IndexPath *newnode = makeNode(IndexPath); + + /* ---------------- + * copy the node superclass fields + * ---------------- + */ + CopyPathFields((Path*)from, (Path*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->indexid = listCopy(from->indexid); + Node_Copy(from, newnode, indexqual); + + return newnode; +} + +/* ---------------- + * CopyJoinPathFields + * + * This function copies the fields of the JoinPath node. It is used by + * all the copy functions for classes which inherit from JoinPath. + * ---------------- + */ +static void +CopyJoinPathFields(JoinPath *from, JoinPath *newnode) +{ + Node_Copy(from, newnode, pathclauseinfo); + Node_Copy(from, newnode, outerjoinpath); + Node_Copy(from, newnode, innerjoinpath); +} + +/* ---------------- + * _copyJoinPath + * ---------------- + */ +static JoinPath * +_copyJoinPath(JoinPath *from) +{ + JoinPath *newnode = makeNode(JoinPath); + + /* ---------------- + * copy the node superclass fields + * ---------------- + */ + CopyPathFields((Path*)from, (Path*)newnode); + CopyJoinPathFields(from, newnode); + + return newnode; +} + +/* ---------------- + * _copyMergePath + * ---------------- + */ +static MergePath * +_copyMergePath(MergePath *from) +{ + MergePath *newnode = makeNode(MergePath); + + /* ---------------- + * copy the node superclass fields + * ---------------- + */ + CopyPathFields((Path*)from, (Path*)newnode); + CopyJoinPathFields((JoinPath*)from, (JoinPath*)newnode); + + /* ---------------- + * copy the remainder of the node + * ---------------- + */ + Node_Copy(from, newnode, path_mergeclauses); + Node_Copy(from, newnode, outersortkeys); + Node_Copy(from, newnode, innersortkeys); + + return newnode; +} + +/* ---------------- + * _copyHashPath + * ---------------- + */ +static HashPath * +_copyHashPath(HashPath *from) +{ + HashPath *newnode = makeNode(HashPath); + + /* ---------------- + * copy the node superclass fields + * ---------------- + */ + CopyPathFields((Path*)from, (Path*)newnode); + CopyJoinPathFields((JoinPath*)from, (JoinPath*)newnode); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, path_hashclauses); + Node_Copy(from, newnode, outerhashkeys); + Node_Copy(from, newnode, innerhashkeys); + + return newnode; +} + +/* ---------------- + * _copyOrderKey + * 
---------------- + */ +static OrderKey * +_copyOrderKey(OrderKey *from) +{ + OrderKey *newnode = makeNode(OrderKey); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->attribute_number = from->attribute_number; + newnode->array_index = from->array_index; + + return newnode; +} + + +/* ---------------- + * _copyJoinKey + * ---------------- + */ +static JoinKey * +_copyJoinKey(JoinKey *from) +{ + JoinKey *newnode = makeNode(JoinKey); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, outer); + Node_Copy(from, newnode, inner); + + return newnode; +} + +/* ---------------- + * _copyMergeOrder + * ---------------- + */ +static MergeOrder * +_copyMergeOrder(MergeOrder *from) +{ + MergeOrder *newnode = makeNode(MergeOrder); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->join_operator = from->join_operator; + newnode->left_operator = from->left_operator; + newnode->right_operator = from->right_operator; + newnode->left_type = from->left_type; + newnode->right_type = from->right_type; + + return newnode; +} + +/* ---------------- + * _copyCInfo + * ---------------- + */ +static CInfo * +_copyCInfo(CInfo *from) +{ + CInfo *newnode = makeNode(CInfo); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, clause); + + newnode->selectivity = from->selectivity; + newnode->notclause = from->notclause; + + Node_Copy(from, newnode, indexids); + Node_Copy(from, newnode, mergesortorder); + newnode->hashjoinoperator = from->hashjoinoperator; + newnode->cinfojoinid = listCopy(from->cinfojoinid); + + return newnode; +} + +/* ---------------- + * CopyJoinMethodFields + * + * This function copies the fields of the JoinMethod node. It is used by + * all the copy functions for classes which inherit from JoinMethod. 
+ * ---------------- + */ +static void +CopyJoinMethodFields(JoinMethod *from, JoinMethod *newnode) +{ + Node_Copy(from, newnode, jmkeys); + Node_Copy(from, newnode, clauses); + return; +} + +/* ---------------- + * _copyJoinMethod + * ---------------- + */ +static JoinMethod * +_copyJoinMethod(JoinMethod *from) +{ + JoinMethod *newnode = makeNode(JoinMethod); + + CopyJoinMethodFields(from, newnode); + + return newnode; +} + +/* ---------------- + * _copyHInfo + * ---------------- + */ +static HInfo * +_copyHInfo(HInfo *from) +{ + HInfo *newnode = makeNode(HInfo); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->hashop = from->hashop; + + return newnode; +} + +/* ---------------- + * _copyMInfo + * ---------------- + */ +static MInfo * +_copyMInfo(MInfo *from) +{ + MInfo *newnode = makeNode(MInfo); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, m_ordering); + + return newnode; +} + +/* ---------------- + * _copyJInfo + * ---------------- + */ +static JInfo * +_copyJInfo(JInfo *from) +{ + JInfo *newnode = makeNode(JInfo); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + newnode->otherrels = listCopy(from->otherrels); + Node_Copy(from, newnode, jinfoclauseinfo); + + newnode->mergesortable = from->mergesortable; + newnode->hashjoinable = from->hashjoinable; + newnode->inactive = from->inactive; + + return newnode; +} + +static Iter * +_copyIter(Iter *from) +{ + Iter *newnode = makeNode(Iter); + + Node_Copy(from, newnode, iterexpr); + newnode->itertype = from->itertype; + + return newnode; +} + +static Stream * +_copyStream(Stream *from) +{ + Stream *newnode = makeNode(Stream); + + newnode->pathptr = from->pathptr; + newnode->cinfo = from->cinfo; + newnode->clausetype = from->clausetype; + newnode->groupup = from->groupup; + newnode->groupcost = from->groupcost; + newnode->groupsel = from->groupsel; + newnode->upstream = (StreamPtr)NULL; /* only copy nodes downwards! */ + Node_Copy(from, newnode, downstream); + if (newnode->downstream) + ((Stream*)newnode->downstream)->upstream = (Stream*)newnode; + + return newnode; +} + +/* **************** + * parsenodes.h routines have no copy functions + * **************** + */ + +static TargetEntry * +_copyTargetEntry(TargetEntry *from) +{ + TargetEntry *newnode = makeNode(TargetEntry); + + Node_Copy(from, newnode, resdom); + Node_Copy(from, newnode, fjoin); + Node_Copy(from, newnode, expr); + return newnode; +} + +static RangeTblEntry * +_copyRangeTblEntry(RangeTblEntry *from) +{ + RangeTblEntry *newnode = makeNode(RangeTblEntry); + + *newnode = *from; /* ??? 
quick hack, be careful */ + + return newnode; +} + +static SortClause * +_copySortClause(SortClause *from) +{ + SortClause *newnode = makeNode(SortClause); + + Node_Copy(from, newnode, resdom); + newnode->opoid = from->opoid; + + return newnode; +} + +static Query * +_copyQuery(Query *from) +{ + Query *newnode = makeNode(Query); + + newnode->commandType = from->commandType; + newnode->resultRelation = from->resultRelation; + newnode->into = from->into; + newnode->isPortal = from->isPortal; + Node_Copy(from, newnode, rtable); + if (from->utilityStmt && nodeTag(from->utilityStmt) == T_NotifyStmt) { + NotifyStmt *from_notify = (NotifyStmt*)from->utilityStmt; + NotifyStmt *n = makeNode(NotifyStmt); + int length = strlen(from_notify->relname); + + n->relname = palloc(length + 1); + strcpy(n->relname,from_notify->relname); + newnode->utilityStmt = (Node*)n; + } + if (from->uniqueFlag) { + newnode->uniqueFlag = (char*)palloc(strlen(from->uniqueFlag)+1); + strcpy(newnode->uniqueFlag, from->uniqueFlag); + } + else + newnode->uniqueFlag = NULL; + Node_Copy(from, newnode, sortClause); + Node_Copy(from, newnode, targetList); + Node_Copy(from, newnode, qual); + + return newnode; +} + + +/* **************** + * mnodes.h routines have no copy functions + * **************** + */ + +/* **************************************************************** + * pg_list.h copy functions + * **************************************************************** + */ + +static Value * +_copyValue(Value *from) +{ + Value *newnode = makeNode(Value); + + newnode->type = from->type; + switch(from->type) { + case T_String: + newnode->val.str = pstrdup(from->val.str); + break; + case T_Integer: + newnode->val.ival = from->val.ival; + break; + case T_Float: + newnode->val.dval = from->val.dval; + break; + default: + break; + } + return newnode; +} + +/* ---------------- + * copyObject returns a copy of the node or list. If it is a list, it + * recursively copies its items. 
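copyObject() below is the single entry point callers use: it dispatches on nodeTag(), walks T_List values cell by cell, and for tags it does not recognize logs a NOTICE and returns the original pointer unchanged. A small usage sketch, assuming the backend environment (node headers, palloc, and the equal() dispatcher from equalfuncs.c; exact header locations are assumed):

    #include "postgres.h"
    #include "nodes/pg_list.h"

    /*
     * Give the caller a private copy of a target list so it can be edited
     * without disturbing the original parse tree.  copyObject() duplicates
     * the List cells and, through its nodeTag() dispatch, every node they
     * point to, so the two trees share no pointers afterwards.
     */
    static List *
    private_targetlist(List *targetlist)
    {
        List *copy = (List *) copyObject((void *) targetlist);

        /* equal() (see equalfuncs.c) still reports the trees as identical. */
        Assert(equal(copy, targetlist));

        return copy;
    }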
+ * ---------------- + */ +void * +copyObject(void *from) +{ + void *retval; + + if (from==NULL) + return NULL; + switch(nodeTag(from)) { + /* + * PLAN NODES + */ + case T_Plan: + retval = _copyPlan(from); + break; + case T_Existential: + retval = _copyExistential(from); + break; + case T_Result: + retval = _copyResult(from); + break; + case T_Append: + retval = _copyAppend(from); + break; + case T_Scan: + retval = _copyScan(from); + break; + case T_SeqScan: + retval = _copySeqScan(from); + break; + case T_IndexScan: + retval = _copyIndexScan(from); + break; + case T_Join: + retval = _copyJoin(from); + break; + case T_NestLoop: + retval = _copyNestLoop(from); + break; + case T_MergeJoin: + retval = _copyMergeJoin(from); + break; + case T_HashJoin: + retval = _copyHashJoin(from); + break; + case T_Temp: + retval = _copyTemp(from); + break; + case T_Material: + retval = _copyMaterial(from); + break; + case T_Sort: + retval = _copySort(from); + break; + case T_Agg: + retval = _copyAgg(from); + break; + case T_Unique: + retval = _copyUnique(from); + break; + case T_Hash: + retval = _copyHash(from); + break; + + /* + * PRIMITIVE NODES + */ + case T_Resdom: + retval = _copyResdom(from); + break; + case T_Fjoin: + retval = _copyFjoin(from); + break; + case T_Expr: + retval = _copyExpr(from); + break; + case T_Var: + retval = _copyVar(from); + break; + case T_Oper: + retval = _copyOper(from); + break; + case T_Const: + retval = _copyConst(from); + break; + case T_Param: + retval = _copyParam(from); + break; + case T_Func: + retval = _copyFunc(from); + break; + case T_Array: + retval = _copyArray(from); + break; + case T_ArrayRef: + retval = _copyArrayRef(from); + break; + case T_Aggreg: + retval = _copyAggreg(from); + break; + /* + * RELATION NODES + */ + case T_Rel: + retval = _copyRel(from); + break; + case T_Path: + retval = _copyPath(from); + break; + case T_IndexPath: + retval = _copyIndexPath(from); + break; + case T_JoinPath: + retval = _copyJoinPath(from); + break; + case T_MergePath: + retval = _copyMergePath(from); + break; + case T_HashPath: + retval = _copyHashPath(from); + break; + case T_OrderKey: + retval = _copyOrderKey(from); + break; + case T_JoinKey: + retval = _copyJoinKey(from); + break; + case T_MergeOrder: + retval = _copyMergeOrder(from); + break; + case T_CInfo: + retval = _copyCInfo(from); + break; + case T_JoinMethod: + retval = _copyJoinMethod(from); + break; + case T_HInfo: + retval = _copyHInfo(from); + break; + case T_MInfo: + retval = _copyMInfo(from); + break; + case T_JInfo: + retval = _copyJInfo(from); + break; + case T_Iter: + retval = _copyIter(from); + break; + case T_Stream: + retval = _copyStream(from); + break; + + /* + * PARSE NODES + */ + case T_Query: + retval = _copyQuery(from); + break; + case T_TargetEntry: + retval = _copyTargetEntry(from); + break; + case T_RangeTblEntry: + retval = _copyRangeTblEntry(from); + break; + case T_SortClause: + retval = _copySortClause(from); + break; + + /* + * VALUE NODES + */ + case T_Integer: case T_String: case T_Float: + retval = _copyValue(from); + break; + case T_List: + { + List *list=from, *l; + List *newlist = NIL, *nl; + foreach(l, list) { + if (newlist==NIL) { + newlist = nl = lcons(copyObject(lfirst(l)),NIL); + }else { + lnext(nl) = lcons(copyObject(lfirst(l)),NIL); + nl = lnext(nl); + } + } + retval = newlist; + } + break; + default: + elog(NOTICE, "copyObject: don't know how to copy %d", nodeTag(from)); + retval = from; + break; + } + return retval; +} + diff --git a/src/backend/nodes/equalfuncs.c 
b/src/backend/nodes/equalfuncs.c new file mode 100644 index 00000000000..d2dbef2bea2 --- /dev/null +++ b/src/backend/nodes/equalfuncs.c @@ -0,0 +1,703 @@ +/*------------------------------------------------------------------------- + * + * equalfuncs.c-- + * equal functions to compare the nodes + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/equalfuncs.c,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/nodes.h" +#include "nodes/primnodes.h" +#include "nodes/relation.h" +#include "nodes/execnodes.h" +#include "nodes/plannodes.h" + +#include "utils/builtins.h" /* for namestrcmp() */ +#include "utils/datum.h" +#include "utils/elog.h" +#include "storage/itemptr.h" + +/* + * Stuff from primnodes.h + */ + +/* + * Resdom is a subclass of Node. + */ +static bool +_equalResdom(Resdom *a, Resdom *b) +{ + if (a->resno != b->resno) + return (false); + if (a->restype != b->restype) + return (false); + if (a->reslen != b->reslen) + return (false); + if (strcmp(a->resname, b->resname) != 0) + return (false); + if (a->reskey != b->reskey) + return (false); + if (a->reskeyop != b->reskeyop) + return (false); + + return (true); +} + +static bool +_equalFjoin(Fjoin *a, Fjoin *b) +{ + int nNodes; + + if (a->fj_initialized != b->fj_initialized) + return (false); + if (a->fj_nNodes != b->fj_nNodes) + return (false); + if (!equal(a->fj_innerNode, b->fj_innerNode)) + return (false); + + nNodes = a->fj_nNodes; + if (memcmp(a->fj_results, b->fj_results, nNodes*sizeof(Datum)) != 0) + return (false); + if (memcmp(a->fj_alwaysDone, b->fj_alwaysDone, nNodes*sizeof(bool)) != 0) + return (false); + + return(true); +} + +/* + * Expr is a subclass of Node. + */ +static bool +_equalExpr(Expr *a, Expr *b) +{ + if (a->opType != b->opType) + return (false); + if (!equal(a->oper, b->oper)) + return (false); + if (!equal(a->args, b->args)) + return (false); + + return (true); +} + +bool _equalIter(Iter *a, Iter *b) +{ + return (equal(a->iterexpr, b->iterexpr)); +} + +static bool +_equalStream(Stream *a, Stream *b) +{ + if (a->clausetype != b->clausetype) + return(false); + if (a->groupup != b->groupup) + return(false); + if (a->groupcost != b->groupcost) + return(false); + if (a->groupsel != b->groupsel) + return(false); + if (!equal(a->pathptr, b->pathptr)) + return(false); + if (!equal(a->cinfo, b->cinfo)) + return(false); + if (!equal(a->upstream, b->upstream)) + return(false); + return(equal(a->downstream, b->downstream)); +} + +/* + * Var is a subclass of Expr. 
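+ *	Two Vars are equal iff every field compared below matches; as an
+ *	illustration only (23 is just an arbitrary type oid here):
+ *
+ *	    equal(makeVar(1, 2, 23, 1, 2), makeVar(1, 2, 23, 1, 2))
+ *
+ *	is true, and changing any one field makes it false.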
+ */ +static bool +_equalVar(Var *a, Var *b) +{ + if (a->varno != b->varno) + return (false); + if (a->varattno != b->varattno) + return (false); + if (a->vartype != b->vartype) + return (false); + if (a->varnoold != b->varnoold) + return (false); + if (a->varoattno != b->varoattno) + return (false); + + return (true); +} + +static bool +_equalArray(Array *a, Array *b) +{ + if (a->arrayelemtype != b->arrayelemtype) + return (false); + if (a->arrayndim != b->arrayndim) + return (false); + if (a->arraylow.indx[0] != b->arraylow.indx[0]) + return (false); + if (a->arrayhigh.indx[0] != b->arrayhigh.indx[0]) + return (false); + if (a->arraylen != b->arraylen) + return (false); + return(TRUE); +} + +static bool +_equalArrayRef(ArrayRef *a, ArrayRef *b) +{ + if (a->refelemtype != b->refelemtype) + return (false); + if (a->refattrlength != b->refattrlength) + return (false); + if (a->refelemlength != b->refelemlength) + return (false); + if (a->refelembyval != b->refelembyval) + return (false); + if (!equal(a->refupperindexpr, b->refupperindexpr)) + return (false); + if (!equal(a->reflowerindexpr, b->reflowerindexpr)) + return (false); + if (!equal(a->refexpr, b->refexpr)) + return (false); + return (equal(a->refassgnexpr, b->refassgnexpr)); +} + +/* + * Oper is a subclass of Expr. + */ +static bool +_equalOper(Oper *a, Oper *b) +{ + if (a->opno != b->opno) + return (false); + if (a->opresulttype != b->opresulttype) + return (false); + + return (true); +} + +/* + * Const is a subclass of Expr. + */ +static bool +_equalConst(Const *a, Const *b) +{ + /* + ** this function used to do a pointer compare on a and b. That's + ** ridiculous. -- JMH, 7/11/92 + */ + if (a->consttype != b->consttype) + return(false); + if (a->constlen != b->constlen) + return(false); + if (a->constisnull != b->constisnull) + return(false); + if (a->constbyval != b->constbyval) + return(false); + return(datumIsEqual(a->constvalue, b->constvalue, + a->consttype, a->constbyval, a->constlen)); +} + +/* + * Param is a subclass of Expr. + */ +static bool +_equalParam(Param *a, Param *b) +{ + if (a->paramkind != b->paramkind) + return (false); + if (a->paramtype != b->paramtype) + return (false); + if (!equal(a->param_tlist, b->param_tlist)) + return (false); + + switch (a->paramkind) { + case PARAM_NAMED: + case PARAM_NEW: + case PARAM_OLD: + if (strcmp(a->paramname, b->paramname) != 0) + return (false); + break; + case PARAM_NUM: + if (a->paramid != b->paramid) + return (false); + break; + case PARAM_INVALID: + /* + * XXX: Hmmm... What are we supposed to return + * in this case ?? + */ + return(true); + break; + default: + elog(WARN, "_equalParam: Invalid paramkind value: %d", + a->paramkind); + } + + return (true); +} + +/* + * Func is a subclass of Expr. + */ +static bool +_equalFunc(Func *a, Func *b) +{ + if (a->funcid != b->funcid) + return (false); + if (a->functype != b->functype) + return (false); + if (a->funcisindex != b->funcisindex) + return (false); + if (a->funcsize != b->funcsize) + return (false); + if (!equal(a->func_tlist, b->func_tlist)) + return (false); + if (!equal(a->func_planlist, b->func_planlist)) + return (false); + + return (true); +} + +/* + * CInfo is a subclass of Node. 
+ */ +static bool +_equalCInfo(CInfo *a, CInfo *b) +{ + Assert(IsA(a,CInfo)); + Assert(IsA(b,CInfo)); + + if (!equal(a->clause, b->clause)) + return(false); + if (a->selectivity != b->selectivity) + return(false); + if (a->notclause != b->notclause) + return(false); +#ifdef EqualMergeOrderExists + if (!EqualMergeOrder(a->mergesortorder,b->mergesortorder)) + return(false); +#endif + if(a->hashjoinoperator != b->hashjoinoperator) + return(false); + return(equal((a->indexids), + (b->indexids))); +} + +static bool +_equalJoinMethod(JoinMethod *a, JoinMethod *b) +{ + Assert(IsA(a,JoinMethod)); + Assert(IsA(b,JoinMethod)); + + if (!equal((a->jmkeys), + (b->jmkeys))) + return(false); + if (!equal((a->clauses), + (b->clauses))) + return(false); + return(true); +} + +static bool +_equalPath(Path *a, Path *b) +{ + if (a->pathtype != b->pathtype) + return(false); + if (a->parent != b->parent) + return(false); + /* + if (a->path_cost != b->path_cost) + return(false); + */ + if (a->p_ordering.ordtype == SORTOP_ORDER) { + int i = 0; + if (a->p_ordering.ord.sortop==NULL || + b->p_ordering.ord.sortop==NULL) { + + if (a->p_ordering.ord.sortop != b->p_ordering.ord.sortop) + return false; + } else { + while(a->p_ordering.ord.sortop[i]!=0 && + b->p_ordering.ord.sortop[i]!=0) { + if (a->p_ordering.ord.sortop[i] != b->p_ordering.ord.sortop[i]) + return false; + i++; + } + if (a->p_ordering.ord.sortop[i]!=0 || + b->p_ordering.ord.sortop[i]!=0) + return false; + } + } else { + if (!equal((a->p_ordering.ord.merge), + (b->p_ordering.ord.merge))) + return(false); + } + if (!equal((a->keys), + (b->keys))) + return(false); + /* + if (a->outerjoincost != b->outerjoincost) + return(false); + */ + if (!equali((a->joinid), + (b->joinid))) + return(false); + return(true); +} + +static bool +_equalIndexPath(IndexPath *a, IndexPath *b) +{ + if (!_equalPath((Path*)a,(Path*)b)) + return(false); + if (!equali((a->indexid), (b->indexid))) + return(false); + if (!equal((a->indexqual), (b->indexqual))) + return(false); + return(true); +} + +static bool +_equalJoinPath(JoinPath *a,JoinPath *b) +{ + Assert(IsA_JoinPath(a)); + Assert(IsA_JoinPath(b)); + + if (!_equalPath((Path*)a,(Path*)b)) + return(false); + if (!equal((a->pathclauseinfo), (b->pathclauseinfo))) + return(false); + if (!equal((a->outerjoinpath), (b->outerjoinpath))) + return(false); + if (!equal((a->innerjoinpath), (b->innerjoinpath))) + return(false); + return(true); +} + +static bool +_equalMergePath(MergePath *a, MergePath *b) +{ + Assert(IsA(a,MergePath)); + Assert(IsA(b,MergePath)); + + if (!_equalJoinPath((JoinPath*)a,(JoinPath*)b)) + return(false); + if (!equal((a->path_mergeclauses), (b->path_mergeclauses))) + return(false); + if (!equal((a->outersortkeys), (b->outersortkeys))) + return(false); + if (!equal((a->innersortkeys), (b->innersortkeys))) + return(false); + return(true); +} + +static bool +_equalHashPath(HashPath *a, HashPath *b) +{ + Assert(IsA(a,HashPath)); + Assert(IsA(b,HashPath)); + + if (!_equalJoinPath((JoinPath*)a,(JoinPath*)b)) + return(false); + if (!equal((a->path_hashclauses), (b->path_hashclauses))) + return(false); + if (!equal((a->outerhashkeys), (b->outerhashkeys))) + return(false); + if (!equal((a->innerhashkeys), (b->innerhashkeys))) + return(false); + return(true); +} + +static bool +_equalJoinKey(JoinKey *a, JoinKey *b) +{ + Assert(IsA(a,JoinKey)); + Assert(IsA(b,JoinKey)); + + if (!equal((a->outer),(b->outer))) + return(false); + if (!equal((a->inner),(b->inner))) + return(false); + return(true); +} + +static bool 
+_equalMergeOrder(MergeOrder *a, MergeOrder *b) +{ + if (a == (MergeOrder*)NULL && b == (MergeOrder*)NULL) + return(true); + Assert(IsA(a,MergeOrder)); + Assert(IsA(b,MergeOrder)); + + if (a->join_operator != b->join_operator) + return(false); + if (a->left_operator != b->left_operator) + return(false); + if (a->right_operator != b->right_operator) + return(false); + if (a->left_type != b->left_type) + return(false); + if (a->right_type != b->right_type) + return(false); + return(true); +} + +static bool +_equalHInfo(HInfo *a, HInfo *b) +{ + Assert(IsA(a,HInfo)); + Assert(IsA(b,HInfo)); + + if (a->hashop != b->hashop) + return(false); + return(true); +} + +/* XXX This equality function is a quick hack, should be + * fixed to compare all fields. + */ +static bool +_equalIndexScan(IndexScan *a, IndexScan *b) +{ + Assert(IsA(a,IndexScan)); + Assert(IsA(b,IndexScan)); + + /* + if(a->scan.plan.cost != b->scan.plan.cost) + return(false); + */ + + if (!equal((a->indxqual),(b->indxqual))) + return(false); + + if (a->scan.scanrelid != b->scan.scanrelid) + return(false); + + if (!equali((a->indxid),(b->indxid))) + return(false); + return(true); +} + +static bool +_equalJInfo(JInfo *a, JInfo *b) +{ + Assert(IsA(a,JInfo)); + Assert(IsA(b,JInfo)); + if (!equal((a->otherrels),(b->otherrels))) + return(false); + if (!equal((a->jinfoclauseinfo),(b->jinfoclauseinfo))) + return(false); + if (a->mergesortable != b->mergesortable) + return(false); + if (a->hashjoinable != b->hashjoinable) + return(false); + return(true); +} + +/* + * Stuff from execnodes.h + */ + +/* + * EState is a subclass of Node. + */ +static bool +_equalEState(EState *a, EState *b) +{ + if (a->es_direction != b->es_direction) + return (false); + + if (!equal(a->es_range_table, b->es_range_table)) + return (false); + + if (a->es_result_relation_info != b->es_result_relation_info) + return (false); + + return (true); +} + + +/* + * equal -- are two lists equal? + * + * This is a comparison by value. It would be simpler to write it + * to be recursive, but it should run faster if we iterate. + */ +static bool +_equalValue(Value *a, Value *b) +{ + if (a->type != b->type) + return (false); + + switch(a->type) { + case T_String: + return strcmp(a->val.str, b->val.str); + case T_Integer: + return (a->val.ival==b->val.ival); + case T_Float: + return (a->val.dval==b->val.dval); + default: + break; + } + + return (true); +} + +/* + * equal-- + * returns whether two nodes are equal + */ +bool +equal(void *a, void *b) +{ + bool retval; + + if (a == b) + return(true); + /* + * note that a!=b, so only one of them can be NULL + */ + if (a==NULL || b==NULL) + return (false); + /* + * are they the same type of nodes? 
+ */ + if (nodeTag(a)!=nodeTag(b)) + return (false); + + switch(nodeTag(a)) { + case T_Resdom: + retval = _equalResdom(a, b); + break; + case T_Fjoin: + retval = _equalFjoin(a, b); + break; + case T_Expr: + retval = _equalExpr(a, b); + break; + case T_Iter: + retval = _equalIter(a, b); + break; + case T_Stream: + retval = _equalStream(a, b); + break; + case T_Var: + retval = _equalVar(a, b); + break; + case T_Array: + retval = _equalArray(a, b); + break; + case T_ArrayRef: + retval = _equalArrayRef(a, b); + break; + case T_Oper: + retval = _equalOper(a, b); + break; + case T_Const: + retval = _equalConst(a, b); + break; + case T_Param: + retval = _equalParam(a, b); + break; + case T_Func: + retval = _equalFunc(a, b); + break; + case T_CInfo: + retval = _equalCInfo(a, b); + break; + case T_JoinMethod: + retval = _equalJoinMethod(a, b); + break; + case T_Path: + retval = _equalPath(a, b); + break; + case T_IndexPath: + retval = _equalIndexPath(a, b); + break; + case T_JoinPath: + retval = _equalJoinPath(a, b); + break; + case T_MergePath: + retval = _equalMergePath(a, b); + break; + case T_HashPath: + retval = _equalHashPath(a, b); + break; + case T_JoinKey: + retval = _equalJoinKey(a, b); + break; + case T_MergeOrder: + retval = _equalMergeOrder(a, b); + break; + case T_HInfo: + retval = _equalHInfo(a, b); + break; + case T_IndexScan: + retval = _equalIndexScan(a, b); + break; + case T_JInfo: + retval = _equalJInfo(a, b); + break; + case T_EState: + retval = _equalEState(a, b); + break; + case T_Integer: case T_String: case T_Float: + retval = _equalValue(a, b); + break; + case T_List: + { + List *la = (List*)a; + List *lb = (List*)b; + List *l; + + if (a==NULL && b==NULL) + return (true); + if (length(a)!=length(b)) + return (false); + foreach(l, la) { + if (!equal(lfirst(l), lfirst(lb))) + return (false); + lb = lnext(lb); + } + retval = true; + } + break; + default: + elog(NOTICE, "equal: don't know whether nodes of type %d are equal", + nodeTag(a)); + break; + } + + return retval; +} + +/* + * equali-- + * compares two lists of integers + * + * XXX temp hack. 
needs something like T_IntList + */ +bool equali(List *a, List *b) +{ + List *la = (List*)a; + List *lb = (List*)b; + List *l; + + if (a==NULL && b==NULL) + return (true); + if (length(a)!=length(b)) + return (false); + foreach(l, la) { + if (lfirsti(l) != lfirsti(lb)) + return (false); + lb = lnext(lb); + } + return true; +} diff --git a/src/backend/nodes/execnodes.h b/src/backend/nodes/execnodes.h new file mode 100644 index 00000000000..6fb093a2e2a --- /dev/null +++ b/src/backend/nodes/execnodes.h @@ -0,0 +1,689 @@ +/*------------------------------------------------------------------------- + * + * execnodes.h-- + * definitions for executor state nodes + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: execnodes.h,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef EXECNODES_H +#define EXECNODES_H + +#include "postgres.h" + +#include "nodes/nodes.h" +#include "nodes/primnodes.h" +#include "nodes/pg_list.h" + +#include "nodes/memnodes.h" + +#include "storage/item.h" +#include "access/sdir.h" +#include "access/htup.h" +#include "access/tupdesc.h" +#include "access/funcindex.h" +#include "utils/rel.h" +#include "access/relscan.h" +#include "executor/hashjoin.h" +#include "executor/tuptable.h" + +/* ---------------- + * IndexInfo information + * + * this class holds the information saying what attributes + * are the key attributes for this index. -cim 10/15/89 + * + * NumKeyAttributes number of key attributes for this index + * KeyAttributeNumbers array of attribute numbers used as keys + * Predicate partial-index predicate for this index + * ---------------- + */ +typedef struct IndexInfo { + NodeTag type; + int ii_NumKeyAttributes; + AttrNumber *ii_KeyAttributeNumbers; + FuncIndexInfoPtr ii_FuncIndexInfo; + Node *ii_Predicate; +} IndexInfo; + +/* ---------------- + * RelationInfo information + * + * whenever we update an existing relation, we have to + * update indices on the relation. The RelationInfo class + * is used to hold all the information on result relations, + * including indices.. -cim 10/15/89 + * + * RangeTableIndex result relation's range table index + * RelationDesc relation descriptor for result relation + * NumIndices number indices existing on result relation + * IndexRelationDescs array of relation descriptors for indices + * IndexRelationInfo array of key/attr info for indices + * ---------------- + */ +typedef struct RelationInfo { + NodeTag type; + Index ri_RangeTableIndex; + Relation ri_RelationDesc; + int ri_NumIndices; + RelationPtr ri_IndexRelationDescs; + IndexInfo **ri_IndexRelationInfo; +} RelationInfo; + +/* ---------------- + * ExprContext + * + * This class holds the "current context" information + * needed to evaluate expressions for doing tuple qualifications + * and tuple projections. For example, if an expression refers + * to an attribute in the current inner tuple then we need to know + * what the current inner tuple is and so we look at the expression + * context. 
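+ *
+ *	For instance, a scan node testing its qualification keeps the
+ *	tuple under test in the context (a sketch only; the real calls
+ *	live in the executor):
+ *
+ *	    econtext->ecxt_scantuple = slot;
+ *	    if (ExecQual(qual, econtext))
+ *	        return slot;		/* tuple satisfies the qual */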
+ * ---------------- + */ +typedef struct ExprContext { + NodeTag type; + TupleTableSlot *ecxt_scantuple; + TupleTableSlot *ecxt_innertuple; + TupleTableSlot *ecxt_outertuple; + Relation ecxt_relation; + Index ecxt_relid; + ParamListInfo ecxt_param_list_info; + List *ecxt_range_table; + Datum *ecxt_values; /* precomputed values for aggreg */ + char *ecxt_nulls; /* null flags for aggreg values */ +} ExprContext; + +/* ---------------- + * ProjectionInfo node information + * + * This is all the information needed to preform projections + * on a tuple. Nodes which need to do projections create one + * of these. In theory, when a node wants to preform a projection + * it should just update this information as necessary and then + * call ExecProject(). -cim 6/3/91 + * + * targetlist target list for projection + * len length of target list + * tupValue array of pointers to projection results + * exprContext expression context for ExecTargetList + * slot slot to place projection result in + * ---------------- + */ +typedef struct ProjectionInfo { + NodeTag type; + List *pi_targetlist; + int pi_len; + Datum *pi_tupValue; + ExprContext *pi_exprContext; + TupleTableSlot *pi_slot; +} ProjectionInfo; + +/* ---------------- + * JunkFilter + * + * this class is used to store information regarding junk attributes. + * A junk attribute is an attribute in a tuple that is needed only for + * storing intermediate information in the executor, and does not belong + * in the tuple proper. For example, when we do a delete or replace + * query, the planner adds an entry to the targetlist so that the tuples + * returned to ExecutePlan() contain an extra attribute: the t_ctid of + * the tuple to be deleted/replaced. This is needed for amdelete() and + * amreplace(). In doing a delete this does not make much of a + * difference, but in doing a replace we have to make sure we disgard + * all the junk in a tuple before calling amreplace(). Otherwise the + * inserted tuple will not have the correct schema. This solves a + * problem with hash-join and merge-sort replace plans. -cim 10/10/90 + * + * targetList: the original target list (including junk attributes). + * length: the length of 'targetList'. + * tupType: the tuple descriptor for the "original" tuple + * (including the junk attributes). + * cleanTargetList: the "clean" target list (junk attributes removed). + * cleanLength: the length of 'cleanTargetList' + * cleanTupTyp: the tuple descriptor of the "clean" tuple (with + * junk attributes removed). + * cleanMap: A map with the correspondance between the non junk + * attributes of the "original" tuple and the + * attributes of the "clean" tuple. + * ---------------- + */ +typedef struct JunkFilter { + NodeTag type; + List *jf_targetList; + int jf_length; + TupleDesc jf_tupType; + List *jf_cleanTargetList; + int jf_cleanLength; + TupleDesc jf_cleanTupType; + AttrNumber *jf_cleanMap; +} JunkFilter; + +/* ---------------- + * EState information + * + * direction direction of the scan + * + * range_table array of scan relation information + * + * result_relation_information for update queries + * + * into_relation_descriptor relation being retrieved "into" + * + * param_list_info information needed to transform + * Param nodes into Const nodes + * + * BaseId during InitPlan(), each node is + * given a number. this is the next + * number to be assigned. + * + * tupleTable this is a pointer to an array + * of pointers to tuples used by + * the executor at any given moment. 
+ * + * junkFilter contains information used to + * extract junk attributes from a tuple. + * (see JunkFilter above) + * + * refcount local buffer refcounts used in + * an ExecMain cycle. this is introduced + * to avoid ExecStart's unpinning each + * other's buffers when called recursively + * ---------------- + */ +typedef struct EState { + NodeTag type; + ScanDirection es_direction; + List *es_range_table; + RelationInfo *es_result_relation_info; + Relation es_into_relation_descriptor; + ParamListInfo es_param_list_info; + int es_BaseId; + TupleTable es_tupleTable; + JunkFilter *es_junkFilter; + int *es_refcount; +} EState; + +/* ---------------- + * Executor Type information needed by plannodes.h + * + *| Note: the bogus classes CommonState and CommonScanState exist only + *| because our inheritance system only allows single inheritance + *| and we have to have unique slot names. Hence two or more + *| classes which want to have a common slot must ALL inherit + *| the slot from some other class. (This is a big hack to + *| allow our classes to share slot names..) + *| + *| Example: + *| the class Result and the class NestLoop nodes both want + *| a slot called "OuterTuple" so they both have to inherit + *| it from some other class. In this case they inherit + *| it from CommonState. "CommonState" and "CommonScanState" are + *| the best names I could come up with for this sort of + *| stuff. + *| + *| As a result, many classes have extra slots which they + *| don't use. These slots are denoted (unused) in the + *| comment preceeding the class definition. If you + *| comes up with a better idea of a way of doing things + *| along these lines, then feel free to make your idea + *| known to me.. -cim 10/15/89 + * ---------------- + */ + +/* ---------------------------------------------------------------- + * Common Executor State Information + * ---------------------------------------------------------------- + */ + +/* BaseNode removed -- base_id moved into CommonState - jolly */ + +/* ---------------- + * CommonState information + * + *| this is a bogus class used to hold slots so other + *| nodes can inherit them... + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * + * ---------------- + */ +typedef struct CommonState { + NodeTag type; /* its first field is NodeTag */ + int cs_base_id; + TupleTableSlot *cs_OuterTupleSlot; + TupleTableSlot *cs_ResultTupleSlot; + ExprContext *cs_ExprContext; + ProjectionInfo *cs_ProjInfo; + bool cs_TupFromTlist; +} CommonState; + + +/* ---------------------------------------------------------------- + * Control Node State Information + * ---------------------------------------------------------------- + */ + +/* ---------------- + * ResultState information + * + * done flag which tells us to quit when we + * have already returned a constant tuple. 
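+ *
+ *	(ResultState embeds CommonState as its first member, so the
+ *	inherited slots are reached as, e.g., resstate->cstate.cs_ExprContext,
+ *	and a ResultState* may also be cast to CommonState* -- an
+ *	illustration of the single-inheritance convention noted above.)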
+ * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct ResultState { + CommonState cstate; /* its first field is NodeTag */ + int rs_done; +} ResultState; + +/* ---------------- + * AppendState information + * + * append nodes have this field "unionplans" which is this + * list of plans to execute in sequence.. these variables + * keep track of things.. + * + * whichplan which plan is being executed + * nplans how many plans are in the list + * initialized array of ExecInitNode() results + * rtentries range table for the current plan + * result_relation_info_list array of each subplan's result relation info + * junkFilter_list array of each subplan's junk filter + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct AppendState { + CommonState cstate; /* its first field is NodeTag */ + int as_whichplan; + int as_nplans; + bool *as_initialized; + List *as_rtentries; + List *as_result_relation_info_list; + List *as_junkFilter_list; +} AppendState; + +/* ---------------------------------------------------------------- + * Scan State Information + * ---------------------------------------------------------------- + */ + +/* ---------------- + * CommonScanState information + * + * CommonScanState is a class like CommonState, but is used more + * by the nodes like SeqScan and Sort which want to + * keep track of an underlying relation. + * + * currentRelation relation being scanned + * currentScanDesc current scan descriptor for scan + * ScanTupleSlot pointer to slot in tuple table holding scan tuple + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct CommonScanState { + CommonState cstate; /* its first field is NodeTag */ + Relation css_currentRelation; + HeapScanDesc css_currentScanDesc; + TupleTableSlot *css_ScanTupleSlot; +} CommonScanState; + +/* ---------------- + * IndexScanState information + * + *| index scans don't use CommonScanState because + *| the underlying AM abstractions for heap scans and + *| index scans are too different.. It would be nice + *| if the current abstraction was more useful but ... 
-cim 10/15/89 + * + * IndexPtr current index in use + * NumIndices number of indices in this scan + * ScanKeys Skey structures to scan index rels + * NumScanKeys array of no of keys in each Skey struct + * RuntimeKeyInfo array of array of flags for Skeys evaled at runtime + * RelationDescs ptr to array of relation descriptors + * ScanDescs ptr to array of scan descriptors + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct IndexScanState { + CommonState cstate; /* its first field is NodeTag */ + int iss_NumIndices; + int iss_IndexPtr; + ScanKey *iss_ScanKeys; + int *iss_NumScanKeys; + Pointer iss_RuntimeKeyInfo; + RelationPtr iss_RelationDescs; + IndexScanDescPtr iss_ScanDescs; +} IndexScanState; + + +/* ---------------------------------------------------------------- + * Join State Information + * ---------------------------------------------------------------- + */ + +/* ---------------- + * JoinState information + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef CommonState JoinState; + +/* ---------------- + * NestLoopState information + * + * PortalFlag Set to enable portals to work. + * + * JoinState information + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct NestLoopState { + JoinState jstate; /* its first field is NodeTag */ + bool nl_PortalFlag; +} NestLoopState; + +/* ---------------- + * MergeJoinState information + * + * OSortopI outerKey1 sortOp innerKey1 ... + * ISortopO innerkey1 sortOp outerkey1 ... + * JoinState current "state" of join. 
see executor.h + * MarkedTupleSlot pointer to slot in tuple table for marked tuple + * + * JoinState information + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct MergeJoinState { + JoinState jstate; /* its first field is NodeTag */ + List *mj_OSortopI; + List *mj_ISortopO; + int mj_JoinState; + TupleTableSlot *mj_MarkedTupleSlot; +} MergeJoinState; + +/* ---------------- + * HashJoinState information + * + * hj_HashTable address of the hash table for the hashjoin + * hj_HashTableShmId shared memory id of hash table + * hj_CurBucket the current hash bucket that we are searching + * for matches of the current outer tuple + * hj_CurTuple the current matching inner tuple in the + * current hash bucket + * hj_CurOTuple the current matching inner tuple in the + * current hash overflow chain + * hj_InnerHashKey the inner hash key in the hashjoin condition + * hj_OuterBatches file descriptors for outer batches + * hj_InnerBatches file descriptors for inner batches + * hj_OuterReadPos current read position of outer batch + * hj_OuterReadBlk current read block of outer batch + * hj_OuterTupleSlot tuple slot for outer tuples + * hj_HashTupleSlot tuple slot for hashed tuples + * + * + * + * JoinState information + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct HashJoinState { + JoinState jstate; /* its first field is NodeTag */ + HashJoinTable hj_HashTable; + IpcMemoryId hj_HashTableShmId; + HashBucket hj_CurBucket; + HeapTuple hj_CurTuple; + OverflowTuple hj_CurOTuple; + Var *hj_InnerHashKey; + File *hj_OuterBatches; + File *hj_InnerBatches; + char *hj_OuterReadPos; + int hj_OuterReadBlk; + TupleTableSlot *hj_OuterTupleSlot; + TupleTableSlot *hj_HashTupleSlot; +} HashJoinState; + + +/* ---------------------------------------------------------------- + * Materialization State Information + * ---------------------------------------------------------------- + */ + +/* ---------------- + * MaterialState information + * + * materialize nodes are used to materialize the results + * of a subplan into a temporary relation. + * + * Flag indicated whether subplan has been materialized + * TempRelation temporary relation containing result of executing + * the subplan. 
+ * + * CommonScanState information + * + * currentRelation relation descriptor of sorted relation + * currentScanDesc current scan descriptor for scan + * ScanTupleSlot pointer to slot in tuple table holding scan tuple + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct MaterialState { + CommonScanState csstate; /* its first field is NodeTag */ + bool mat_Flag; + Relation mat_TempRelation; +} MaterialState; + +/* --------------------- + * AggregateState information + * + * done indicated whether aggregate has been materialized + * ------------------------- + */ +typedef struct AggState { + CommonScanState csstate; /* its first field is NodeTag */ + bool agg_done; +} AggState; + +/* --------------------- + * GroupState information + * + * ------------------------- + */ +typedef struct GroupState { + CommonScanState csstate; /* its first field is NodeTag */ + bool grp_useLastTuple; /* last tuple not processed yet */ + bool grp_done; + TupleTableSlot *grp_lastSlot; +} GroupState; + +/* ---------------- + * SortState information + * + *| sort nodes are really just a kind of a scan since + *| we implement sorts by retrieveing the entire subplan + *| into a temp relation, sorting the temp relation into + *| another sorted relation, and then preforming a simple + *| unqualified sequential scan on the sorted relation.. + *| -cim 10/15/89 + * + * Flag indicated whether relation has been sorted + * Keys scan key structures used to keep info on sort keys + * TempRelation temporary relation containing result of executing + * the subplan. + * + * CommonScanState information + * + * currentRelation relation descriptor of sorted relation + * currentScanDesc current scan descriptor for scan + * ScanTupleSlot pointer to slot in tuple table holding scan tuple + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct SortState { + CommonScanState csstate; /* its first field is NodeTag */ + bool sort_Flag; + ScanKey sort_Keys; + Relation sort_TempRelation; +} SortState; + +/* ---------------- + * UniqueState information + * + * Unique nodes are used "on top of" sort nodes to discard + * duplicate tuples returned from the sort phase. Basically + * all it does is compare the current tuple from the subplan + * with the previously fetched tuple stored in OuterTuple and + * if the two are identical, then we just fetch another tuple + * from the sort and try again. 
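+ *
+ *	In outline (a sketch of the idea only, not the executor's actual
+ *	code):
+ *
+ *	    slot = ExecProcNode(outerPlan);
+ *	    while (a previous tuple exists && the new tuple equals it)
+ *	        slot = ExecProcNode(outerPlan);
+ *	    remember slot as the previous tuple and return it;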
+ * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef CommonState UniqueState; + + +/* ---------------- + * HashState information + * + * hashBatches file descriptors for the batches + * + * CommonState information + * + * OuterTupleSlot pointer to slot containing current "outer" tuple + * ResultTupleSlot pointer to slot in tuple table for projected tuple + * ExprContext node's current expression context + * ProjInfo info this node uses to form tuple projections + * NumScanAttributes size of ScanAttributes array + * ScanAttributes attribute numbers of interest in this tuple + * ---------------- + */ +typedef struct HashState { + CommonState cstate; /* its first field is NodeTag */ + File *hashBatches; +} HashState; + +/* ----------------------- + * TeeState information + * leftPlace : next item in the queue unseen by the left parent + * rightPlace : next item in the queue unseen by the right parent + * lastPlace : last item in the queue + * bufferRelname : name of the relation used as the buffer queue + * bufferRel : the relation used as the buffer queue + * mcxt : for now, tee's have their own memory context + * may be cleaned up later if portals are cleaned up + * + * initially, a Tee starts with [left/right]Place variables set to -1. + * on cleanup, queue is free'd when both leftPlace and rightPlace = -1 + * ------------------------- +*/ +typedef struct TeeState { + CommonState cstate; /* its first field is NodeTag */ + int tee_leftPlace; + int tee_rightPlace; + int tee_lastPlace; + char *tee_bufferRelname; + Relation tee_bufferRel; + MemoryContext tee_mcxt; + HeapScanDesc tee_leftScanDesc; + HeapScanDesc tee_rightScanDesc; +} TeeState; + +#endif /* EXECNODES_H */ diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c new file mode 100644 index 00000000000..20617747c25 --- /dev/null +++ b/src/backend/nodes/list.c @@ -0,0 +1,438 @@ +/*------------------------------------------------------------------------- + * + * list.c-- + * various list handling routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/list.c,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + * NOTES + * XXX a few of the following functions are duplicated to handle + * List of pointers and List of integers separately. Some day, + * someone should unify them. - ay 11/2/94 + * This file needs cleanup. + * + * HISTORY + * AUTHOR DATE MAJOR EVENT + * Andrew Yu Oct, 1994 file creation + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" +#include "nodes/pg_list.h" +#include "nodes/parsenodes.h" +#include "utils/builtins.h" /* for namecpy */ +#include "utils/elog.h" +#include "utils/palloc.h" + +List * +makeList(void *elem, ...) 
+{ + va_list args; + List *retval = NIL; + List *temp = NIL; + List *tempcons = NIL; + + va_start(args, elem); + + temp = elem; + while (temp != (void *) -1) { + temp = lcons(temp, NIL); + if (tempcons == NIL) + retval = temp; + else + lnext(tempcons) = temp; + tempcons = temp; + + temp = va_arg(args, void *); + } + + va_end(args); + + return (retval); +} + +List * +lcons(void *datum, List *list) +{ + List *l = makeNode(List); + lfirst(l) = datum; + lnext(l) = list; + return l; +} + +List * +lappend(List *list, void *obj) +{ + return nconc(list, lcons(obj, NIL)); +} + +Value * +makeInteger(long i) +{ + Value *v = makeNode(Value); + v->type = T_Integer; + v->val.ival = i; + return v; +} + +Value * +makeFloat(double d) +{ + Value *v = makeNode(Value); + v->type = T_Float; + v->val.dval = d; + return v; +} + +Value * +makeString(char *str) +{ + Value *v = makeNode(Value); + v->type = T_String; + v->val.str = str; + return v; +} + +/* n starts with 0 */ +void * +nth(int n, List *l) +{ + /* XXX assume list is long enough */ + while(n > 0) { + l = lnext(l); + n--; + } + return lfirst(l); +} + +/* this is here solely for rt_store. Get rid of me some day! */ +void +set_nth(List *l, int n, void *elem) +{ + /* XXX assume list is long enough */ + while(n > 0) { + l = lnext(l); + n--; + } + lfirst(l) = elem; + return; +} + +int +length(List *l) +{ + int i=0; + while(l!=NIL) { + l = lnext(l); + i++; + } + return i; +} + +void +freeList(List *list) +{ + while(list!=NIL) { + List *l = list; + list = lnext(list); + pfree(l); + } +} + +/* + * below are for backwards compatibility + */ +List * +append(List *l1, List *l2) +{ + List *newlist, *newlist2, *p; + + if (l1==NIL) + return copyObject(l2); + + newlist = copyObject(l1); + newlist2 = copyObject(l2); + + for (p=newlist; lnext(p)!=NIL; p=lnext(p)) + ; + lnext(p) = newlist2; + return newlist; +} + +/* + * below are for backwards compatibility + */ +List * +intAppend(List *l1, List *l2) +{ + List *newlist, *newlist2, *p; + + if (l1==NIL) + return listCopy(l2); + + newlist = listCopy(l1); + newlist2 = listCopy(l2); + + for (p=newlist; lnext(p)!=NIL; p=lnext(p)) + ; + lnext(p) = newlist2; + return newlist; +} + +List * +nconc(List *l1, List *l2) +{ + List *temp; + + if (l1 == NIL) + return l2; + if (l2 == NIL) + return l1; + if (l1 == l2) + elog(WARN, "tryout to nconc a list to itself"); + + for (temp = l1; lnext(temp)!=NULL; temp = lnext(temp)) + ; + + lnext(temp) = l2; + return(l1); /* list1 is now list1[]list2 */ +} + + +List * +nreverse(List *list) +{ + List *rlist = NIL; + List *p = NIL; + + if(list==NULL) + return(NIL); + + if (length(list) == 1) + return(list); + + for (p = list; p!=NULL; p = lnext(p)) { + rlist = lcons(lfirst(p),rlist); + } + + lfirst(list) = lfirst(rlist); + lnext(list) = lnext(rlist); + return(list); +} + +/* + * same + * + * Returns t if two lists contain the same elements. + * now defined in lispdep.c + * + * XXX only good for IntList -ay + */ +bool +same(List *foo, List *bar) +{ + List *temp = NIL; + + if (foo == NULL) + return (bar==NULL); + if (bar == NULL) + return (foo==NULL); + if (length(foo) == length(bar)) { + foreach (temp,foo) { + if (!intMember((int)lfirst(temp),bar)) + return(false); + } + return(true); + } + return(false); + +} + +List * +LispUnion(List *foo, List *bar) +{ + List *retval = NIL; + List *i = NIL; + List *j = NIL; + + if (foo==NIL) + return(bar); /* XXX - should be copy of bar */ + + if (bar==NIL) + return(foo); /* XXX - should be copy of foo */ + + foreach (i,foo) { + foreach (j,bar) { + if (! 
equal(lfirst(i), lfirst(j))) { + retval = lappend(retval,lfirst(i)); + break; + } + } + } + foreach(i,bar) { + retval = lappend(retval,lfirst(i)); + } + + return(retval); +} + +List * +LispUnioni(List *foo, List *bar) +{ + List *retval = NIL; + List *i = NIL; + List *j = NIL; + + if (foo==NIL) + return(bar); /* XXX - should be copy of bar */ + + if (bar==NIL) + return(foo); /* XXX - should be copy of foo */ + + foreach (i,foo) { + foreach (j,bar) { + if (lfirsti(i) != lfirsti(j)) { + retval = lappendi(retval,lfirst(i)); + break; + } + } + } + foreach(i,bar) { + retval = lappendi(retval, lfirsti(i)); + } + + return(retval); +} + +/* + * member() + * - nondestructive, returns t iff foo is a member of the list + * bar + */ +bool +member(void *foo, List *bar) +{ + List *i; + foreach (i,bar) + if (equal((Node*)(lfirst(i)),(Node*)foo)) + return(true); + return(false); +} + +bool +intMember(int foo, List *bar) +{ + List *i; + foreach (i,bar) + if (foo == (int)lfirst(i)) + return(true); + return(false); +} + +/* + * lremove - + * only does pointer comparisons. Removes 'elem' from the the linked list. + */ +List * +lremove(void *elem, List *list) +{ + List *l; + List *prev = NIL; + List *result = list; + + foreach(l, list) { + if (elem == lfirst(l)) + break; + prev = l; + } + if (l!=NULL) { + if (prev == NIL) { + result = lnext(list); + } else { + lnext(prev) = lnext(l); + } + } + return result; +} + +List * +LispRemove(void *elem, List *list) +{ + List *temp = NIL; + List *prev = NIL; + + if (equal(elem, lfirst(list))) + return lnext(list); + + temp = lnext(list); + prev = list; + while(temp!=NIL) { + if (equal(elem, lfirst(temp))) { + lnext(prev) = lnext(temp); + break; + } + temp = lnext(temp); + prev = lnext(prev); + } + return(list); +} + +List * +intLispRemove(int elem, List *list) +{ + List *temp = NIL; + List *prev = NIL; + + if (elem == (int)lfirst(list)) + return lnext(list); + + temp = lnext(list); + prev = list; + while(temp!=NIL) { + if (elem == (int)lfirst(temp)) { + lnext(prev) = lnext(temp); + break; + } + temp = lnext(temp); + prev = lnext(prev); + } + return(list); +} + +List * +set_difference(List *list1, List *list2) +{ + List *temp1 = NIL; + List *result = NIL; + + if (list2==NIL) + return(list1); + + foreach (temp1, list1) { + if (!member(lfirst(temp1), list2)) + result = lappend(result, lfirst(temp1)); + } + return(result); +} + +List * +set_differencei(List *list1, List *list2) +{ + List *temp1 = NIL; + List *result = NIL; + + if (list2==NIL) + return(list1); + + foreach (temp1, list1) { + if (!intMember(lfirsti(temp1), list2)) + result = lappendi(result, lfirst(temp1)); + } + return(result); +} + diff --git a/src/backend/nodes/makefuncs.c b/src/backend/nodes/makefuncs.c new file mode 100644 index 00000000000..a461524e7ad --- /dev/null +++ b/src/backend/nodes/makefuncs.c @@ -0,0 +1,117 @@ +/* + * makefuncs.c-- + * creator functions for primitive nodes. The functions here are for + * the most frequently created nodes. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/makefuncs.c,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + * NOTES + * Creator functions in POSTGRES 4.2 are generated automatically. Most of + * them are rarely used. Now we don't generate them any more. If you want + * one, you have to write it yourself. 
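+ *
+ *	A hand-written creator simply follows the pattern of the functions
+ *	below; e.g. a hypothetical one for Param nodes (not part of this
+ *	file, field types abridged) might read:
+ *
+ *	    Param *
+ *	    makeParam(int paramkind, AttrNumber paramid, char *paramname,
+ *		      Oid paramtype)
+ *	    {
+ *		Param *p = makeNode(Param);
+ *
+ *		p->paramkind = paramkind;
+ *		p->paramid   = paramid;
+ *		p->paramname = paramname;
+ *		p->paramtype = paramtype;
+ *		return p;
+ *	    }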
+ * + * HISTORY + * AUTHOR DATE MAJOR EVENT + * Andrew Yu Oct 20, 1994 file creation + */ +#include "postgres.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "nodes/makefuncs.h" + +/* + * makeOper - + * creates an Oper node + */ +Oper * +makeOper(Oid opno, + Oid opid, + Oid opresulttype, + int opsize, + FunctionCachePtr op_fcache) +{ + Oper *oper = makeNode(Oper); + + oper->opno = opno; + oper->opid = opid; + oper->opresulttype = opresulttype; + oper->opsize = opsize; + oper->op_fcache = op_fcache; + return oper; +} + +/* + * makeVar - + * creates a Var node + * + */ +Var * +makeVar(Index varno, + AttrNumber varattno, + Oid vartype, + Index varnoold, + AttrNumber varoattno) +{ + Var *var = makeNode(Var); + + var->varno = varno; + var->varattno = varattno; + var->vartype = vartype; + var->varnoold = varnoold; + var->varoattno = varoattno; + + return var; +} + +/* + * makeResdom - + * creates a Resdom (Result Domain) node + */ +Resdom * +makeResdom(AttrNumber resno, + Oid restype, + int reslen, + char *resname, + Index reskey, + Oid reskeyop, + int resjunk) +{ + Resdom *resdom = makeNode(Resdom); + + resdom->resno = resno; + resdom->restype = restype; + resdom->reslen = reslen; + resdom->resname = resname; + resdom->reskey = reskey; + resdom->reskeyop = reskeyop; + resdom->resjunk = resjunk; + return resdom; +} + +/* + * makeConst - + * creates a Const node + */ +Const * +makeConst(Oid consttype, + Size constlen, + Datum constvalue, + bool constisnull, + bool constbyval, + bool constisset) +{ + Const *cnst = makeNode(Const); + + cnst->consttype = consttype; + cnst->constlen = constlen; + cnst->constvalue = constvalue; + cnst->constisnull = constisnull; + cnst->constbyval = constbyval; + cnst->constisset = constisset; + return cnst; +} + diff --git a/src/backend/nodes/makefuncs.h b/src/backend/nodes/makefuncs.h new file mode 100644 index 00000000000..4c6b0291674 --- /dev/null +++ b/src/backend/nodes/makefuncs.h @@ -0,0 +1,48 @@ +/*------------------------------------------------------------------------- + * + * makefuncs.h-- + * prototypes for the creator functions (for primitive nodes) + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: makefuncs.h,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef MAKEFUNC_H +#define MAKEFUNC_H + +#include "access/attnum.h" +#include "catalog/pg_operator.h" +#include "utils/fcache.h" +#include "nodes/primnodes.h" + +extern Oper *makeOper(Oid opno, + Oid opid, + Oid opresulttype, + int opsize, + FunctionCachePtr op_fcache); + +extern Var *makeVar(Index varno, + AttrNumber varattno, + Oid vartype, + Index varnoold, + AttrNumber varoattno); + +extern Resdom *makeResdom(AttrNumber resno, + Oid restype, + int reslen, + char *resname, + Index reskey, + Oid reskeyop, + int resjunk); + +extern Const *makeConst(Oid consttype, + Size constlen, + Datum constvalue, + bool constisnull, + bool constbyval, + bool constisset); + +#endif /* MAKEFUNC_H */ diff --git a/src/backend/nodes/memnodes.h b/src/backend/nodes/memnodes.h new file mode 100644 index 00000000000..35adee0d9c3 --- /dev/null +++ b/src/backend/nodes/memnodes.h @@ -0,0 +1,101 @@ +/*------------------------------------------------------------------------- + * + * memnodes.h-- + * POSTGRES memory context node definitions. 
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: memnodes.h,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + * XXX the typedefs in this file are different from the other ???nodes.h; + * they are pointers to structures instead of the structures themselves. + * If you're wondering, this is plain laziness. I don't want to touch + * the memory context code which should be revamped altogether some day. + * - ay 10/94 + *------------------------------------------------------------------------- + */ +#ifndef MEMNODES_H +#define MEMNODES_H + +#include "c.h" + +#include "utils/memutils.h" +#include "lib/fstack.h" + +#include "nodes/nodes.h" + +/* + * MemoryContext -- + * A logical context in which memory allocations occur. + * + * The types of memory contexts can be thought of as members of the + * following inheritance hierarchy with properties summarized below. + * + * Node + * | + * MemoryContext___ + * / \ + * GlobalMemory PortalMemoryContext + * / \ + * PortalVariableMemory PortalHeapMemory + * + * Flushed at Flushed at Checkpoints + * Transaction Portal + * Commit Close + * + * GlobalMemory n n n + * PortalVariableMemory n y n + * PortalHeapMemory y y y + */ + +typedef struct MemoryContextMethodsData { + Pointer (*alloc)(); + void (*free_p)(); /* need to use free as a #define, + so can't use free */ + Pointer (*realloc)(); + char* (*getName)(); + void (*dump)(); +} *MemoryContextMethods; + +typedef struct MemoryContext { + NodeTag type; + MemoryContextMethods method; +} *MemoryContext; + +/* think about doing this right some time but we'll have explicit fields + for now -ay 10/94 */ +typedef struct GlobalMemory { + NodeTag type; + MemoryContextMethods method; + AllocSetData setData; + char *name; + OrderedElemData elemData; +} *GlobalMemory; + +typedef MemoryContext *PortalMemoryContext; + +typedef struct PortalVariableMemory { + NodeTag type; + MemoryContextMethods method; + AllocSetData setData; +} *PortalVariableMemory; + +typedef struct PortalHeapMemory { + NodeTag type; + MemoryContextMethods method; + Pointer block; + FixedStackData stackData; +} *PortalHeapMemory; + +/* + * MemoryContextIsValid -- + * True iff memory context is valid. 
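+ *
+ *	Typically used as a sanity check, e.g. (illustration only):
+ *
+ *	    AssertArg(MemoryContextIsValid(context));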
+ */ +#define MemoryContextIsValid(context) \ + (IsA(context,MemoryContext) || IsA(context,GlobalMemory) || \ + IsA(context,PortalVariableMemory) || IsA(context,PortalHeapMemory)) + +#endif /* MEMNODES_H */ + + diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c new file mode 100644 index 00000000000..c28e5da979a --- /dev/null +++ b/src/backend/nodes/nodeFuncs.c @@ -0,0 +1,116 @@ +/*------------------------------------------------------------------------- + * + * nodeFuncs.c-- + * All node routines more complicated than simple access/modification + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/nodeFuncs.c,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "c.h" + +#include "nodes/primnodes.h" +#include "nodes/plannodes.h" +#include "nodes/pg_list.h" +#include "nodes/relation.h" +#include "nodes/nodeFuncs.h" +#include "utils/lsyscache.h" + +/* + * single_node - + * Returns t if node corresponds to a single-noded expression + */ +bool +single_node(Node *node) +{ + if(IsA(node,Ident) || IsA(node,Const) || IsA(node,Var) || IsA(node,Param)) + return(true); + else + return(false); +} + +/***************************************************************************** + * VAR nodes + *****************************************************************************/ + +/* + * var_is_outer + * var_is_inner + * var_is_mat + * var_is_rel + * + * Returns t iff the var node corresponds to (respectively): + * the outer relation in a join + * the inner relation of a join + * a materialized relation + * a base relation (i.e., not an attribute reference, a variable from + * some lower join level, or a sort result) + * var node is an array reference + * + */ +bool +var_is_outer (Var *var) +{ + return((bool)(var->varno == OUTER)); +} + +bool +var_is_inner (Var *var) +{ + return ( (bool) (var->varno == INNER)); +} + +bool +var_is_rel (Var *var) +{ + return (bool) + ! (var_is_inner (var) || var_is_outer (var)); +} + +/***************************************************************************** + * OPER nodes + *****************************************************************************/ + +/* + * replace_opid - + * + * Given a oper node, resets the opfid field with the + * procedure OID (regproc id). + * + * Returns the modified oper node. + * + */ +Oper * +replace_opid (Oper *oper) +{ + oper->opid = get_opcode(oper->opno); + oper->op_fcache = NULL; + return(oper); +} + +/***************************************************************************** + * constant (CONST, PARAM) nodes + *****************************************************************************/ + +/* + * non_null - + * Returns t if the node is a non-null constant, e.g., if the node has a + * valid `constvalue' field. + * + */ +bool +non_null (Expr *c) +{ + + if ( IsA(c,Const) && ! 
((Const*)c)->constisnull ) + return(true); + else + return(false); +} + + + diff --git a/src/backend/nodes/nodeFuncs.h b/src/backend/nodes/nodeFuncs.h new file mode 100644 index 00000000000..c725f251779 --- /dev/null +++ b/src/backend/nodes/nodeFuncs.h @@ -0,0 +1,23 @@ +/*------------------------------------------------------------------------- + * + * nodeFuncs.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodeFuncs.h,v 1.1.1.1 1996/07/09 06:21:32 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODEFUNCS_H +#define NODEFUNCS_H + +extern bool single_node(Node *node); +extern bool var_is_outer(Var *var); +extern bool var_is_inner(Var *var); +extern bool var_is_rel(Var *var); +extern Oper *replace_opid(Oper *oper); +extern bool non_null(Expr *c); + +#endif /* NODEFUNCS_H */ diff --git a/src/backend/nodes/nodes.c b/src/backend/nodes/nodes.c new file mode 100644 index 00000000000..82845cca15c --- /dev/null +++ b/src/backend/nodes/nodes.c @@ -0,0 +1,45 @@ +/*------------------------------------------------------------------------- + * + * nodes.c-- + * support code for nodes (now that we get rid of the home-brew + * inheritance system, our support code for nodes get much simpler) + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/nodes.c,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + * HISTORY + * Andrew Yu Oct 20, 1994 file creation + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" +#include "utils/palloc.h" +#include "utils/elog.h" +#include "nodes/nodes.h" /* where func declarations of this file goes */ + +/* + * newNode - + * create a new node of the specified size and tag the node with the + * specified tag. + * + * !WARNING!: Avoid using newNode directly. You should be using the + * macro makeNode. eg. to create a Resdom node, use makeNode(Resdom) + * + */ +Node * +newNode(Size size, NodeTag tag) +{ + Node *newNode; + + Assert(size >= 4); /* need the tag, at least */ + + newNode = (Node *)palloc(size); + memset((char *)newNode, 0, size); + newNode->type = tag; + return(newNode); +} + diff --git a/src/backend/nodes/nodes.h b/src/backend/nodes/nodes.h new file mode 100644 index 00000000000..7fa9fdb5a93 --- /dev/null +++ b/src/backend/nodes/nodes.h @@ -0,0 +1,299 @@ +/*------------------------------------------------------------------------- + * + * nodes.h-- + * Definitions for tagged nodes. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: nodes.h,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef NODES_H +#define NODES_H + +#include "c.h" + +/* + * The first field of every node is NodeTag. Each node created (with makeNode) + * will have one of the following tags as the value of its first field. + * + * Note that the number of the node tags are not contiguous. We left holes + * here so that we can add more tags without changing the existing enum's. 
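For illustration only (not from the patch): as the warning in nodes.c above says, nodes are normally created with the makeNode macro defined later in this header, which allocates, zeroes, and tags the node in one step:

    Resdom *resdom = makeNode(Resdom);  /* (Resdom *) newNode(sizeof(Resdom), T_Resdom) */

    resdom->resno = 1;                  /* remaining fields stay zeroed: newNode memset()s the block */
    Assert(IsA(resdom, Resdom));        /* the tag has already been set to T_Resdom */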
+ */ +typedef enum NodeTag { + T_Invalid = 0, + + /*--------------------- + * TAGS FOR PLAN NODES (plannodes.h) + *--------------------- + */ + T_Plan = 10, + T_Existential, + T_Result, + T_Append, + T_Scan, + T_SeqScan, + T_IndexScan, + T_Join, + T_NestLoop, + T_MergeJoin, + T_HashJoin, + T_Temp, + T_Material, + T_Sort, + T_Agg, + T_Unique, + T_Hash, + T_Choose, + T_Tee, + T_Group, + + /*--------------------- + * TAGS FOR PRIMITIVE NODES (primnodes.h) + *--------------------- + */ + T_Resdom = 100, + T_Fjoin, + T_Expr, + T_Var, + T_Oper, + T_Const, + T_Param, + T_Aggreg, + T_Func, + T_Array, + T_ArrayRef, + + /*--------------------- + * TAGS FOR INNER PLAN NODES (relation.h) + *--------------------- + */ + T_Rel = 200, + T_Path, + T_IndexPath, + T_JoinPath, + T_MergePath, + T_HashPath, + T_OrderKey, + T_JoinKey, + T_MergeOrder, + T_CInfo, + T_JoinMethod, + T_HInfo, + T_MInfo, + T_JInfo, + T_Iter, + T_Stream, + + /*--------------------- + * TAGS FOR EXECUTOR NODES (execnodes.h) + *--------------------- + */ + T_IndexInfo = 300, + T_RelationInfo, + T_TupleCount, + T_TupleTableSlot, + T_ExprContext, + T_ProjectionInfo, + T_JunkFilter, + T_EState, + T_BaseNode, + T_CommonState, + T_ResultState, + T_AppendState, + T_CommonScanState, + T_ScanState, + T_IndexScanState, + T_JoinState, + T_NestLoopState, + T_MergeJoinState, + T_HashJoinState, + T_MaterialState, + T_AggState, + T_GroupState, + T_SortState, + T_UniqueState, + T_HashState, + T_TeeState, + + /*--------------------- + * TAGS FOR MEMORY NODES (memnodes.h) + *--------------------- + */ + T_MemoryContext = 400, + T_GlobalMemory, + T_PortalMemoryContext, + T_PortalVariableMemory, + T_PortalHeapMemory, + + /*--------------------- + * TAGS FOR VALUE NODES (pg_list.h) + *--------------------- + */ + T_Value = 500, + T_List, + T_Integer, + T_Float, + T_String, + T_Null, + + /*--------------------- + * TAGS FOR PARSE TREE NODES (parsenode.h) + *--------------------- + */ + T_Query = 600, + T_AppendStmt, + T_DeleteStmt, + T_ReplaceStmt, + T_CursorStmt, + T_RetrieveStmt, + T_AddAttrStmt, + T_AggregateStmt, + T_ChangeACLStmt, + T_ClosePortalStmt, + T_ClusterStmt, + T_CopyStmt, + T_CreateStmt, + T_VersionStmt, + T_DefineStmt, + T_DestroyStmt, + T_ExtendStmt, + T_FetchStmt, + T_IndexStmt, + T_MoveStmt, + T_ProcedureStmt, + T_PurgeStmt, + T_RecipeStmt, + T_RemoveFuncStmt, + T_RemoveOperStmt, + T_RemoveStmt, + T_RenameStmt, + T_RuleStmt, + T_NotifyStmt, + T_ListenStmt, + T_TransactionStmt, + T_ViewStmt, + T_LoadStmt, + T_CreatedbStmt, + T_DestroydbStmt, + T_VacuumStmt, + T_ExplainStmt, + + T_A_Expr = 700, + T_Attr, + T_A_Const, + T_ParamNo, + T_Ident, + T_FuncCall, + T_A_Indices, + T_ResTarget, + T_ParamString, + T_TimeRange, + T_RelExpr, + T_SortBy, + T_RangeVar, + T_TypeName, + T_IndexElem, + T_ColumnDef, + T_DefElem, + T_TargetEntry, + T_RangeTblEntry, + T_SortClause, + T_GroupClause +} NodeTag; + +/* + * The first field of a node of any type is gauranteed to be the NodeTag. + * Hence the type of any node can be gotten by casting it to Node. Declaring + * a variable to be of Node * (instead of void *) can also facilitate + * debugging. 
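A hypothetical debugging helper (not in the sources) showing the cast described above; it relies only on the NodeTag being the first field of every node:

    static char *
    sketch_tag_name(void *obj)
    {
        switch (((Node *) obj)->type)
        {
        case T_Var:
            return "Var";
        case T_Const:
            return "Const";
        default:
            return "some other node";
        }
    }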
+ */ +typedef struct Node { + NodeTag type; +} Node; + +#define nodeTag(_node_) ((Node*)_node_)->type + +#define makeNode(_node_) (_node_*)newNode(sizeof(_node_),T_##_node_) +#define NodeSetTag(n, t) ((Node *)n)->type = t + +#define IsA(_node_,_tag_) (nodeTag(_node_) == T_##_tag_) + +/* ---------------------------------------------------------------- + * IsA functions (no inheritence any more) + * ---------------------------------------------------------------- + */ +#define IsA_JoinPath(jp) \ + (nodeTag(jp)==T_JoinPath || nodeTag(jp)==T_MergePath || \ + nodeTag(jp)==T_HashPath) + +#define IsA_Join(j) \ + (nodeTag(j)==T_Join || nodeTag(j)==T_NestLoop || \ + nodeTag(j)==T_MergeJoin || nodeTag(j)==T_HashJoin) + +#define IsA_Temp(t) \ + (nodeTag(t)==T_Temp || nodeTag(t)==T_Material || nodeTag(t)==T_Sort || \ + nodeTag(t)==T_Unique) + +/* ---------------------------------------------------------------- + * extern declarations follow + * ---------------------------------------------------------------- + */ + +/* + * nodes/nodes.c + */ +extern Node *newNode(Size size, NodeTag tag); + +/* + * nodes/{outfuncs.c,print.c} + */ +#define nodeDisplay print + +extern char *nodeToString(void *obj); +extern void print(void *obj); + +/* + * nodes/{readfuncs.c,read.c} + */ +extern void *stringToNode(char *str); + +/* + * nodes/copyfuncs.c + */ +extern void *copyObject(void *obj); + +/* + * nodes/equalfuncs.c + */ +extern bool equal(void *a, void *b); + + +/* ---------------- + * I don't know why this is here. Most likely a hack.. + * -cim 6/3/90 + * ---------------- + */ +typedef float Cost; + +/* + * CmdType - + * enums for type of operation to aid debugging + * + * ??? could have put this in parsenodes.h but many files not in the + * optimizer also need this... + */ +typedef enum CmdType { + CMD_UNKNOWN, + CMD_SELECT, /* select stmt (formerly retrieve) */ + CMD_UPDATE, /* update stmt (formerly replace) */ + CMD_INSERT, /* insert stmt (formerly append) */ + CMD_DELETE, + CMD_NOTIFY, + CMD_UTILITY /* cmds like create, destroy, copy, vacuum, etc. */ +} CmdType; + + +#endif /* NODES_H */ diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c new file mode 100644 index 00000000000..5281d526917 --- /dev/null +++ b/src/backend/nodes/outfuncs.c @@ -0,0 +1,1670 @@ +/*------------------------------------------------------------------------- + * + * outfuncs.c-- + * routines to convert a node to ascii representation + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/outfuncs.c,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + * NOTES + * Every (plan) node in POSTGRES has an associated "out" routine which + * knows how to create its ascii representation. These functions are + * useful for debugging as well as for storing plans in the system + * catalogs (eg. indexes). This is also the plan string sent out in + * Mariposa. + * + * These functions update the in/out argument of type StringInfo + * passed to them. This argument contains the string holding the ASCII + * representation plus some other information (string length, etc.) 
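A hedged usage sketch (not from the patch) of the public entry point nodeToString(), declared in nodes.h above, which drives the _out routines below; the output shown is only indicative of the format these routines produce:

    static void
    sketch_dump_var(void)
    {
        Var  *var;
        char *ascii;

        var = makeNode(Var);
        var->varno = 1;
        var->varattno = 1;

        /* yields something along the lines of "{VAR :varno 1 :varattno 1 ...}" */
        ascii = nodeToString(var);
    }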
+ * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "access/heapam.h" +#include "access/htup.h" +#include "utils/syscache.h" +#include "utils/lsyscache.h" +#include "fmgr.h" +#include "utils/elog.h" +#include "utils/datum.h" +#include "utils/palloc.h" + +#include "nodes/nodes.h" +#include "nodes/execnodes.h" +#include "nodes/pg_list.h" +#include "nodes/plannodes.h" +#include "nodes/parsenodes.h" +#include "nodes/primnodes.h" +#include "nodes/relation.h" + +#include "catalog/pg_type.h" +#include "lib/stringinfo.h" + +static void _outDatum(StringInfo str, Datum value, Oid type); +static void _outNode(StringInfo str, void *obj); + +/* + * _outIntList - + * converts a List of integers + */ +void +_outIntList(StringInfo str, List *list) +{ + List *l; + char buf[500]; + + appendStringInfo(str, "("); + foreach(l, list) { + sprintf(buf, "%d ", (int)lfirst(l)); + appendStringInfo(str, buf); + } + appendStringInfo(str, ")"); +} + +static void +_outQuery(StringInfo str, Query *node) +{ + char buf[500]; + + sprintf(buf, "QUERY"); + appendStringInfo(str,buf); + + sprintf(buf, " :command %d", node->commandType); + appendStringInfo(str,buf); + if (node->utilityStmt && + nodeTag(node->utilityStmt) == T_NotifyStmt) + sprintf(buf," :utility %s", + ((NotifyStmt*)(node->utilityStmt))->relname); + else /* use "" to designate */ + sprintf(buf," :utility \"\""); + appendStringInfo(str,buf); + + sprintf(buf, " :resrel %d", node->resultRelation); + appendStringInfo(str,buf); + sprintf(buf, " :rtable "); + appendStringInfo(str,buf); + _outNode(str, node->rtable); + if (node->uniqueFlag) + sprintf(buf, " :unique %s", node->uniqueFlag); + else /* use "" to designate non-unique */ + sprintf(buf, " :unique \"\""); + appendStringInfo(str,buf); + sprintf(buf, " :targetlist "); + appendStringInfo(str,buf); + _outNode(str, node->targetList); + sprintf(buf, " :qual "); + appendStringInfo(str,buf); + _outNode(str, node->qual); + +} + +/* + * print the basic stuff of all nodes that inherit from Plan + */ +static void +_outPlanInfo(StringInfo str, Plan *node) +{ + char buf[500]; + + sprintf(buf, " :cost %g", node->cost ); + appendStringInfo(str,buf); + sprintf(buf, " :size %d", node->plan_size); + appendStringInfo(str,buf); + sprintf(buf, " :width %d", node->plan_width); + appendStringInfo(str,buf); + sprintf(buf, " :state %s", (node->state == (EState*) NULL ? + "nil" : "non-NIL")); + appendStringInfo(str,buf); + sprintf(buf, " :qptargetlist "); + appendStringInfo(str,buf); + _outNode(str, node->targetlist); + sprintf(buf, " :qpqual "); + appendStringInfo(str,buf); + _outNode(str, node->qual); + sprintf(buf, " :lefttree "); + appendStringInfo(str,buf); + _outNode(str, node->lefttree); + sprintf(buf, " :righttree "); + appendStringInfo(str,buf); + _outNode(str, node->righttree); + +} + +/* + * Stuff from plannodes.h + */ +static void +_outPlan(StringInfo str, Plan *node) +{ + char buf[500]; + + sprintf(buf, "PLAN"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + +} + +static void +_outResult(StringInfo str, Result *node) +{ + char buf[500]; + + sprintf(buf, "RESULT"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :resconstantqual "); + appendStringInfo(str,buf); + _outNode(str, node->resconstantqual); + +} + +/* + * Existential is a subclass of Plan. 
+ */ +static void +_outExistential(StringInfo str, Existential *node) +{ + char buf[500]; + + sprintf(buf, "EXISTENTIAL"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + +} + +/* + * Append is a subclass of Plan. + */ +static void +_outAppend(StringInfo str, Append *node) +{ + char buf[500]; + + sprintf(buf, "APPEND"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :unionplans "); + appendStringInfo(str,buf); + _outNode(str, node->unionplans); + + sprintf(buf, " :unionrelid %d", node->unionrelid); + appendStringInfo(str,buf); + + sprintf(buf, " :unionrtentries "); + appendStringInfo(str,buf); + _outNode(str, node->unionrtentries); + +} + +/* + * Join is a subclass of Plan + */ +static void +_outJoin(StringInfo str, Join *node) +{ + char buf[500]; + + sprintf(buf, "JOIN"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + +} + +/* + * NestLoop is a subclass of Join + */ +static void +_outNestLoop(StringInfo str, NestLoop *node) +{ + char buf[500]; + + sprintf(buf, "NESTLOOP"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); +} + +/* + * MergeJoin is a subclass of Join + */ +static void +_outMergeJoin(StringInfo str, MergeJoin *node) +{ + char buf[500]; + + sprintf(buf, "MERGEJOIN"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :mergeclauses "); + appendStringInfo(str,buf); + _outNode(str, node->mergeclauses); + + sprintf(buf, " :mergesortop %d", node->mergesortop); + appendStringInfo(str,buf); + + sprintf(buf, " :mergerightorder %d", node->mergerightorder[0]); + appendStringInfo(str, buf); + + sprintf(buf, " :mergeleftorder %d", node->mergeleftorder[0]); + appendStringInfo(str, buf); +} + +/* + * HashJoin is a subclass of Join. 
+ */ +static void +_outHashJoin(StringInfo str, HashJoin *node) +{ + char buf[500]; + + sprintf(buf, "HASHJOIN"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :hashclauses "); + appendStringInfo(str,buf); + _outNode(str, node->hashclauses); + + sprintf(buf, " :hashjoinop %d",node->hashjoinop); + appendStringInfo(str,buf); + sprintf(buf, " :hashjointable 0x%x", (int) node->hashjointable); + appendStringInfo(str,buf); + sprintf(buf, " :hashjointablekey %d", node->hashjointablekey); + appendStringInfo(str,buf); + sprintf(buf, " :hashjointablesize %d", node->hashjointablesize); + appendStringInfo(str,buf); + sprintf(buf, " :hashdone %d", node->hashdone); + appendStringInfo(str,buf); +} + +/* + * Scan is a subclass of Node + */ +static void +_outScan(StringInfo str, Scan *node) +{ + char buf[500]; + + sprintf(buf, "SCAN"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :scanrelid %d", node->scanrelid); + appendStringInfo(str,buf); + +} + +/* + * SeqScan is a subclass of Scan + */ +static void +_outSeqScan(StringInfo str, SeqScan *node) +{ + char buf[500]; + + sprintf(buf, "SEQSCAN"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :scanrelid %d", node->scanrelid); + appendStringInfo(str,buf); + + +} + +/* + * IndexScan is a subclass of Scan + */ +static void +_outIndexScan(StringInfo str, IndexScan *node) +{ + char buf[500]; + + sprintf(buf, "INDEXSCAN"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :scanrelid %d", node->scan.scanrelid); + appendStringInfo(str,buf); + + sprintf(buf, " :indxid "); + appendStringInfo(str,buf); + _outIntList(str, node->indxid); + + sprintf(buf, " :indxqual "); + appendStringInfo(str,buf); + _outNode(str, node->indxqual); + +} + +/* + * Temp is a subclass of Plan + */ +static void +_outTemp(StringInfo str, Temp *node) +{ + char buf[500]; + + sprintf(buf, "TEMP"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :tempid %d", node->tempid); + appendStringInfo(str,buf); + sprintf(buf, " :keycount %d", node->keycount); + appendStringInfo(str,buf); + +} + +/* + * Sort is a subclass of Temp + */ +static void +_outSort(StringInfo str, Sort *node) +{ + char buf[500]; + + sprintf(buf, "SORT"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :tempid %d", node->tempid); + appendStringInfo(str,buf); + sprintf(buf, " :keycount %d", node->keycount); + appendStringInfo(str,buf); + +} + +static void +_outAgg(StringInfo str, Agg *node) +{ + char buf[500]; + sprintf(buf, "AGG"); + appendStringInfo(str,buf); + _outPlanInfo(str,(Plan*)node); + + /* the actual Agg fields */ + sprintf(buf, " :numagg %d ", node->numAgg); + appendStringInfo(str, buf); +} + +static void +_outGroup(StringInfo str, Group *node) +{ + char buf[500]; + sprintf(buf, "GRP"); + appendStringInfo(str,buf); + _outPlanInfo(str,(Plan*)node); + + /* the actual Group fields */ + sprintf(buf, " :numCols %d ", node->numCols); + appendStringInfo(str, buf); + sprintf(buf, " :tuplePerGroup %s", node->tuplePerGroup ? "true" : "nil"); + appendStringInfo(str, buf); +} + + + +/* + * For some reason, unique is a subclass of Temp. 
+ */ +static void +_outUnique(StringInfo str, Unique *node) +{ + char buf[500]; + + sprintf(buf, "UNIQUE"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :tempid %d", node->tempid); + appendStringInfo(str,buf); + sprintf(buf, " :keycount %d", node->keycount); + appendStringInfo(str,buf); + +} + + +/* + * Hash is a subclass of Temp + */ +static void +_outHash(StringInfo str, Hash *node) +{ + char buf[500]; + + sprintf(buf, "HASH"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :hashkey "); + appendStringInfo(str,buf); + _outNode(str, node->hashkey); + + sprintf(buf, " :hashtable 0x%x", (int) (node->hashtable)); + appendStringInfo(str,buf); + sprintf(buf, " :hashtablekey %d", node->hashtablekey); + appendStringInfo(str,buf); + sprintf(buf, " :hashtablesize %d", node->hashtablesize); + appendStringInfo(str,buf); +} + +static void +_outTee(StringInfo str, Tee *node) +{ + char buf[500]; + + sprintf(buf, "TEE"); + appendStringInfo(str,buf); + _outPlanInfo(str, (Plan*) node); + + sprintf(buf, " :leftParent %X", (int) (node->leftParent)); + appendStringInfo(str,buf); + sprintf(buf, " :rightParent %X", (int) (node->rightParent)); + appendStringInfo(str,buf); + + sprintf(buf, " :rtentries "); + appendStringInfo(str,buf); + _outNode(str, node->rtentries); +} + + + +/***************************************************************************** + * + * Stuff from primnodes.h. + * + *****************************************************************************/ + + +/* + * Resdom is a subclass of Node + */ +static void +_outResdom(StringInfo str, Resdom *node) +{ + char buf[500]; + + sprintf(buf, "RESDOM"); + appendStringInfo(str,buf); + sprintf(buf, " :resno %hd", node->resno); + appendStringInfo(str,buf); + sprintf(buf, " :restype %d", node->restype); + appendStringInfo(str,buf); + sprintf(buf, " :reslen %d", node->reslen); + appendStringInfo(str,buf); + sprintf(buf, " :resname \"%.*s\"", NAMEDATALEN, + ((node->resname) ? ((char *) node->resname) : "null")); + appendStringInfo(str,buf); + sprintf(buf, " :reskey %d", node->reskey); + appendStringInfo(str,buf); + sprintf(buf, " :reskeyop %ld", (long int) node->reskeyop); + appendStringInfo(str,buf); + sprintf(buf, " :resjunk %d", node->resjunk); + appendStringInfo(str,buf); + +} + +static void +_outFjoin(StringInfo str, Fjoin *node) +{ + char buf[500]; + int i; + + sprintf(buf, "FJOIN"); + appendStringInfo(str,buf); + sprintf(buf, " :initialized %s", node->fj_initialized ? "true":"nil"); + appendStringInfo(str,buf); + sprintf(buf, " :nNodes %d", node->fj_nNodes); + appendStringInfo(str,buf); + + appendStringInfo(str," :innerNode "); + appendStringInfo(str,buf); + _outNode(str, node->fj_innerNode); + + sprintf(buf, " :results @ 0x%x ", (int)(node->fj_results)); + appendStringInfo(str, buf); + + appendStringInfo( str, " :alwaysdone "); + for (i = 0; ifj_nNodes; i++) + { + sprintf(buf, " %s ", ((node->fj_alwaysDone[i]) ? 
"true" : "nil")); + appendStringInfo(str, buf); + } +} + +/* + * Expr is a subclass of Node + */ +static void +_outExpr(StringInfo str, Expr *node) +{ + char buf[500]; + char *opstr; + + sprintf(buf, "EXPR"); + appendStringInfo(str,buf); + + sprintf(buf, " :typeOid %d", node->typeOid); + appendStringInfo(str,buf); + switch(node->opType) { + case OP_EXPR: + opstr = "op"; + break; + case FUNC_EXPR: + opstr = "func"; + break; + case OR_EXPR: + opstr = "or"; + break; + case AND_EXPR: + opstr = "and"; + break; + case NOT_EXPR: + opstr = "not"; + break; + } + sprintf(buf, " :opType %s", opstr); + appendStringInfo(str,buf); + sprintf(buf, " :oper "); + appendStringInfo(str,buf); + _outNode(str, node->oper); + sprintf(buf, " :args "); + appendStringInfo(str,buf); + _outNode(str, node->args); +} + +/* + * Var is a subclass of Expr + */ +static void +_outVar(StringInfo str, Var *node) +{ + char buf[500]; + + sprintf(buf, "VAR"); + appendStringInfo(str,buf); + sprintf(buf, " :varno %d", node->varno); + appendStringInfo(str,buf); + sprintf(buf, " :varattno %hd", node->varattno); + appendStringInfo(str,buf); + sprintf(buf, " :vartype %d", node->vartype); + appendStringInfo(str,buf); + sprintf(buf, " :varnoold %d", node->varnoold); + appendStringInfo(str,buf); + sprintf(buf, " :varoattno %d", node->varoattno); + appendStringInfo(str,buf); +} + +/* + * Const is a subclass of Expr + */ +static void +_outConst(StringInfo str, Const *node) +{ + char buf[500]; + + sprintf(buf, "CONST"); + appendStringInfo(str,buf); + sprintf(buf, " :consttype %d", node->consttype); + appendStringInfo(str,buf); + sprintf(buf, " :constlen %hd", node->constlen); + appendStringInfo(str,buf); + sprintf(buf, " :constisnull %s", (node->constisnull ? "true" : "nil")); + appendStringInfo(str,buf); + sprintf(buf, " :constvalue "); + appendStringInfo(str,buf); + if (node->constisnull) { + sprintf(buf, "NIL "); + appendStringInfo(str,buf); + } else { + _outDatum(str, node->constvalue, node->consttype); + } + sprintf(buf, " :constbyval %s", (node->constbyval ? "true" : "nil")); + appendStringInfo(str,buf); + +} + +/* + * Aggreg + */ +static void +_outAggreg(StringInfo str, Aggreg *node) +{ + char buf[500]; + + sprintf(buf, "AGGREG"); + appendStringInfo(str,buf); + sprintf(buf, " :aggname \"%.*s\"", NAMEDATALEN, (char*)node->aggname); + appendStringInfo(str,buf); + sprintf(buf, " :basetype %d", node->basetype); + appendStringInfo(str,buf); + sprintf(buf, " :aggtype %d", node->aggtype); + appendStringInfo(str,buf); + sprintf(buf, " :aggno %d", node->aggno); + appendStringInfo(str,buf); + + sprintf(buf, " :target "); + appendStringInfo(str,buf); + _outNode(str, node->target); +} + +/* + * Array is a subclass of Expr + */ +static void +_outArray(StringInfo str, Array *node) +{ + char buf[500]; + int i; + sprintf(buf, "ARRAY"); + appendStringInfo(str, buf); + sprintf(buf, " :arrayelemtype %d", node->arrayelemtype); + appendStringInfo(str, buf); + sprintf(buf, " :arrayelemlength %d", node->arrayelemlength); + appendStringInfo(str, buf); + sprintf(buf, " :arrayelembyval %c", (node->arrayelembyval) ? 
't' : 'f'); + appendStringInfo(str, buf); + sprintf(buf, " :arrayndim %d", node->arrayndim); + appendStringInfo(str, buf); + sprintf(buf, " :arraylow "); + appendStringInfo(str, buf); + for (i = 0; i < node->arrayndim; i++){ + sprintf(buf, " %d", node->arraylow.indx[i]); + appendStringInfo(str, buf); + } + sprintf(buf, " :arrayhigh "); + appendStringInfo(str, buf); + for (i = 0; i < node->arrayndim; i++){ + sprintf(buf, " %d", node->arrayhigh.indx[i]); + appendStringInfo(str, buf); + } + sprintf(buf, " :arraylen %d", node->arraylen); + appendStringInfo(str, buf); +} + +/* + * ArrayRef is a subclass of Expr + */ +static void +_outArrayRef(StringInfo str, ArrayRef *node) +{ + char buf[500]; + + sprintf(buf, "ARRAYREF"); + appendStringInfo(str, buf); + sprintf(buf, " :refelemtype %d", node->refelemtype); + appendStringInfo(str, buf); + sprintf(buf, " :refattrlength %d", node->refattrlength); + appendStringInfo(str, buf); + sprintf(buf, " :refelemlength %d", node->refelemlength); + appendStringInfo(str, buf); + sprintf(buf, " :refelembyval %c", (node->refelembyval) ? 't' : 'f'); + appendStringInfo(str, buf); + + sprintf(buf, " :refupperindex "); + appendStringInfo(str, buf); + _outNode(str, node->refupperindexpr); + + sprintf(buf, " :reflowerindex "); + appendStringInfo(str, buf); + _outNode(str, node->reflowerindexpr); + + sprintf(buf, " :refexpr "); + appendStringInfo(str, buf); + _outNode(str, node->refexpr); + + sprintf(buf, " :refassgnexpr "); + appendStringInfo(str, buf); + _outNode(str, node->refassgnexpr); +} + +/* + * Func is a subclass of Expr + */ +static void +_outFunc(StringInfo str, Func *node) +{ + char buf[500]; + + sprintf(buf, "FUNC"); + appendStringInfo(str,buf); + sprintf(buf, " :funcid %d", node->funcid); + appendStringInfo(str,buf); + sprintf(buf, " :functype %d", node->functype); + appendStringInfo(str,buf); + sprintf(buf, " :funcisindex %s", + (node->funcisindex ? "true" : "nil")); + appendStringInfo(str,buf); + sprintf(buf, " :funcsize %d", node->funcsize); + appendStringInfo(str, buf); + sprintf(buf, " :func_fcache @ 0x%x", (int)(node->func_fcache)); + appendStringInfo(str, buf); + + appendStringInfo(str, " :func_tlist "); + _outNode(str, node->func_tlist); + + appendStringInfo(str, " :func_planlist "); + _outNode(str, node->func_planlist); +} + +/* + * Oper is a subclass of Expr + */ +static void +_outOper(StringInfo str, Oper *node) +{ + char buf[500]; + + sprintf(buf, "OPER"); + appendStringInfo(str,buf); + sprintf(buf, " :opno %d", node->opno); + appendStringInfo(str,buf); + sprintf(buf, " :opid %d", node->opid); + appendStringInfo(str,buf); + sprintf(buf, " :opresulttype %d", node->opresulttype); + appendStringInfo(str,buf); + +} + +/* + * Param is a subclass of Expr + */ +static void +_outParam(StringInfo str, Param *node) +{ + char buf[500]; + + sprintf(buf, "PARAM"); + appendStringInfo(str,buf); + sprintf(buf, " :paramkind %d", node->paramkind); + appendStringInfo(str,buf); + sprintf(buf, " :paramid %hd", node->paramid); + appendStringInfo(str,buf); + sprintf(buf, " :paramname \"%.*s\"", NAMEDATALEN, node->paramname); + appendStringInfo(str,buf); + sprintf(buf, " :paramtype %d", node->paramtype); + appendStringInfo(str,buf); + + appendStringInfo(str, " :param_tlist "); + _outNode(str, node->param_tlist); +} + +/* + * Stuff from execnodes.h + */ + +/* + * EState is a subclass of Node. 
+ */ +static void +_outEState(StringInfo str, EState *node) +{ + char buf[500]; + + sprintf(buf, "ESTATE"); + appendStringInfo(str,buf); + sprintf(buf, " :direction %d", node->es_direction); + appendStringInfo(str,buf); + + sprintf(buf, " :range_table "); + appendStringInfo(str,buf); + _outNode(str, node->es_range_table); + + sprintf(buf, " :result_relation_info @ 0x%x", + (int) (node->es_result_relation_info)); + appendStringInfo(str,buf); + +} + +/* + * Stuff from relation.h + */ +static void +_outRel(StringInfo str, Rel *node) +{ + char buf[500]; + + sprintf(buf, "REL"); + appendStringInfo(str,buf); + + sprintf(buf, " :relids "); + appendStringInfo(str,buf); + _outIntList(str, node->relids); + + sprintf(buf, " :indexed %s", (node->indexed ? "true" : "nil")); + appendStringInfo(str,buf); + sprintf(buf, " :pages %u", node->pages); + appendStringInfo(str,buf); + sprintf(buf, " :tuples %u", node->tuples); + appendStringInfo(str,buf); + sprintf(buf, " :size %u", node->size); + appendStringInfo(str,buf); + sprintf(buf, " :width %u", node->width); + appendStringInfo(str,buf); + + sprintf(buf, " :targetlist "); + appendStringInfo(str,buf); + _outNode(str, node->targetlist); + + sprintf(buf, " :pathlist "); + appendStringInfo(str,buf); + _outNode(str, node->pathlist); + + /* + * Not sure if these are nodes or not. They're declared as + * struct Path *. Since i don't know, i'll just print the + * addresses for now. This can be changed later, if necessary. + */ + + sprintf(buf, " :unorderedpath @ 0x%x", (int)(node->unorderedpath)); + appendStringInfo(str,buf); + sprintf(buf, " :cheapestpath @ 0x%x", (int)(node->cheapestpath)); + appendStringInfo(str,buf); + + sprintf(buf, " :pruneable %s", (node->pruneable ? "true" : "nil")); + appendStringInfo(str,buf); + +#if 0 + sprintf(buf, " :classlist "); + appendStringInfo(str,buf); + _outNode(str, node->classlist); + + sprintf(buf, " :indexkeys "); + appendStringInfo(str,buf); + _outNode(str, node->indexkeys); + + sprintf(buf, " :ordering "); + appendStringInfo(str,buf); + _outNode(str, node->ordering); +#endif + + sprintf(buf, " :clauseinfo "); + appendStringInfo(str,buf); + _outNode(str, node->clauseinfo); + + sprintf(buf, " :joininfo "); + appendStringInfo(str,buf); + _outNode(str, node->joininfo); + + sprintf(buf, " :innerjoin "); + appendStringInfo(str,buf); + _outNode(str, node->innerjoin); + +} + +/* + * TargetEntry is a subclass of Node. + */ +static void +_outTargetEntry(StringInfo str, TargetEntry *node) +{ + char buf[500]; + + sprintf(buf, "TLE"); + appendStringInfo(str,buf); + sprintf(buf, " :resdom "); + appendStringInfo(str,buf); + _outNode(str, node->resdom); + + sprintf(buf, " :expr "); + appendStringInfo(str,buf); + if (node->expr) { + _outNode(str, node->expr); + }else { + appendStringInfo(str, "nil"); + } +} + +static void +_outRangeTblEntry(StringInfo str, RangeTblEntry *node) +{ + char buf[500]; + + sprintf(buf, "RTE"); + appendStringInfo(str,buf); + + sprintf(buf, " :relname \"%.*s\"", NAMEDATALEN, + ((node->relname) ? ((char *) node->relname) : "null")); + appendStringInfo(str,buf); + + sprintf(buf, " :inh %d ", node->inh); + appendStringInfo(str,buf); + + sprintf(buf, " :refname \"%.*s\"", NAMEDATALEN, + ((node->refname) ? ((char *) node->refname) : "null")); + appendStringInfo(str,buf); + + sprintf(buf, " :relid %d ", node->relid); + appendStringInfo(str,buf); +} + +/* + * Path is a subclass of Node. 
+ */ +static void +_outPath(StringInfo str, Path *node) +{ + char buf[500]; + + sprintf(buf, "PATH"); + appendStringInfo(str,buf); + + sprintf(buf, " :pathtype %d", node->pathtype); + appendStringInfo(str,buf); + + sprintf(buf, " :cost %f", node->path_cost); + appendStringInfo(str,buf); + + sprintf(buf, " :keys "); + appendStringInfo(str,buf); + _outNode(str, node->keys); + +} + +/* + * IndexPath is a subclass of Path. + */ +static void +_outIndexPath(StringInfo str, IndexPath *node) +{ + char buf[500]; + + sprintf(buf, "INDEXPATH"); + appendStringInfo(str,buf); + + sprintf(buf, " :pathtype %d", node->path.pathtype); + appendStringInfo(str,buf); + + /* sprintf(buf, " :parent "); + appendStringInfo(str,buf); + _outNode(str, node->parent); */ + + sprintf(buf, " :cost %f", node->path.path_cost); + appendStringInfo(str,buf); + +#if 0 + sprintf(buf, " :p_ordering "); + appendStringInfo(str,buf); + _outNode(str, node->path.p_ordering); +#endif + sprintf(buf, " :keys "); + appendStringInfo(str,buf); + _outNode(str, node->path.keys); + + sprintf(buf, " :indexid "); + appendStringInfo(str,buf); + _outIntList(str, node->indexid); + + sprintf(buf, " :indexqual "); + appendStringInfo(str,buf); + _outNode(str, node->indexqual); + +} + +/* + * JoinPath is a subclass of Path + */ +static void +_outJoinPath(StringInfo str, JoinPath *node) +{ + char buf[500]; + + sprintf(buf, "JOINPATH"); + appendStringInfo(str,buf); + + sprintf(buf, " :pathtype %d", node->path.pathtype); + appendStringInfo(str,buf); + + /* sprintf(buf, " :parent "); + appendStringInfo(str,buf); + _outNode(str, node->parent); */ + + sprintf(buf, " :cost %f", node->path.path_cost); + appendStringInfo(str,buf); + +#if 0 + sprintf(buf, " :p_ordering "); + appendStringInfo(str,buf); + _outNode(str, node->path.p_ordering); +#endif + sprintf(buf, " :keys "); + appendStringInfo(str,buf); + _outNode(str, node->path.keys); + + sprintf(buf, " :pathclauseinfo "); + appendStringInfo(str,buf); + _outNode(str, node->pathclauseinfo); + + /* + * Not sure if these are nodes; they're declared as "struct path *". + * For now, i'll just print the addresses. + */ + + sprintf(buf, " :outerjoinpath @ 0x%x", (int)(node->outerjoinpath)); + appendStringInfo(str,buf); + sprintf(buf, " :innerjoinpath @ 0x%x", (int)(node->innerjoinpath)); + appendStringInfo(str,buf); + + sprintf(buf, " :outerjoincost %f", node->path.outerjoincost); + appendStringInfo(str,buf); + + sprintf(buf, " :joinid "); + appendStringInfo(str,buf); + _outIntList(str, node->path.joinid); + +} + +/* + * MergePath is a subclass of JoinPath. + */ +static void +_outMergePath(StringInfo str, MergePath *node) +{ + char buf[500]; + + sprintf(buf, "MERGEPATH"); + appendStringInfo(str,buf); + + sprintf(buf, " :pathtype %d", node->jpath.path.pathtype); + appendStringInfo(str,buf); + + sprintf(buf, " :cost %f", node->jpath.path.path_cost); + appendStringInfo(str,buf); + + sprintf(buf, " :keys "); + appendStringInfo(str,buf); + _outNode(str, node->jpath.path.keys); + + sprintf(buf, " :pathclauseinfo "); + appendStringInfo(str,buf); + _outNode(str, node->jpath.pathclauseinfo); + + /* + * Not sure if these are nodes; they're declared as "struct path *". + * For now, i'll just print the addresses. 
+ */ + + sprintf(buf, " :outerjoinpath @ 0x%x", (int)(node->jpath.outerjoinpath)); + appendStringInfo(str,buf); + sprintf(buf, " :innerjoinpath @ 0x%x", (int)(node->jpath.innerjoinpath)); + appendStringInfo(str,buf); + + sprintf(buf, " :outerjoincost %f", node->jpath.path.outerjoincost); + appendStringInfo(str,buf); + + sprintf(buf, " :joinid "); + appendStringInfo(str,buf); + _outIntList(str, node->jpath.path.joinid); + + sprintf(buf, " :path_mergeclauses "); + appendStringInfo(str,buf); + _outNode(str, node->path_mergeclauses); + + sprintf(buf, " :outersortkeys "); + appendStringInfo(str,buf); + _outNode(str, node->outersortkeys); + + sprintf(buf, " :innersortkeys "); + appendStringInfo(str,buf); + _outNode(str, node->innersortkeys); + +} + +/* + * HashPath is a subclass of JoinPath. + */ +static void +_outHashPath(StringInfo str, HashPath *node) +{ + char buf[500]; + + sprintf(buf, "HASHPATH"); + appendStringInfo(str,buf); + + sprintf(buf, " :pathtype %d", node->jpath.path.pathtype); + appendStringInfo(str,buf); + + sprintf(buf, " :cost %f", node->jpath.path.path_cost); + appendStringInfo(str,buf); + + sprintf(buf, " :keys "); + appendStringInfo(str,buf); + _outNode(str, node->jpath.path.keys); + + sprintf(buf, " :pathclauseinfo "); + appendStringInfo(str,buf); + _outNode(str, node->jpath.pathclauseinfo); + + /* + * Not sure if these are nodes; they're declared as "struct path *". + * For now, i'll just print the addresses. + */ + + sprintf(buf, " :outerjoinpath @ 0x%x", (int) (node->jpath.outerjoinpath)); + appendStringInfo(str,buf); + sprintf(buf, " :innerjoinpath @ 0x%x", (int) (node->jpath.innerjoinpath)); + appendStringInfo(str,buf); + + sprintf(buf, " :outerjoincost %f", node->jpath.path.outerjoincost); + appendStringInfo(str,buf); + + sprintf(buf, " :joinid "); + appendStringInfo(str,buf); + _outIntList(str, node->jpath.path.joinid); + + sprintf(buf, " :path_hashclauses "); + appendStringInfo(str,buf); + _outNode(str, node->path_hashclauses); + + sprintf(buf, " :outerhashkeys "); + appendStringInfo(str,buf); + _outNode(str, node->outerhashkeys); + + sprintf(buf, " :innerhashkeys "); + appendStringInfo(str,buf); + _outNode(str, node->innerhashkeys); + +} + +/* + * OrderKey is a subclass of Node. + */ +static void +_outOrderKey(StringInfo str, OrderKey *node) +{ + char buf[500]; + + sprintf(buf, "ORDERKEY"); + appendStringInfo(str,buf); + sprintf(buf, " :attribute_number %d", node->attribute_number); + appendStringInfo(str,buf); + sprintf(buf, " :array_index %d", node->array_index); + appendStringInfo(str,buf); + +} + +/* + * JoinKey is a subclass of Node. + */ +static void +_outJoinKey(StringInfo str, JoinKey *node) +{ + char buf[500]; + + sprintf(buf, "JOINKEY"); + appendStringInfo(str,buf); + + sprintf(buf, " :outer "); + appendStringInfo(str,buf); + _outNode(str, node->outer); + + sprintf(buf, " :inner "); + appendStringInfo(str,buf); + _outNode(str, node->inner); + +} + +/* + * MergeOrder is a subclass of Node. 
+ */ +static void +_outMergeOrder(StringInfo str, MergeOrder *node) +{ + char buf[500]; + + sprintf(buf, "MERGEORDER"); + appendStringInfo(str,buf); + + sprintf(buf, " :join_operator %d", node->join_operator); + appendStringInfo(str,buf); + sprintf(buf, " :left_operator %d", node->left_operator); + appendStringInfo(str,buf); + sprintf(buf, " :right_operator %d", node->right_operator); + appendStringInfo(str,buf); + sprintf(buf, " :left_type %d", node->left_type); + appendStringInfo(str,buf); + sprintf(buf, " :right_type %d", node->right_type); + appendStringInfo(str,buf); + +} + +/* + * CInfo is a subclass of Node. + */ +static void +_outCInfo(StringInfo str, CInfo *node) +{ + char buf[500]; + + sprintf(buf, "CINFO"); + appendStringInfo(str,buf); + + sprintf(buf, " :clause "); + appendStringInfo(str,buf); + _outNode(str, node->clause); + + sprintf(buf, " :selectivity %f", node->selectivity); + appendStringInfo(str,buf); + sprintf(buf, " :notclause %s", (node->notclause ? "true" : "nil")); + appendStringInfo(str,buf); + + sprintf(buf, " :indexids "); + appendStringInfo(str,buf); + _outNode(str, node->indexids); + + sprintf(buf, " :mergesortorder "); + appendStringInfo(str,buf); + _outNode(str, node->mergesortorder); + + sprintf(buf, " :hashjoinoperator %d", node->hashjoinoperator); + appendStringInfo(str,buf); + +} + +/* + * JoinMethod is a subclass of Node. + */ +static void +_outJoinMethod(StringInfo str, JoinMethod *node) +{ + char buf[500]; + + sprintf(buf, "JOINMETHOD"); + appendStringInfo(str,buf); + + sprintf(buf, " :jmkeys "); + appendStringInfo(str,buf); + _outNode(str, node->jmkeys); + + sprintf(buf, " :clauses "); + appendStringInfo(str,buf); + _outNode(str, node->clauses); + + +} + +/* + * HInfo is a subclass of JoinMethod. + */ +static void +_outHInfo(StringInfo str, HInfo *node) +{ + char buf[500]; + + sprintf(buf, "HASHINFO"); + appendStringInfo(str,buf); + + sprintf(buf, " :hashop "); + appendStringInfo(str,buf); + sprintf(buf, "%d",node->hashop); + appendStringInfo(str,buf); + + sprintf(buf, " :jmkeys "); + appendStringInfo(str,buf); + _outNode(str, node->jmethod.jmkeys); + + sprintf(buf, " :clauses "); + appendStringInfo(str,buf); + _outNode(str, node->jmethod.clauses); + +} + +/* + * JInfo is a subclass of Node. + */ +static void +_outJInfo(StringInfo str, JInfo *node) +{ + char buf[500]; + + sprintf(buf, "JINFO"); + appendStringInfo(str,buf); + + sprintf(buf, " :otherrels "); + appendStringInfo(str,buf); + _outIntList(str, node->otherrels); + + sprintf(buf, " :jinfoclauseinfo "); + appendStringInfo(str,buf); + _outNode(str, node->jinfoclauseinfo); + + sprintf(buf, " :mergesortable %s", + (node->mergesortable ? "true" : "nil")); + appendStringInfo(str,buf); + sprintf(buf, " :hashjoinable %s", + (node->hashjoinable ? "true" : "nil")); + appendStringInfo(str,buf); + +} + +/* + * Print the value of a Datum given its type. + */ +static void +_outDatum(StringInfo str, Datum value, Oid type) +{ + char buf[500]; + Size length, typeLength; + bool byValue; + int i; + char *s; + + /* + * find some information about the type and the "real" length + * of the datum. 
+ */ + byValue = get_typbyval(type); + typeLength = get_typlen(type); + length = datumGetSize(value, type, byValue, typeLength); + + if (byValue) { + s = (char *) (&value); + sprintf(buf, " %d [ ", length); + appendStringInfo(str,buf); + for (i=0; iiterexpr); +} + +static void +_outStream(StringInfo str, Stream *node) +{ + char buf[500]; + + appendStringInfo(str,"STREAM"); + + sprintf(buf, " :pathptr @ 0x%x", (int)(node->pathptr)); + appendStringInfo(str,buf); + + sprintf(buf, " :cinfo @ 0x%x", (int)(node->cinfo)); + appendStringInfo(str,buf); + + sprintf(buf, " :clausetype %d", (int)(node->clausetype)); + appendStringInfo(str,buf); + + sprintf(buf, " :upstream @ 0x%x", (int)(node->upstream)); + appendStringInfo(str,buf); + + sprintf(buf, " :downstream @ 0x%x", (int)(node->downstream)); + appendStringInfo(str,buf); + + sprintf(buf, " :groupup %d", node->groupup); + appendStringInfo(str,buf); + + sprintf(buf, " :groupcost %f", node->groupcost); + appendStringInfo(str,buf); + + sprintf(buf, " :groupsel %f", node->groupsel); + appendStringInfo(str,buf); +} + +static void +_outValue(StringInfo str, Value *value) +{ + char buf[500]; + + switch(value->type) { + case T_String: + sprintf(buf, "\"%s\"", value->val.str); + appendStringInfo(str, buf); + break; + case T_Integer: + sprintf(buf, "%ld", value->val.ival); + appendStringInfo(str, buf); + break; + case T_Float: + sprintf(buf, "%f", value->val.dval); + appendStringInfo(str, buf); + break; + default: + break; + } + return; +} + +/* + * _outNode - + * converts a Node into ascii string and append it to 'str' + */ +static void +_outNode(StringInfo str, void *obj) +{ + if (obj==NULL) { + appendStringInfo(str, "nil"); + return; + } + + if (nodeTag(obj)==T_List) { + List *l; + appendStringInfo(str, "("); + foreach(l, (List*)obj) { + _outNode(str, lfirst(l)); + if (lnext(l)) + appendStringInfo(str, " "); + } + appendStringInfo(str, ")"); + }else { + appendStringInfo(str, "{"); + switch(nodeTag(obj)) { + case T_Query: + _outQuery(str, obj); + break; + case T_Plan: + _outPlan(str, obj); + break; + case T_Result: + _outResult(str, obj); + break; + case T_Existential: + _outExistential(str, obj); + break; + case T_Append: + _outAppend(str, obj); + break; + case T_Join: + _outJoin(str, obj); + break; + case T_NestLoop: + _outNestLoop(str, obj); + break; + case T_MergeJoin: + _outMergeJoin(str, obj); + break; + case T_HashJoin: + _outHashJoin(str, obj); + break; + case T_Scan: + _outScan(str, obj); + break; + case T_SeqScan: + _outSeqScan(str, obj); + break; + case T_IndexScan: + _outIndexScan(str, obj); + break; + case T_Temp: + _outTemp(str, obj); + break; + case T_Sort: + _outSort(str, obj); + break; + case T_Agg: + _outAgg(str, obj); + break; + case T_Group: + _outGroup(str, obj); + break; + case T_Unique: + _outUnique(str, obj); + break; + case T_Hash: + _outHash(str, obj); + break; + case T_Tee: + _outTee(str, obj); + break; + case T_Resdom: + _outResdom(str, obj); + break; + case T_Fjoin: + _outFjoin(str, obj); + break; + case T_Expr: + _outExpr(str, obj); + break; + case T_Var: + _outVar(str, obj); + break; + case T_Const: + _outConst(str, obj); + break; + case T_Aggreg: + _outAggreg(str, obj); + break; + case T_Array: + _outArray(str, obj); + break; + case T_ArrayRef: + _outArrayRef(str, obj); + break; + case T_Func: + _outFunc(str, obj); + break; + case T_Oper: + _outOper(str, obj); + break; + case T_Param: + _outParam(str, obj); + break; + case T_EState: + _outEState(str, obj); + break; + case T_Rel: + _outRel(str, obj); + break; + case 
T_TargetEntry: + _outTargetEntry(str, obj); + break; + case T_RangeTblEntry: + _outRangeTblEntry(str, obj); + break; + case T_Path: + _outPath(str, obj); + break; + case T_IndexPath: + _outIndexPath (str, obj); + break; + case T_JoinPath: + _outJoinPath(str, obj); + break; + case T_MergePath: + _outMergePath(str, obj); + break; + case T_HashPath: + _outHashPath(str, obj); + break; + case T_OrderKey: + _outOrderKey(str, obj); + break; + case T_JoinKey: + _outJoinKey(str, obj); + break; + case T_MergeOrder: + _outMergeOrder(str, obj); + break; + case T_CInfo: + _outCInfo(str, obj); + break; + case T_JoinMethod: + _outJoinMethod(str, obj); + break; + case T_HInfo: + _outHInfo(str, obj); + break; + case T_JInfo: + _outJInfo(str, obj); + break; + case T_Iter: + _outIter(str, obj); + break; + case T_Stream: + _outStream(str, obj); + break; + case T_Integer: case T_String: case T_Float: + _outValue(str, obj); + break; + default: + elog(NOTICE, "_outNode: don't know how to print type %d", + nodeTag(obj)); + break; + } + appendStringInfo(str, "}"); + } + return; +} + +/* + * nodeToString - + * returns the ascii representation of the Node + */ +char * +nodeToString(void *obj) +{ + StringInfo str; + char *s; + + if (obj==NULL) + return ""; + Assert(obj!=NULL); + str = makeStringInfo(); + _outNode(str, obj); + s = str->data; + pfree(str); + + return s; +} diff --git a/src/backend/nodes/params.h b/src/backend/nodes/params.h new file mode 100644 index 00000000000..57ee1a023c3 --- /dev/null +++ b/src/backend/nodes/params.h @@ -0,0 +1,90 @@ +/*------------------------------------------------------------------------- + * + * params.h-- + * Declarations/definitions of stuff needed to handle parameterized plans. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: params.h,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PARAMS_H +#define PARAMS_H + +#include "postgres.h" +#include "access/attnum.h" + +/* ---------------------------------------------------------------- + * + * The following are the possible values for the 'paramkind' + * field of a Param node. + * + * PARAM_NAMED: The parameter has a name, i.e. something + * like `$.salary' or `$.foobar'. + * In this case field `paramname' must be a valid Name. + * and field `paramid' must be == 0. + * + * PARAM_NUM: The parameter has only a numeric identifier, + * i.e. something like `$1', `$2' etc. + * The number is contained in the `parmid' field. + * + * PARAM_NEW: Used in PRS2 rule, similar to PARAM_NAMED. + * The `paramname' & `paramid' refer to the "NEW" tuple + * `paramname' is the attribute name and `paramid' its + * attribute number. + * + * PARAM_OLD: Same as PARAM_NEW, but in this case we refer to + * the "OLD" tuple. + */ + +#define PARAM_NAMED 11 +#define PARAM_NUM 12 +#define PARAM_NEW 13 +#define PARAM_OLD 14 +#define PARAM_INVALID 100 + + +/* ---------------------------------------------------------------- + * ParamListInfo + * + * Information needed in order for the executor to handle + * parameterized plans (you know, $.salary, $.name etc. stuff...). + * + * ParamListInfoData contains information needed when substituting a + * Param node with a Const node. + * + * kind : the kind of parameter. 
+ * name : the parameter name (valid if kind == PARAM_NAMED, + * PARAM_NEW or PARAM_OLD) + * id : the parameter id (valid if kind == PARAM_NUM) + * or the attrno (if kind == PARAM_NEW or PARAM_OLD) + * type : PG_TYPE OID of the value + * length : length in bytes of the value + * isnull : true if & only if the value is null (if true then + * the fields 'length' and 'value' are undefined). + * value : the value that has to be substituted in the place + * of the parameter. + * + * ParamListInfo is to be used as an array of ParamListInfoData + * records. An 'InvalidName' in the name field of such a record + * indicates that this is the last record in the array. + * + * ---------------------------------------------------------------- + */ + +typedef struct ParamListInfoData { + int kind; + char *name; + AttrNumber id; + Oid type; + Size length; + bool isnull; + bool byval; + Datum value; +} ParamListInfoData; + +typedef ParamListInfoData *ParamListInfo; + +#endif /* PARAMS_H */ diff --git a/src/backend/nodes/parsenodes.h b/src/backend/nodes/parsenodes.h new file mode 100644 index 00000000000..bc994bb1a0a --- /dev/null +++ b/src/backend/nodes/parsenodes.h @@ -0,0 +1,731 @@ +/*------------------------------------------------------------------------- + * + * parsenodes.h-- + * definitions for parse tree nodes + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: parsenodes.h,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PARSENODES_H +#define PARSENODES_H + +#include "nodes/nodes.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "utils/tqual.h" + +/***************************************************************************** + * Query Tree + *****************************************************************************/ + +/* + * Query - + * all statments are turned into a Query tree (via transformStmt) + * for further processing by the optimizer + * utility statements (i.e. non-optimizable statements) + * have the *utilityStmt field set. + * + * we need the isPortal flag because portal names can be null too; can + * get rid of it if we support CURSOR as a commandType. + * + */ +typedef struct Query { + NodeTag type; + + CmdType commandType; /* select|insert|update|delete|utility */ + + Node *utilityStmt; /* non-null if this is a non-optimizable + statement */ + + int resultRelation; /* target relation (index to rtable) */ + char *into; /* portal (cursor) name */ + bool isPortal; /* is this a retrieve into portal? */ + bool isBinary; /* binary portal? 
*/ + + char *uniqueFlag; /* NULL, '*', or Unique attribute name */ + List *sortClause; /* a list of SortClause's */ + + List *rtable; /* list of range table entries */ + List *targetList; /* target list (of TargetEntry) */ + Node *qual; /* qualifications */ + + List *groupClause; /* list of columns to specified in GROUP BY */ + Node *havingQual; /* qualification of each group */ + + int qry_numAgg; /* number of aggregates in the target list */ + Aggreg **qry_aggs; /* the aggregates */ + + /* internal to planner */ + List *base_relation_list_; /* base relation list */ + List *join_relation_list_; /* list of relations generated by joins */ + bool query_is_archival_; /* archival query flag */ +} Query; + + +/***************************************************************************** + * Other Statements (no optimizations required) + * + * Some of them require a little bit of transformation (which is also + * done by transformStmt). The whole structure is then passed on to + * ProcessUtility (by-passing the optimization step) as the utilityStmt + * field in Query. + *****************************************************************************/ + +/* ---------------------- + * Add Column Statement + * ---------------------- + */ +typedef struct AddAttrStmt { + NodeTag type; + char *relname; /* the relation to add attr */ + bool inh; /* add recursively to children? */ + struct ColumnDef *colDef; /* the attribute definition */ +} AddAttrStmt; + +/* ---------------------- + * Change ACL Statement + * ---------------------- + */ +typedef struct ChangeACLStmt { + NodeTag type; + struct AclItem *aclitem; + unsigned modechg; + List *relNames; +} ChangeACLStmt; + +/* ---------------------- + * Close Portal Statement + * ---------------------- + */ +typedef struct ClosePortalStmt { + NodeTag type; + char *portalname; /* name of the portal (cursor) */ +} ClosePortalStmt; + +/* ---------------------- + * Copy Statement + * ---------------------- + */ +typedef struct CopyStmt { + NodeTag type; + bool binary; /* is a binary copy? 
*/ + char *relname; /* the relation to copy */ + int direction; /* TO or FROM */ + char *filename; /* if NULL, use stdin/stdout */ + char *delimiter; /* delimiter character, \t by default*/ +} CopyStmt; + +/* ---------------------- + * Create Table Statement + * ---------------------- + */ +typedef enum ArchType { + ARCH_NONE, ARCH_LIGHT, ARCH_HEAVY /* archive mode */ +} ArchType; + +typedef struct CreateStmt { + NodeTag type; + char *relname; /* the relation to create */ + List *tableElts; /* column definitions + list of ColumnDef */ + List *inhRelnames; /* relations to inherit from + list of Value (string) */ + ArchType archiveType; /* archive mode (ARCH_NONE if none */ + int location; /* smgrid (-1 if none) */ + int archiveLoc; /* smgrid (-1 if none) */ +} CreateStmt; + +/* ---------------------- + * Create Version Statement + * ---------------------- + */ +typedef struct VersionStmt { + NodeTag type; + char *relname; /* the new relation */ + int direction; /* FORWARD | BACKWARD */ + char *fromRelname; /* relation to create a version */ + char *date; /* date of the snapshot */ +} VersionStmt; + +/* ---------------------- + * Create {Operator|Type|Aggregate} Statement + * ---------------------- + */ +typedef struct DefineStmt { + NodeTag type; + int defType; /* OPERATOR|P_TYPE|AGGREGATE*/ + char *defname; + List *definition; /* a list of DefElem */ +} DefineStmt; + +/* ---------------------- + * Drop Table Statement + * ---------------------- + */ +typedef struct DestroyStmt { + NodeTag type; + List *relNames; /* relations to be dropped */ +} DestroyStmt; + +/* ---------------------- + * Extend Index Statement + * ---------------------- + */ +typedef struct ExtendStmt { + NodeTag type; + char *idxname; /* name of the index */ + Node *whereClause; /* qualifications */ + List *rangetable; /* range table, filled in + by transformStmt() */ +} ExtendStmt; + +/* ---------------------- + * Begin Recipe Statement + * ---------------------- + */ +typedef struct RecipeStmt { + NodeTag type; + char *recipeName; /* name of the recipe*/ +} RecipeStmt; + +/* ---------------------- + * Fetch Statement + * ---------------------- + */ +typedef struct FetchStmt { + NodeTag type; + int direction; /* FORWARD or BACKWARD */ + int howMany; /* amount to fetch ("ALL" --> 0) */ + char *portalname; /* name of portal (cursor) */ +} FetchStmt; + +/* ---------------------- + * Create Index Statement + * ---------------------- + */ +typedef struct IndexStmt { + NodeTag type; + char *idxname; /* name of the index */ + char *relname; /* name of relation to index on */ + char *accessMethod; /* name of acess methood (eg. 
btree) */ + List *indexParams; /* a list of IndexElem */ + List *withClause; /* a list of ParamString */ + Node *whereClause; /* qualifications */ + List *rangetable; /* range table, filled in + by transformStmt() */ +} IndexStmt; + +/* ---------------------- + * Move Statement (Not implemented) + * ---------------------- + */ +typedef struct MoveStmt { + NodeTag type; + int direction; /* FORWARD or BACKWARD */ + bool to; + int where; + char *portalname; +} MoveStmt; + +/* ---------------------- + * Create Function Statement + * ---------------------- + */ +typedef struct ProcedureStmt { + NodeTag type; + char *funcname; /* name of function to create */ + List *defArgs; /* list of definitions + a list of strings (as Value *) */ + Node *returnType; /* the return type (as a string or + a TypeName (ie.setof) */ + List *withClause; /* a list of ParamString */ + char *as; /* the SQL statement or filename */ + char *language; /* C or SQL */ +} ProcedureStmt; + +/* ---------------------- + * Purge Statement + * ---------------------- + */ +typedef struct PurgeStmt { + NodeTag type; + char *relname; /* relation to purge */ + char *beforeDate; /* purge before this date */ + char *afterDate; /* purge after this date */ +} PurgeStmt; + +/* ---------------------- + * Drop Function Statement + * ---------------------- + */ +typedef struct RemoveFuncStmt { + NodeTag type; + char *funcname; /* function to drop */ + List *args; /* types of the arguments */ +} RemoveFuncStmt; + +/* ---------------------- + * Drop Operator Statement + * ---------------------- + */ +typedef struct RemoveOperStmt { + NodeTag type; + char *opname; /* operator to drop */ + List *args; /* types of the arguments */ +} RemoveOperStmt; + +/* ---------------------- + * Drop {Aggregate|Type|Index|Rule|View} Statement + * ---------------------- + */ +typedef struct RemoveStmt { + NodeTag type; + int removeType; /* AGGREGATE|P_TYPE|INDEX|RULE|VIEW */ + char *name; /* name to drop */ +} RemoveStmt; + +/* ---------------------- + * Alter Table Statement + * ---------------------- + */ +typedef struct RenameStmt { + NodeTag type; + char *relname; /* relation to be altered */ + bool inh; /* recursively alter children? */ + char *column; /* if NULL, rename the relation name + to the new name. Otherwise, rename + this column name. */ + char *newname; /* the new name */ +} RenameStmt; + +/* ---------------------- + * Create Rule Statement + * ---------------------- + */ +typedef struct RuleStmt { + NodeTag type; + char *rulename; /* name of the rule */ + Node *whereClause; /* qualifications */ + CmdType event; /* RETRIEVE */ + struct Attr *object; /* object affected */ + bool instead; /* is a 'do instead'? 
*/ + List *actions; /* the action statements */ +} RuleStmt; + +/* ---------------------- + * Notify Statement + * ---------------------- + */ +typedef struct NotifyStmt { + NodeTag type; + char *relname; /* relation to notify */ +} NotifyStmt; + +/* ---------------------- + * Listen Statement + * ---------------------- + */ +typedef struct ListenStmt { + NodeTag type; + char *relname; /* relation to listen on */ +} ListenStmt; + +/* ---------------------- + * {Begin|Abort|End} Transaction Statement + * ---------------------- + */ +typedef struct TransactionStmt { + NodeTag type; + int command; /* BEGIN|END|ABORT */ +} TransactionStmt; + +/* ---------------------- + * Create View Statement + * ---------------------- + */ +typedef struct ViewStmt { + NodeTag type; + char *viewname; /* name of the view */ + Query *query; /* the SQL statement */ +} ViewStmt; + +/* ---------------------- + * Load Statement + * ---------------------- + */ +typedef struct LoadStmt { + NodeTag type; + char *filename; /* file to load */ +} LoadStmt; + +/* ---------------------- + * Createdb Statement + * ---------------------- + */ +typedef struct CreatedbStmt { + NodeTag type; + char *dbname; /* database to create */ +} CreatedbStmt; + +/* ---------------------- + * Destroydb Statement + * ---------------------- + */ +typedef struct DestroydbStmt { + NodeTag type; + char *dbname; /* database to drop */ +} DestroydbStmt; + +/* ---------------------- + * Cluster Statement (support pbrown's cluster index implementation) + * ---------------------- + */ +typedef struct ClusterStmt { + NodeTag type; + char *relname; /* relation being indexed */ + char *indexname; /* original index defined */ +} ClusterStmt; + +/* ---------------------- + * Vacuum Statement + * ---------------------- + */ +typedef struct VacuumStmt { + NodeTag type; + char *vacrel; /* table to vacuum */ +} VacuumStmt; + +/* ---------------------- + * Explain Statement + * ---------------------- + */ +typedef struct ExplainStmt { + NodeTag type; + Query *query; /* the query */ + List *options; +} ExplainStmt; + + +/***************************************************************************** + * Optimizable Statements + *****************************************************************************/ + +/* ---------------------- + * Insert Statement + * ---------------------- + */ +typedef struct AppendStmt { + NodeTag type; + char *relname; /* relation to insert into */ + List *cols; /* names of the columns */ + List *exprs; /* the expressions (same order as + the columns) */ + List *fromClause; /* the from clause */ + Node *whereClause; /* qualifications */ +} AppendStmt; + +/* ---------------------- + * Delete Statement + * ---------------------- + */ +typedef struct DeleteStmt { + NodeTag type; + char *relname; /* relation to delete from */ + Node *whereClause; /* qualifications */ +} DeleteStmt; + +/* ---------------------- + * Update Statement + * ---------------------- + */ +typedef struct ReplaceStmt { + NodeTag type; + char *relname; /* relation to update */ + List *targetList; /* the target list (of ResTarget) */ + Node *whereClause; /* qualifications */ + List *fromClause; /* the from clause */ +} ReplaceStmt; + +/* ---------------------- + * Create Cursor Statement + * ---------------------- + */ +typedef struct CursorStmt { + NodeTag type; + char *portalname; /* the portal (cursor) to create */ + bool binary; /* a binary (internal) portal? 
*/ + char *unique; /* NULL, "*", or unique attribute name */ + List *targetList; /* the target list (of ResTarget) */ + List *fromClause; /* the from clause */ + Node *whereClause; /* qualifications */ + List *orderClause; /* sort clause (a list of SortBy's) */ +} CursorStmt; + +/* ---------------------- + * Select Statement + * ---------------------- + */ +typedef struct RetrieveStmt { + NodeTag type; + char *unique; /* NULL, '*', or unique attribute name */ + char *into; /* name of table (for select into + table) */ + List *targetList; /* the target list (of ResTarget) */ + List *fromClause; /* the from clause */ + Node *whereClause; /* qualifications */ + List *groupClause; /* group by clause */ + Node *havingClause; /* having conditional-expression */ + List *orderClause; /* sort clause (a list of SortBy's) */ +} RetrieveStmt; + + +/**************************************************************************** + * Supporting data structures for Parse Trees + ****************************************************************************/ + +/* + * TypeName - specifies a type in definitions + */ +typedef struct TypeName { + NodeTag type; + char *name; /* name of the type */ + bool setof; /* is a set? */ + List *arrayBounds; /* array bounds */ + int typlen; /* length for char() and varchar() */ +} TypeName; + +/* + * ParamNo - specifies a parameter reference + */ +typedef struct ParamNo { + NodeTag type; + int number; /* the number of the parameter */ + TypeName *typename; /* the typecast */ +} ParamNo; + +/* + * A_Expr - binary expressions + */ +typedef struct A_Expr { + NodeTag type; + int oper; /* type of operation + {OP,OR,AND,NOT,ISNULL,NOTNULL} */ + char *opname; /* name of operator/function */ + Node *lexpr; /* left argument */ + Node *rexpr; /* right argument */ +} A_Expr; + +/* + * Attr - + * specifies an Attribute (ie. a Column); could have nested dots or + * array references. + * + */ +typedef struct Attr { + NodeTag type; + char *relname; /* name of relation (can be "*") */ + ParamNo *paramNo; /* or a parameter */ + List *attrs; /* attributes (possibly nested); + list of Values (strings) */ + List *indirection; /* array refs (list of A_Indices') */ +} Attr; + +/* + * A_Const - a constant expression + */ +typedef struct A_Const { + NodeTag type; + Value val; /* the value (with the tag) */ + TypeName *typename; /* typecast */ +} A_Const; + +/* + * ColumnDef - column definition (used in various creates) + */ +typedef struct ColumnDef { + NodeTag type; + char *colname; /* name of column */ + TypeName *typename; /* type of column */ +} ColumnDef; + +/* + * Ident - + * an identifier (could be an attribute or a relation name). Depending + * on the context at transformStmt time, the identifier is treated as + * either a relation name (in which case, isRel will be set) or an + * attribute (in which case, it will be transformed into an Attr). 
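+ *
+ * Illustrative example (not part of the original comment): a bare column
+ * reference such as "salary" first shows up as
+ *	Ident { name = "salary", indirection = NIL, isRel = false }
+ * and is later replaced by an Attr node, while an identifier that turns
+ * out to name a relation simply gets isRel set by transformExpr().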
+ */ +typedef struct Ident { + NodeTag type; + char *name; /* its name */ + List *indirection; /* array references */ + bool isRel; /* is a relation - filled in by + transformExpr() */ +} Ident; + +/* + * FuncCall - a function/aggregate invocation + */ +typedef struct FuncCall { + NodeTag type; + char *funcname; /* name of function */ + List *args; /* the arguments (list of exprs) */ +} FuncCall; + +/* + * A_Indices - array reference or bounds ([lidx:uidx] or [uidx]) + */ +typedef struct A_Indices { + NodeTag type; + Node *lidx; /* could be NULL */ + Node *uidx; +} A_Indices; + +/* + * ResTarget - + * result target (used in target list of pre-transformed Parse trees) + */ +typedef struct ResTarget { + NodeTag type; + char *name; /* name of the result column */ + List *indirection; /* array references */ + Node *val; /* the value of the result + (A_Expr or Attr) */ +} ResTarget; + +/* + * ParamString - used in with clauses + */ +typedef struct ParamString { + NodeTag type; + char *name; + char *val; +} ParamString; + +/* + * TimeRange - specifies a time range + */ +typedef struct TimeRange { + NodeTag type; + char *startDate; + char *endDate; /* snapshot if NULL */ +} TimeRange; + +/* + * RelExpr - relation expressions + */ +typedef struct RelExpr { + NodeTag type; + char *relname; /* the relation name */ + bool inh; /* inheritance query */ + TimeRange *timeRange; /* the time range */ +} RelExpr; + +/* + * Sortby - for order by clause + */ +typedef struct SortBy { + NodeTag type; + char *name; /* name of column to sort on */ + char *useOp; /* operator to use */ +} SortBy; + +/* + * RangeVar - range variable, used in from clauses + */ +typedef struct RangeVar { + NodeTag type; + RelExpr *relExpr; /* the relation expression */ + char *name; /* the name to be referenced + (optional) */ +} RangeVar; + +/* + * IndexElem - index parameters (used in create index) + */ +typedef struct IndexElem { + NodeTag type; + char *name; /* name of index */ + List *args; /* if not NULL, function index */ + char *class; +} IndexElem; + +/* + * DefElem - + * a definition (used in definition lists in the form of defname = arg) + */ +typedef struct DefElem { + NodeTag type; + char *defname; + Node *arg; /* a (Value *) or a (TypeName *) */ +} DefElem; + + +/**************************************************************************** + * Nodes for a Query tree + ****************************************************************************/ + +/* + * TargetEntry - + * a target entry (used in the transformed target list) + * + * one of resdom or fjoin is not NULL. a target list is + * (( expr) ( expr) ...) + */ +typedef struct TargetEntry { + NodeTag type; + Resdom *resdom; /* fjoin overload this to be a list??*/ + Fjoin *fjoin; + Node *expr; /* can be a list too */ +} TargetEntry; + +/* + * RangeTblEntry - + * used in range tables. Some of the following are only used in one of + * the parsing, optimizing, execution stages. + * + * inFromCl marks those range variables that are listed in the from clause. + * In SQL, the targetlist can only refer to range variables listed in the + * from clause but POSTQUEL allows you to refer to tables not specified, in + * which case a range table entry will be generated. We use POSTQUEL + * semantics which is more powerful. However, we need SQL semantics in + * some cases (eg. 
when expanding a '*') + */ +typedef struct RangeTblEntry { + NodeTag type; + char *relname; /* real name of the relation */ + TimeRange *timeRange; /* time range */ + char *refname; /* the reference name (specified in + the from clause) */ + Oid relid; + bool inh; /* inheritance? */ + bool archive; /* filled in by plan_archive */ + bool inFromCl; /* comes from From Clause */ + TimeQual timeQual; /* filled in by pg_plan */ +} RangeTblEntry; + +/* + * SortClause - + * used in the sort clause for retrieves and cursors + */ +typedef struct SortClause { + NodeTag type; + Resdom *resdom; /* attributes in tlist to be sorted */ + Oid opoid; /* sort operators */ +} SortClause; + +/* + * GroupClause - + * used in the GROUP BY clause + */ +typedef struct GroupClause { + NodeTag type; + Var *grpAttr; /* attributes to group on */ + Oid grpOpoid; /* the sort operator to use */ +} GroupClause; + +#endif /* PARSENODES_H */ diff --git a/src/backend/nodes/pg_list.h b/src/backend/nodes/pg_list.h new file mode 100644 index 00000000000..83eaa5f93ab --- /dev/null +++ b/src/backend/nodes/pg_list.h @@ -0,0 +1,112 @@ +/*------------------------------------------------------------------------- + * + * pg_list.h-- + * POSTGRES generic list package + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pg_list.h,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PG_LIST_H +#define PG_LIST_H + +#include +#include "c.h" +#include "nodes/nodes.h" + +/* ---------------------------------------------------------------- + * node definitions + * ---------------------------------------------------------------- + */ + +/*---------------------- + * Value node + *---------------------- + */ +typedef struct Value { + NodeTag type; /* tag appropriately (eg. 
T_String) */ + union ValUnion { + char *str; /* string */ + long ival; + double dval; + } val; +} Value; + +#define intVal(v) (((Value *)v)->val.ival) +#define floatVal(v) (((Value *)v)->val.dval) +#define strVal(v) (((Value *)v)->val.str) + + +/*---------------------- + * List node + *---------------------- + */ +typedef struct List { + NodeTag type; + void *elem; + struct List *next; +} List; + +#define NIL ((List *) NULL) + +/* ---------------- + * accessor macros + * ---------------- + */ +#define lfirst(l) ((l)->elem) +#define lnext(l) ((l)->next) +#define lsecond(l) (lfirst(lnext(l))) + +/* + * foreach - + * a convenience macro which loops through the list + */ +#define foreach(_elt_,_list_) \ + for(_elt_=_list_; _elt_!=NIL;_elt_=lnext(_elt_)) + + +/* + * function prototypes in nodes/list.c + */ +extern int length(List *list); +extern List *append(List *list1, List *list2); +extern List *nconc(List *list1, List *list2); +extern List *lcons(void *datum, List *list); +extern bool member(void *foo, List *bar); +extern Value *makeInteger(long i); +extern Value *makeFloat(double d); +extern Value *makeString(char *str); +extern List *makeList(void *elem, ...); +extern List *lappend(List *list, void *obj); +extern List *lremove(void *elem, List *list); +extern void freeList(List *list); + +extern void *nth(int n, List *l); +extern void set_nth(List *l, int n, void *elem); + +/* hack for now */ +#define lconsi(i,l) lcons((void*)i,l) +#define lfirsti(l) ((int)lfirst(l)) +#define lappendi(l,i) lappend(l,(void*)i) +extern bool intMember(int, List *); +extern List *intAppend(List *list1, List *list2); + +extern List *nreverse(List *); +extern List *set_difference(List *, List *); +extern List *set_differencei(List *, List *); +extern List *LispRemove(void *, List *); +extern List *intLispRemove(int, List *); +extern List *LispUnion(List *foo, List *bar); +extern List *LispUnioni(List *foo, List *bar); +extern bool same(List *foo, List *bar); + +/* should be in nodes.h but needs List */ +extern bool equali(List *a, List *b); + +/* in copyfuncs.c */ +extern List *listCopy(List *); + +#endif /* PG_LIST_H */ diff --git a/src/backend/nodes/plannodes.h b/src/backend/nodes/plannodes.h new file mode 100644 index 00000000000..a01a93c7749 --- /dev/null +++ b/src/backend/nodes/plannodes.h @@ -0,0 +1,330 @@ +/*------------------------------------------------------------------------- + * + * plannodes.h-- + * definitions for query plan nodes + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: plannodes.h,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PLANNODES_H +#define PLANNODES_H + +#include "postgres.h" + +#include "nodes/nodes.h" +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" + +/* ---------------------------------------------------------------- + * Executor State types are used in the plannode structures + * so we have to include their definitions too. 
+ * + * Node Type node information used by executor + * + * control nodes + * + * Existential ExistentialState exstate; + * Result ResultState resstate; + * Append AppendState unionstate; + * + * scan nodes + * + * Scan *** CommonScanState scanstate; + * IndexScan IndexScanState indxstate; + * + * (*** nodes which inherit Scan also inherit scanstate) + * + * join nodes + * + * NestLoop NestLoopState nlstate; + * MergeJoin MergeJoinState mergestate; + * HashJoin HashJoinState hashjoinstate; + * + * materialize nodes + * + * Material MaterialState matstate; + * Sort SortState sortstate; + * Unique UniqueState uniquestate; + * Hash HashState hashstate; + * + * ---------------------------------------------------------------- + */ +#include "nodes/execnodes.h" /* XXX move executor types elsewhere */ + + +/* ---------------------------------------------------------------- + * node definitions + * ---------------------------------------------------------------- + */ + +/* ---------------- + * Plan node + * ---------------- + */ + +typedef struct Plan { + NodeTag type; + Cost cost; + int plan_size; + int plan_width; + int plan_tupperpage; + EState *state; /* at execution time, state's of individual + nodes point to one EState for the + whole top-level plan */ + List *targetlist; + List *qual; /* Node* or List* ?? */ + struct Plan *lefttree; + struct Plan *righttree; +} Plan; + +/* ---------------- + * these are are defined to avoid confusion problems with "left" + * and "right" and "inner" and "outer". The convention is that + * the "left" plan is the "outer" plan and the "right" plan is + * the inner plan, but these make the code more readable. + * ---------------- + */ +#define innerPlan(node) (((Plan *)(node))->righttree) +#define outerPlan(node) (((Plan *)(node))->lefttree) + + +/* + * =============== + * Top-level nodes + * =============== + */ + +/* all plan nodes "derive" from the Plan structure by having the + Plan structure as the first field. This ensures that everything works + when nodes are cast to Plan's. 
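+
+   Illustrative sketch (not part of the original source): because Plan is
+   the first field, generic code can treat any plan node through a Plan
+   pointer and walk its target list with the pg_list macros, e.g.
+
+	Plan *plan = (Plan *) makeNode(SeqScan);
+	List *l;
+
+	foreach(l, plan->targetlist)
+		process_targetentry((TargetEntry *) lfirst(l));
+
+   where process_targetentry stands for a hypothetical caller routine.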
(node pointers are frequently cast to Plan* + when passed around generically in the executor */ + + +/* ---------------- + * existential node + * ---------------- + */ +typedef Plan Existential; + +/* ---------------- + * result node - + * returns tuples from outer plan that satisfy the qualifications + * ---------------- + */ +typedef struct Result { + Plan plan; + Node *resconstantqual; + ResultState *resstate; +} Result; + +/* ---------------- + * append node + * ---------------- + */ +typedef struct Append { + Plan plan; + List *unionplans; + Index unionrelid; + List *unionrtentries; + AppendState *unionstate; +} Append; + +/* + * ========== + * Scan nodes + * ========== + */ +typedef struct Scan { + Plan plan; + Index scanrelid; /* relid is index into the range table */ + CommonScanState *scanstate; +} Scan; + +/* ---------------- + * sequential scan node + * ---------------- + */ +typedef Scan SeqScan; + +/* ---------------- + * index scan node + * ---------------- + */ +typedef struct IndexScan { + Scan scan; + List *indxid; + List *indxqual; + IndexScanState *indxstate; +} IndexScan; + +/* + * ========== + * Join nodes + * ========== + */ + +/* ---------------- + * Join node + * ---------------- + */ +typedef Plan Join; + +/* ---------------- + * nest loop join node + * ---------------- + */ +typedef struct NestLoop { + Join join; + NestLoopState *nlstate; +} NestLoop; + +/* ---------------- + * merge join node + * ---------------- + */ +typedef struct MergeJoin { + Join join; + List *mergeclauses; + Oid mergesortop; + Oid *mergerightorder; /* inner sort operator */ + Oid *mergeleftorder; /* outer sort operator */ + MergeJoinState *mergestate; +} MergeJoin; + +/* ---------------- + * hash join (probe) node + * ---------------- + */ +typedef struct HashJoin { + Join join; + List *hashclauses; + Oid hashjoinop; + HashJoinState *hashjoinstate; + HashJoinTable hashjointable; + IpcMemoryKey hashjointablekey; + int hashjointablesize; + bool hashdone; +} HashJoin; + +/* --------------- + * aggregate node + * --------------- + */ +typedef struct Agg { + Plan plan; + int numAgg; + Aggreg **aggs; + AggState *aggstate; +} Agg; + +/* --------------- + * group node - + * use for queries with GROUP BY specified. + * + * If tuplePerGroup is true, one tuple (with group columns only) is + * returned for each group and NULL is returned when there are no more + * groups. Otherwise, all the tuples of a group are returned with a + * NULL returned at the end of each group. 
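+ *
+ * Illustrative example (not in the original comment): grouping the tuples
+ * (a,1) (a,2) (b,3) on their first column yields, with tuplePerGroup
+ * true, the sequence (a) (b) NULL; with tuplePerGroup false it yields
+ * (a,1) (a,2) NULL (b,3) NULL.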
(see nodeGroup.c for details) + * --------------- + */ +typedef struct Group { + Plan plan; + bool tuplePerGroup; /* what tuples to return (see above) */ + int numCols; /* number of group columns */ + AttrNumber *grpColIdx; /* index into the target list */ + GroupState *grpstate; +} Group; + +/* + * ========== + * Temp nodes + * ========== + */ +typedef struct Temp { + Plan plan; + Oid tempid; + int keycount; +} Temp; + +/* ---------------- + * materialization node + * ---------------- + */ +typedef struct Material { + Plan plan; /* temp node flattened out */ + Oid tempid; + int keycount; + MaterialState *matstate; +} Material; + +/* ---------------- + * sort node + * ---------------- + */ +typedef struct Sort { + Plan plan; /* temp node flattened out */ + Oid tempid; + int keycount; + SortState *sortstate; +} Sort; + +/* ---------------- + * unique node + * ---------------- + */ +typedef struct Unique { + Plan plan; /* temp node flattened out */ + Oid tempid; + int keycount; + char *uniqueAttr; /* NULL if all attrs, + or unique attribute name */ + AttrNumber uniqueAttrNum; /* attribute number of attribute + to select distinct on */ + UniqueState *uniquestate; +} Unique; + +/* ---------------- + * hash build node + * ---------------- + */ +typedef struct Hash { + Plan plan; + Var *hashkey; + HashState *hashstate; + HashJoinTable hashtable; + IpcMemoryKey hashtablekey; + int hashtablesize; +} Hash; + +/* --------------------- + * choose node + * --------------------- + */ +typedef struct Choose { + Plan plan; + List *chooseplanlist; +} Choose; + +/* ------------------- + * Tee node information + * + * leftParent : the left parent of this node + * rightParent: the right parent of this node + * ------------------- +*/ +typedef struct Tee { + Plan plan; + Plan* leftParent; + Plan* rightParent; + TeeState *teestate; + char *teeTableName; /* the name of the table to materialize + the tee into */ + List *rtentries; /* the range table for the plan below the Tee + may be different than the parent plans */ +} Tee; + +#endif /* PLANNODES_H */ diff --git a/src/backend/nodes/primnodes.h b/src/backend/nodes/primnodes.h new file mode 100644 index 00000000000..7ceb9c6101f --- /dev/null +++ b/src/backend/nodes/primnodes.h @@ -0,0 +1,318 @@ +/*------------------------------------------------------------------------- + * + * primnodes.h-- + * Definitions for parse tree/query tree ("primitive") nodes. 
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: primnodes.h,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PRIMNODES_H +#define PRIMNODES_H + +#include "postgres.h" + +#include "access/attnum.h" +#include "storage/buf.h" +#include "utils/rel.h" +#include "utils/fcache.h" +#include "nodes/params.h" + +#include "nodes/nodes.h" +#include "nodes/pg_list.h" + +/* ---------------------------------------------------------------- + * node definitions + * ---------------------------------------------------------------- + */ + +/* ---------------- + * Resdom (Result Domain) + * resno - attribute number + * restype - type of the resdom + * reslen - length (in bytes) of the result + * resname - name of the resdom (could be NULL) + * reskey - order of key in a sort (for those > 0) + * reskeyop - sort operator Oid + * resjunk - set to nonzero to eliminate the attribute + * from final target list e.g., ctid for replace + * and delete + * + * ---------------- + */ +typedef struct Resdom { + NodeTag type; + AttrNumber resno; + Oid restype; + int reslen; + char *resname; + Index reskey; + Oid reskeyop; + int resjunk; +} Resdom; + +/* ------------- + * Fjoin + * initialized - true if the Fjoin has already been initialized for + * the current target list evaluation + * nNodes - The number of Iter nodes returning sets that the + * node will flatten + * outerList - 1 or more Iter nodes + * inner - exactly one Iter node. We eval every node in the + * outerList once then eval the inner node to completion + * pair the outerList result vector with each inner + * result to form the full result. When the inner has + * been exhausted, we get the next outer result vector + * and reset the inner. + * results - The complete (flattened) result vector + * alwaysNull - a null vector to indicate sets with a cardinality of + * 0, we treat them as the set {NULL}. 
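+ *
+ * Illustrative example (not in the original comment): if the outer
+ * Iter(s) yield the vector {1,2} and the inner Iter yields {a,b}, the
+ * flattened result is (1,a) (1,b) (2,a) (2,b); a set of cardinality 0
+ * is treated as {NULL} as described above.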
+ */ +typedef struct Fjoin { + NodeTag type; + bool fj_initialized; + int fj_nNodes; + List *fj_innerNode; + DatumPtr fj_results; + BoolPtr fj_alwaysDone; +} Fjoin; + +/* ---------------- + * Expr + * typeOid - oid of the type of this expression + * opType - type of this expression + * oper - the Oper node if it is an OPER_EXPR or the + * Func node if it is a FUNC_EXPR + * args - arguments to this expression + * ---------------- + */ +typedef enum OpType { + OP_EXPR, FUNC_EXPR, OR_EXPR, AND_EXPR, NOT_EXPR +} OpType; + +typedef struct Expr { + NodeTag type; + Oid typeOid; /* oid of the type of this expr */ + OpType opType; /* type of the op */ + Node *oper; /* could be Oper or Func */ + List *args; /* list of argument nodes */ +} Expr; + +/* ---------------- + * Var + * varno - index of this var's relation in the range table + * (could be INNER or OUTER) + * varattno - attribute number of this var + * vartype - pg_type tuple oid for the type of this var + * varnoold - keep varno around in case it got changed to INNER/ + * OUTER (see match_varid) + * varoattno - attribute number of this var + * [ '(varnoold varoattno) was varid -ay 2/95] + * ---------------- + */ +#define INNER 65000 +#define OUTER 65001 + +#define PRS2_CURRENT_VARNO 1 +#define PRS2_NEW_VARNO 2 + +typedef struct Var { + NodeTag type; + Index varno; + AttrNumber varattno; + Oid vartype; + Index varnoold; /* only used by optimizer */ + AttrNumber varoattno; /* only used by optimizer */ +} Var; + +/* ---------------- + * Oper + * opno - PG_OPERATOR OID of the operator + * opid - PG_PROC OID for the operator + * opresulttype - PG_TYPE OID of the operator's return value + * opsize - size of return result (cached by executor) + * op_fcache - XXX comment me. + * + * ---- + * NOTE: in the good old days 'opno' used to be both (or either, or + * neither) the pg_operator oid, and/or the pg_proc oid depending + * on the postgres module in question (parser->pg_operator, + * executor->pg_proc, planner->both), the mood of the programmer, + * and the phase of the moon (rumors that it was also depending on the day + * of the week are probably false). To make things even more postgres-like + * (i.e. a mess) some comments were referring to 'opno' using the name + * 'opid'. Anyway, now we have two separate fields, and of course that + * immediately removes all bugs from the code... [ sp :-) ]. + * ---------------- + */ +typedef struct Oper { + NodeTag type; + Oid opno; + Oid opid; + Oid opresulttype; + int opsize; + FunctionCachePtr op_fcache; +} Oper; + + +/* ---------------- + * Const + * consttype - PG_TYPE OID of the constant's value + * constlen - length in bytes of the constant's value + * constvalue - the constant's value + * constisnull - whether the constant is null + * (if true, the other fields are undefined) + * constbyval - whether the information in constvalue + * if passed by value. If true, then all the information + * is stored in the datum. If false, then the datum + * contains a pointer to the information. + * constisset - whether the const represents a set. The const + * value corresponding will be the query that defines + * the set. + * ---------------- + */ +typedef struct Const { + NodeTag type; + Oid consttype; + Size constlen; + Datum constvalue; + bool constisnull; + bool constbyval; + bool constisset; +} Const; + +/* ---------------- + * Param + * paramkind - specifies the kind of parameter. 
The possible values + * for this field are specified in "params.h", and they are: + * + * PARAM_NAMED: The parameter has a name, i.e. something + * like `$.salary' or `$.foobar'. + * In this case field `paramname' must be a valid Name. + * + * PARAM_NUM: The parameter has only a numeric identifier, + * i.e. something like `$1', `$2' etc. + * The number is contained in the `paramid' field. + * + * PARAM_NEW: Used in PRS2 rule, similar to PARAM_NAMED. + * The `paramname' and `paramid' refer to the "NEW" tuple + * The `pramname' is the attribute name and `paramid' + * is the attribute number. + * + * PARAM_OLD: Same as PARAM_NEW, but in this case we refer to + * the "OLD" tuple. + * + * paramid - numeric identifier for literal-constant parameters ("$1") + * paramname - attribute name for tuple-substitution parameters ("$.foo") + * paramtype - PG_TYPE OID of the parameter's value + * param_tlist - allows for projection in a param node. + * ---------------- + */ +typedef struct Param { + NodeTag type; + int paramkind; + AttrNumber paramid; + char *paramname; + Oid paramtype; + List *param_tlist; +} Param; + + +/* ---------------- + * Func + * funcid - PG_FUNCTION OID of the function + * functype - PG_TYPE OID of the function's return value + * funcisindex - the function can be evaluated by scanning an index + * (set during query optimization) + * funcsize - size of return result (cached by executor) + * func_fcache - runtime state while running this function. Where + * we are in the execution of the function if it + * returns more than one value, etc. + * See utils/fcache.h + * func_tlist - projection of functions returning tuples + * func_planlist - result of planning this func, if it's a PQ func + * ---------------- + */ +typedef struct Func { + NodeTag type; + Oid funcid; + Oid functype; + bool funcisindex; + int funcsize; + FunctionCachePtr func_fcache; + List *func_tlist; + List *func_planlist; +} Func; + +/* ---------------- + * Aggreg + * aggname - name of the aggregate + * basetype - base type Oid of the aggregate + * aggtype - type Oid of final result of the aggregate + * query - XXX comment me + * target - XXX comment me + * ---------------- + */ +typedef struct Aggreg { + NodeTag type; + char *aggname; + Oid basetype; /* base type of the aggregate */ + Oid aggtype; /* type of final result */ + Node *target; /* attribute to aggreg on */ + int aggno; /* index to ecxt_values */ +} Aggreg; + +/* ---------------- + * Array + * arrayelemtype - base type of the array's elements (homogenous!) + * arrayelemlength - length of that type + * arrayelembyval - can you pass this element by value? + * arrayndim - number of dimensions of the array + * arraylow - base for array indexing + * arrayhigh - limit for array indexing + * arraylen - + * ---------------- + * + * memo from mao: the array support we inherited from 3.1 is just + * wrong. when time exists, we should redesign this stuff to get + * around a bunch of unfortunate implementation decisions made there. + */ +typedef struct Array { + NodeTag type; + Oid arrayelemtype; + int arrayelemlength; + bool arrayelembyval; + int arrayndim; + IntArray arraylow; + IntArray arrayhigh; + int arraylen; +} Array; + +/* ---------------- + * ArrayRef: + * refelemtype - type of the element referenced here + * refelemlength - length of that type + * refelembyval - can you pass this element type by value? 
+ * refupperindexpr - expressions that evaluate to upper array index + * reflowerexpr- the expressions that evaluate to a lower array index + * refexpr - the expression that evaluates to an array + * refassignexpr- the expression that evaluates to the new value + * to be assigned to the array in case of replace. + * ---------------- + */ +typedef struct ArrayRef { + NodeTag type; + int refattrlength; + int refelemlength; + Oid refelemtype; + bool refelembyval; + List *refupperindexpr; + List *reflowerindexpr; + Node *refexpr; + Node *refassgnexpr; +} ArrayRef; + +#endif /* PRIMNODES_H */ diff --git a/src/backend/nodes/print.c b/src/backend/nodes/print.c new file mode 100644 index 00000000000..0297b445b10 --- /dev/null +++ b/src/backend/nodes/print.c @@ -0,0 +1,377 @@ +/*------------------------------------------------------------------------- + * + * print.c-- + * various print routines (used mostly for debugging) + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/print.c,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + * HISTORY + * AUTHOR DATE MAJOR EVENT + * Andrew Yu Oct 26, 1994 file creation + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" +#include "access/printtup.h" +#include "nodes/pg_list.h" +#include "nodes/execnodes.h" +#include "nodes/parsenodes.h" + +#include "parser/parsetree.h" +#include "parser/catalog_utils.h" +#include "access/heapam.h" +#include "utils/lsyscache.h" +#include "nodes/nodes.h" +#include "nodes/plannodes.h" +#include "optimizer/clauses.h" +/* + * print-- + * print contents of Node to stdout + */ +void +print(void *obj) +{ + char *s; + + s = nodeToString(obj); + printf("%s\n", s); + fflush(stdout); + return; +} + +/* + * pretty print hack extraordinaire. -ay 10/94 + */ +void +pprint(void *obj) +{ + char *s; + int i; + char line[80]; + int indentLev; + int j; + + s = nodeToString(obj); + + indentLev = 0; + i = 0; + for(;;) { + for(j=0; jrelname,rte->refname,rte->relid, + rte->inFromCl, + (rte->inh?"inh":"")); + i++; + } +} + + +/* + * print_expr-- + * print an expression + */ +void +print_expr(Node *expr, List *rtable) +{ + if (expr==NULL) { + printf("nil"); + return; + } + + if (IsA(expr,Var)) { + Var *var = (Var*)expr; + RangeTblEntry *rt; + char *relname, *attname; + + switch (var->varno) { + case INNER: + relname = "INNER"; + attname = "?"; + break; + case OUTER: + relname = "OUTER"; + attname = "?"; + break; + default: + { + Relation r; + rt = rt_fetch(var->varno, rtable); + relname = rt->relname; + r = heap_openr(relname); + if (rt->refname) + relname = rt->refname; /* table renamed */ + attname = getAttrName(r, var->varattno); + heap_close(r); + } + break; + } + printf("%s.%s",relname,attname); + } else if (IsA(expr,Expr)) { + Expr *e = (Expr*)expr; + if (is_opclause(expr)) { + char *opname; + + print_expr((Node*)get_leftop(e), rtable); + opname = get_opname(((Oper*)e->oper)->opno); + printf(" %s ", opname); + print_expr((Node*)get_rightop(e), rtable); + } else { + printf("an expr"); + } + } else { + printf("not an expr"); + } +} + +/* + * print_keys - + * temporary here. where is keys list of list?? + */ +void +print_keys(List *keys, List *rtable) +{ + List *k; + + printf("("); + foreach(k, keys) { + Node *var = lfirst((List*)lfirst(k)); + print_expr(var, rtable); + if (lnext(k)) printf(", "); + } + printf(")\n"); +} + +/* + * print_tl -- + * print targetlist in a more legible way. 
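+ *
+ *	The output is roughly of the form (illustrative; "emp" is a
+ *	made-up table):
+ *	(
+ *		1	name	 :	emp.name
+ *		2	salary	(1):	emp.salary
+ *	)
+ *	where the parenthesized number, if present, is the sort key
+ *	position (resdom->reskey).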
+ */ +void +print_tl(List *tlist, List *rtable) +{ + List *tl; + + printf("(\n"); + foreach(tl, tlist) { + TargetEntry *tle = lfirst(tl); + + printf("\t%d %s\t", tle->resdom->resno, tle->resdom->resname); + if (tle->resdom->reskey!=0) { + printf("(%d):\t", tle->resdom->reskey); + } else { + printf(" :\t"); + } + print_expr(tle->expr, rtable); + printf("\n"); + } + printf(")\n"); +} + +/* + * print_slot-- + * print out the tuple with the given TupleTableSlot + */ +void +print_slot(TupleTableSlot *slot) +{ + if (!slot->val) { + printf("tuple is null.\n"); + return; + } + if (!slot->ttc_tupleDescriptor) { + printf("no tuple descriptor.\n"); + return; + } + + debugtup(slot->val, slot->ttc_tupleDescriptor); +} + +char* +plannode_type (Plan* p) +{ + switch(nodeTag(p)) { + case T_Plan: + return "PLAN"; + break; + case T_Existential: + return "EXISTENTIAL"; + break; + case T_Result: + return "RESULT"; + break; + case T_Append: + return "APPEND"; + break; + case T_Scan: + return "SCAN"; + break; + case T_SeqScan: + return "SEQSCAN"; + break; + case T_IndexScan: + return "INDEXSCAN"; + break; + case T_Join: + return "JOIN"; + break; + case T_NestLoop: + return "NESTLOOP"; + break; + case T_MergeJoin: + return "MERGEJOIN"; + break; + case T_HashJoin: + return "HASHJOIN"; + break; + case T_Temp: + return "TEMP"; + break; + case T_Material: + return "MATERIAL"; + break; + case T_Sort: + return "SORT"; + break; + case T_Agg: + return "AGG"; + break; + case T_Unique: + return "UNIQUE"; + break; + case T_Hash: + return "HASH"; + break; + case T_Tee: + return "TEE"; + break; + case T_Choose: + return "CHOOSE"; + break; + case T_Group: + return "GROUP"; + break; + default: + return "UNKNOWN"; + break; + } +} +/* + prints the ascii description of the plan nodes + does this recursively by doing a depth-first traversal of the + plan tree. 
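+
+   For example (illustrative only; the table names are made up), a
+   nest-loop join over two sequential scans prints the NESTLOOP node
+   followed by its children labelled "l: SEQSCAN ... ( emp )" and
+   "r: SEQSCAN ... ( dept )", each child indented three more spaces and
+   each line also showing the node's cost, size and width.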
for SeqScan and IndexScan, the name of the table is also + printed out + +*/ +void +print_plan_recursive (Plan* p, Query *parsetree, int indentLevel, char* label) +{ + int i; + char extraInfo[100]; + + if (!p) + return; + for (i=0;icost, p->plan_size, p->plan_width); + if (IsA(p,Scan) || IsA(p,SeqScan)) { + RangeTblEntry *rte; + rte = rt_fetch(((Scan*)p)->scanrelid, parsetree->rtable); + strncpy(extraInfo, rte->relname, NAMEDATALEN); + extraInfo[NAMEDATALEN] = '\0'; + } else + if (IsA(p,IndexScan)) { + strncpy(extraInfo, + ((RangeTblEntry*)(nth(((IndexScan*)p)->scan.scanrelid - 1, + parsetree->rtable)))->relname, + NAMEDATALEN); + extraInfo[NAMEDATALEN] = '\0'; + } else + extraInfo[0] = '\0'; + if (extraInfo[0] != '\0') + printf(" ( %s )\n", extraInfo); + else + printf("\n"); + print_plan_recursive(p->lefttree, parsetree, indentLevel + 3, "l: "); + print_plan_recursive(p->righttree, parsetree, indentLevel + 3, "r: "); +} + +/* print_plan + prints just the plan node types */ + +void +print_plan (Plan* p, Query* parsetree) +{ + print_plan_recursive(p, parsetree, 0, ""); +} + + diff --git a/src/backend/nodes/read.c b/src/backend/nodes/read.c new file mode 100644 index 00000000000..dbcc59a6f93 --- /dev/null +++ b/src/backend/nodes/read.c @@ -0,0 +1,270 @@ +/*------------------------------------------------------------------------- + * + * read.c-- + * routines to convert a string (legal ascii representation of node) back + * to nodes + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/read.c,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + * HISTORY + * AUTHOR DATE MAJOR EVENT + * Andrew Yu Nov 2, 1994 file creation + * + *------------------------------------------------------------------------- + */ +#include +#include +#include +#include "postgres.h" +#include "nodes/pg_list.h" +#include "nodes/readfuncs.h" +#include "utils/elog.h" + +/* + * stringToNode - + * returns a Node with a given legal ascii representation + */ +void * +stringToNode(char *str) +{ + void *retval; + + (void) lsptok(str, NULL); /* set the string used in lsptok */ + retval = nodeRead(true); /* start reading */ + + return retval; +} + +/***************************************************************************** + * + * the lisp token parser + * + *****************************************************************************/ + +#define RIGHT_PAREN (1000000 + 1) +#define LEFT_PAREN (1000000 + 2) +#define PLAN_SYM (1000000 + 3) +#define AT_SYMBOL (1000000 + 4) +#define ATOM_TOKEN (1000000 + 5) + +/* + * nodeTokenType - + * returns the type of the node token contained in token. + * It returns one of the following valid NodeTags: + * T_Integer, T_Float, T_String + * and some of its own: + * RIGHT_PAREN, LEFT_PAREN, PLAN_SYM, AT_SYMBOL, ATOM_TOKEN + * + * Assumption: the ascii representation is legal + */ +static NodeTag +nodeTokenType(char *token, int length) +{ + NodeTag retval; + + /* + * Check if the token is a number (decimal or integer, + * positive or negative + */ + if (isdigit(*token) || + (length>=2 && *token=='-' && isdigit(*(token+1)) )) + { + /* + * skip the optional '-' (i.e. negative number) + */ + if (*token == '-') { + token++; + } + + /* + * See if there is a decimal point + */ + + for (; length && *token != '.'; token++, length--); + + /* + * if there isn't, token's an int, otherwise it's a float. + */ + + retval = (*token != '.') ? 
T_Integer : T_Float; + } + else if (isalpha(*token)) + retval = ATOM_TOKEN; + else if (*token == '(') + retval = LEFT_PAREN; + else if (*token == ')') + retval = RIGHT_PAREN; + else if (*token == '@') + retval = AT_SYMBOL; + else if (*token == '\"') + retval = T_String; + else if (*token == '{') + retval = PLAN_SYM; + return(retval); +} + +/* + * Works kinda like strtok, except it doesn't put nulls into string. + * + * Returns the length in length instead. The string can be set without + * returning a token by calling lsptok with length == NULL. + * + */ +char * +lsptok(char *string, int *length) +{ + static char *local_str; + char *ret_string; + + if (string != NULL) { + local_str = string; + if (length == NULL) { + return(NULL); + } + } + + for (; *local_str == ' ' + || *local_str == '\n' + || *local_str == '\t'; local_str++); + + /* + * Now pointing at next token. + */ + ret_string = local_str; + if (*local_str == '\0') return(NULL); + *length = 1; + + if (*local_str == '\"') { + for (local_str++; *local_str != '\"'; (*length)++, local_str++); + (*length)++; local_str++; + }else if (*local_str == ')' || *local_str == '(' || + *local_str == '}' || *local_str == '{') { + local_str++; + }else { + for (; *local_str != ' ' + && *local_str != '\n' + && *local_str != '\t' + && *local_str != '{' + && *local_str != '}' + && *local_str != '(' + && *local_str != ')'; local_str++, (*length)++); + (*length)--; + } + return(ret_string); +} + +/* + * This guy does all the reading. + * + * Secrets: He assumes that lsptok already has the string (see below). + * Any callers should set read_car_only to true. + */ +void * +nodeRead(bool read_car_only) +{ + char *token; + NodeTag type; + Node *this_value, *return_value; + int tok_len; + char tmp; + bool make_dotted_pair_cell = false; + + token = lsptok(NULL, &tok_len); + + if (token == NULL) return(NULL); + + type = nodeTokenType(token, tok_len); + + switch(type) { + case PLAN_SYM: + this_value = parsePlanString(); + token = lsptok(NULL, &tok_len); + if (token[0] != '}') return(NULL); + + if (!read_car_only) + make_dotted_pair_cell = true; + else + make_dotted_pair_cell = false; + break; + case LEFT_PAREN: + if (!read_car_only) { + List *l = makeNode(List); + + lfirst(l) = nodeRead(false); + lnext(l) = nodeRead(false); + this_value = (Node*)l; + }else { + this_value = nodeRead(false); + } + break; + case RIGHT_PAREN: + this_value = NULL; + break; + case AT_SYMBOL: + break; + case ATOM_TOKEN: + if (!strncmp(token, "nil", 3)) { + this_value = NULL; + /* + * It might be "nil" but it is an atom! + */ + if (read_car_only) { + make_dotted_pair_cell = false; + } else { + make_dotted_pair_cell = true; + } + }else { + tmp = token[tok_len]; + token[tok_len] = '\0'; + this_value = (Node*)pstrdup(token); /* !attention! not a Node. + use with caution */ + token[tok_len] = tmp; + make_dotted_pair_cell = true; + } + break; + case T_Float: + tmp = token[tok_len]; + token[tok_len] = '\0'; + this_value = (Node*)makeFloat(atof(token)); + token[tok_len] = tmp; + make_dotted_pair_cell = true; + break; + case T_Integer: + tmp = token[tok_len]; + token[tok_len] = '\0'; + this_value = (Node*)makeInteger(atoi(token)); + token[tok_len] = tmp; + make_dotted_pair_cell = true; + break; + case T_String: + tmp = token[tok_len - 1]; + token[tok_len - 1] = '\0'; + token++; + this_value = (Node*)makeString(token); /* !! 
not strdup'd */ + token[tok_len - 2] = tmp; + make_dotted_pair_cell = true; + break; + default: + elog(WARN, "nodeRead: Bad type %d", type); + break; + } + if (make_dotted_pair_cell) { + List *l = makeNode(List); + + lfirst(l) = this_value; + if (!read_car_only) { + lnext(l) = nodeRead(false); + }else { + lnext(l) = NULL; + } + return_value = (Node*)l; + }else { + return_value = this_value; + } + return(return_value); +} + diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c new file mode 100644 index 00000000000..fc909fe7aa9 --- /dev/null +++ b/src/backend/nodes/readfuncs.c @@ -0,0 +1,1948 @@ +/*------------------------------------------------------------------------- + * + * readfuncs.c-- + * Reader functions for Postgres tree nodes. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/nodes/readfuncs.c,v 1.1.1.1 1996/07/09 06:21:33 scrappy Exp $ + * + * NOTES + * Most of the read functions for plan nodes are tested. (In fact, they + * pass the regression test as of 11/8/94.) The rest (for path selection) + * are probably never used. No effort has been made to get them to work. + * The simplest way to test these functions is by doing the following in + * ProcessQuery (before executing the plan): + * plan = stringToNode(nodeToString(plan)); + * Then, run the regression test. Let's just say you'll notice if either + * of the above function are not properly done. + * - ay 11/94 + * + *------------------------------------------------------------------------- + */ +#include +#include +#include + +#include "postgres.h" + +#include "access/heapam.h" +#include "access/htup.h" +#include "fmgr.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/palloc.h" + +#include "utils/lsyscache.h" +#include "utils/syscache.h" +#include "catalog/pg_type.h" + +#include "nodes/primnodes.h" +#include "nodes/plannodes.h" +#include "nodes/parsenodes.h" +#include "nodes/execnodes.h" +#include "nodes/relation.h" +#include "nodes/readfuncs.h" + +/* ---------------- + * node creator declarations + * ---------------- + */ + +static Datum readDatum(Oid type); + +static List *toIntList(List *list) +{ + List *l; + foreach(l, list) { + /* ugly manipulation, should probably free the Value node too */ + lfirst(l) = (void*)intVal(lfirst(l)); + } + return list; +} + +/* ---------------- + * _readQuery + * ---------------- + */ +static Query * +_readQuery() +{ + Query *local_node; + char *token; + int length; + + local_node = makeNode(Query); + + token = lsptok(NULL, &length); /* skip the :command */ + token = lsptok(NULL, &length); /* get the commandType */ + local_node->commandType = atoi(token); + + token = lsptok(NULL, &length); /* skip the :utility */ + token = lsptok(NULL, &length); /* get the notify name if any*/ + if (token[0] == '"' && token[1] == '"') + local_node->utilityStmt = NULL; + else { + NotifyStmt *n = makeNode(NotifyStmt); + n->relname = palloc(length + 1); + strncpy(n->relname,token,length); + n->relname[length] = '\0'; + local_node->utilityStmt = (Node*)n; + } + + token = lsptok(NULL, &length); /* skip the :resrel */ + token = lsptok(NULL, &length); /* get the resultRelation */ + local_node->resultRelation = atoi(token); + + token = lsptok(NULL, &length); /* skip :rtable */ + local_node->rtable = nodeRead(true); + + token = lsptok(NULL, &length); /* skip the :unique */ + token = lsptok(NULL, &length); /* get the uniqueFlag */ +/* local_node->uniqueFlag = (bool)atoi(token); */ + if 
(token[0]=='"' && token[1] == '"') /* non-unique */ + local_node->uniqueFlag = NULL; + else { + local_node->uniqueFlag = palloc(length + 1); + strncpy(local_node->uniqueFlag,token,length); + local_node->uniqueFlag[length] = '\0'; + } + + token = lsptok(NULL, &length); /* skip :targetlist */ + local_node->targetList = nodeRead(true); + + token = lsptok(NULL, &length); /* skip :qual */ + local_node->qual = nodeRead(true); + + return (local_node); +} + +/* ---------------- + * _getPlan + * ---------------- + */ +static void +_getPlan(Plan *node) +{ + char *token; + int length; + + token = lsptok(NULL, &length); /* first token is :cost */ + token = lsptok(NULL, &length); /* next is the actual cost */ + node->cost = (Cost) atof(token); + + token = lsptok(NULL, &length); /* skip the :size */ + token = lsptok(NULL, &length); /* get the plan_size */ + node->plan_size = atoi(token); + + token = lsptok(NULL, &length); /* skip the :width */ + token = lsptok(NULL, &length); /* get the plan_width */ + node->plan_width = atoi(token); + + token = lsptok(NULL, &length); /* eat the :state stuff */ + token = lsptok(NULL, &length); /* now get the state */ + + if (!strncmp(token, "nil", 3)) { + node->state = (EState*) NULL; + }else { /* Disgusting hack until I figure out what to do here */ + node->state = (EState*) ! NULL; + } + + token = lsptok(NULL, &length); /* eat :qptargetlist */ + node->targetlist = nodeRead(true); + + token = lsptok(NULL, &length); /* eat :qpqual */ + node->qual = nodeRead(true); + + token = lsptok(NULL, &length); /* eat :lefttree */ + node->lefttree = (Plan*) nodeRead(true); + + token = lsptok(NULL, &length); /* eat :righttree */ + node->righttree = (Plan*) nodeRead(true); + + return; +} + +/* + * Stuff from plannodes.h + */ + +/* ---------------- + * _readPlan + * ---------------- + */ +static Plan * +_readPlan() +{ + Plan *local_node; + + local_node = makeNode(Plan); + + _getPlan(local_node); + + return (local_node); +} + +/* ---------------- + * _readResult + * + * Does some obscene, possibly unportable, magic with + * sizes of things. + * ---------------- + */ +static Result * +_readResult() +{ + Result *local_node; + char *token; + int length; + + local_node = makeNode(Result); + + _getPlan((Plan*)local_node); + + token = lsptok(NULL, &length); /* eat :resconstantqual */ + local_node->resconstantqual = nodeRead(true); /* now read it */ + + return( local_node ); +} + +/* ---------------- + * _readExistential + * + * Existential nodes are only used by the planner. + * ---------------- + */ +static Existential * +_readExistential() +{ + Existential *local_node; + + local_node = makeNode(Existential); + + _getPlan((Plan*)local_node); + + return( local_node ); +} + +/* ---------------- + * _readAppend + * + * Append is a subclass of Plan. + * ---------------- + */ + +static Append * +_readAppend() +{ + Append *local_node; + char *token; + int length; + + local_node = makeNode(Append); + + _getPlan((Plan*)local_node); + + token = lsptok(NULL, &length); /* eat :unionplans */ + local_node->unionplans = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* eat :unionrelid */ + token = lsptok(NULL, &length); /* get unionrelid */ + local_node->unionrelid = atoi(token); + + token = lsptok(NULL, &length); /* eat :unionrtentries */ + local_node->unionrtentries = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _getJoin + * + * In case Join is not the same structure as Plan someday. 
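+ *
+ * Each join reader (_readNestLoop, _readMergeJoin, _readHashJoin) calls
+ * this first to consume the fields inherited from Plan; keeping it as a
+ * separate hook means Join-specific fields could later be read here
+ * without touching every caller.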
+ * ---------------- + */ +static void +_getJoin(Join *node) +{ + _getPlan((Plan*)node); +} + + +/* ---------------- + * _readJoin + * + * Join is a subclass of Plan + * ---------------- + */ +static Join * +_readJoin() +{ + Join *local_node; + + local_node = makeNode(Join); + + _getJoin(local_node); + + return( local_node ); +} + +/* ---------------- + * _readNestLoop + * + * NestLoop is a subclass of Join + * ---------------- + */ + +static NestLoop * +_readNestLoop() +{ + NestLoop *local_node; + + local_node = makeNode(NestLoop); + + _getJoin((Join*)local_node); + + return( local_node ); +} + +/* ---------------- + * _readMergeJoin + * + * MergeJoin is a subclass of Join + * ---------------- + */ +static MergeJoin * +_readMergeJoin() +{ + MergeJoin *local_node; + char *token; + int length; + + local_node = makeNode(MergeJoin); + + _getJoin((Join*)local_node); + token = lsptok(NULL, &length); /* eat :mergeclauses */ + local_node->mergeclauses = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* eat :mergesortop */ + token = lsptok(NULL, &length); /* get mergesortop */ + local_node->mergesortop = atol(token); + + return( local_node ); +} + +/* ---------------- + * _readHashJoin + * + * HashJoin is a subclass of Join. + * ---------------- + */ +static HashJoin * +_readHashJoin() +{ + HashJoin *local_node; + char *token; + int length; + + local_node = makeNode(HashJoin); + + _getJoin((Join*)local_node); + + token = lsptok(NULL, &length); /* eat :hashclauses */ + local_node->hashclauses = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* eat :hashjoinop */ + token = lsptok(NULL, &length); /* get hashjoinop */ + local_node->hashjoinop = atoi(token); + + token = lsptok(NULL, &length); /* eat :hashjointable */ + token = lsptok(NULL, &length); /* eat hashjointable */ + local_node->hashjointable = NULL; + + token = lsptok(NULL, &length); /* eat :hashjointablekey */ + token = lsptok(NULL, &length); /* eat hashjointablekey */ + local_node->hashjointablekey = 0; + + token = lsptok(NULL, &length); /* eat :hashjointablesize */ + token = lsptok(NULL, &length); /* eat hashjointablesize */ + local_node->hashjointablesize = 0; + + token = lsptok(NULL, &length); /* eat :hashdone */ + token = lsptok(NULL, &length); /* eat hashdone */ + local_node->hashdone = false; + + return( local_node ); +} + +/* ---------------- + * _getScan + * + * Scan is a subclass of Node + * (Actually, according to the plannodes.h include file, it is a + * subclass of Plan. This is why _getPlan is used here.) + * + * Scan gets its own get function since stuff inherits it. + * ---------------- + */ +static void +_getScan(Scan *node) +{ + char *token; + int length; + + _getPlan((Plan*)node); + + token = lsptok(NULL, &length); /* eat :scanrelid */ + token = lsptok(NULL, &length); /* get scanrelid */ + node->scanrelid = atoi(token); +} + +/* ---------------- + * _readScan + * + * Scan is a subclass of Plan (Not Node, see above). 
+ * ---------------- + */ +static Scan * +_readScan() +{ + Scan *local_node; + + local_node = makeNode(Scan); + + _getScan(local_node); + + return(local_node); +} + +/* ---------------- + * _readSeqScan + * + * SeqScan is a subclass of Scan + * ---------------- + */ +static SeqScan * +_readSeqScan() +{ + SeqScan *local_node; + + local_node = makeNode(SeqScan); + + _getScan((Scan*)local_node); + + return(local_node); +} + +/* ---------------- + * _readIndexScan + * + * IndexScan is a subclass of Scan + * ---------------- + */ +static IndexScan * +_readIndexScan() +{ + IndexScan *local_node; + char *token; + int length; + + local_node = makeNode(IndexScan); + + _getScan((Scan*)local_node); + + token = lsptok(NULL, &length); /* eat :indxid */ + local_node->indxid = + toIntList(nodeRead(true)); /* now read it */ + + token = lsptok(NULL, &length); /* eat :indxqual */ + local_node->indxqual = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readTemp + * + * Temp is a subclass of Plan + * ---------------- + */ +static Temp * +_readTemp() +{ + Temp *local_node; + char *token; + int length; + + local_node = makeNode(Temp); + + _getPlan((Plan*)local_node); + + token = lsptok(NULL, &length); /* eat :tempid */ + token = lsptok(NULL, &length); /* get tempid */ + local_node->tempid = atol(token); + + token = lsptok(NULL, &length); /* eat :keycount */ + token = lsptok(NULL, &length); /* get keycount */ + local_node->keycount = atoi(token); + + return(local_node); +} + +/* ---------------- + * _readSort + * + * Sort is a subclass of Temp + * ---------------- + */ +static Sort * +_readSort() +{ + Sort *local_node; + char *token; + int length; + + local_node = makeNode(Sort); + + _getPlan((Plan*)local_node); + + token = lsptok(NULL, &length); /* eat :tempid */ + token = lsptok(NULL, &length); /* get tempid */ + local_node->tempid = atol(token); + + token = lsptok(NULL, &length); /* eat :keycount */ + token = lsptok(NULL, &length); /* get keycount */ + local_node->keycount = atoi(token); + + return(local_node); +} + +static Agg * +_readAgg() +{ + Agg *local_node; + char *token; + int length; + + local_node = makeNode(Agg); + _getPlan((Plan*)local_node); + + token = lsptok(NULL, &length); /* eat :numagg */ + token = lsptok(NULL, &length); /* get numagg */ + local_node->numAgg = atoi(token); + + return(local_node); +} + +/* ---------------- + * _readUnique + * + * For some reason, unique is a subclass of Temp. 
+ */ +static Unique * +_readUnique() +{ + Unique *local_node; + char *token; + int length; + + local_node = makeNode(Unique); + + _getPlan((Plan*)local_node); + + token = lsptok(NULL, &length); /* eat :tempid */ + token = lsptok(NULL, &length); /* get :tempid */ + local_node->tempid = atol(token); + + token = lsptok(NULL, &length); /* eat :keycount */ + token = lsptok(NULL, &length); /* get :keycount */ + local_node->keycount = atoi(token); + + return(local_node); +} + +/* ---------------- + * _readHash + * + * Hash is a subclass of Temp + * ---------------- + */ +static Hash * +_readHash() +{ + Hash *local_node; + char *token; + int length; + + local_node = makeNode(Hash); + + _getPlan((Plan*)local_node); + + token = lsptok(NULL, &length); /* eat :hashkey */ + local_node->hashkey = (Var*) nodeRead(true); + + token = lsptok(NULL, &length); /* eat :hashtable */ + token = lsptok(NULL, &length); /* eat hashtable address*/ + local_node->hashtable = NULL; + + token = lsptok(NULL, &length); /* eat :hashtablekey*/ + token = lsptok(NULL, &length); /* get hashtablekey */ + local_node->hashtablekey = 0; + + token = lsptok(NULL, &length); /* eat :hashtablesize*/ + token = lsptok(NULL, &length); /* get hashtablesize */ + local_node->hashtablesize = 0; + + return(local_node); +} + +/* + * Stuff from primnodes.h. + */ + +/* ---------------- + * _readResdom + * + * Resdom is a subclass of Node + * ---------------- + */ +static Resdom * +_readResdom() +{ + Resdom *local_node; + char *token; + int length; + + local_node = makeNode(Resdom); + + token = lsptok(NULL, &length); /* eat :resno */ + token = lsptok(NULL, &length); /* get resno */ + local_node->resno = atoi(token); + + token = lsptok(NULL, &length); /* eat :restype */ + token = lsptok(NULL, &length); /* get restype */ + local_node->restype = atol(token); + + token = lsptok(NULL, &length); /* eat :reslen */ + token = lsptok(NULL, &length); /* get reslen */ + local_node->reslen = atoi(token); + + token = lsptok(NULL, &length); /* eat :resname */ + token = lsptok(NULL, &length); /* get the name */ + + if (!strncmp(token, "\"null\"", 5)) { + local_node->resname = NULL; + }else { + /* + * Peel off ""'s, then make a true copy. 
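+ *
+ * The token still carries its surrounding double quotes and `length'
+ * counts both of them, so the code below steps past the opening quote,
+ * temporarily overwrites the closing quote with a terminator, copies
+ * the string, and then restores the quote.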
+ */ + + token++; + token[length - 2] = '\0'; + + local_node->resname = palloc(length); + strcpy(local_node->resname, token); + token[length - 2] = '\"'; + } + + token = lsptok(NULL, &length); /* eat :reskey */ + token = lsptok(NULL, &length); /* get reskey */ + local_node->reskey = atoi(token); + + token = lsptok(NULL, &length); /* eat :reskeyop */ + token = lsptok(NULL, &length); /* get reskeyop */ + local_node->reskeyop = (Oid) atol(token); + + token = lsptok(NULL, &length); /* eat :resjunk */ + token = lsptok(NULL, &length); /* get resjunk */ + local_node->resjunk = atoi(token); + + return(local_node); +} + +/* ---------------- + * _readExpr + * + * Expr is a subclass of Node + * ---------------- + */ +static Expr * +_readExpr() +{ + Expr *local_node; + char *token; + int length; + + local_node = makeNode(Expr); + + token = lsptok(NULL, &length); /* eat :typeOid */ + token = lsptok(NULL, &length); /* get typeOid */ + local_node->typeOid = (Oid)atol(token); + + token = lsptok(NULL, &length); /* eat :opType */ + token = lsptok(NULL, &length); /* get opType */ + if (!strncmp(token, "op", 2)) { + local_node->opType = OP_EXPR; + } else if (!strncmp(token, "func", 4)) { + local_node->opType = FUNC_EXPR; + } else if (!strncmp(token, "or", 2)) { + local_node->opType = OR_EXPR; + } else if (!strncmp(token, "and", 3)) { + local_node->opType = AND_EXPR; + } else if (!strncmp(token, "not", 3)) { + local_node->opType = NOT_EXPR; + } + + token = lsptok(NULL, &length); /* eat :oper */ + local_node->oper = nodeRead(true); + + token = lsptok(NULL, &length); /* eat :args */ + local_node->args = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readVar + * + * Var is a subclass of Expr + * ---------------- + */ +static Var * +_readVar() +{ + Var *local_node; + char *token; + int length; + + local_node = makeNode(Var); + + token = lsptok(NULL, &length); /* eat :varno */ + token = lsptok(NULL, &length); /* get varno */ + local_node->varno = atoi(token); + + token = lsptok(NULL, &length); /* eat :varattno */ + token = lsptok(NULL, &length); /* get varattno */ + local_node->varattno = atoi(token); + + token = lsptok(NULL, &length); /* eat :vartype */ + token = lsptok(NULL, &length); /* get vartype */ + local_node->vartype = (Oid) atol(token); + + token = lsptok(NULL, &length); /* eat :varnoold */ + token = lsptok(NULL, &length); /* get varnoold */ + local_node->varnoold = (Oid) atol(token); + + token = lsptok(NULL, &length); /* eat :varoattno */ + token = lsptok(NULL, &length); /* eat :varoattno */ + local_node->varoattno = (int) atol(token); + + return(local_node); +} + +/* ---------------- + * _readArray + * + * Array is a subclass of Expr + * ---------------- + */ +static Array * +_readArray() +{ + Array *local_node; + char *token; + int length; + + local_node = makeNode(Array); + + token = lsptok(NULL, &length); /* eat :arrayelemtype */ + token = lsptok(NULL, &length); /* get arrayelemtype */ + local_node->arrayelemtype = (Oid) atoi(token); + + token = lsptok(NULL, &length); /* eat :arrayelemlength */ + token = lsptok(NULL, &length); /* get arrayelemlength */ + local_node->arrayelemlength = atoi(token); + + token = lsptok(NULL, &length); /* eat :arrayelembyval */ + token = lsptok(NULL, &length); /* get arrayelembyval */ + local_node->arrayelembyval = (token[0] == 't') ? 
true : false; + + token = lsptok(NULL, &length); /* eat :arraylow */ + token = lsptok(NULL, &length); /* get arraylow */ + local_node->arraylow.indx[0] = atoi(token); + + token = lsptok(NULL, &length); /* eat :arrayhigh */ + token = lsptok(NULL, &length); /* get arrayhigh */ + local_node->arrayhigh.indx[0] = atoi(token); + + token = lsptok(NULL, &length); /* eat :arraylen */ + token = lsptok(NULL, &length); /* get arraylen */ + local_node->arraylen = atoi(token); + + return(local_node); +} + +/* ---------------- + * _readArrayRef + * + * ArrayRef is a subclass of Expr + * ---------------- + */ +static ArrayRef * +_readArrayRef() +{ + ArrayRef *local_node; + char *token; + int length; + + local_node = makeNode(ArrayRef); + + token = lsptok(NULL, &length); /* eat :refelemtype */ + token = lsptok(NULL, &length); /* get refelemtype */ + local_node->refelemtype = (Oid) atoi(token); + + token = lsptok(NULL, &length); /* eat :refattrlength */ + token = lsptok(NULL, &length); /* get refattrlength */ + local_node->refattrlength = atoi(token); + + token = lsptok(NULL, &length); /* eat :refelemlength */ + token = lsptok(NULL, &length); /* get refelemlength */ + local_node->refelemlength = atoi(token); + + token = lsptok(NULL, &length); /* eat :refelembyval */ + token = lsptok(NULL, &length); /* get refelembyval */ + local_node->refelembyval = (token[0] == 't') ? true : false; + + token = lsptok(NULL, &length); /* eat :refupperindex */ + local_node->refupperindexpr = nodeRead(true); + + token = lsptok(NULL, &length); /* eat :reflowerindex */ + local_node->reflowerindexpr = nodeRead(true); + + token = lsptok(NULL, &length); /* eat :refexpr */ + local_node->refexpr = nodeRead(true); + + token = lsptok(NULL, &length); /* eat :refassgnexpr */ + local_node->refassgnexpr = nodeRead(true); + + return(local_node); +} + +/* ---------------- + * _readConst + * + * Const is a subclass of Expr + * ---------------- + */ +static Const * +_readConst() +{ + Const *local_node; + char *token; + int length; + + local_node = makeNode(Const); + + token = lsptok(NULL, &length); /* get :consttype */ + token = lsptok(NULL, &length); /* now read it */ + local_node->consttype = atol(token); + + + token = lsptok(NULL, &length); /* get :constlen */ + token = lsptok(NULL, &length); /* now read it */ + local_node->constlen = atoi(token); + + token = lsptok(NULL, &length); /* get :constisnull */ + token = lsptok(NULL, &length); /* now read it */ + + if (!strncmp(token, "true", 4)) { + local_node->constisnull = true; + }else { + local_node->constisnull = false; + } + + + token = lsptok(NULL, &length); /* get :constvalue */ + + if (local_node->constisnull) { + token = lsptok(NULL, &length); /* skip "NIL" */ + }else { + /* + * read the value + */ + local_node->constvalue = readDatum(local_node->consttype); + } + + token = lsptok(NULL, &length); /* get :constbyval */ + token = lsptok(NULL, &length); /* now read it */ + + if (!strncmp(token, "true", 4)) { + local_node->constbyval = true; + }else { + local_node->constbyval = false; + } + + return(local_node); +} + +/* ---------------- + * _readFunc + * + * Func is a subclass of Expr + * ---------------- + */ +static Func * +_readFunc() +{ + Func *local_node; + char *token; + int length; + + local_node = makeNode(Func); + + token = lsptok(NULL, &length); /* get :funcid */ + token = lsptok(NULL, &length); /* now read it */ + local_node->funcid = atol(token); + + token = lsptok(NULL, &length); /* get :functype */ + token = lsptok(NULL, &length); /* now read it */ + local_node->functype = 
atol(token); + + token = lsptok(NULL, &length); /* get :funcisindex */ + token = lsptok(NULL, &length); /* now read it */ + + if (!strncmp(token, "true", 4)) { + local_node->funcisindex = true; + }else { + local_node->funcisindex = false; + } + + token = lsptok(NULL, &length); /* get :funcsize */ + token = lsptok(NULL, &length); /* now read it */ + local_node->funcsize = atol(token); + + token = lsptok(NULL, &length); /* get :func_fcache */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->func_fcache = (FunctionCache *) NULL; + + token = lsptok(NULL, &length); /* get :func_tlist */ + local_node->func_tlist = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :func_planlist */ + local_node->func_planlist = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readOper + * + * Oper is a subclass of Expr + * ---------------- + */ +static Oper * +_readOper() +{ + Oper *local_node; + char *token; + int length; + + local_node = makeNode(Oper); + + token = lsptok(NULL, &length); /* get :opno */ + token = lsptok(NULL, &length); /* now read it */ + local_node->opno = atol(token); + + token = lsptok(NULL, &length); /* get :opid */ + token = lsptok(NULL, &length); /* now read it */ + local_node->opid = atol(token); + + token = lsptok(NULL, &length); /* get :opresulttype */ + token = lsptok(NULL, &length); /* now read it */ + local_node->opresulttype = atol(token); + + /* + * NOTE: Alternatively we can call 'replace_opid' + * which initializes both 'opid' and 'op_fcache'. + */ + local_node->op_fcache = (FunctionCache *) NULL; + + return(local_node); +} + +/* ---------------- + * _readParam + * + * Param is a subclass of Expr + * ---------------- + */ +static Param * +_readParam() +{ + Param *local_node; + char *token; + int length; + + local_node = makeNode(Param); + + token = lsptok(NULL, &length); /* get :paramkind */ + token = lsptok(NULL, &length); /* now read it */ + local_node->paramkind = atoi(token); + + token = lsptok(NULL, &length); /* get :paramid */ + token = lsptok(NULL, &length); /* now read it */ + local_node->paramid = atol(token); + + token = lsptok(NULL, &length); /* get :paramname */ + token = lsptok(NULL, &length); /* now read it */ + token++; /* skip the first `"' */ + token[length - 2] = '\0'; /* this is the 2nd `"' */ + + local_node->paramname = pstrdup(token); + token[length - 2] = '\"'; /* restore the 2nd `"' */ + + token = lsptok(NULL, &length); /* get :paramtype */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->paramtype = atol(token); + token = lsptok(NULL, &length); /* get :param_tlist */ + local_node->param_tlist = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* + * Stuff from execnodes.h + */ + +/* ---------------- + * _readEState + * + * EState is a subclass of Node. 
+ * ---------------- + */ +static EState * +_readEState() +{ + EState *local_node; + char *token; + int length; + + local_node = makeNode(EState); + + token = lsptok(NULL, &length); /* get :direction */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->es_direction = atoi(token); + + token = lsptok(NULL, &length); /* get :range_table */ + + local_node->es_range_table = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :result_relation_info */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + sscanf(token, "%x", &local_node->es_result_relation_info); + + return(local_node); +} + +/* + * Stuff from relation.h + */ + +/* ---------------- + * _readRel + * ---------------- + */ +static Rel * +_readRel() +{ + Rel *local_node; + char *token; + int length; + + local_node = makeNode(Rel); + + token = lsptok(NULL, &length); /* get :relids */ + local_node->relids = + toIntList(nodeRead(true)); /* now read it */ + + token = lsptok(NULL, &length); /* get :indexed */ + token = lsptok(NULL, &length); /* now read it */ + + if (!strncmp(token, "true", 4)) + { + local_node->indexed = true; + } + else + { + local_node->indexed = false; + } + + token = lsptok(NULL, &length); /* get :pages */ + token = lsptok(NULL, &length); /* now read it */ + local_node->pages = (unsigned int) atoi(token); + + token = lsptok(NULL, &length); /* get :tuples */ + token = lsptok(NULL, &length); /* now read it */ + local_node->tuples = (unsigned int) atoi(token); + + token = lsptok(NULL, &length); /* get :size */ + token = lsptok(NULL, &length); /* now read it */ + local_node->size = (unsigned int) atoi(token); + + token = lsptok(NULL, &length); /* get :width */ + token = lsptok(NULL, &length); /* now read it */ + local_node->width = (unsigned int) atoi(token); + + token = lsptok(NULL, &length); /* get :targetlist */ + local_node->targetlist = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :pathlist */ + local_node->pathlist = nodeRead(true); /* now read it */ + + /* + * Not sure if these are nodes or not. They're declared as + * struct Path *. Since i don't know, i'll just print the + * addresses for now. This can be changed later, if necessary. 
+ */ + + token = lsptok(NULL, &length); /* get :unorderpath */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + sscanf(token, "%x", &local_node->unorderedpath); + + token = lsptok(NULL, &length); /* get :cheapestpath */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + sscanf(token, "%x", &local_node->cheapestpath); + + + token = lsptok(NULL, &length); /* get :clauseinfo */ + local_node->clauseinfo = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :joininfo */ + local_node->joininfo = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :innerjoin */ + local_node->innerjoin = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readTargetEntry + * ---------------- + */ +static TargetEntry * +_readTargetEntry() +{ + TargetEntry *local_node; + char *token; + int length; + + local_node = makeNode(TargetEntry); + + token = lsptok(NULL, &length); /* get :resdom */ + local_node->resdom = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :expr */ + local_node->expr = nodeRead(true); /* now read it */ + + return (local_node); +} + +/* ---------------- + * _readTargetEntry + * ---------------- + */ +static RangeTblEntry * +_readRangeTblEntry() +{ + RangeTblEntry *local_node; + char *token; + int length; + + local_node = makeNode(RangeTblEntry); + + token = lsptok(NULL, &length); /* eat :relname */ + token = lsptok(NULL, &length); /* get :relname */ + if (!strncmp(token, "\"null\"", 5)) { + local_node->relname = NULL; + }else { + /* + * Peel off ""'s, then make a true copy. + */ + + token++; + token[length - 2] = '\0'; + + local_node->relname = (Name) palloc(NAMEDATALEN); + namestrcpy(local_node->relname, token); + token[length - 2] = '\"'; + } + + token = lsptok(NULL, &length); /* eat :inh */ + token = lsptok(NULL, &length); /* get :inh */ + local_node->inh = atoi(token); + + token = lsptok(NULL, &length); /* eat :refname */ + token = lsptok(NULL, &length); /* get :refname */ + if (!strncmp(token, "\"null\"", 5)) { + local_node->refname = NULL; + }else { + /* + * Peel off ""'s, then make a true copy. + */ + + token++; + token[length - 2] = '\0'; + + local_node->refname = (char*)pstrdup(token); + token[length - 2] = '\"'; + } + + token = lsptok(NULL, &length); /* eat :relid */ + token = lsptok(NULL, &length); /* get :relid */ + local_node->relid = atoi(token); + + return (local_node); +} + +/* ---------------- + * _readPath + * + * Path is a subclass of Node. + * ---------------- + */ +static Path * +_readPath() +{ + Path *local_node; + char *token; + int length; + + local_node = makeNode(Path); + + token = lsptok(NULL, &length); /* get :pathtype */ + token = lsptok(NULL, &length); /* now read it */ + local_node->pathtype = atol(token); + + token = lsptok(NULL, &length); /* get :cost */ + token = lsptok(NULL, &length); /* now read it */ + local_node->path_cost = (Cost) atof(token); + +#if 0 + token = lsptok(NULL, &length); /* get :p_ordering */ + local_node->p_ordering = + nodeRead(true); /* now read it */ +#endif + + token = lsptok(NULL, &length); /* get :keys */ + local_node->keys = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readIndexPath + * + * IndexPath is a subclass of Path. 
+ * ---------------- + */ +static IndexPath * +_readIndexPath() +{ + IndexPath *local_node; + char *token; + int length; + + local_node = makeNode(IndexPath); + + token = lsptok(NULL, &length); /* get :pathtype */ + token = lsptok(NULL, &length); /* now read it */ + local_node->path.pathtype = atol(token); + + token = lsptok(NULL, &length); /* get :cost */ + token = lsptok(NULL, &length); /* now read it */ + local_node->path.path_cost = (Cost) atof(token); + +#if 0 + token = lsptok(NULL, &length); /* get :p_ordering */ + local_node->path.p_ordering = nodeRead(true); /* now read it */ +#endif + + token = lsptok(NULL, &length); /* get :keys */ + local_node->path.keys = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :indexid */ + local_node->indexid = + toIntList(nodeRead(true)); + + token = lsptok(NULL, &length); /* get :indexqual */ + local_node->indexqual = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readJoinPath + * + * JoinPath is a subclass of Path + * ---------------- + */ +static JoinPath * +_readJoinPath() +{ + JoinPath *local_node; + char *token; + int length; + + + local_node = makeNode(JoinPath); + + token = lsptok(NULL, &length); /* get :pathtype */ + token = lsptok(NULL, &length); /* now read it */ + local_node->path.pathtype = atol(token); + + token = lsptok(NULL, &length); /* get :cost */ + token = lsptok(NULL, &length); /* now read it */ + local_node->path.path_cost = (Cost) atof(token); + +#if 0 + token = lsptok(NULL, &length); /* get :p_ordering */ + local_node->path.p_ordering = nodeRead(true); /* now read it */ +#endif + + token = lsptok(NULL, &length); /* get :keys */ + local_node->path.keys = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :pathclauseinfo */ + local_node->pathclauseinfo = nodeRead(true); /* now read it */ + + /* + * Not sure if these are nodes; they're declared as "struct path *". + * For now, i'll just print the addresses. + * + * GJK: Since I am parsing this stuff, I'll just ignore the addresses, + * and initialize these pointers to NULL. + */ + + token = lsptok(NULL, &length); /* get :outerjoinpath */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->outerjoinpath = NULL; + + token = lsptok(NULL, &length); /* get :innerjoinpath */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->innerjoinpath = NULL; + + token = lsptok(NULL, &length); /* get :outerjoincost */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->path.outerjoincost = (Cost) atof(token); + + token = lsptok(NULL, &length); /* get :joinid */ + local_node->path.joinid = + toIntList(nodeRead(true)); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readMergePath + * + * MergePath is a subclass of JoinPath. 
+ * ---------------- + */ +static MergePath * +_readMergePath() +{ + MergePath *local_node; + char *token; + int length; + + local_node = makeNode(MergePath); + + token = lsptok(NULL, &length); /* get :pathtype */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.path.pathtype = atol(token); + + token = lsptok(NULL, &length); /* get :cost */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.path.path_cost = (Cost) atof(token); + +#if 0 + token = lsptok(NULL, &length); /* get :p_ordering */ + local_node->jpath.path.p_ordering = nodeRead(true); /* now read it */ +#endif + + token = lsptok(NULL, &length); /* get :keys */ + local_node->jpath.path.keys = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :pathclauseinfo */ + local_node->jpath.pathclauseinfo = nodeRead(true); /* now read it */ + + /* + * Not sure if these are nodes; they're declared as "struct path *". + * For now, i'll just print the addresses. + * + * GJK: Since I am parsing this stuff, I'll just ignore the addresses, + * and initialize these pointers to NULL. + */ + + token = lsptok(NULL, &length); /* get :outerjoinpath */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.outerjoinpath = NULL; + + token = lsptok(NULL, &length); /* get :innerjoinpath */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.innerjoinpath = NULL; + + token = lsptok(NULL, &length); /* get :outerjoincost */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.path.outerjoincost = (Cost) atof(token); + + token = lsptok(NULL, &length); /* get :joinid */ + local_node->jpath.path.joinid = + toIntList(nodeRead(true)); /* now read it */ + + token = lsptok(NULL, &length); /* get :path_mergeclauses */ + local_node->path_mergeclauses = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :outersortkeys */ + local_node->outersortkeys = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :innersortkeys */ + local_node->innersortkeys = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readHashPath + * + * HashPath is a subclass of JoinPath. + * ---------------- + */ +static HashPath * +_readHashPath() +{ + HashPath *local_node; + char *token; + int length; + + local_node = makeNode(HashPath); + + token = lsptok(NULL, &length); /* get :pathtype */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.path.pathtype = atol(token); + + token = lsptok(NULL, &length); /* get :cost */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.path.path_cost = (Cost) atof(token); + +#if 0 + token = lsptok(NULL, &length); /* get :p_ordering */ + local_node->jpath.path.p_ordering = nodeRead(true); /* now read it */ +#endif + + token = lsptok(NULL, &length); /* get :keys */ + local_node->jpath.path.keys = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :pathclauseinfo */ + local_node->jpath.pathclauseinfo = nodeRead(true); /* now read it */ + + /* + * Not sure if these are nodes; they're declared as "struct path *". + * For now, i'll just print the addresses. + * + * GJK: Since I am parsing this stuff, I'll just ignore the addresses, + * and initialize these pointers to NULL. 
+ */ + + token = lsptok(NULL, &length); /* get :outerjoinpath */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.outerjoinpath = NULL; + + token = lsptok(NULL, &length); /* get :innerjoinpath */ + token = lsptok(NULL, &length); /* get @ */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.innerjoinpath = NULL; + + token = lsptok(NULL, &length); /* get :outerjoincost */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->jpath.path.outerjoincost = (Cost) atof(token); + + token = lsptok(NULL, &length); /* get :joinid */ + local_node->jpath.path.joinid = + toIntList(nodeRead(true)); /* now read it */ + + token = lsptok(NULL, &length); /* get :path_hashclauses */ + local_node->path_hashclauses = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :outerhashkeys */ + local_node->outerhashkeys = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :innerhashkeys */ + local_node->innerhashkeys = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readOrderKey + * + * OrderKey is a subclass of Node. + * ---------------- + */ +static OrderKey * +_readOrderKey() +{ + OrderKey *local_node; + char *token; + int length; + + local_node = makeNode(OrderKey); + + token = lsptok(NULL, &length); /* get :attribute_number */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->attribute_number = atoi(token); + + token = lsptok(NULL, &length); /* get :array_index */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->array_index = atoi(token); + + return(local_node); +} + +/* ---------------- + * _readJoinKey + * + * JoinKey is a subclass of Node. + * ---------------- + */ +static JoinKey * +_readJoinKey() +{ + JoinKey *local_node; + char *token; + int length; + + local_node = makeNode(JoinKey); + + token = lsptok(NULL, &length); /* get :outer */ + local_node->outer = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :inner */ + local_node->inner = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readMergeOrder + * + * MergeOrder is a subclass of Node. + * ---------------- + */ +static MergeOrder * +_readMergeOrder() +{ + MergeOrder *local_node; + char *token; + int length; + + local_node = makeNode(MergeOrder); + token = lsptok(NULL, &length); /* get :join_operator */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->join_operator = atol(token); + + token = lsptok(NULL, &length); /* get :left_operator */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->left_operator = atol(token); + + token = lsptok(NULL, &length); /* get :right_operator */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->right_operator = atol(token); + + token = lsptok(NULL, &length); /* get :left_type */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->left_type = atol(token); + + token = lsptok(NULL, &length); /* get :right_type */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->right_type = atol(token); + + return(local_node); +} + +/* ---------------- + * _readCInfo + * + * CInfo is a subclass of Node. 
+ * ---------------- + */ +static CInfo * +_readCInfo() +{ + CInfo *local_node; + char *token; + int length; + + local_node = makeNode(CInfo); + + token = lsptok(NULL, &length); /* get :clause */ + local_node->clause = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :selectivity */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->selectivity = atof(token); + + token = lsptok(NULL, &length); /* get :notclause */ + token = lsptok(NULL, &length); /* now read it */ + + if (!strncmp(token, "true", 4)) + { + local_node->notclause = true; + } + else + { + local_node->notclause = false; + } + + token = lsptok(NULL, &length); /* get :indexids */ + local_node->indexids = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :mergesortorder */ + local_node->mergesortorder = (MergeOrder*) nodeRead(true); + + token = lsptok(NULL, &length); /* get :hashjoinoperator */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->hashjoinoperator = atol(token); + + return(local_node); +} + +/* ---------------- + * _readJoinMethod + * + * JoinMethod is a subclass of Node. + * ---------------- + */ +static JoinMethod * +_readJoinMethod() +{ + JoinMethod *local_node; + char *token; + int length; + + local_node = makeNode(JoinMethod); + + token = lsptok(NULL, &length); /* get :jmkeys */ + local_node->jmkeys = nodeRead(true);/* now read it */ + + token = lsptok(NULL, &length); /* get :clauses */ + local_node->clauses = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readHInfo + * + * HInfo is a subclass of JoinMethod. + * ---------------- + */ +static HInfo * +_readHInfo() +{ + HInfo *local_node; + char *token; + int length; + + local_node = makeNode(HInfo); + + token = lsptok(NULL, &length); /* get :hashop */ + token = lsptok(NULL, &length); /* now read it */ + + local_node->hashop = atoi(token); + + token = lsptok(NULL, &length); /* get :jmkeys */ + local_node->jmethod.jmkeys = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :clauses */ + local_node->jmethod.clauses = nodeRead(true); /* now read it */ + + return(local_node); +} + +/* ---------------- + * _readJInfo() + * + * JInfo is a subclass of Node. 
+ * ---------------- + */ +static JInfo * +_readJInfo() +{ + JInfo *local_node; + char *token; + int length; + + local_node = makeNode(JInfo); + + token = lsptok(NULL, &length); /* get :otherrels */ + local_node->otherrels = + toIntList(nodeRead(true)); /* now read it */ + + token = lsptok(NULL, &length); /* get :jinfoclauseinfo */ + local_node->jinfoclauseinfo = nodeRead(true); /* now read it */ + + token = lsptok(NULL, &length); /* get :mergesortable */ + + if (!strncmp(token, "true", 4)) + { + local_node->mergesortable = true; + } + else + { + local_node->mergesortable = false; + } + + token = lsptok(NULL, &length); /* get :hashjoinable */ + + if (!strncmp(token, "true", 4)) + { + local_node->hashjoinable = true; + } + else + { + local_node->hashjoinable = false; + } + + return(local_node); +} + +/* ---------------- + * _readIter() + * + * ---------------- + */ +static Iter * +_readIter() +{ + Iter *local_node; + char *token; + int length; + + local_node = makeNode(Iter); + + token = lsptok(NULL, &length); /* eat :iterexpr */ + local_node->iterexpr = nodeRead(true); /* now read it */ + + return(local_node); +} + + +/* ---------------- + * parsePlanString + * + * Given a character string containing a plan, parsePlanString sets up the + * plan structure representing that plan. + * + * The string passed to parsePlanString must be null-terminated. + * ---------------- + */ +Node * +parsePlanString() +{ + char *token; + int length; + void *return_value; + + token = lsptok(NULL, &length); + + if (!strncmp(token, "PLAN", 4)) { + return_value = _readPlan(); + }else if (!strncmp(token, "RESULT", 6)) { + return_value = _readResult(); + }else if (!strncmp(token, "EXISTENTIAL", 11)) { + return_value = _readExistential(); + }else if (!strncmp(token, "APPEND", 6)) { + return_value = _readAppend(); + }else if (!strncmp(token, "JOIN", 4)) { + return_value = _readJoin(); + }else if (!strncmp(token, "NESTLOOP", 8)) { + return_value = _readNestLoop(); + }else if (!strncmp(token, "MERGEJOIN", 9)) { + return_value = _readMergeJoin(); + }else if (!strncmp(token, "HASHJOIN", 8)) { + return_value = _readHashJoin(); + }else if (!strncmp(token, "SCAN", 4)) { + return_value = _readScan(); + }else if (!strncmp(token, "SEQSCAN", 7)) { + return_value = _readSeqScan(); + }else if (!strncmp(token, "INDEXSCAN", 9)) { + return_value = _readIndexScan(); + }else if (!strncmp(token, "TEMP", 4)) { + return_value = _readTemp(); + }else if (!strncmp(token, "SORT", 4)) { + return_value = _readSort(); + }else if (!strncmp(token, "AGG", 3)) { + return_value = _readAgg(); + }else if (!strncmp(token, "UNIQUE", 4)) { + return_value = _readUnique(); + }else if (!strncmp(token, "HASH", 4)) { + return_value = _readHash(); + }else if (!strncmp(token, "RESDOM", 6)) { + return_value = _readResdom(); + }else if (!strncmp(token, "EXPR", 4)) { + return_value = _readExpr(); + }else if (!strncmp(token, "ARRAYREF", 7)) { + /* make sure this strncmp is done before that of ARRAY */ + return_value = _readArrayRef(); + }else if (!strncmp(token, "ARRAY", 5)) { + return_value = _readArray(); + }else if (!strncmp(token, "VAR", 3)) { + return_value = _readVar(); + }else if (!strncmp(token, "CONST", 5)) { + return_value = _readConst(); + }else if (!strncmp(token, "FUNC", 4)) { + return_value = _readFunc(); + }else if (!strncmp(token, "OPER", 4)) { + return_value = _readOper(); + }else if (!strncmp(token, "PARAM", 5)) { + return_value = _readParam(); + }else if (!strncmp(token, "ESTATE", 6)) { + return_value = _readEState(); + }else if (!strncmp(token, 
"REL", 3)) { + return_value = _readRel(); + }else if (!strncmp(token, "TLE", 3)) { + return_value = _readTargetEntry(); + }else if (!strncmp(token, "RTE", 3)) { + return_value = _readRangeTblEntry(); + }else if (!strncmp(token, "PATH", 4)) { + return_value = _readPath(); + }else if (!strncmp(token, "INDEXPATH", 9)) { + return_value = _readIndexPath(); + }else if (!strncmp(token, "JOINPATH", 8)) { + return_value = _readJoinPath(); + }else if (!strncmp(token, "MERGEPATH", 9)) { + return_value = _readMergePath(); + }else if (!strncmp(token, "HASHPATH", 8)) { + return_value = _readHashPath(); + }else if (!strncmp(token, "ORDERKEY", 8)) { + return_value = _readOrderKey(); + }else if (!strncmp(token, "JOINKEY", 7)) { + return_value = _readJoinKey(); + }else if (!strncmp(token, "MERGEORDER", 10)) { + return_value = _readMergeOrder(); + }else if (!strncmp(token, "CINFO", 5)) { + return_value = _readCInfo(); + }else if (!strncmp(token, "JOINMETHOD", 10)) { + return_value = _readJoinMethod(); + }else if (!strncmp(token, "JINFO", 5)) { + return_value = _readJInfo(); + }else if (!strncmp(token, "HINFO", 5)) { + return_value = _readHInfo(); + }else if (!strncmp(token, "ITER", 4)) { + return_value = _readIter(); + }else if (!strncmp(token, "QUERY", 5)) { + return_value = _readQuery(); + }else { + elog(WARN, "badly formatted planstring \"%.10s\"...\n", token); + } + + return ((Node*)return_value); +} +/*------------------------------------------------------------*/ + +/* ---------------- + * readDatum + * + * given a string representation of the value of the given type, + * create the appropriate Datum + * ---------------- + */ +static Datum +readDatum(Oid type) +{ + int length; + int tokenLength; + char *token; + bool byValue; + Datum res; + char *s; + int i; + + byValue = get_typbyval(type); + + /* + * read the actual length of the value + */ + token = lsptok(NULL, &tokenLength); + length = atoi(token); + token = lsptok(NULL, &tokenLength); /* skip the '[' */ + + if (byValue) { + if (length > sizeof(Datum)) { + elog(WARN, "readValue: byval & length = %d", length); + } + s = (char *) (&res); + for (i=0; i= 1) { + s = (char*)palloc(length); + Assert( s!=NULL ); + for (i=0; iinactive=true*/ +/*#define joininfo_inactive(joininfo) joininfo->inactive */ + +#endif /* INTERNAL_H */ diff --git a/src/backend/optimizer/joininfo.h b/src/backend/optimizer/joininfo.h new file mode 100644 index 00000000000..beda9a67de0 --- /dev/null +++ b/src/backend/optimizer/joininfo.h @@ -0,0 +1,20 @@ +/*------------------------------------------------------------------------- + * + * joininfo.h-- + * prototypes for joininfo.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: joininfo.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef JOININFO_H +#define JOININFO_H + +extern JInfo *joininfo_member(List *join_relids, List *joininfo_list); +extern JInfo *find_joininfo_node(Rel *this_rel, List *join_relids); +extern Var *other_join_clause_var(Var *var, Expr *clause); + +#endif /* JOININFO_H */ diff --git a/src/backend/optimizer/keys.h b/src/backend/optimizer/keys.h new file mode 100644 index 00000000000..ac579089f51 --- /dev/null +++ b/src/backend/optimizer/keys.h @@ -0,0 +1,22 @@ +/*------------------------------------------------------------------------- + * + * keys.h-- + * prototypes for keys.c. 
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: keys.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef KEYS_H +#define KEYS_H + +extern bool match_indexkey_operand(int indexkey, Var *operand, Rel *rel); +extern bool equal_indexkey_var(int index_key, Var *var); +extern Var *extract_subkey(JoinKey *jk, int which_subkey); +extern bool samekeys(List *keys1, List *keys2); +extern List *collect_index_pathkeys(int *index_keys, List *tlist); + +#endif /* KEYS_H */ diff --git a/src/backend/optimizer/ordering.h b/src/backend/optimizer/ordering.h new file mode 100644 index 00000000000..0b598fb71a9 --- /dev/null +++ b/src/backend/optimizer/ordering.h @@ -0,0 +1,24 @@ +/*------------------------------------------------------------------------- + * + * ordering.h-- + * prototypes for ordering.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: ordering.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef ORDERING_H +#define ORDERING_H + +extern bool equal_path_path_ordering(PathOrder *path_ordering1, + PathOrder *path_ordering2); +extern bool equal_path_merge_ordering(Oid *path_ordering, + MergeOrder *merge_ordering); +extern bool equal_merge_merge_ordering(MergeOrder *merge_ordering1, + MergeOrder *merge_ordering2); +extern bool equal_sortops_order(Oid *ordering1, Oid *ordering2); + +#endif /* ORDERING_H */ diff --git a/src/backend/optimizer/path/Makefile.inc b/src/backend/optimizer/path/Makefile.inc new file mode 100644 index 00000000000..6bb014b0a90 --- /dev/null +++ b/src/backend/optimizer/path/Makefile.inc @@ -0,0 +1,21 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for optimizer/path +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/optimizer/path/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:35 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS= allpaths.c clausesel.c costsize.c hashutils.c indxpath.c \ + joinpath.c joinrels.c joinutils.c mergeutils.c orindxpath.c \ + prune.c + +# not ready yet: predmig.c xfunc.c + + + diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c new file mode 100644 index 00000000000..0ed2139095d --- /dev/null +++ b/src/backend/optimizer/path/allpaths.c @@ -0,0 +1,351 @@ +/*------------------------------------------------------------------------- + * + * allpaths.c-- + * Routines to find possible search paths for processing a query + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v 1.1.1.1 1996/07/09 06:21:35 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include + +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" +#include "nodes/primnodes.h" + +#include "optimizer/internal.h" + +#include "optimizer/paths.h" +#include "optimizer/pathnode.h" +#include "optimizer/clauses.h" +#include "optimizer/xfunc.h" +#include "optimizer/cost.h" + +#include "commands/creatinh.h" + +static void find_rel_paths(Query *root, List *rels); +static List *find_join_paths(Query *root, List *outer_rels, int levels_left); + 
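
[Editor's illustration] The comments on find_paths, find_rel_paths and find_join_paths just below describe a level-at-a-time search: level one collects the access paths for each base relation, and every later join level combines the previous level's relations with one more relation until a single entry covers the whole query. The stand-alone C sketch that follows is only meant to illustrate that control flow in the simplest (non-bushy, left-deep) case; every name in it (ToyPath, toy_scan_cost, toy_join_cost) and the greedy "one cheapest survivor per level" pruning are invented for this example, and are much simpler than what the real routines below do (they keep every distinct join relation per level and prune redundant ones).

#include <stdio.h>

#define MAX_BASE_RELS 8

typedef struct ToyPath {
    unsigned relmask;           /* which base relations this path joins (bit i = rel i) */
    double   cost;              /* accumulated toy cost of producing it */
} ToyPath;

/* toy stand-ins for the scan/join cost estimates: a scan costs its page
 * count, a join costs the sum of its inputs plus a flat per-join overhead */
static double toy_scan_cost(int pages) { return (double) pages; }
static double toy_join_cost(double outer, double inner) { return outer + inner + 10.0; }

int main(void)
{
    int     pages[] = { 40, 5, 120 };   /* three hypothetical base relations */
    int     nrels = 3;
    ToyPath level[MAX_BASE_RELS];       /* paths kept at the current level */
    int     nlevel = 0;
    int     i, lev;

    /* Level 1: one access path per base relation (cf. find_rel_paths). */
    for (i = 0; i < nrels; i++) {
        level[nlevel].relmask = 1u << i;
        level[nlevel].cost = toy_scan_cost(pages[i]);
        nlevel++;
    }

    /* Levels 2..nrels: extend the surviving paths with one more base
     * relation and keep only the cheapest result, so after nrels-1 join
     * levels a single entry covers every relation (cf. find_join_paths
     * with BushyPlanFlag unset, greatly simplified). */
    for (lev = 2; lev <= nrels; lev++) {
        ToyPath best = { 0, 0.0 };
        int     found = 0;
        int     r;

        for (i = 0; i < nlevel; i++) {
            for (r = 0; r < nrels; r++) {
                unsigned bit = 1u << r;
                ToyPath  cand;

                if (level[i].relmask & bit)
                    continue;           /* relation already joined in */
                cand.relmask = level[i].relmask | bit;
                cand.cost = toy_join_cost(level[i].cost, toy_scan_cost(pages[r]));
                if (!found || cand.cost < best.cost) {
                    best = cand;
                    found = 1;
                }
            }
        }
        level[0] = best;                /* the pruned survivor of this level */
        nlevel = 1;
        printf("level %d: relmask=0x%x cost=%.1f\n", lev, best.relmask, best.cost);
    }
    return 0;
}
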
+/*
+ * find-paths--
+ *      Finds all possible access paths for executing a query, returning the
+ *      top level list of relation entries.
+ *
+ * 'rels' is the list of single relation entries appearing in the query
+ */
+List *
+find_paths(Query *root, List *rels)
+{
+    int levels_left;
+
+    /*
+     * Set the number of join (not nesting) levels yet to be processed.
+     */
+    levels_left = length(rels);
+
+    if (levels_left <= 0)
+        return NIL;
+
+    /*
+     * Find the base relation paths.
+     */
+    find_rel_paths(root, rels);
+
+    if (levels_left <= 1) {
+        /*
+         * Unsorted single relation, no more processing is required.
+         */
+        return (rels);
+    }else {
+        /*
+         * this means that joins or sorts are required.
+         * set selectivities of clauses that have not been set
+         * by an index.
+         */
+        set_rest_relselec(root, rels);
+
+        return(find_join_paths(root, rels, levels_left-1));
+    }
+}
+
+/*
+ * find-rel-paths--
+ *      Finds all paths available for scanning each relation entry in
+ *      'rels'.  Sequential scan and any available indices are considered
+ *      if possible (indices are not considered for lower nesting levels).
+ *      All unique paths are attached to the relation's 'pathlist' field.
+ *
+ *      MODIFIES: rels
+ */
+static void
+find_rel_paths(Query *root, List *rels)
+{
+    List *temp;
+    Rel *rel;
+    List *lastpath;
+
+    foreach(temp, rels) {
+        List *sequential_scan_list;
+        List *rel_index_scan_list;
+        List *or_index_scan_list;
+
+        rel = (Rel *)lfirst(temp);
+        sequential_scan_list = lcons(create_seqscan_path(rel),
+                                     NIL);
+
+        rel_index_scan_list =
+            find_index_paths(root,
+                             rel,
+                             find_relation_indices(root,rel),
+                             rel->clauseinfo,
+                             rel->joininfo);
+
+        or_index_scan_list =
+            create_or_index_paths(root, rel, rel->clauseinfo);
+
+        rel->pathlist = add_pathlist(rel,
+                                     sequential_scan_list,
+                                     append(rel_index_scan_list,
+                                            or_index_scan_list));
+
+        /* The unordered path is always the last in the list.
+         * If it is not the cheapest path, prune it.
+         */
+        lastpath = rel->pathlist;
+        while(lnext(lastpath)!=NIL)
+            lastpath=lnext(lastpath);
+        prune_rel_path(rel, (Path*)lfirst(lastpath));
+        /*
+         * if there is a qualification for the sequential scan, the
+         * selectivity value is not set -- so set it explicitly -- Sunita
+         */
+        set_rest_selec(root, rel->clauseinfo);
+        rel->size = compute_rel_size(rel);
+        rel->width = compute_rel_width(rel);
+    }
+    return;
+}
+
+/*
+ * find-join-paths--
+ *      Find all possible joinpaths for a query by successively finding ways
+ *      to join single relations into join relations.
+ *
+ *      if BushyPlanFlag is set, bushy tree plans will be generated:
+ *      Find all possible joinpaths (bushy trees) for a query by systematically
+ *      finding ways to join relations (both original and derived) together.
+ *
+ * 'outer-rels' is the current list of relations for which join paths
+ *              are to be found, i.e., the current list of relations that
+ *              have already been derived.
+ * 'levels-left' is the current join level being processed, where '1' is
+ *              the "last" level
+ *
+ * Returns the final level of join relations, i.e., the relation that is
+ * the result of joining all the original relations together.
+ */
+static List *
+find_join_paths(Query *root, List *outer_rels, int levels_left)
+{
+    List *x;
+    List *new_rels;
+    Rel *rel;
+
+    /*
+     * Determine all possible pairs of relations to be joined at this level.
+     * Determine paths for joining these relation pairs and modify 'new-rels'
+     * accordingly, then eliminate redundant join relations.
+     */
+    new_rels = find_join_rels(root, outer_rels);
+
+    find_all_join_paths(root, new_rels);
+
+    new_rels = prune_joinrels(new_rels);
+
+#if 0
+    /*
+    ** for each expensive predicate in each path in each distinct rel,
+    ** consider doing pullup  -- JMH
+    */
+    if (XfuncMode != XFUNC_NOPULL && XfuncMode != XFUNC_OFF)
+        foreach(x, new_rels)
+            xfunc_trypullup((Rel*)lfirst(x));
+#endif
+
+    prune_rel_paths(new_rels);
+
+    if(BushyPlanFlag) {
+        /*
+         * In the case of bushy trees,
+         * if there is still a join between a join relation and another
+         * relation, add a new joininfo that involves the join relation
+         * to the joininfo list of the other relation
+         */
+        add_new_joininfos(root, new_rels,outer_rels);
+    }
+
+    foreach(x, new_rels) {
+        rel = (Rel*)lfirst(x);
+        rel->size = compute_rel_size(rel);
+        rel->width = compute_rel_width(rel);
+
+/*#define OPTIMIZER_DEBUG*/
+#ifdef OPTIMIZER_DEBUG
+        printf("levels left: %d\n", levels_left);
+        debug_print_rel(root, rel);
+#endif
+    }
+
+    if(BushyPlanFlag) {
+        /*
+         * prune rels that have been completely incorporated into
+         * new join rels
+         */
+        outer_rels = prune_oldrels(outer_rels);
+        /*
+         * merge join rels if they contain the same list of base rels
+         */
+        outer_rels = merge_joinrels(new_rels,outer_rels);
+        root->join_relation_list_ = outer_rels;
+    }
+    else {
+        root->join_relation_list_ = new_rels;
+    }
+
+    if(levels_left == 1) {
+        if(BushyPlanFlag)
+            return(final_join_rels(outer_rels));
+        else
+            return(new_rels);
+    } else {
+        if(BushyPlanFlag)
+            return(find_join_paths(root, outer_rels, levels_left - 1));
+        else
+            return(find_join_paths(root, new_rels, levels_left - 1));
+    }
+}
+
+/*****************************************************************************
+ *
+ *****************************************************************************/
+
+static void
+print_joinclauses(Query *root, List *clauses)
+{
+    List *l;
+    extern void print_expr(Node *expr, List *rtable); /* in print.c */
+
+    foreach(l, clauses) {
+        CInfo *c = lfirst(l);
+
+        print_expr((Node*)c->clause, root->rtable);
+        if (lnext(l)) printf(" ");
+    }
+}
+
+void
+print_path(Query *root, Path *path, int indent)
+{
+    char *ptype = NULL;
+    JoinPath *jp;
+    bool join;
+    int i;
+
+    for(i=0; i < indent; i++)
+        printf("\t");
+
+    switch(nodeTag(path)) {
+    case T_Path:
+        ptype = "SeqScan"; join=false; break;
+    case T_IndexPath:
+        ptype = "IdxScan"; join=false; break;
+    case T_JoinPath:
+        ptype = "Nestloop"; join=true; break;
+    case T_MergePath:
+        ptype = "MergeJoin"; join=true; break;
+    case T_HashPath:
+        ptype = "HashJoin"; join=true; break;
+    default:
+        break;
+    }
+    if (join) {
+        int size = path->parent->size;
+        jp = (JoinPath*)path;
+        printf("%s size=%d cost=%f\n", ptype, size, path->path_cost);
+        switch(nodeTag(path)) {
+        case T_MergePath:
+        case T_HashPath:
+            for(i=0; i < indent+1; i++)
+                printf("\t");
+            printf(" clauses=(");
+            print_joinclauses(root,
+                              ((JoinPath*)path)->pathclauseinfo);
+            printf(")\n");
+
+            if (nodeTag(path)==T_MergePath) {
+                MergePath *mp = (MergePath*)path;
+                if (mp->outersortkeys || mp->innersortkeys) {
+                    for(i=0; i < indent+1; i++)
+                        printf("\t");
+                    printf(" sortouter=%d sortinner=%d\n",
+                           ((mp->outersortkeys)?1:0),
+                           ((mp->innersortkeys)?1:0));
+                }
+            }
+            break;
+        default:
+            break;
+        }
+        print_path(root, jp->outerjoinpath, indent+1);
+        print_path(root, jp->innerjoinpath, indent+1);
+    } else {
+        int size = path->parent->size;
+        int relid = lfirsti(path->parent->relids);
+        printf("%s(%d) size=%d cost=%f",
+               ptype, relid, size, path->path_cost);
+
+        if (nodeTag(path)==T_IndexPath) {
+            List
*k, *l; + + printf(" keys="); + foreach (k, path->keys) { + printf("("); + foreach (l, lfirst(k)) { + Var *var = lfirst(l); + printf("%d.%d", var->varnoold, var->varoattno); + if (lnext(l)) printf(", "); + } + printf(")"); + if (lnext(k)) printf(", "); + } + } + printf("\n"); + } +} + +#ifdef OPTIMIZER_DEBUG +static void +debug_print_rel(Query *root, Rel *rel) +{ + List *l; + + printf("("); + foreach(l, rel->relids) { + printf("%d ", lfirsti(l)); + } + printf("): size=%d width=%d\n", rel->size, rel->width); + + printf("\tpath list:\n"); + foreach (l, rel->pathlist) { + print_path(root, lfirst(l), 1); + } + printf("\tcheapest path:\n"); + print_path(root, rel->cheapestpath, 1); +} +#endif /* OPTIMIZER_DEBUG */ diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c new file mode 100644 index 00000000000..634e1130794 --- /dev/null +++ b/src/backend/optimizer/path/clausesel.c @@ -0,0 +1,331 @@ +/*------------------------------------------------------------------------- + * + * clausesel.c-- + * Routines to compute and set clause selectivities + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/clausesel.c,v 1.1.1.1 1996/07/09 06:21:35 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" +#include "nodes/primnodes.h" + +#include "optimizer/internal.h" +#include "optimizer/clauses.h" +#include "optimizer/clauseinfo.h" +#include "optimizer/cost.h" +#include "optimizer/plancat.h" + +#include "parser/parsetree.h" /* for getrelid() */ + +#include "catalog/pg_proc.h" +#include "catalog/pg_operator.h" + +#include "utils/elog.h" +#include "utils/lsyscache.h" + +static Cost compute_selec(Query *root, List *clauses, List *or_selectivities); + +/**************************************************************************** + * ROUTINES TO SET CLAUSE SELECTIVITIES + ****************************************************************************/ + +/* + * set_clause_selectivities - + * Sets the selectivity field for each of clause in 'clauseinfo-list' + * to 'new-selectivity'. If the selectivity has already been set, reset + * it only if the new one is better. + * + * Returns nothing of interest. + * + */ +void +set_clause_selectivities(List *clauseinfo_list, Cost new_selectivity) +{ + List *temp; + CInfo *clausenode; + Cost cost_clause; + + foreach (temp,clauseinfo_list) { + clausenode = (CInfo*)lfirst(temp); + cost_clause = clausenode->selectivity; + if ( FLOAT_IS_ZERO(cost_clause) || new_selectivity < cost_clause) { + clausenode->selectivity = new_selectivity; + } + } +} + +/* + * product_selec - + * Multiplies the selectivities of each clause in 'clauseinfo-list'. + * + * Returns a flonum corresponding to the selectivity of 'clauseinfo-list'. + */ +Cost +product_selec(List *clauseinfo_list) +{ + Cost result = 1.0; + if (clauseinfo_list!=NIL) { + List *xclausenode = NIL; + Cost temp; + + foreach(xclausenode,clauseinfo_list) { + temp = ((CInfo *)lfirst(xclausenode))->selectivity; + result = result * temp; + } + } + return(result); +} + +/* + * set_rest_relselec - + * Scans through clauses on each relation and assigns a selectivity to + * those clauses that haven't been assigned a selectivity by an index. + * + * Returns nothing of interest. + * MODIFIES: selectivities of the various rel's clauseinfo + * slots. 
+ */ +void +set_rest_relselec(Query *root, List *rel_list) +{ + Rel *rel; + List *x; + + foreach (x,rel_list) { + rel = (Rel*)lfirst(x); + set_rest_selec(root, rel->clauseinfo); + } +} + +/* + * set_rest_selec - + * Sets the selectivity fields for those clauses within a single + * relation's 'clauseinfo-list' that haven't already been set. + * + * Returns nothing of interest. + * + */ +void +set_rest_selec(Query *root, List *clauseinfo_list) +{ + List *temp = NIL; + CInfo *clausenode = (CInfo*)NULL; + Cost cost_clause; + + foreach (temp,clauseinfo_list) { + clausenode = (CInfo*)lfirst(temp); + cost_clause = clausenode->selectivity; + + /* + * Check to see if the selectivity of this clause or any 'or' + * subclauses (if any) haven't been set yet. + */ + if (valid_or_clause(clausenode) || FLOAT_IS_ZERO(cost_clause)) { + clausenode->selectivity = + compute_clause_selec(root, + (Node*)clausenode->clause, + lcons(makeFloat(cost_clause), NIL)); + } + } +} + +/**************************************************************************** + * ROUTINES TO COMPUTE SELECTIVITIES + ****************************************************************************/ + +/* + * compute_clause_selec - + * Given a clause, this routine will compute the selectivity of the + * clause by calling 'compute_selec' with the appropriate parameters + * and possibly use that return value to compute the real selectivity + * of a clause. + * + * 'or-selectivities' are selectivities that have already been assigned + * to subclauses of an 'or' clause. + * + * Returns a flonum corresponding to the clause selectivity. + * + */ +Cost +compute_clause_selec(Query *root, Node *clause, List *or_selectivities) +{ + if (!is_opclause (clause)) { + /* if it's not an operator clause, then it is a boolean clause -jolly*/ + /* + * Boolean variables get a selectivity of 1/2. + */ + return(0.1); + } else if (not_clause (clause)) { + /* + * 'not' gets "1.0 - selectivity-of-inner-clause". + */ + return (1.000000 - compute_selec(root, + lcons(get_notclausearg((Expr*)clause), + NIL), + or_selectivities)); + } else if (or_clause(clause)) { + /* + * Both 'or' and 'and' clauses are evaluated as described in + * (compute_selec). + */ + return (compute_selec(root, + ((Expr*)clause)->args, or_selectivities)); + } else { + return(compute_selec(root, + lcons(clause,NIL),or_selectivities)); + } +} + +/* + * compute_selec - + * Computes the selectivity of a clause. + * + * If there is more than one clause in the argument 'clauses', then the + * desired selectivity is that of an 'or' clause. Selectivities for an + * 'or' clause such as (OR a b) are computed by finding the selectivity + * of a (s1) and b (s2) and computing s1+s2 - s1*s2. + * + * In addition, if the clause is an 'or' clause, individual selectivities + * may have already been assigned by indices to subclauses. These values + * are contained in the list 'or-selectivities'. + * + * Returns the clause selectivity as a flonum. + * + */ +static Cost +compute_selec(Query *root, List *clauses, List *or_selectivities) +{ + Cost s1 = 0; + List *clause = lfirst(clauses); + + if (clauses==NULL) { + s1 = 1.0; + } else if (IsA(clause,Param)) { + /* XXX How're we handling this before?? -ay */ + s1 = 1.0; + } else if (IsA(clause,Const)) { + s1 = ((bool) ((Const*) clause)->constvalue) ? 1.0 : 0.0; + } else if (IsA(clause,Var)) { + Oid relid = getrelid(((Var*)clause)->varno, + root->rtable); + + /* + * we have a bool Var. 
This is exactly equivalent to the clause: + * reln.attribute = 't' + * so we compute the selectivity as if that is what we have. The + * magic #define constants are a hack. I didn't want to have to + * do system cache look ups to find out all of that info. + */ + + s1 = restriction_selectivity(EqualSelectivityProcedure, + BooleanEqualOperator, + relid, + ((Var*)clause)->varoattno, + "t", + _SELEC_CONSTANT_RIGHT_); + } else if (or_selectivities) { + /* If s1 has already been assigned by an index, use that value. */ + List *this_sel = lfirst(or_selectivities); + + s1 = floatVal(this_sel); + } else if (is_funcclause((Node*)clause)) { + /* this isn't an Oper, it's a Func!! */ + /* + ** This is not an operator, so we guess at the selectivity. + ** THIS IS A HACK TO GET V4 OUT THE DOOR. FUNCS SHOULD BE + ** ABLE TO HAVE SELECTIVITIES THEMSELVES. + ** -- JMH 7/9/92 + */ + s1 = 0.1; + } else if (NumRelids((Node*) clause) == 1) { + /* ...otherwise, calculate s1 from 'clauses'. + * The clause is not a join clause, since there is + * only one relid in the clause. The clause + * selectivity will be based on the operator + * selectivity and operand values. + */ + Oid opno = ((Oper*)((Expr*)clause)->oper)->opno; + RegProcedure oprrest = get_oprrest(opno); + Oid relid; + int relidx; + AttrNumber attno; + Datum constval; + int flag; + + get_relattval((Node*)clause, &relidx, &attno, &constval, &flag); + relid = getrelid(relidx, root->rtable); + + /* if the oprrest procedure is missing for whatever reason, + use a selectivity of 0.5*/ + if (!oprrest) + s1 = (Cost) (0.5); + else + if (attno == InvalidAttrNumber) { + /* attno can be Invalid if the clause had a function in it, + i.e. WHERE myFunc(f) = 10 */ + /* this should be FIXED somehow to use function selectivity */ + s1 = (Cost) (0.5); + } else + s1 = (Cost) restriction_selectivity(oprrest, + opno, + relid, + attno, + (char *)constval, + flag); + + } else { + /* The clause must be a join clause. The clause + * selectivity will be based on the relations to be + * scanned and the attributes they are to be joined + * on. + */ + Oid opno = ((Oper*)((Expr*)clause)->oper)->opno; + RegProcedure oprjoin = get_oprjoin (opno); + int relid1, relid2; + AttrNumber attno1, attno2; + + get_rels_atts((Node*)clause, &relid1, &attno1, &relid2, &attno2); + relid1 = getrelid(relid1, root->rtable); + relid2 = getrelid(relid2, root->rtable); + + /* if the oprjoin procedure is missing for whatever reason, + use a selectivity of 0.5*/ + if (!oprjoin) + s1 = (Cost) (0.5); + else + s1 = (Cost) join_selectivity(oprjoin, + opno, + relid1, + attno1, + relid2, + attno2); + } + + /* A null clause list eliminates no tuples, so return a selectivity + * of 1.0. If there is only one clause, the selectivity is not + * that of an 'or' clause, but rather that of the single clause. + */ + + if (length (clauses) < 2) { + return(s1); + } else { + /* Compute selectivity of the 'or'ed subclauses. */ + /* Added check for taking lnext(NIL). 
-- JMH 3/9/92 */ + Cost s2; + + if (or_selectivities != NIL) + s2 = compute_selec(root, lnext(clauses), lnext(or_selectivities)); + else + s2 = compute_selec(root, lnext(clauses), NIL); + return(s1 + s2 - s1 * s2); + } +} + diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c new file mode 100644 index 00000000000..14bbb5f8e4f --- /dev/null +++ b/src/backend/optimizer/path/costsize.c @@ -0,0 +1,456 @@ +/*------------------------------------------------------------------------- + * + * costsize.c-- + * Routines to compute (and set) relation sizes and path costs + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/costsize.c,v 1.1.1.1 1996/07/09 06:21:35 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#ifdef WIN32 +#include +#include +#define MAXINT INT_MAX +#else +# if defined(PORTNAME_BSD44_derived) || defined(PORTNAME_bsdi) +# include +# define MAXINT INT_MAX +# else +# include +# endif /* !PORTNAME_BSD44_derived */ +#endif /* WIN32 */ + +#include "postgres.h" + +#include "nodes/relation.h" + +#include "optimizer/cost.h" +#include "optimizer/internal.h" +#include "optimizer/keys.h" +#include "optimizer/tlist.h" + +#include "storage/bufmgr.h" /* for BLCKSZ */ + +static int compute_attribute_width(TargetEntry *tlistentry); +static double base_log(double x, double b); + +int _disable_cost_ = 30000000; + +bool _enable_seqscan_ = true; +bool _enable_indexscan_ = true; +bool _enable_sort_ = true; +bool _enable_hash_ = true; +bool _enable_nestloop_ = true; +bool _enable_mergesort_ = true; +bool _enable_hashjoin_ = true; + +/* + * cost_seqscan-- + * Determines and returns the cost of scanning a relation sequentially. + * If the relation is a temporary to be materialized from a query + * embedded within a data field (determined by 'relid' containing an + * attribute reference), then a predetermined constant is returned (we + * have NO IDEA how big the result of a POSTQUEL procedure is going to + * be). + * + * disk = p + * cpu = *CPU-PAGE-WEIGHT* * t + * + * 'relid' is the relid of the relation to be scanned + * 'relpages' is the number of pages in the relation to be scanned + * (as determined from the system catalogs) + * 'reltuples' is the number of tuples in the relation to be scanned + * + * Returns a flonum. + * + */ +Cost +cost_seqscan(int relid, int relpages, int reltuples) +{ + Cost temp = 0; + + if ( !_enable_seqscan_ ) + temp += _disable_cost_; + + if (relid < 0) { + /* + * cost of sequentially scanning a materialized temporary relation + */ + temp += _TEMP_SCAN_COST_; + } else { + temp += relpages; + temp += _CPU_PAGE_WEIGHT_ * reltuples; + } + Assert(temp >= 0); + return(temp); +} + + +/* + * cost_index-- + * Determines and returns the cost of scanning a relation using an index. + * + * disk = expected-index-pages + expected-data-pages + * cpu = *CPU-PAGE-WEIGHT* * + * (expected-index-tuples + expected-data-tuples) + * + * 'indexid' is the index OID + * 'expected-indexpages' is the number of index pages examined in the scan + * 'selec' is the selectivity of the index + * 'relpages' is the number of pages in the main relation + * 'reltuples' is the number of tuples in the main relation + * 'indexpages' is the number of pages in the index relation + * 'indextuples' is the number of tuples in the index relation + * + * Returns a flonum. 
+ * + */ +Cost +cost_index(Oid indexid, + int expected_indexpages, + Cost selec, + int relpages, + int reltuples, + int indexpages, + int indextuples, + bool is_injoin) +{ + Cost temp; + Cost temp2; + + temp = temp2 = (Cost) 0; + + if (!_enable_indexscan_ && !is_injoin) + temp += _disable_cost_; + + /* expected index relation pages */ + temp += expected_indexpages; + + /* about one base relation page */ + temp += Min(relpages,(int)ceil((double)selec*indextuples)); + + /* + * per index tuple + */ + temp2 += selec * indextuples; + temp2 += selec * reltuples; + + temp = temp + (_CPU_PAGE_WEIGHT_ * temp2); + Assert(temp >= 0); + return(temp); +} + +/* + * cost_sort-- + * Determines and returns the cost of sorting a relation by considering + * 1. the cost of doing an external sort: XXX this is probably too low + * disk = (p lg p) + * cpu = *CPU-PAGE-WEIGHT* * (t lg t) + * 2. the cost of reading the sort result into memory (another seqscan) + * unless 'noread' is set + * + * 'keys' is a list of sort keys + * 'tuples' is the number of tuples in the relation + * 'width' is the average tuple width in bytes + * 'noread' is a flag indicating that the sort result can remain on disk + * (i.e., the sort result is the result relation) + * + * Returns a flonum. + * + */ +Cost +cost_sort(List *keys, int tuples, int width, bool noread) +{ + Cost temp = 0; + int npages = page_size (tuples,width); + Cost pages = (Cost)npages; + Cost numTuples = tuples; + + if ( !_enable_sort_ ) + temp += _disable_cost_ ; + if (tuples == 0 || keys==NULL) + { + Assert(temp >= 0); + return(temp); + } + temp += pages * base_log((double)pages, (double)2.0); + + /* + * could be base_log(pages, NBuffers), but we are only doing 2-way merges + */ + temp += _CPU_PAGE_WEIGHT_ * + numTuples * base_log((double)pages,(double)2.0); + + if( !noread ) + temp = temp + cost_seqscan(_TEMP_RELATION_ID_, npages, tuples); + Assert(temp >= 0); + + return(temp); +} + + +/* + * cost_result-- + * Determines and returns the cost of writing a relation of 'tuples' + * tuples of 'width' bytes out to a result relation. + * + * Returns a flonum. + * + */ +Cost +cost_result(int tuples, int width) +{ + Cost temp =0; + temp = temp + page_size(tuples,width); + temp = temp + _CPU_PAGE_WEIGHT_ * tuples; + Assert(temp >= 0); + return(temp); +} + +/* + * cost_nestloop-- + * Determines and returns the cost of joining two relations using the + * nested loop algorithm. + * + * 'outercost' is the (disk+cpu) cost of scanning the outer relation + * 'innercost' is the (disk+cpu) cost of scanning the inner relation + * 'outertuples' is the number of tuples in the outer relation + * + * Returns a flonum. + * + */ +Cost +cost_nestloop(Cost outercost, + Cost innercost, + int outertuples, + int innertuples, + int outerpages, + bool is_indexjoin) +{ + Cost temp =0; + + if ( !_enable_nestloop_ ) + temp += _disable_cost_; + temp += outercost; + temp += outertuples * innercost; + Assert(temp >= 0); + + return(temp); +} + +/* + * cost_mergesort-- + * 'outercost' and 'innercost' are the (disk+cpu) costs of scanning the + * outer and inner relations + * 'outersortkeys' and 'innersortkeys' are lists of the keys to be used + * to sort the outer and inner relations + * 'outertuples' and 'innertuples' are the number of tuples in the outer + * and inner relations + * 'outerwidth' and 'innerwidth' are the (typical) widths (in bytes) + * of the tuples of the outer and inner relations + * + * Returns a flonum. 
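cost_sort() above charges pages * log2(pages) for the external two-way merge, plus *CPU-PAGE-WEIGHT* * tuples * log2(pages) of CPU, plus (unless noread is set) a cost_seqscan() of the temporary sort result. A rough standalone sketch, with stand-in values for the weight and for the read-back term:

    #include <math.h>
    #include <stdio.h>

    /* Illustrative sketch of the 2-way external sort cost used above.
     * CPU_PAGE_WEIGHT and TEMP_READBACK_COST are stand-ins; the real code
     * charges cost_seqscan() of the temporary relation for the read-back. */
    #define CPU_PAGE_WEIGHT    0.033   /* assumed value */
    #define TEMP_READBACK_COST 100.0   /* assumed value */

    static double log2d(double x) { return log(x) / log(2.0); }

    static double sort_cost(int pages, int tuples, int noread)
    {
        double cost = pages * log2d((double) pages)
                    + CPU_PAGE_WEIGHT * tuples * log2d((double) pages);
        if (!noread)
            cost += TEMP_READBACK_COST;   /* reading the sort result back in */
        return cost;
    }

    int main(void)
    {
        printf("%.1f\n", sort_cost(128, 10000, 0));  /* 128*7 + 0.033*10000*7 + 100 */
        return 0;
    }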
+ * + */ +Cost +cost_mergesort(Cost outercost, + Cost innercost, + List *outersortkeys, + List *innersortkeys, + int outersize, + int innersize, + int outerwidth, + int innerwidth) +{ + Cost temp = 0; + + if ( !_enable_mergesort_ ) + temp += _disable_cost_; + + temp += outercost; + temp += innercost; + temp += cost_sort(outersortkeys,outersize,outerwidth,false); + temp += cost_sort(innersortkeys,innersize,innerwidth,false); + temp += _CPU_PAGE_WEIGHT_ * (outersize + innersize); + Assert(temp >= 0); + + return(temp); +} + +/* + * cost_hashjoin-- XXX HASH + * 'outercost' and 'innercost' are the (disk+cpu) costs of scanning the + * outer and inner relations + * 'outerkeys' and 'innerkeys' are lists of the keys to be used + * to hash the outer and inner relations + * 'outersize' and 'innersize' are the number of tuples in the outer + * and inner relations + * 'outerwidth' and 'innerwidth' are the (typical) widths (in bytes) + * of the tuples of the outer and inner relations + * + * Returns a flonum. + */ +Cost +cost_hashjoin(Cost outercost, + Cost innercost, + List *outerkeys, + List *innerkeys, + int outersize, + int innersize, + int outerwidth, + int innerwidth) +{ + Cost temp = 0; + int outerpages = page_size (outersize,outerwidth); + int innerpages = page_size (innersize,innerwidth); + int nrun = ceil((double)outerpages/(double)NBuffers); + + if (outerpages < innerpages) + return _disable_cost_; + if ( !_enable_hashjoin_ ) + temp += _disable_cost_; +/* temp += outercost + (nrun + 1) * innercost; */ + /* + the innercost shouldn't be used it. Instead the + cost of hashing the innerpath should be used + + ASSUME innercost is 1 for now -- a horrible hack + - jolly + */ + temp += outercost + (nrun + 1); + + temp += _CPU_PAGE_WEIGHT_ * (outersize + nrun * innersize); + Assert(temp >= 0); + + return(temp); +} + +/* + * compute-rel-size-- + * Computes the size of each relation in 'rel-list' (after applying + * restrictions), by multiplying the selectivity of each restriction + * by the original size of the relation. + * + * Sets the 'size' field for each relation entry with this computed size. + * + * Returns the size. + */ +int compute_rel_size(Rel *rel) +{ + Cost temp; + int temp1; + + temp = rel->tuples * product_selec(rel->clauseinfo); + Assert(temp >= 0); + if (temp >= (MAXINT - 1)) { + temp1 = MAXINT; + } else { + temp1 = ceil((double) temp); + } + Assert(temp1 >= 0); + Assert(temp1 <= MAXINT); + return(temp1); +} + +/* + * compute-rel-width-- + * Computes the width in bytes of a tuple from 'rel'. + * + * Returns the width of the tuple as a fixnum. + */ +int +compute_rel_width(Rel *rel) +{ + return (compute_targetlist_width(get_actual_tlist(rel->targetlist))); +} + +/* + * compute-targetlist-width-- + * Computes the width in bytes of a tuple made from 'targetlist'. + * + * Returns the width of the tuple as a fixnum. + */ +int +compute_targetlist_width(List *targetlist) +{ + List *temp_tl; + int tuple_width = 0; + + foreach (temp_tl, targetlist) { + tuple_width = tuple_width + + compute_attribute_width(lfirst(temp_tl)); + } + return(tuple_width); +} + +/* + * compute-attribute-width-- + * Given a target list entry, find the size in bytes of the attribute. + * + * If a field is variable-length, it is assumed to be at least the size + * of a TID field. + * + * Returns the width of the attribute as a fixnum. 
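cost_hashjoin() above is driven by the batch count: the outer relation is split into nrun = ceil(outerpages / NBuffers) runs, and the inner input is effectively processed once per run, giving a tuple term of *CPU-PAGE-WEIGHT* * (outersize + nrun * innersize). A small sketch of that batch arithmetic, with assumed values for NBuffers and the weight:

    #include <math.h>
    #include <stdio.h>

    /* Illustrative sketch of the hashjoin batching term used above.
     * NBUFFERS and CPU_PAGE_WEIGHT are stand-in values, not the planner's. */
    #define NBUFFERS        64      /* assumed buffer-pool size */
    #define CPU_PAGE_WEIGHT 0.033   /* assumed value */

    static double hashjoin_tuple_cost(int outerpages, int outersize, int innersize)
    {
        int nrun = (int) ceil((double) outerpages / (double) NBUFFERS);
        return CPU_PAGE_WEIGHT * ((double) outersize + (double) nrun * innersize);
    }

    int main(void)
    {
        /* 200 outer pages against a 64-buffer pool -> 4 runs over the inner input */
        printf("%.1f\n", hashjoin_tuple_cost(200, 50000, 20000));
        return 0;
    }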
+ */ +static int +compute_attribute_width(TargetEntry *tlistentry) +{ + int width = get_typlen(tlistentry->resdom->restype); + if (width < 0) + return(_DEFAULT_ATTRIBUTE_WIDTH_); + else + return(width); +} + +/* + * compute-joinrel-size-- + * Computes the size of the join relation 'joinrel'. + * + * Returns a fixnum. + */ +int +compute_joinrel_size(JoinPath *joinpath) +{ + Cost temp = 1.0; + int temp1 = 0; + + temp *= ((Path*)joinpath->outerjoinpath)->parent->size; + temp *= ((Path*)joinpath->innerjoinpath)->parent->size; + + temp = temp * product_selec(joinpath->pathclauseinfo); + if (temp >= (MAXINT -1)) { + temp1 = MAXINT; + } else { + /* should be ceil here, we don't want joinrel size's of one, do we? */ + temp1 = ceil((double)temp); + } + Assert(temp1 >= 0); + + return(temp1); +} + +/* + * page-size-- + * Returns an estimate of the number of pages covered by a given + * number of tuples of a given width (size in bytes). + */ +int page_size(int tuples, int width) +{ + int temp =0; + + temp = ceil((double)(tuples * (width + sizeof(HeapTupleData))) + / BLCKSZ); + Assert(temp >= 0); + return(temp); +} + +static double +base_log(double x, double b) +{ + return(log(x)/log(b)); +} diff --git a/src/backend/optimizer/path/hashutils.c b/src/backend/optimizer/path/hashutils.c new file mode 100644 index 00000000000..cdbd9b6d901 --- /dev/null +++ b/src/backend/optimizer/path/hashutils.c @@ -0,0 +1,120 @@ +/*------------------------------------------------------------------------- + * + * hashutils.c-- + * Utilities for finding applicable merge clauses and pathkeys + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/Attic/hashutils.c,v 1.1.1.1 1996/07/09 06:21:35 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "nodes/pg_list.h" +#include "nodes/relation.h" + +#include "optimizer/internal.h" +#include "optimizer/paths.h" +#include "optimizer/clauses.h" + + +static HInfo *match_hashop_hashinfo(Oid hashop, List *hashinfo_list); + +/* + * group-clauses-by-hashop-- + * If a join clause node in 'clauseinfo-list' is hashjoinable, store + * it within a hashinfo node containing other clause nodes with the same + * hash operator. + * + * 'clauseinfo-list' is the list of clauseinfo nodes + * 'inner-relid' is the relid of the inner join relation + * + * Returns the new list of hashinfo nodes. + * + */ +List * +group_clauses_by_hashop(List *clauseinfo_list, + int inner_relid) +{ + List *hashinfo_list = NIL; + CInfo *clauseinfo = (CInfo*)NULL; + List *i = NIL; + Oid hashjoinop = 0; + + foreach (i,clauseinfo_list) { + clauseinfo = (CInfo*)lfirst(i); + hashjoinop = clauseinfo->hashjoinoperator; + + /* + * Create a new hashinfo node and add it to 'hashinfo-list' if one + * does not yet exist for this hash operator. 
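page_size() above estimates the page count as ceil(tuples * (width + sizeof(HeapTupleData)) / BLCKSZ), with BLCKSZ coming from storage/bufmgr.h per the include comment earlier in this file. A standalone sketch of the same estimate; the block size and per-tuple header size below are assumptions, since neither value appears in this hunk:

    #include <math.h>
    #include <stdio.h>

    /* Illustrative page-count estimate mirroring page_size() above.
     * BLOCK_SIZE and TUPLE_HEADER are assumed values for the sketch. */
    #define BLOCK_SIZE   8192   /* assumed BLCKSZ */
    #define TUPLE_HEADER 40     /* assumed sizeof(HeapTupleData) */

    static int estimate_pages(int tuples, int width)
    {
        return (int) ceil((double) tuples * (width + TUPLE_HEADER) / BLOCK_SIZE);
    }

    int main(void)
    {
        /* 10000 tuples of 100 data bytes each -> ceil(1400000 / 8192) = 171 pages */
        printf("%d\n", estimate_pages(10000, 100));
        return 0;
    }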
+ */ + if (hashjoinop ) { + HInfo *xhashinfo = (HInfo*)NULL; + Expr *clause = clauseinfo->clause; + Var *leftop = get_leftop(clause); + Var *rightop = get_rightop(clause); + JoinKey *keys = (JoinKey*)NULL; + + xhashinfo = + match_hashop_hashinfo(hashjoinop,hashinfo_list); + + if (inner_relid == leftop->varno){ + keys = makeNode(JoinKey); + keys->outer = rightop; + keys->inner = leftop; + } else { + keys = makeNode(JoinKey); + keys->outer = leftop; + keys->inner = rightop; + } + + if (xhashinfo==NULL) { + xhashinfo = makeNode(HInfo); + xhashinfo->hashop = hashjoinop; + + xhashinfo->jmethod.jmkeys = NIL; + xhashinfo->jmethod.clauses = NIL; + + /* XXX was push */ + hashinfo_list = lappend(hashinfo_list,xhashinfo); + hashinfo_list = nreverse(hashinfo_list); + } + + xhashinfo->jmethod.clauses = + lcons(clause, xhashinfo->jmethod.clauses); + + xhashinfo->jmethod.jmkeys = + lcons(keys, xhashinfo->jmethod.jmkeys); + } + } + return(hashinfo_list); +} + + +/* + * match-hashop-hashinfo-- + * Searches the list 'hashinfo-list' for a hashinfo node whose hash op + * field equals 'hashop'. + * + * Returns the node if it exists. + * + */ +static HInfo * +match_hashop_hashinfo(Oid hashop, List *hashinfo_list) +{ + Oid key = 0; + HInfo *xhashinfo = (HInfo*)NULL; + List *i = NIL; + + foreach( i, hashinfo_list) { + xhashinfo = (HInfo*)lfirst(i); + key = xhashinfo->hashop; + if (hashop == key) { /* found */ + return(xhashinfo); /* should be a hashinfo node ! */ + } + } + return((HInfo*)NIL); +} diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c new file mode 100644 index 00000000000..844571847f9 --- /dev/null +++ b/src/backend/optimizer/path/indxpath.c @@ -0,0 +1,1206 @@ +/*------------------------------------------------------------------------- + * + * indxpath.c-- + * Routines to determine which indices are usable for scanning a + * given relation + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.1.1.1 1996/07/09 06:21:35 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" +#include "access/attnum.h" +#include "access/heapam.h" +#include "access/nbtree.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" + +#include "utils/lsyscache.h" +#include "utils/elog.h" + +#include "optimizer/internal.h" +#include "optimizer/paths.h" +#include "optimizer/clauses.h" +#include "optimizer/clauseinfo.h" +#include "optimizer/plancat.h" +#include "optimizer/keys.h" +#include "optimizer/cost.h" +#include "optimizer/pathnode.h" +#include "optimizer/xfunc.h" +#include "optimizer/ordering.h" + + +#include "catalog/catname.h" +#include "catalog/pg_amop.h" +#include "catalog/pg_proc.h" + +#include "executor/executor.h" +#include "parser/parsetree.h" /* for getrelid() */ + + +static void match_index_orclauses(Rel *rel, Rel *index, int indexkey, + int xclass, List *clauseinfo_list); +static bool match_index_to_operand(int indexkey, Expr *operand, + Rel *rel, Rel *index); +static List *match_index_orclause(Rel *rel, Rel *index, int indexkey, + int xclass, List *or_clauses, List *other_matching_indices); +static List *group_clauses_by_indexkey(Rel *rel, Rel *index, + int *indexkeys, Oid *classes, List *clauseinfo_list, + bool join); +static CInfo *match_clause_to_indexkey(Rel *rel, Rel *index, int indexkey, + int xclass, CInfo 
*clauseInfo, bool join); +static bool pred_test(List *predicate_list, List *clauseinfo_list, + List *joininfo_list); +static bool one_pred_test(Expr *predicate, List *clauseinfo_list); +static bool one_pred_clause_expr_test(Expr *predicate, Node *clause); +static bool one_pred_clause_test(Expr *predicate, Node *clause); +static bool clause_pred_clause_test(Expr *predicate, Node *clause); +static List *indexable_joinclauses(Rel *rel, Rel *index, List *joininfo_list); +static List *index_innerjoin(Query *root, Rel *rel, + List *clausegroup_list, Rel *index); +static List *create_index_paths(Query *root, Rel *rel, Rel *index, + List *clausegroup_list, bool join); +static List *add_index_paths(List *indexpaths, List *new_indexpaths); +static bool function_index_operand(Expr *funcOpnd, Rel *rel, Rel *index); +static bool SingleAttributeIndex(Rel *index); + +/* If Spyros can use a constant PRS2_BOOL_TYPEID, I can use this */ +#define BOOL_TYPEID ((Oid) 16) + +/* + * find-index-paths-- + * Finds all possible index paths by determining which indices in the + * list 'indices' are usable. + * + * To be usable, an index must match against either a set of + * restriction clauses or join clauses. + * + * Note that the current implementation requires that there exist + * matching clauses for every key in the index (i.e., no partial + * matches are allowed). + * + * If an index can't be used with restriction clauses, but its keys + * match those of the result sort order (according to information stored + * within 'sortkeys'), then the index is also considered. + * + * 'rel' is the relation entry to which these index paths correspond + * 'indices' is a list of possible index paths + * 'clauseinfo-list' is a list of restriction clauseinfo nodes for 'rel' + * 'joininfo-list' is a list of joininfo nodes for 'rel' + * 'sortkeys' is a node describing the result sort order (from + * (find_sortkeys)) + * + * Returns a list of index nodes. + * + */ +List * +find_index_paths (Query *root, + Rel *rel, + List *indices, + List *clauseinfo_list, + List *joininfo_list) +{ + List *scanclausegroups = NIL; + List *scanpaths = NIL; + Rel *index = (Rel *)NULL; + List *joinclausegroups = NIL; + List *joinpaths = NIL; + List *retval = NIL; + extern List *add_index_paths(); + + if(indices == NIL) + return(NULL); + + index = (Rel*)lfirst (indices); + + retval = find_index_paths(root, + rel, + lnext (indices), + clauseinfo_list, + joininfo_list); + + /* If this is a partial index, return if it fails the predicate test */ + if (index->indpred != NIL) + if (!pred_test(index->indpred, clauseinfo_list, joininfo_list)) + return retval; + + /* 1. If this index has only one key, try matching it against + * subclauses of an 'or' clause. The fields of the clauseinfo + * nodes are marked with lists of the matching indices no path + * are actually created. + * + * XXX NOTE: Currently btrees dos not support indices with + * > 1 key, so the following test will always be true for + * now but we have decided not to support index-scans + * on disjunction . -- lp + */ + if (SingleAttributeIndex(index)) + { + match_index_orclauses (rel, + index, + index->indexkeys[0], + index->classlist[0], + clauseinfo_list); + } + + /* + * 2. If the keys of this index match any of the available + * restriction clauses, then create pathnodes corresponding + * to each group of usable clauses. 
+ */ + scanclausegroups = group_clauses_by_indexkey(rel, + index, + index->indexkeys, + index->classlist, + clauseinfo_list, + false); + + scanpaths = NIL; + if (scanclausegroups != NIL) + scanpaths = create_index_paths (root, + rel, + index, + scanclausegroups, + false); + + /* + * 3. If this index can be used with any join clause, then + * create pathnodes for each group of usable clauses. An + * index can be used with a join clause if its ordering is + * useful for a mergejoin, or if the index can possibly be + * used for scanning the inner relation of a nestloop join. + */ + joinclausegroups = indexable_joinclauses(rel,index,joininfo_list); + joinpaths = NIL; + + if (joinclausegroups != NIL) + { + List *new_join_paths = create_index_paths(root, rel, + index, + joinclausegroups, + true); + List *innerjoin_paths = index_innerjoin(root, rel,joinclausegroups,index); + + rel->innerjoin = nconc (rel->innerjoin, innerjoin_paths); + joinpaths = new_join_paths; + } + + /* + * Some sanity checks to make sure that + * the indexpath is valid. + */ + if (joinpaths!=NULL) + retval = add_index_paths(joinpaths,retval); + if (scanpaths!=NULL) + retval = add_index_paths(scanpaths,retval); + + return retval; + +} + + +/**************************************************************************** + * ---- ROUTINES TO MATCH 'OR' CLAUSES ---- + ****************************************************************************/ + + +/* + * match-index-orclauses-- + * Attempt to match an index against subclauses within 'or' clauses. + * If the index does match, then the clause is marked with information + * about the index. + * + * Essentially, this adds 'index' to the list of indices in the + * ClauseInfo field of each of the clauses which it matches. + * + * 'rel' is the node of the relation on which the index is defined. + * 'index' is the index node. + * 'indexkey' is the (single) key of the index + * 'class' is the class of the operator corresponding to 'indexkey'. + * 'clauseinfo-list' is the list of available restriction clauses. + * + * Returns nothing. + * + */ +static void +match_index_orclauses(Rel *rel, + Rel *index, + int indexkey, + int xclass, + List *clauseinfo_list) +{ + CInfo *clauseinfo = (CInfo*)NULL; + List *i = NIL; + + foreach (i, clauseinfo_list) { + clauseinfo = (CInfo*)lfirst(i); + if (valid_or_clause(clauseinfo)) { + + /* Mark the 'or' clause with a list of indices which + * match each of its subclauses. The list is + * generated by adding 'index' to the existing + * list where appropriate. + */ + clauseinfo->indexids = + match_index_orclause (rel,index,indexkey, + xclass, + clauseinfo->clause->args, + clauseinfo->indexids); + } + } +} + +/* + * match_index_operand-- + * Generalize test for a match between an existing index's key + * and the operand on the rhs of a restriction clause. Now check + * for functional indices as well. + */ +static bool +match_index_to_operand(int indexkey, + Expr *operand, + Rel *rel, + Rel *index) +{ + /* + * Normal index. + */ + if (index->indproc == InvalidOid) + return match_indexkey_operand(indexkey, (Var*)operand, rel); + + /* + * functional index check + */ + return (function_index_operand(operand, rel, index)); +} + +/* + * match-index-orclause-- + * Attempts to match an index against the subclauses of an 'or' clause. + * + * A match means that: + * (1) the operator within the subclause can be used with one + * of the index's operator classes, and + * (2) there is a usable key that matches the variable within a + * sargable clause. 
+ * + * 'or-clauses' are the remaining subclauses within the 'or' clause + * 'other-matching-indices' is the list of information on other indices + * that have already been matched to subclauses within this + * particular 'or' clause (i.e., a list previously generated by + * this routine) + * + * Returns a list of the form ((a b c) (d e f) nil (g h) ...) where + * a,b,c are nodes of indices that match the first subclause in + * 'or-clauses', d,e,f match the second subclause, no indices + * match the third, g,h match the fourth, etc. + */ +static List * +match_index_orclause(Rel *rel, + Rel *index, + int indexkey, + int xclass, + List *or_clauses, + List *other_matching_indices) +{ + Node *clause = NULL; + List *matched_indices = other_matching_indices; + List *index_list = NIL; + List *clist; + List *ind; + + if (!matched_indices) + matched_indices = lcons(NIL, NIL); + + for (clist = or_clauses, ind = matched_indices; + clist; + clist = lnext(clist), ind = lnext(ind)) + { + clause = lfirst(clist); + if (is_opclause (clause) && + op_class(((Oper*)((Expr*)clause)->oper)->opno, + xclass, index->relam) && + match_index_to_operand(indexkey, + (Expr*)get_leftop((Expr*)clause), + rel, + index) && + IsA(get_rightop((Expr*)clause),Const)) { + + matched_indices = lcons(index, matched_indices); + index_list = lappend(index_list, + matched_indices); + } + } + return(index_list); + +} + +/**************************************************************************** + * ---- ROUTINES TO CHECK RESTRICTIONS ---- + ****************************************************************************/ + + +/* + * DoneMatchingIndexKeys() - MACRO + * + * Determine whether we should continue matching index keys in a clause. + * Depends on if there are more to match or if this is a functional index. + * In the latter case we stop after the first match since the there can + * be only key (i.e. the function's return value) and the attributes in + * keys list represent the arguments to the function. -mer 3 Oct. 1991 + */ +#define DoneMatchingIndexKeys(indexkeys, index) \ + (indexkeys[0] == 0 || \ + (index->indproc != InvalidOid)) + +/* + * group-clauses-by-indexkey-- + * Determines whether there are clauses which will match each and every + * one of the remaining keys of an index. + * + * 'rel' is the node of the relation corresponding to the index. + * 'indexkeys' are the remaining index keys to be matched. + * 'classes' are the classes of the index operators on those keys. + * 'clauses' is either: + * (1) the list of available restriction clauses on a single + * relation, or + * (2) a list of join clauses between 'rel' and a fixed set of + * relations, + * depending on the value of 'join'. + * 'startlist' is a list of those clause nodes that have matched the keys + * that have already been checked. + * 'join' is a flag indicating that the clauses being checked are join + * clauses. + * + * Returns all possible groups of clauses that will match (given that + * one or more clauses can match any of the remaining keys). + * E.g., if you have clauses A, B, and C, ((A B) (A C)) might be + * returned for an index with 2 keys. 
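The DoneMatchingIndexKeys() macro above ends the key-matching loop either when the zero terminator of the key array is reached or, for a functional index, after the first key (the single "key" being the function's result). A small sketch of that termination rule, with plain ints standing in for the index node:

    #include <stdio.h>

    /* Illustrative sketch of the loop-termination rule encoded by
     * DoneMatchingIndexKeys() above: advance through the zero-terminated key
     * array, but stop after one key for a functional index. */
    static int count_keys_to_match(const int *indexkeys /* zero-terminated */,
                                   int is_functional_index)
    {
        int n = 0;
        do {
            n++;                 /* advance to the next key after a match */
            indexkeys++;
        } while (indexkeys[0] != 0 && !is_functional_index);
        return n;
    }

    int main(void)
    {
        int keys[] = {1, 2, 3, 0};
        printf("%d %d\n", count_keys_to_match(keys, 0),   /* 3: all keys must match */
                          count_keys_to_match(keys, 1));  /* 1: functional index    */
        return 0;
    }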
+ * + */ +static List * +group_clauses_by_indexkey(Rel *rel, + Rel *index, + int *indexkeys, + Oid *classes, + List *clauseinfo_list, + bool join) +{ + List *curCinfo = NIL; + CInfo *matched_clause = (CInfo*)NULL; + List *clausegroup = NIL; + + + if (clauseinfo_list == NIL) + return NIL; + + foreach (curCinfo,clauseinfo_list) { + CInfo *temp = (CInfo*)lfirst(curCinfo); + int *curIndxKey = indexkeys; + Oid *curClass = classes; + + do { + /* + * If we can't find any matching clauses for the first of + * the remaining keys, give up. + */ + matched_clause = match_clause_to_indexkey (rel, + index, + curIndxKey[0], + curClass[0], + temp, + join); + if (!matched_clause) + break; + + clausegroup = lcons(matched_clause, clausegroup); + curIndxKey++; + curClass++; + + } while ( !DoneMatchingIndexKeys(curIndxKey, index) ); + } + + if (clausegroup != NIL) + return(lcons(clausegroup, NIL)); + return NIL; +} + +/* + * IndexScanableClause () MACRO + * + * Generalize condition on which we match a clause with an index. + * Now we can match with functional indices. + */ +#define IndexScanableOperand(opnd, indkeys, rel, index) \ + ((index->indproc == InvalidOid) ? \ + equal_indexkey_var(indkeys,opnd) : \ + function_index_operand((Expr*)opnd,rel,index)) + +/* + * match_clause_to-indexkey-- + * Finds the first of a relation's available restriction clauses that + * matches a key of an index. + * + * To match, the clause must: + * (1) be in the form (op var const) if the clause is a single- + * relation clause, and + * (2) contain an operator which is in the same class as the index + * operator for this key. + * + * If the clause being matched is a join clause, then 'join' is t. + * + * Returns a single clauseinfo node corresponding to the matching + * clause. + * + * NOTE: returns nil if clause is an or_clause. + * + */ +static CInfo * +match_clause_to_indexkey(Rel *rel, + Rel *index, + int indexkey, + int xclass, + CInfo *clauseInfo, + bool join) +{ + Expr *clause = clauseInfo->clause; + Var *leftop, *rightop; + Oid join_op = InvalidOid; + bool isIndexable = false; + + if (or_clause((Node*)clause) || + not_clause((Node*)clause) || single_node((Node*)clause)) + return ((CInfo*)NULL); + + leftop = get_leftop(clause); + rightop = get_rightop(clause); + /* + * If this is not a join clause, check for clauses of the form: + * (operator var/func constant) and (operator constant var/func) + */ + if (!join) + { + Oid restrict_op = InvalidOid; + + /* + * Check for standard s-argable clause + */ + if (IsA(rightop,Const)) + { + restrict_op = ((Oper*)((Expr*)clause)->oper)->opno; + isIndexable = + ( op_class(restrict_op, xclass, index->relam) && + IndexScanableOperand(leftop, + indexkey, + rel, + index) ); + } + + /* + * Must try to commute the clause to standard s-arg format. + */ + else if (IsA(leftop,Const)) + { + restrict_op = + get_commutator(((Oper*)((Expr*)clause)->oper)->opno); + + if ( (restrict_op != InvalidOid) && + op_class(restrict_op, xclass, index->relam) && + IndexScanableOperand(rightop, + indexkey,rel,index) ) + { + isIndexable = true; + /* + * In place list modification. + * (op const var/func) -> (op var/func const) + */ + /* BUG! Old version: + CommuteClause(clause, restrict_op); + */ + CommuteClause((Node*)clause); + } + } + } + /* + * Check for an indexable scan on one of the join relations. 
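When a restriction is written with the constant on the left, match_clause_to_indexkey() above looks up the operator's commutator and commutes the clause into the standard sargable form (var op constant) before matching it against the index key. A toy sketch of that rewrite; the operator table is a stand-in for the real get_commutator() catalog lookup:

    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-in for get_commutator(): map an operator to the
     * operator obtained by swapping its operands. */
    static const char *commutator(const char *op)
    {
        if (strcmp(op, "<") == 0)  return ">";
        if (strcmp(op, ">") == 0)  return "<";
        if (strcmp(op, "<=") == 0) return ">=";
        if (strcmp(op, ">=") == 0) return "<=";
        if (strcmp(op, "=") == 0)  return "=";
        return NULL;                       /* no commutator known */
    }

    int main(void)
    {
        /* WHERE 5 < x  is commuted to  WHERE x > 5 before index matching */
        printf("5 < x  ->  x %s 5\n", commutator("<"));
        return 0;
    }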
+ * clause is of the form (operator var/func var/func) + */ + else + { + if (match_index_to_operand(indexkey,(Expr*)rightop,rel,index)) { + + join_op = get_commutator(((Oper*)((Expr*)clause)->oper)->opno); + + } else if (match_index_to_operand(indexkey, + (Expr*)leftop,rel,index)) { + join_op = ((Oper*)((Expr*)clause)->oper)->opno; + } + + if ( join_op && op_class(join_op,xclass,index->relam) && + join_clause_p((Node*)clause)) + { + isIndexable = true; + + /* + * If we're using the operand's commutator we must + * commute the clause. + */ + if (join_op != ((Oper*)((Expr*)clause)->oper)->opno) + CommuteClause((Node*)clause); + } + } + + if (isIndexable) + return(clauseInfo); + + return(NULL); +} + +/**************************************************************************** + * ---- ROUTINES TO DO PARTIAL INDEX PREDICATE TESTS ---- + ****************************************************************************/ + +/* + * pred_test-- + * Does the "predicate inclusion test" for partial indexes. + * + * Recursively checks whether the clauses in clauseinfo_list imply + * that the given predicate is true. + * + * This routine (together with the routines it calls) iterates over + * ANDs in the predicate first, then reduces the qualification + * clauses down to their constituent terms, and iterates over ORs + * in the predicate last. This order is important to make the test + * succeed whenever possible (assuming the predicate has been + * successfully cnfify()-ed). --Nels, Jan '93 + */ +static bool +pred_test(List *predicate_list, List *clauseinfo_list, List *joininfo_list) +{ + List *pred, *items, *item; + + /* + * Note: if Postgres tried to optimize queries by forming equivalence + * classes over equi-joined attributes (i.e., if it recognized that a + * qualification such as "where a.b=c.d and a.b=5" could make use of + * an index on c.d), then we could use that equivalence class info + * here with joininfo_list to do more complete tests for the usability + * of a partial index. For now, the test only uses restriction + * clauses (those in clauseinfo_list). --Nels, Dec '92 + */ + + if (predicate_list == NULL) + return true; /* no predicate: the index is usable */ + if (clauseinfo_list == NULL) + return false; /* no restriction clauses: the test must fail */ + + foreach (pred, predicate_list) { + /* if any clause is not implied, the whole predicate is not implied */ + if (and_clause(lfirst(pred))) { + items = ((Expr*)lfirst(pred))->args; + foreach (item, items) { + if (!one_pred_test(lfirst(item), clauseinfo_list)) + return false; + } + } + else if (!one_pred_test(lfirst(pred), clauseinfo_list)) + return false; + } + return true; +} + + +/* + * one_pred_test-- + * Does the "predicate inclusion test" for one conjunct of a predicate + * expression. + */ +static bool +one_pred_test(Expr *predicate, List *clauseinfo_list) +{ + CInfo *clauseinfo; + List *item; + + Assert(predicate != NULL); + foreach (item, clauseinfo_list) { + clauseinfo = (CInfo *)lfirst(item); + /* if any clause implies the predicate, return true */ + if (one_pred_clause_expr_test(predicate, (Node*)clauseinfo->clause)) + return true; + } + return false; +} + + +/* + * one_pred_clause_expr_test-- + * Does the "predicate inclusion test" for a general restriction-clause + * expression. 
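pred_test() and its helpers above walk the predicate and the restriction clauses recursively: every AND item of the predicate must be implied, an OR restriction implies the predicate only if every arm does, and an AND restriction implies it if any arm does. A self-contained sketch of the restriction-clause side of that recursion over a toy clause tree:

    #include <stdio.h>

    /* Illustrative recursion shape of one_pred_clause_expr_test() above.
     * Toy nodes stand in for the real expression trees. */
    enum kind { LEAF, AND, OR };
    struct node {
        enum kind kind;
        int implies_pred;            /* LEAF: does this clause imply the predicate? */
        struct node *left, *right;   /* AND / OR children */
    };

    static int clause_implies(const struct node *c)
    {
        switch (c->kind) {
        case LEAF: return c->implies_pred;
        case OR:   return clause_implies(c->left) && clause_implies(c->right);
        case AND:  return clause_implies(c->left) || clause_implies(c->right);
        }
        return 0;
    }

    int main(void)
    {
        struct node a = {LEAF, 1, 0, 0}, b = {LEAF, 0, 0, 0};
        struct node or_ab = {OR, 0, &a, &b}, and_ab = {AND, 0, &a, &b};
        /* (a OR b) implies the predicate only if both arms do;
         * (a AND b) implies it if either arm does. */
        printf("%d %d\n", clause_implies(&or_ab), clause_implies(&and_ab)); /* 0 1 */
        return 0;
    }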
+ */ +static bool +one_pred_clause_expr_test(Expr *predicate, Node *clause) +{ + List *items, *item; + + if (is_opclause(clause)) + return one_pred_clause_test(predicate, clause); + else if (or_clause(clause)) { + items = ((Expr*)clause)->args; + foreach (item, items) { + /* if any OR item doesn't imply the predicate, clause doesn't */ + if (!one_pred_clause_expr_test(predicate, lfirst(item))) + return false; + } + return true; + }else if (and_clause(clause)) { + items = ((Expr*)clause)->args; + foreach (item, items) { + /* if any AND item implies the predicate, the whole clause does */ + if (one_pred_clause_expr_test(predicate, lfirst(item))) + return true; + } + return false; + }else { + /* unknown clause type never implies the predicate */ + return false; + } +} + + +/* + * one_pred_clause_test-- + * Does the "predicate inclusion test" for one conjunct of a predicate + * expression for a simple restriction clause. + */ +static bool +one_pred_clause_test(Expr *predicate, Node *clause) +{ + List *items, *item; + + if (is_opclause((Node*)predicate)) + return clause_pred_clause_test(predicate, clause); + else if (or_clause((Node*)predicate)) { + items = predicate->args; + foreach (item, items) { + /* if any item is implied, the whole predicate is implied */ + if (one_pred_clause_test(lfirst(item), clause)) + return true; + } + return false; + }else if (and_clause((Node*)predicate)) { + items = predicate->args; + foreach (item, items) { + /* + * if any item is not implied, the whole predicate is not + * implied + */ + if (!one_pred_clause_test(lfirst(item), clause)) + return false; + } + return true; + } + else { + elog(DEBUG, "Unsupported predicate type, index will not be used"); + return false; + } +} + + +/* + * Define an "operator implication table" for btree operators ("strategies"). + * The "strategy numbers" are: (1) < (2) <= (3) = (4) >= (5) > + * + * The interpretation of: + * + * test_op = BT_implic_table[given_op-1][target_op-1] + * + * where test_op, given_op and target_op are strategy numbers (from 1 to 5) + * of btree operators, is as follows: + * + * If you know, for some ATTR, that "ATTR given_op CONST1" is true, and you + * want to determine whether "ATTR target_op CONST2" must also be true, then + * you can use "CONST1 test_op CONST2" as a test. If this test returns true, + * then the target expression must be true; if the test returns false, then + * the target expression may be false. + * + * An entry where test_op==0 means the implication cannot be determined, i.e., + * this test should always be considered false. + */ + +StrategyNumber BT_implic_table[BTMaxStrategyNumber][BTMaxStrategyNumber] = { + {2, 2, 0, 0, 0}, + {1, 2, 0, 0, 0}, + {1, 2, 3, 4, 5}, + {0, 0, 0, 4, 5}, + {0, 0, 0, 4, 4} +}; + + +/* + * clause_pred_clause_test-- + * Use operator class info to check whether clause implies predicate. + * + * Does the "predicate inclusion test" for a "simple clause" predicate + * for a single "simple clause" restriction. Currently, this only handles + * (binary boolean) operators that are in some btree operator class. + * Eventually, rtree operators could also be handled by defining an + * appropriate "RT_implic_table" array. 
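To make the implication table above concrete: knowing "x < 5" (strategy 1) and asking whether "x <= 10" (strategy 2) must hold, the table gives test_op = BT_implic_table[0][1] = 2, i.e. "<=", and since 5 <= 10 is true the predicate is implied. A standalone sketch of that lookup and test on plain numbers; the real code fetches the operators from pg_amop and evaluates an expression instead:

    #include <stdio.h>

    /* Strategy numbers: 1 '<', 2 '<=', 3 '=', 4 '>=', 5 '>'.  Given
     * "ATTR given_op C1", "ATTR target_op C2" is implied if "C1 test_op C2"
     * holds, where test_op = BT_implic_table[given_op-1][target_op-1]
     * (0 means the implication cannot be determined). */
    static const int BT_implic_table[5][5] = {
        {2, 2, 0, 0, 0},
        {1, 2, 0, 0, 0},
        {1, 2, 3, 4, 5},
        {0, 0, 0, 4, 5},
        {0, 0, 0, 4, 4}
    };

    static int strategy_holds(int op, double a, double b)
    {
        switch (op) {
        case 1: return a < b;
        case 2: return a <= b;
        case 3: return a == b;
        case 4: return a >= b;
        case 5: return a > b;
        default: return 0;       /* 0: implication cannot be determined */
        }
    }

    int main(void)
    {
        /* Does "x < 5" (strategy 1) imply "x <= 10" (strategy 2)? */
        int test_op = BT_implic_table[1 - 1][2 - 1];   /* 2, i.e. '<=' */
        printf("implied: %d\n", strategy_holds(test_op, 5, 10));   /* 1 */
        return 0;
    }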
+ */ +static bool +clause_pred_clause_test(Expr *predicate, Node *clause) +{ + Var *pred_var, *clause_var; + Const *pred_const, *clause_const; + Oid pred_op, clause_op, test_op; + Oid opclass_id; + StrategyNumber pred_strategy, clause_strategy, test_strategy; + Oper *test_oper; + Expr *test_expr; + bool test_result, isNull; + Relation relation; + HeapScanDesc scan; + HeapTuple tuple; + ScanKeyData entry[3]; + Form_pg_amop form; + + pred_var = (Var*)get_leftop(predicate); + pred_const = (Const*)get_rightop(predicate); + clause_var = (Var*)get_leftop((Expr*)clause); + clause_const = (Const*)get_rightop((Expr*)clause); + + /* Check the basic form; for now, only allow the simplest case */ + if (!is_opclause(clause) || + !IsA(clause_var,Var) || + !IsA(clause_const,Const) || + !IsA(predicate->oper,Oper) || + !IsA(pred_var,Var) || + !IsA(pred_const,Const)) { + return false; + } + + /* + * The implication can't be determined unless the predicate and the clause + * refer to the same attribute. + */ + if (clause_var->varattno != pred_var->varattno) + return false; + + /* Get the operators for the two clauses we're comparing */ + pred_op = ((Oper*)((Expr*)predicate)->oper)->opno; + clause_op = ((Oper*)((Expr*)clause)->oper)->opno; + + + /* + * 1. Find a "btree" strategy number for the pred_op + */ + /* XXX - hardcoded amopid value 403 to find "btree" operator classes */ + ScanKeyEntryInitialize(&entry[0], 0, + Anum_pg_amop_amopid, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(403)); + + ScanKeyEntryInitialize(&entry[1], 0, + Anum_pg_amop_amopopr, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(pred_op)); + + relation = heap_openr(AccessMethodOperatorRelationName); + + /* + * The following assumes that any given operator will only be in a single + * btree operator class. This is true at least for all the pre-defined + * operator classes. If it isn't true, then whichever operator class + * happens to be returned first for the given operator will be used to + * find the associated strategy numbers for the test. --Nels, Jan '93 + */ + scan = heap_beginscan(relation, false, NowTimeQual, 2, entry); + tuple = heap_getnext(scan, false, (Buffer *)NULL); + if (! HeapTupleIsValid(tuple)) { + elog(DEBUG, "clause_pred_clause_test: unknown pred_op"); + return false; + } + form = (Form_pg_amop) GETSTRUCT(tuple); + + /* Get the predicate operator's strategy number (1 to 5) */ + pred_strategy = (StrategyNumber)form->amopstrategy; + + /* Remember which operator class this strategy number came from */ + opclass_id = form->amopclaid; + + heap_endscan(scan); + + + /* + * 2. From the same opclass, find a strategy num for the clause_op + */ + ScanKeyEntryInitialize(&entry[1], 0, + Anum_pg_amop_amopclaid, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(opclass_id)); + + ScanKeyEntryInitialize(&entry[2], 0, + Anum_pg_amop_amopopr, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(clause_op)); + + scan = heap_beginscan(relation, false, NowTimeQual, 3, entry); + tuple = heap_getnext(scan, false, (Buffer *)NULL); + if (! HeapTupleIsValid(tuple)) { + elog(DEBUG, "clause_pred_clause_test: unknown clause_op"); + return false; + } + form = (Form_pg_amop) GETSTRUCT(tuple); + + /* Get the restriction clause operator's strategy number (1 to 5) */ + clause_strategy = (StrategyNumber)form->amopstrategy; + heap_endscan(scan); + + + /* + * 3. 
Look up the "test" strategy number in the implication table + */ + + test_strategy = BT_implic_table[clause_strategy-1][pred_strategy-1]; + if (test_strategy == 0) + return false; /* the implication cannot be determined */ + + + /* + * 4. From the same opclass, find the operator for the test strategy + */ + + ScanKeyEntryInitialize(&entry[2], 0, + Anum_pg_amop_amopstrategy, + Integer16EqualRegProcedure, + Int16GetDatum(test_strategy)); + + scan = heap_beginscan(relation, false, NowTimeQual, 3, entry); + tuple = heap_getnext(scan, false, (Buffer *)NULL); + if (! HeapTupleIsValid(tuple)) { + elog(DEBUG, "clause_pred_clause_test: unknown test_op"); + return false; + } + form = (Form_pg_amop) GETSTRUCT(tuple); + + /* Get the test operator */ + test_op = form->amopopr; + heap_endscan(scan); + + + /* + * 5. Evaluate the test + */ + test_oper = makeOper(test_op, /* opno */ + InvalidOid, /* opid */ + BOOL_TYPEID, /* opresulttype */ + 0, /* opsize */ + NULL); /* op_fcache */ + (void) replace_opid(test_oper); + + test_expr = make_opclause(test_oper, + copyObject(clause_const), + copyObject(pred_const)); + +#ifndef OMIT_PARTIAL_INDEX + test_result = ExecEvalExpr((Node*)test_expr, NULL, &isNull, NULL); +#endif /* OMIT_PARTIAL_INDEX */ + if (isNull) { + elog(DEBUG, "clause_pred_clause_test: null test result"); + return false; + } + return test_result; +} + + +/**************************************************************************** + * ---- ROUTINES TO CHECK JOIN CLAUSES ---- + ****************************************************************************/ + +/* + * indexable-joinclauses-- + * Finds all groups of join clauses from among 'joininfo-list' that can + * be used in conjunction with 'index'. + * + * The first clause in the group is marked as having the other relation + * in the join clause as its outer join relation. + * + * Returns a list of these clause groups. + * + */ +static List * +indexable_joinclauses(Rel *rel, Rel *index, List *joininfo_list) +{ + JInfo *joininfo = (JInfo*)NULL; + List *cg_list = NIL; + List *i = NIL; + List *clausegroups = NIL; + + foreach(i,joininfo_list) { + joininfo = (JInfo*)lfirst(i); + clausegroups = + group_clauses_by_indexkey (rel, + index, + index->indexkeys, + index->classlist, + joininfo->jinfoclauseinfo, + true); + + if (clausegroups != NIL) { + List *clauses = lfirst(clausegroups); + + ((CInfo*)lfirst(clauses))->cinfojoinid = + joininfo->otherrels; + } + cg_list = nconc(cg_list,clausegroups); + } + return(cg_list); +} + +/**************************************************************************** + * ---- PATH CREATION UTILITIES ---- + ****************************************************************************/ + +/* + * extract_restrict_clauses - + * the list of clause info contains join clauses and restriction clauses. + * This routine returns the restriction clauses only. + */ +static List * +extract_restrict_clauses(List *clausegroup) +{ + List *restrict_cls = NIL; + List *l; + + foreach (l, clausegroup) { + CInfo *cinfo = lfirst(l); + + if (!join_clause_p((Node*)cinfo->clause)) { + restrict_cls = lappend(restrict_cls, cinfo); + } + } + return restrict_cls; +} + +/* + * index-innerjoin-- + * Creates index path nodes corresponding to paths to be used as inner + * relations in nestloop joins. + * + * 'clausegroup-list' is a list of list of clauseinfo nodes which can use + * 'index' on their inner relation. + * + * Returns a list of index pathnodes. 
+ * + */ +static List * +index_innerjoin(Query *root, Rel *rel, List *clausegroup_list, Rel *index) +{ + List *clausegroup = NIL; + List *cg_list = NIL; + List *i = NIL; + IndexPath *pathnode = (IndexPath*)NULL; + Cost temp_selec; + float temp_pages; + + foreach(i,clausegroup_list) { + List *attnos, *values, *flags; + + clausegroup = lfirst(i); + pathnode = makeNode(IndexPath); + + get_joinvars(lfirsti(rel->relids),clausegroup, + &attnos, &values, &flags); + index_selectivity(lfirsti(index->relids), + index->classlist, + get_opnos(clausegroup), + getrelid((int)lfirst(rel->relids), + root->rtable), + attnos, + values, + flags, + length(clausegroup), + &temp_pages, + &temp_selec); + pathnode->path.pathtype = T_IndexScan; + pathnode->path.parent = rel; + pathnode->indexid = index->relids; + pathnode->indexqual = clausegroup; + + pathnode->path.joinid = ((CInfo*)lfirst(clausegroup))->cinfojoinid; + + pathnode->path.path_cost = + cost_index((Oid)lfirst(index->relids), + (int)temp_pages, + temp_selec, + rel->pages, + rel->tuples, + index->pages, + index->tuples, + true); + + /* copy clauseinfo list into path for expensive function processing + -- JMH, 7/7/92 */ + pathnode->path.locclauseinfo = + set_difference(copyObject((Node*)rel->clauseinfo), + clausegroup); + +#if 0 /* fix xfunc */ + /* add in cost for expensive functions! -- JMH, 7/7/92 */ + if (XfuncMode != XFUNC_OFF) { + ((Path*)pathnode)->path_cost += + xfunc_get_path_cost((Path*)pathnode); + } +#endif + cg_list = lappend(cg_list,pathnode); + } + return(cg_list); +} + +/* + * create-index-paths-- + * Creates a list of index path nodes for each group of clauses + * (restriction or join) that can be used in conjunction with an index. + * + * 'rel' is the relation for which 'index' is defined + * 'clausegroup-list' is the list of clause groups (lists of clauseinfo + * nodes) grouped by mergesortorder + * 'join' is a flag indicating whether or not the clauses are join + * clauses + * + * Returns a list of new index path nodes. + * + */ +static List * +create_index_paths(Query *root, + Rel *rel, + Rel *index, + List *clausegroup_list, + bool join) +{ + List *clausegroup = NIL; + List *ip_list = NIL; + List *i = NIL; + List *j = NIL; + IndexPath *temp_path; + + foreach(i, clausegroup_list) { + CInfo *clauseinfo; + List *temp_node = NIL; + bool temp = true; + + clausegroup = lfirst(i); + + foreach (j,clausegroup) { + clauseinfo = (CInfo*)lfirst(j); + if (!(join_clause_p((Node*)clauseinfo->clause) && + equal_path_merge_ordering(index->ordering, + clauseinfo->mergesortorder))) { + temp = false; + } + } + + if (!join || temp) { /* restriction, ordering scan */ + temp_path = create_index_path (root, rel,index,clausegroup,join); + temp_node = + lcons(temp_path, NIL); + ip_list = nconc(ip_list,temp_node); + } + } + return(ip_list); +} + +static List * +add_index_paths(List *indexpaths, List *new_indexpaths) +{ + return append(indexpaths, new_indexpaths); +} + +static bool +function_index_operand(Expr *funcOpnd, Rel *rel, Rel *index) +{ + Oid heapRelid = (Oid)lfirst(rel->relids); + Func *function; + List *funcargs; + int *indexKeys = index->indexkeys; + List *arg; + int i; + + /* + * sanity check, make sure we know what we're dealing with here. 
+ */ + if (funcOpnd==NULL || + nodeTag(funcOpnd)!=T_Expr || funcOpnd->opType!=FUNC_EXPR || + funcOpnd->oper==NULL || indexKeys==NULL) + return false; + + function = (Func*)funcOpnd->oper; + funcargs = funcOpnd->args; + + if (function->funcid != index->indproc) + return false; + + /* + * Check that the arguments correspond to the same arguments used + * to create the functional index. To do this we must check that + * 1. refer to the right relatiion. + * 2. the args have the right attr. numbers in the right order. + * + * + * Check all args refer to the correct relation (i.e. the one with + * the functional index defined on it (rel). To do this we can + * simply compare range table entry numbers, they must be the same. + */ + foreach (arg, funcargs) { + if (heapRelid != ((Var*)lfirst(arg))->varno) + return false; + } + + /* + * check attr numbers and order. + */ + i = 0; + foreach (arg, funcargs) { + + if (indexKeys[i]==0) + return (false); + + if (((Var*)lfirst(arg))->varattno != indexKeys[i]) + return (false); + + i++; + } + + return true; +} + +static bool +SingleAttributeIndex(Rel *index) +{ + /* + * return false for now as I don't know if we support index scans + * on disjunction and the code doesn't work + */ + return (false); + +#if 0 + /* + * Non-functional indices. + */ + if (index->indproc == InvalidOid) + return (index->indexkeys[0] != 0 && + index->indexkeys[1] == 0); + + /* + * We have a functional index which is a single attr index + */ + return true; +#endif +} diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c new file mode 100644 index 00000000000..e727388715c --- /dev/null +++ b/src/backend/optimizer/path/joinpath.c @@ -0,0 +1,623 @@ +/*------------------------------------------------------------------------- + * + * joinpath.c-- + * Routines to find all possible paths for processing a set of joins + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinpath.c,v 1.1.1.1 1996/07/09 06:21:35 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include + +#include "storage/buf_internals.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" +#include "nodes/plannodes.h" + +#include "optimizer/internal.h" +#include "optimizer/paths.h" +#include "optimizer/pathnode.h" +#include "optimizer/keys.h" +#include "optimizer/cost.h" /* for _enable_{hashjoin, _enable_mergesort} */ + +static Path *best_innerjoin(List *join_paths, List *outer_relid); +static List *sort_inner_and_outer(Rel *joinrel, Rel *outerrel, Rel *innerrel, + List *mergeinfo_list); +static List *match_unsorted_outer(Rel *joinrel, Rel *outerrel, Rel *innerrel, + List *outerpath_list, Path *cheapest_inner, Path *best_innerjoin, + List *mergeinfo_list); +static List *match_unsorted_inner(Rel *joinrel, Rel *outerrel, Rel *innerrel, + List *innerpath_list, List *mergeinfo_list); +static bool EnoughMemoryForHashjoin(Rel *hashrel); +static List *hash_inner_and_outer(Rel *joinrel, Rel *outerrel, Rel *innerrel, + List *hashinfo_list); + +/* + * find-all-join-paths-- + * Creates all possible ways to process joins for each of the join + * relations in the list 'joinrels.' Each unique path will be included + * in the join relation's 'pathlist' field. + * + * In postgres, n-way joins are handled left-only(permuting clauseless + * joins doesn't usually win much). 
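The argument check in function_index_operand(), at the end of indxpath.c above, requires that every argument of the function clause is a Var of the indexed relation and that the argument attribute numbers match the index keys in the same order. A toy version of the ordering check, with attribute numbers standing in for the Var nodes:

    #include <stdio.h>

    /* Illustrative sketch of the argument/key comparison loop above: for an
     * index on f(attr_i, attr_j, ...), the clause's arguments must be exactly
     * those attributes, in the same order. */
    static int args_match_index(const int *arg_attnos, int nargs,
                                const int *index_keys /* zero-terminated */)
    {
        for (int i = 0; i < nargs; i++) {
            if (index_keys[i] == 0)              /* more args than index keys */
                return 0;
            if (arg_attnos[i] != index_keys[i])  /* wrong attribute or order */
                return 0;
        }
        return 1;
    }

    int main(void)
    {
        int index_keys[] = {2, 3, 0};            /* index on f(attr2, attr3) */
        int good[] = {2, 3}, bad[] = {3, 2};
        printf("%d %d\n", args_match_index(good, 2, index_keys),
                          args_match_index(bad, 2, index_keys));   /* 1 0 */
        return 0;
    }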
+ * + * if BushyPlanFlag is true, bushy tree plans will be generated + * + * 'joinrels' is the list of relation entries to be joined + * + * Modifies the pathlist field of the appropriate rel node to contain + * the unique join paths. + * If bushy trees are considered, may modify the relid field of the + * join rel nodes to flatten the lists. + * + * Returns nothing of interest. (?) + * It does a destructive modification. + */ +void +find_all_join_paths(Query *root, List *joinrels) +{ + List *mergeinfo_list = NIL; + List *hashinfo_list = NIL; + List *temp_list = NIL; + List *path = NIL; + + while (joinrels != NIL) { + Rel *joinrel = (Rel *)lfirst(joinrels); + List *innerrelids; + List *outerrelids; + Rel *innerrel; + Rel *outerrel; + Path *bestinnerjoin; + List *pathlist = NIL; + + innerrelids = lsecond(joinrel->relids); + outerrelids = lfirst(joinrel->relids); + + /* + * base relation id is an integer and join relation relid is a + * list of integers. + */ + innerrel = (length(innerrelids)==1)? + get_base_rel(root, lfirsti(innerrelids)) : get_join_rel(root,innerrelids); + outerrel = (length(outerrelids)==1)? + get_base_rel(root, lfirsti(outerrelids)) : get_join_rel(root, outerrelids); + + bestinnerjoin = best_innerjoin(innerrel->innerjoin, + outerrel->relids); + if( _enable_mergesort_ ) { + mergeinfo_list = + group_clauses_by_order(joinrel->clauseinfo, + lfirsti(innerrel->relids)); + } + + if( _enable_hashjoin_ ) { + hashinfo_list = + group_clauses_by_hashop(joinrel->clauseinfo, + lfirsti(innerrel->relids)); + } + + /* need to flatten the relids list */ + joinrel->relids = intAppend(outerrelids, innerrelids); + + /* + * 1. Consider mergesort paths where both relations must be + * explicitly sorted. + */ + pathlist = sort_inner_and_outer(joinrel,outerrel, + innerrel,mergeinfo_list); + + /* + * 2. Consider paths where the outer relation need not be explicitly + * sorted. This may include either nestloops and mergesorts where + * the outer path is already ordered. + */ + pathlist = + add_pathlist(joinrel, pathlist, + match_unsorted_outer(joinrel, + outerrel, + innerrel, + outerrel->pathlist, + (Path*)innerrel->cheapestpath, + bestinnerjoin, + mergeinfo_list)); + + /* + * 3. Consider paths where the inner relation need not be explicitly + * sorted. This may include nestloops and mergesorts the actual + * nestloop nodes were constructed in (match-unsorted-outer). + */ + pathlist = + add_pathlist(joinrel,pathlist, + match_unsorted_inner(joinrel,outerrel, + innerrel, + innerrel->pathlist, + mergeinfo_list)); + + /* + * 4. Consider paths where both outer and inner relations must be + * hashed before being joined. + */ + + pathlist = + add_pathlist(joinrel, pathlist, + hash_inner_and_outer(joinrel,outerrel, + innerrel,hashinfo_list)); + + joinrel->pathlist = pathlist; + + /* + * 'OuterJoinCost is only valid when calling (match-unsorted-inner) + * with the same arguments as the previous invokation of + * (match-unsorted-outer), so clear the field before going on. + */ + temp_list = innerrel->pathlist; + foreach(path, temp_list) { + + /* + * XXX + * + * This gross hack is to get around an apparent optimizer bug on + * Sparc (or maybe it is a bug of ours?) that causes really wierd + * behavior. 
+ */ + if (IsA_JoinPath(path)) { + ((Path*)lfirst(path))->outerjoincost = (Cost) 0; + } + + /* do it iff it is a join path, which is not always + true, esp since the base level */ + } + + joinrels = lnext(joinrels); + } +} + +/* + * best-innerjoin-- + * Find the cheapest index path that has already been identified by + * (indexable_joinclauses) as being a possible inner path for the given + * outer relation in a nestloop join. + * + * 'join-paths' is a list of join nodes + * 'outer-relid' is the relid of the outer join relation + * + * Returns the pathnode of the selected path. + */ +static Path * +best_innerjoin(List *join_paths, List *outer_relids) +{ + Path *cheapest = (Path*)NULL; + List *join_path; + + foreach(join_path, join_paths) { + Path *path = (Path *)lfirst(join_path); + + if (intMember(lfirsti(path->joinid), outer_relids) + && ((cheapest==NULL || + path_is_cheaper((Path*)lfirst(join_path),cheapest)))) { + + cheapest = (Path*)lfirst(join_path); + } + } + return(cheapest); +} + +/* + * sort-inner-and-outer-- + * Create mergesort join paths by explicitly sorting both the outer and + * inner join relations on each available merge ordering. + * + * 'joinrel' is the join relation + * 'outerrel' is the outer join relation + * 'innerrel' is the inner join relation + * 'mergeinfo-list' is a list of nodes containing info on(mergesortable) + * clauses for joining the relations + * + * Returns a list of mergesort paths. + */ +static List * +sort_inner_and_outer(Rel *joinrel, + Rel *outerrel, + Rel *innerrel, + List *mergeinfo_list) +{ + List *ms_list = NIL; + MInfo *xmergeinfo = (MInfo*)NULL; + MergePath *temp_node = (MergePath*)NULL; + List *i; + List *outerkeys = NIL; + List *innerkeys = NIL; + List *merge_pathkeys = NIL; + + foreach(i, mergeinfo_list) { + xmergeinfo = (MInfo *)lfirst(i); + + outerkeys = + extract_path_keys(xmergeinfo->jmethod.jmkeys, + outerrel->targetlist, + OUTER); + + innerkeys = + extract_path_keys(xmergeinfo->jmethod.jmkeys, + innerrel->targetlist, + INNER); + + merge_pathkeys = + new_join_pathkeys(outerkeys, joinrel->targetlist, + xmergeinfo->jmethod.clauses); + + temp_node = + create_mergesort_path(joinrel, + outerrel->size, + innerrel->size, + outerrel->width, + innerrel->width, + (Path*)outerrel->cheapestpath, + (Path*)innerrel->cheapestpath, + merge_pathkeys, + xmergeinfo->m_ordering, + xmergeinfo->jmethod.clauses, + outerkeys, + innerkeys); + + ms_list = lappend(ms_list, temp_node); + } + return(ms_list); +} + +/* + * match-unsorted-outer-- + * Creates possible join paths for processing a single join relation + * 'joinrel' by employing either iterative substitution or + * mergesorting on each of its possible outer paths(assuming that the + * outer relation need not be explicitly sorted). + * + * 1. The inner path is the cheapest available inner path. + * 2. Mergesort wherever possible. Mergesorts are considered if there + * are mergesortable join clauses between the outer and inner join + * relations such that the outer path is keyed on the variables + * appearing in the clauses. The corresponding inner merge path is + * either a path whose keys match those of the outer path(if such a + * path is available) or an explicit sort on the appropriate inner + * join keys, whichever is cheaper. 
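best_innerjoin() above scans the inner relation's saved index paths and keeps the cheapest one whose join clause refers to a relation that is actually part of the current outer join relation. A simplified standalone version over plain structs, standing in for the real Path nodes:

    #include <stdio.h>

    /* Illustrative sketch of the best_innerjoin() selection above. */
    struct inner_path {
        int    required_outer_relid;  /* relid the index qual joins against */
        double cost;
    };

    static const struct inner_path *
    cheapest_usable_inner(const struct inner_path *paths, int npaths,
                          const int *outer_relids, int nouter)
    {
        const struct inner_path *cheapest = NULL;
        for (int i = 0; i < npaths; i++) {
            int usable = 0;
            for (int j = 0; j < nouter; j++)
                if (paths[i].required_outer_relid == outer_relids[j])
                    usable = 1;
            if (usable && (cheapest == NULL || paths[i].cost < cheapest->cost))
                cheapest = &paths[i];
        }
        return cheapest;
    }

    int main(void)
    {
        struct inner_path paths[] = { {1, 120.0}, {2, 80.0}, {1, 60.0} };
        int outer_relids[] = {1};
        const struct inner_path *p = cheapest_usable_inner(paths, 3, outer_relids, 1);
        printf("cost %.1f\n", p ? p->cost : -1.0);  /* 60.0: relid 2 is unavailable */
        return 0;
    }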
+ * + * 'joinrel' is the join relation + * 'outerrel' is the outer join relation + * 'innerrel' is the inner join relation + * 'outerpath-list' is the list of possible outer paths + * 'cheapest-inner' is the cheapest inner path + * 'best-innerjoin' is the best inner index path(if any) + * 'mergeinfo-list' is a list of nodes containing info on mergesortable + * clauses + * + * Returns a list of possible join path nodes. + */ +static List * +match_unsorted_outer(Rel *joinrel, + Rel *outerrel, + Rel *innerrel, + List *outerpath_list, + Path *cheapest_inner, + Path *best_innerjoin, + List *mergeinfo_list) +{ + Path *outerpath = (Path*)NULL; + List *jp_list = NIL; + List *temp_node = NIL; + List *merge_pathkeys = NIL; + Path *nestinnerpath =(Path*)NULL; + List *paths = NIL; + List *i = NIL; + PathOrder *outerpath_ordering = NULL; + + foreach(i,outerpath_list) { + List *clauses = NIL; + List *matchedJoinKeys = NIL; + List *matchedJoinClauses = NIL; + MInfo *xmergeinfo = (MInfo*)NULL; + + outerpath = (Path*)lfirst(i); + + outerpath_ordering = &outerpath->p_ordering; + + if (outerpath_ordering) { + xmergeinfo = + match_order_mergeinfo(outerpath_ordering, + mergeinfo_list); + } + + if (xmergeinfo) { + clauses = xmergeinfo->jmethod.clauses; + } + + if (clauses) { + List *keys = xmergeinfo->jmethod.jmkeys; + List *clauses = xmergeinfo->jmethod.clauses; + + matchedJoinKeys = + match_pathkeys_joinkeys(outerpath->keys, + keys, + clauses, + OUTER, + &matchedJoinClauses); + merge_pathkeys = + new_join_pathkeys(outerpath->keys, + joinrel->targetlist, clauses); + } else { + merge_pathkeys = outerpath->keys; + } + + if(best_innerjoin && + path_is_cheaper(best_innerjoin, cheapest_inner)) { + nestinnerpath = best_innerjoin; + } else { + nestinnerpath = cheapest_inner; + } + + paths = lcons(create_nestloop_path(joinrel, + outerrel, + outerpath, + nestinnerpath, + merge_pathkeys), + NIL); + + if (clauses && matchedJoinKeys) { + bool path_is_cheaper_than_sort; + List *varkeys = NIL; + Path *mergeinnerpath = + match_paths_joinkeys(matchedJoinKeys, + outerpath_ordering, + innerrel->pathlist, + INNER); + + path_is_cheaper_than_sort = + (bool) (mergeinnerpath && + (mergeinnerpath->path_cost < + (cheapest_inner->path_cost + + cost_sort(matchedJoinKeys, + innerrel->size, + innerrel->width, + false)))); + if(!path_is_cheaper_than_sort) { + varkeys = + extract_path_keys(matchedJoinKeys, + innerrel->targetlist, + INNER); + } + + + /* + * Keep track of the cost of the outer path used with + * this ordered inner path for later processing in + * (match-unsorted-inner), since it isn't a sort and + * thus wouldn't otherwise be considered. + */ + if (path_is_cheaper_than_sort) { + mergeinnerpath->outerjoincost = outerpath->path_cost; + } else { + mergeinnerpath = cheapest_inner; + } + + temp_node = + lcons(create_mergesort_path(joinrel, + outerrel->size, + innerrel->size, + outerrel->width, + innerrel->width, + outerpath, + mergeinnerpath, + merge_pathkeys, + xmergeinfo->m_ordering, + matchedJoinClauses, + NIL, + varkeys), + paths); + } else { + temp_node = paths; + } + jp_list = nconc(jp_list, temp_node); + } + return(jp_list); +} + +/* + * match-unsorted-inner -- + * Find the cheapest ordered join path for a given(ordered, unsorted) + * inner join path. + * + * Scans through each path available on an inner join relation and tries + * matching its ordering keys against those of mergejoin clauses. + * If 1. an appropriately-ordered inner path and matching mergeclause are + * found, and + * 2. 
sorting the cheapest outer path is cheaper than using an ordered + * but unsorted outer path(as was considered in + * (match-unsorted-outer)), + * then this merge path is considered. + * + * 'joinrel' is the join result relation + * 'outerrel' is the outer join relation + * 'innerrel' is the inner join relation + * 'innerpath-list' is the list of possible inner join paths + * 'mergeinfo-list' is a list of nodes containing info on mergesortable + * clauses + * + * Returns a list of possible merge paths. + */ +static List * +match_unsorted_inner(Rel *joinrel, + Rel *outerrel, + Rel *innerrel, + List *innerpath_list, + List *mergeinfo_list) +{ + Path *innerpath = (Path*)NULL; + List *mp_list = NIL; + List *temp_node = NIL; + PathOrder *innerpath_ordering = NULL; + Cost temp1 = 0.0; + bool temp2 = false; + List *i = NIL; + + foreach (i, innerpath_list) { + MInfo *xmergeinfo = (MInfo*)NULL; + List *clauses = NIL; + List *matchedJoinKeys = NIL; + List *matchedJoinClauses = NIL; + + innerpath = (Path*)lfirst(i); + + innerpath_ordering = &innerpath->p_ordering; + + if (innerpath_ordering) { + xmergeinfo = + match_order_mergeinfo(innerpath_ordering, + mergeinfo_list); + } + + if (xmergeinfo) { + clauses = ((JoinMethod*)xmergeinfo)->clauses; + } + + if (clauses) { + List *keys = xmergeinfo->jmethod.jmkeys; + List *cls = xmergeinfo->jmethod.clauses; + + matchedJoinKeys = + match_pathkeys_joinkeys(innerpath->keys, + keys, + cls, + INNER, + &matchedJoinClauses); + } + + /* + * (match-unsorted-outer) if it is applicable. + * 'OuterJoinCost was set above in + */ + if (clauses && matchedJoinKeys) { + temp1 = outerrel->cheapestpath->path_cost + + cost_sort(matchedJoinKeys, outerrel->size, outerrel->width, + false); + + temp2 = (bool) (FLOAT_IS_ZERO(innerpath->outerjoincost) + || (innerpath->outerjoincost > temp1)); + + if(temp2) { + List *outerkeys = + extract_path_keys(matchedJoinKeys, + outerrel->targetlist, + OUTER); + List *merge_pathkeys = + new_join_pathkeys(outerkeys, + joinrel->targetlist, + clauses); + + temp_node = + lcons(create_mergesort_path(joinrel, + outerrel->size, + innerrel->size, + outerrel->width, + innerrel->width, + (Path*)outerrel->cheapestpath, + innerpath, + merge_pathkeys, + xmergeinfo->m_ordering, + matchedJoinClauses, + outerkeys, + NIL), + NIL); + + mp_list = nconc(mp_list,temp_node); + } + } + } + return(mp_list); + +} + +static bool +EnoughMemoryForHashjoin(Rel *hashrel) +{ + int ntuples; + int tupsize; + int pages; + + ntuples = hashrel->size; + if (ntuples == 0) ntuples = 1000; + tupsize = hashrel->width + sizeof(HeapTupleData); + pages = page_size(ntuples, tupsize); + /* + * if amount of buffer space below hashjoin threshold, + * return false + */ + if (ceil(sqrt((double)pages)) > NBuffers) + return false; + return true; +} + +/* + * hash-inner-and-outer-- XXX HASH + * Create hashjoin join paths by explicitly hashing both the outer and + * inner join relations on each available hash op. + * + * 'joinrel' is the join relation + * 'outerrel' is the outer join relation + * 'innerrel' is the inner join relation + * 'hashinfo-list' is a list of nodes containing info on(hashjoinable) + * clauses for joining the relations + * + * Returns a list of hashjoin paths. 
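EnoughMemoryForHashjoin() above admits a hashjoin only when ceil(sqrt(pages of the inner relation)) fits within NBuffers, defaulting the tuple count to 1000 when the size estimate is zero. A standalone sketch with assumed values for NBuffers, the block size, and the per-tuple overhead:

    #include <math.h>
    #include <stdio.h>

    /* Illustrative sketch of the hashjoin memory check above.
     * NBUFFERS, BLOCK_SIZE and TUPLE_HEADER are assumed values. */
    #define NBUFFERS     64     /* assumed buffer-pool size */
    #define BLOCK_SIZE   8192   /* assumed BLCKSZ */
    #define TUPLE_HEADER 40     /* assumed per-tuple overhead, bytes */

    static int enough_memory_for_hashjoin(int ntuples, int width)
    {
        if (ntuples == 0)
            ntuples = 1000;                 /* same default as the code above */
        int pages = (int) ceil((double) ntuples * (width + TUPLE_HEADER) / BLOCK_SIZE);
        return ceil(sqrt((double) pages)) <= NBUFFERS;
    }

    int main(void)
    {
        printf("%d %d\n",
               enough_memory_for_hashjoin(100000, 100),      /* ~1709 pages -> ok   */
               enough_memory_for_hashjoin(100000000, 100));  /* far too big -> not  */
        return 0;
    }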
+ */ +static List * +hash_inner_and_outer(Rel *joinrel, + Rel *outerrel, + Rel *innerrel, + List *hashinfo_list) +{ + HInfo *xhashinfo = (HInfo*)NULL; + List *hjoin_list = NIL; + HashPath *temp_node = (HashPath*)NULL; + List *i = NIL; + List *outerkeys = NIL; + List *innerkeys = NIL; + List *hash_pathkeys = NIL; + + foreach (i, hashinfo_list) { + xhashinfo = (HInfo*)lfirst(i); + outerkeys = + extract_path_keys(((JoinMethod*)xhashinfo)->jmkeys, + outerrel->targetlist, + OUTER); + innerkeys = + extract_path_keys(((JoinMethod*)xhashinfo)->jmkeys, + innerrel->targetlist, + INNER); + hash_pathkeys = + new_join_pathkeys(outerkeys, + joinrel->targetlist, + ((JoinMethod*)xhashinfo)->clauses); + + if (EnoughMemoryForHashjoin(innerrel)) { + temp_node = create_hashjoin_path(joinrel, + outerrel->size, + innerrel->size, + outerrel->width, + innerrel->width, + (Path*)outerrel->cheapestpath, + (Path*)innerrel->cheapestpath, + hash_pathkeys, + xhashinfo->hashop, + ((JoinMethod*)xhashinfo)->clauses, + outerkeys, + innerkeys); + hjoin_list = lappend(hjoin_list, temp_node); + } + } + return(hjoin_list); +} + diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c new file mode 100644 index 00000000000..b26e3364f93 --- /dev/null +++ b/src/backend/optimizer/path/joinrels.c @@ -0,0 +1,528 @@ +/*------------------------------------------------------------------------- + * + * joinrels.c-- + * Routines to determine which relations should be joined + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/joinrels.c,v 1.1.1.1 1996/07/09 06:21:36 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" + +#include "optimizer/internal.h" +#include "optimizer/cost.h" +#include "optimizer/paths.h" +#include "optimizer/tlist.h" +#include "optimizer/joininfo.h" +#include "optimizer/pathnode.h" + + +static List *find_clause_joins(Query *root, Rel *outer_rel, List *joininfo_list); +static List *find_clauseless_joins(Rel *outer_rel, List *inner_rels); +static Rel *init_join_rel(Rel *outer_rel, Rel *inner_rel, JInfo *joininfo); +static List *new_join_tlist(List *tlist, List *other_relids, + int first_resdomno); +static List *new_joininfo_list(List *joininfo_list, List *join_relids); +static void add_superrels(Rel *rel, Rel *super_rel); +static bool nonoverlap_rels(Rel *rel1, Rel *rel2); +static bool nonoverlap_sets(List *s1, List *s2); +static void set_joinrel_size(Rel *joinrel, Rel *outer_rel, Rel *inner_rel, + JInfo *jinfo); + +/* + * find-join-rels-- + * Find all possible joins for each of the outer join relations in + * 'outer-rels'. A rel node is created for each possible join relation, + * and the resulting list of nodes is returned. If at all possible, only + * those relations for which join clauses exist are considered. If none + * of these exist for a given relation, all remaining possibilities are + * considered. + * + * 'outer-rels' is the list of rel nodes + * + * Returns a list of rel nodes corresponding to the new join relations. 
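The EnoughMemoryForHashjoin test used above gates hash paths on a square-root rule: the inner relation only qualifies if ceil(sqrt(pages)) fits within the buffer count. Below is a standalone sketch of that heuristic; the 8 KB block size and the fixed buffer count are assumptions standing in for the backend's page_size() and NBuffers, and the per-tuple header overhead is simply folded into the tuple width.

#include <math.h>
#include <stdio.h>

#define BLCKSZ   8192    /* assumed block size */
#define NBUFFERS 256     /* stands in for the backend's NBuffers */

/* Estimate the pages needed to hold ntuples rows of tupwidth bytes. */
static int page_count(int ntuples, int tupwidth)
{
    return (int) ceil((double) ntuples * tupwidth / BLCKSZ);
}

/* Mirror of the ceil(sqrt(pages)) <= NBuffers test used above. */
static int enough_memory_for_hashjoin(int ntuples, int tupwidth)
{
    if (ntuples == 0)
        ntuples = 1000;          /* same default guess as the original */
    return ceil(sqrt((double) page_count(ntuples, tupwidth))) <= NBUFFERS;
}

int main(void)
{
    printf("hashable? %d\n", enough_memory_for_hashjoin(500000, 100));
    return 0;
}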
+ */ +List * +find_join_rels(Query *root, List *outer_rels) +{ + List *joins = NIL; + List *join_list = NIL; + List *r = NIL; + + foreach(r, outer_rels) { + Rel *outer_rel = (Rel *)lfirst(r); + + if(!(joins = find_clause_joins(root, outer_rel,outer_rel->joininfo))) + if (BushyPlanFlag) + joins = find_clauseless_joins(outer_rel,outer_rels); + else + joins = find_clauseless_joins(outer_rel,root->base_relation_list_); + + join_list = nconc(join_list, joins); + } + + return(join_list); +} + +/* + * find-clause-joins-- + * Determines whether joins can be performed between an outer relation + * 'outer-rel' and those relations within 'outer-rel's joininfo nodes + * (i.e., relations that participate in join clauses that 'outer-rel' + * participates in). This is possible if all but one of the relations + * contained within the join clauses of the joininfo node are already + * contained within 'outer-rel'. + * + * 'outer-rel' is the relation entry for the outer relation + * 'joininfo-list' is a list of join clauses which 'outer-rel' + * participates in + * + * Returns a list of new join relations. + */ +static List * +find_clause_joins(Query *root, Rel *outer_rel, List *joininfo_list) +{ + List *join_list = NIL; + List *i = NIL; + + foreach (i, joininfo_list) { + JInfo *joininfo = (JInfo*)lfirst(i); + Rel *rel; + + if(!joininfo->inactive) { + List *other_rels = joininfo->otherrels; + + if(other_rels != NIL) { + if(length(other_rels) == 1) { + rel = init_join_rel(outer_rel, + get_base_rel(root, lfirsti(other_rels)), + joininfo); + } else if (BushyPlanFlag) { + rel = init_join_rel(outer_rel, + get_join_rel(root, other_rels), + joininfo); + } else { + rel = NULL; + } + + if (rel != NULL) + join_list = lappend(join_list, rel); + } + } + } + + return(join_list); +} + +/* + * find-clauseless-joins-- + * Given an outer relation 'outer-rel' and a list of inner relations + * 'inner-rels', create a join relation between 'outer-rel' and each + * member of 'inner-rels' that isn't already included in 'outer-rel'. + * + * Returns a list of new join relations. + */ +static List * +find_clauseless_joins(Rel *outer_rel, List *inner_rels) +{ + Rel *inner_rel; + List *t_list = NIL; + List *temp_node = NIL; + List *i = NIL; + + foreach (i, inner_rels) { + inner_rel = (Rel *)lfirst(i); + if(nonoverlap_rels(inner_rel, outer_rel)) { + temp_node = lcons(init_join_rel(outer_rel, + inner_rel, + (JInfo*)NULL), + NIL); + t_list = nconc(t_list,temp_node); + } + } + + return(t_list); +} + +/* + * init-join-rel-- + * Creates and initializes a new join relation. + * + * 'outer-rel' and 'inner-rel' are relation nodes for the relations to be + * joined + * 'joininfo' is the joininfo node(join clause) containing both + * 'outer-rel' and 'inner-rel', if any exists + * + * Returns the new join relation node. + */ +static Rel * +init_join_rel(Rel *outer_rel, Rel *inner_rel, JInfo *joininfo) +{ + Rel *joinrel = makeNode(Rel); + List *joinrel_joininfo_list = NIL; + List *new_outer_tlist; + List *new_inner_tlist; + + /* + * Create a new tlist by removing irrelevant elements from both + * tlists of the outer and inner join relations and then merging + * the results together. 
+ */ + new_outer_tlist = + new_join_tlist(outer_rel->targetlist, /* XXX 1-based attnos */ + inner_rel->relids, 1); + new_inner_tlist = + new_join_tlist(inner_rel->targetlist, /* XXX 1-based attnos */ + outer_rel->relids, + length(new_outer_tlist) + 1); + + joinrel->relids = NIL; + joinrel->indexed = false; + joinrel->pages = 0; + joinrel->tuples = 0; + joinrel->width = 0; +/* joinrel->targetlist = NIL;*/ + joinrel->pathlist = NIL; + joinrel->unorderedpath = (Path *)NULL; + joinrel->cheapestpath = (Path *)NULL; + joinrel->pruneable = true; + joinrel->classlist = NULL; + joinrel->relam = InvalidOid; + joinrel->ordering = NULL; + joinrel->clauseinfo = NIL; + joinrel->joininfo = NULL; + joinrel->innerjoin = NIL; + joinrel->superrels = NIL; + + joinrel->relids = lcons(outer_rel->relids, /* ??? aren't they lists? -ay */ + lcons(inner_rel->relids, NIL)); + + new_outer_tlist = nconc(new_outer_tlist,new_inner_tlist); + joinrel->targetlist = new_outer_tlist; + + if (joininfo) { + joinrel->clauseinfo = joininfo->jinfoclauseinfo; + if (BushyPlanFlag) + joininfo->inactive = true; + } + + joinrel_joininfo_list = + new_joininfo_list(append(outer_rel->joininfo, inner_rel->joininfo), + intAppend(outer_rel->relids, inner_rel->relids)); + + joinrel->joininfo = joinrel_joininfo_list; + + set_joinrel_size(joinrel, outer_rel, inner_rel, joininfo); + + return(joinrel); +} + +/* + * new-join-tlist-- + * Builds a join relations's target list by keeping those elements that + * will be in the final target list and any other elements that are still + * needed for future joins. For a target list entry to still be needed + * for future joins, its 'joinlist' field must not be empty after removal + * of all relids in 'other-relids'. + * + * 'tlist' is the target list of one of the join relations + * 'other-relids' is a list of relids contained within the other + * join relation + * 'first-resdomno' is the resdom number to use for the first created + * target list entry + * + * Returns the new target list. + */ +static List * +new_join_tlist(List *tlist, + List *other_relids, + int first_resdomno) +{ + int resdomno = first_resdomno - 1; + TargetEntry *xtl = NULL; + List *temp_node = NIL; + List *t_list = NIL; + List *i = NIL; + List *join_list = NIL; + bool in_final_tlist =false; + + + foreach(i,tlist) { + xtl= lfirst(i); + in_final_tlist = (join_list==NIL); + if( in_final_tlist) { + resdomno += 1; + temp_node = + lcons(create_tl_element(get_expr(xtl), + resdomno), + NIL); + t_list = nconc(t_list,temp_node); + } + } + + return(t_list); +} + +/* + * new-joininfo-list-- + * Builds a join relation's joininfo list by checking for join clauses + * which still need to used in future joins involving this relation. A + * join clause is still needed if there are still relations in the clause + * not contained in the list of relations comprising this join relation. + * New joininfo nodes are only created and added to + * 'current-joininfo-list' if a node for a particular join hasn't already + * been created. + * + * 'current-joininfo-list' contains a list of those joininfo nodes that + * have already been built + * 'joininfo-list' is the list of join clauses involving this relation + * 'join-relids' is a list of relids corresponding to the relations + * currently being joined + * + * Returns a list of joininfo nodes, new and old. 
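new_join_tlist above keeps the surviving outer entries and gives the inner entries resdom numbers that continue where the outer list left off, so the join's target list is numbered consecutively from 1. A toy sketch of that renumbering over plain arrays; TLE is a made-up stand-in for the real TargetEntry node.

#include <stdio.h>

/* Toy target-list entry: a column name and its resdom number. */
typedef struct { const char *name; int resno; } TLE;

/* Copy 'src' into 'dst' starting at resdom number 'first_resno', the
 * way new_join_tlist renumbers surviving entries.  Returns the next
 * free resdom number. */
static int renumber_tlist(TLE *dst, const TLE *src, int n, int first_resno)
{
    for (int i = 0; i < n; i++) {
        dst[i].name = src[i].name;
        dst[i].resno = first_resno + i;
    }
    return first_resno + n;
}

int main(void)
{
    TLE outer[] = { {"a.x", 1}, {"a.y", 2} };
    TLE inner[] = { {"b.z", 1} };
    TLE join[3];
    int next = renumber_tlist(join, outer, 2, 1);

    renumber_tlist(join + 2, inner, 1, next);
    for (int i = 0; i < 3; i++)
        printf("%s -> resno %d\n", join[i].name, join[i].resno);
    return 0;
}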
+ */ +static List * +new_joininfo_list(List *joininfo_list, List *join_relids) +{ + List *current_joininfo_list = NIL; + List *new_otherrels = NIL; + JInfo *other_joininfo = (JInfo*)NULL; + List *xjoininfo = NIL; + + foreach (xjoininfo, joininfo_list) { + JInfo *joininfo = (JInfo*)lfirst(xjoininfo); + + new_otherrels = joininfo->otherrels; + if (nonoverlap_sets(new_otherrels,join_relids)) { + other_joininfo = joininfo_member(new_otherrels, + current_joininfo_list); + if(other_joininfo) { + other_joininfo->jinfoclauseinfo = + (List*)LispUnion(joininfo->jinfoclauseinfo, + other_joininfo->jinfoclauseinfo); + }else { + other_joininfo = makeNode(JInfo); + + other_joininfo->otherrels = + joininfo->otherrels; + other_joininfo->jinfoclauseinfo = + joininfo->jinfoclauseinfo; + other_joininfo->mergesortable = + joininfo->mergesortable; + other_joininfo->hashjoinable = + joininfo->hashjoinable; + other_joininfo->inactive = false; + + current_joininfo_list = lcons(other_joininfo, + current_joininfo_list); + } + } + } + + return(current_joininfo_list); +} + +/* + * add-new-joininfos-- + * For each new join relation, create new joininfos that + * use the join relation as inner relation, and add + * the new joininfos to those rel nodes that still + * have joins with the join relation. + * + * 'joinrels' is a list of join relations. + * + * Modifies the joininfo field of appropriate rel nodes. + */ +void +add_new_joininfos(Query *root, List *joinrels, List *outerrels) +{ + List *xjoinrel = NIL; + List *xrelid = NIL; + List *xrel = NIL; + List *xjoininfo = NIL; + + foreach(xjoinrel, joinrels) { + Rel *joinrel = (Rel *)lfirst(xjoinrel); + foreach(xrelid, joinrel->relids) { + Relid relid = (Relid)lfirst(xrelid); + Rel *rel = get_join_rel(root, relid); + add_superrels(rel,joinrel); + } + } + foreach(xjoinrel, joinrels) { + Rel *joinrel = (Rel *)lfirst(xjoinrel); + + foreach(xjoininfo, joinrel->joininfo) { + JInfo *joininfo = (JInfo*)lfirst(xjoininfo); + List *other_rels = joininfo->otherrels; + List *clause_info = joininfo->jinfoclauseinfo; + bool mergesortable = joininfo->mergesortable; + bool hashjoinable = joininfo->hashjoinable; + + foreach(xrelid, other_rels) { + Relid relid = (Relid)lfirst(xrelid); + Rel *rel = get_join_rel(root, relid); + List *super_rels = rel->superrels; + List *xsuper_rel = NIL; + JInfo *new_joininfo = makeNode(JInfo); + + new_joininfo->otherrels = joinrel->relids; + new_joininfo->jinfoclauseinfo = clause_info; + new_joininfo->mergesortable = mergesortable; + new_joininfo->hashjoinable = hashjoinable; + new_joininfo->inactive = false; + rel->joininfo = + lappend(rel->joininfo, new_joininfo); + + foreach(xsuper_rel, super_rels) { + Rel *super_rel = (Rel *)lfirst(xsuper_rel); + + if( nonoverlap_rels(super_rel,joinrel) ) { + List *new_relids = super_rel->relids; + JInfo *other_joininfo = + joininfo_member(new_relids, + joinrel->joininfo); + + if (other_joininfo) { + other_joininfo->jinfoclauseinfo = + (List*)LispUnion(clause_info, + other_joininfo->jinfoclauseinfo); + } else { + JInfo *new_joininfo = makeNode(JInfo); + + new_joininfo->otherrels = new_relids; + new_joininfo->jinfoclauseinfo = clause_info; + new_joininfo->mergesortable = mergesortable; + new_joininfo->hashjoinable = hashjoinable; + new_joininfo->inactive = false; + joinrel->joininfo = + lappend(joinrel->joininfo, + new_joininfo); + } + } + } + } + } + } + foreach(xrel, outerrels) { + Rel *rel = (Rel *)lfirst(xrel); + rel->superrels = NIL; + } +} + +/* + * final-join-rels-- + * Find the join relation that includes all the 
original + * relations, i.e. the final join result. + * + * 'join-rel-list' is a list of join relations. + * + * Returns the list of final join relations. + */ +List * +final_join_rels(List *join_rel_list) +{ + List *xrel = NIL; + List *temp = NIL; + List *t_list = NIL; + + /* + * find the relations that has no further joins, + * i.e., its joininfos all have otherrels nil. + */ + foreach(xrel,join_rel_list) { + Rel *rel = (Rel *)lfirst(xrel); + List *xjoininfo = NIL; + bool final = true; + + foreach (xjoininfo, rel->joininfo) { + JInfo *joininfo = (JInfo*)lfirst(xjoininfo); + + if (joininfo->otherrels != NIL) { + final = false; + break; + } + } + if (final) { + temp = lcons(rel, NIL); + t_list = nconc(t_list, temp); + } + } + + return(t_list); +} + +/* + * add_superrels-- + * add rel to the temporary property list superrels. + * + * 'rel' a rel node + * 'super-rel' rel node of a join relation that includes rel + * + * Modifies the superrels field of rel + */ +static void +add_superrels(Rel *rel, Rel *super_rel) +{ + rel->superrels = lappend(rel->superrels, super_rel); +} + +/* + * nonoverlap-rels-- + * test if two join relations overlap, i.e., includes the same + * relation. + * + * 'rel1' and 'rel2' are two join relations + * + * Returns non-nil if rel1 and rel2 do not overlap. + */ +static bool +nonoverlap_rels(Rel *rel1, Rel *rel2) +{ + return(nonoverlap_sets(rel1->relids, rel2->relids)); +} + +static bool +nonoverlap_sets(List *s1, List *s2) +{ + List *x = NIL; + + foreach(x,s1) { + int e = lfirsti(x); + if(intMember(e,s2)) + return(false); + } + return(true); +} + +static void +set_joinrel_size(Rel *joinrel, Rel *outer_rel, Rel *inner_rel, JInfo *jinfo) +{ + int ntuples; + float selec; + + /* voodoo magic. but better than a size of 0. I have no idea why + we didn't set the size before. 
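Several of the routines above (find_clauseless_joins, add_new_joininfos, final_join_rels) reduce to the nonoverlap_sets test: two join relations may be combined only if their relid sets are disjoint. A standalone sketch of that test over plain integer arrays in place of the relid lists:

#include <stdio.h>
#include <stdbool.h>

/* True iff no element of s1 appears in s2 (both are relid arrays). */
static bool nonoverlap_sets(const int *s1, int n1, const int *s2, int n2)
{
    for (int i = 0; i < n1; i++)
        for (int j = 0; j < n2; j++)
            if (s1[i] == s2[j])
                return false;
    return true;
}

int main(void)
{
    int rel1[] = {1, 2};
    int rel2[] = {3};
    int rel3[] = {2, 4};

    printf("{1,2} vs {3}:   %d\n", nonoverlap_sets(rel1, 2, rel2, 1)); /* 1 */
    printf("{1,2} vs {2,4}: %d\n", nonoverlap_sets(rel1, 2, rel3, 2)); /* 0 */
    return 0;
}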
-ay 2/95 */ + if (jinfo==NULL) { + /* worst case: the cartesian product */ + ntuples = outer_rel->tuples * inner_rel->tuples; + } else { + selec = product_selec(jinfo->jinfoclauseinfo); +/* ntuples = Min(outer_rel->tuples,inner_rel->tuples) * selec; */ + ntuples = outer_rel->tuples * inner_rel->tuples * selec; + } + + /* I bet sizes less than 1 will screw up optimization so + make the best case 1 instead of 0 - jolly*/ + if (ntuples < 1) + ntuples = 1; + + joinrel->tuples = ntuples; +} diff --git a/src/backend/optimizer/path/joinutils.c b/src/backend/optimizer/path/joinutils.c new file mode 100644 index 00000000000..1be5a57f2ec --- /dev/null +++ b/src/backend/optimizer/path/joinutils.c @@ -0,0 +1,432 @@ +/*------------------------------------------------------------------------- + * + * joinutils.c-- + * Utilities for matching and building join and path keys + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/Attic/joinutils.c,v 1.1.1.1 1996/07/09 06:21:36 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" +#include "nodes/plannodes.h" + +#include "optimizer/internal.h" +#include "optimizer/paths.h" +#include "optimizer/var.h" +#include "optimizer/keys.h" +#include "optimizer/tlist.h" +#include "optimizer/joininfo.h" +#include "optimizer/ordering.h" + + +static int match_pathkey_joinkeys(List *pathkey, List *joinkeys, + int which_subkey); +static bool every_func(List *joinkeys, List *pathkey, + int which_subkey); +static List *new_join_pathkey(List *subkeys, + List *considered_subkeys, List *join_rel_tlist, + List *joinclauses); +static List *new_matching_subkeys(Var *subkey, List *considered_subkeys, + List *join_rel_tlist, List *joinclauses); + +/**************************************************************************** + * KEY COMPARISONS + ****************************************************************************/ + +/* + * match-pathkeys-joinkeys-- + * Attempts to match the keys of a path against the keys of join clauses. + * This is done by looking for a matching join key in 'joinkeys' for + * every path key in the list 'pathkeys'. If there is a matching join key + * (not necessarily unique) for every path key, then the list of + * corresponding join keys and join clauses are returned in the order in + * which the keys matched the path keys. + * + * 'pathkeys' is a list of path keys: + * ( ( (var) (var) ... ) ( (var) ... ) ) + * 'joinkeys' is a list of join keys: + * ( (outer inner) (outer inner) ... ) + * 'joinclauses' is a list of clauses corresponding to the join keys in + * 'joinkeys' + * 'which-subkey' is a flag that selects the desired subkey of a join key + * in 'joinkeys' + * + * Returns the join keys and corresponding join clauses in a list if all + * of the path keys were matched: + * ( + * ( (outerkey0 innerkey0) ... (outerkeyN innerkeyN) ) + * ( clause0 ... clauseN ) + * ) + * and nil otherwise. + * + * Returns a list of matched join keys and a list of matched join clauses + * in matchedJoinClausesPtr. 
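The set_joinrel_size logic just above estimates the join's cardinality as the cartesian product scaled by the product of the clause selectivities, and clamps the result at 1 so later cost arithmetic never sees an empty relation. A reduced sketch, taking the combined selectivity as a plain argument in place of product_selec():

#include <stdio.h>

/* Estimate join cardinality the way set_joinrel_size does: the full
 * cartesian product when there is no join clause (pass selec = 1.0),
 * otherwise scaled by the clause selectivity, never below 1. */
static int joinrel_size(int outer_tuples, int inner_tuples, double selec)
{
    double ntuples = (double) outer_tuples * inner_tuples * selec;

    if (ntuples < 1.0)
        ntuples = 1.0;          /* avoid a zero-size join rel */
    return (int) ntuples;
}

int main(void)
{
    printf("with clause:    %d\n", joinrel_size(1000, 500, 0.001));
    printf("cartesian case: %d\n", joinrel_size(1000, 500, 1.0));
    return 0;
}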
- ay 11/94 + */ +List * +match_pathkeys_joinkeys(List *pathkeys, + List *joinkeys, + List *joinclauses, + int which_subkey, + List **matchedJoinClausesPtr) +{ + List *matched_joinkeys = NIL; + List *matched_joinclauses = NIL; + List *pathkey = NIL; + List *i = NIL; + int matched_joinkey_index = -1; + + foreach(i, pathkeys) { + pathkey = lfirst(i); + matched_joinkey_index = + match_pathkey_joinkeys(pathkey, joinkeys, which_subkey); + + if (matched_joinkey_index != -1 ) { + List *xjoinkey = nth(matched_joinkey_index,joinkeys); + List *joinclause = nth(matched_joinkey_index,joinclauses); + + /* XXX was "push" function */ + matched_joinkeys = lappend(matched_joinkeys,xjoinkey); + matched_joinkeys = nreverse(matched_joinkeys); + + matched_joinclauses = lappend(matched_joinclauses,joinclause); + matched_joinclauses = nreverse(matched_joinclauses); + joinkeys = LispRemove(xjoinkey,joinkeys); + } else { + return(NIL); + } + + } + if(matched_joinkeys==NULL || + length(matched_joinkeys) != length(pathkeys)) { + return NIL; + } + + *matchedJoinClausesPtr = nreverse(matched_joinclauses); + return (nreverse(matched_joinkeys)); +} + +/* + * match-pathkey-joinkeys-- + * Returns the 0-based index into 'joinkeys' of the first joinkey whose + * outer or inner subkey matches any subkey of 'pathkey'. + */ +static int +match_pathkey_joinkeys(List *pathkey, + List *joinkeys, + int which_subkey) +{ + Var *path_subkey; + int pos; + List *i = NIL; + List *x = NIL; + JoinKey *jk; + + foreach(i, pathkey) { + path_subkey = (Var *)lfirst(i); + pos = 0; + foreach(x, joinkeys) { + jk = (JoinKey*)lfirst(x); + if(var_equal(path_subkey, + extract_subkey(jk, which_subkey))) + return(pos); + pos++; + } + } + return(-1); /* no index found */ +} + +/* + * match-paths-joinkeys-- + * Attempts to find a path in 'paths' whose keys match a set of join + * keys 'joinkeys'. To match, + * 1. the path node ordering must equal 'ordering'. + * 2. each subkey of a given path must match(i.e., be(var_equal) to) the + * appropriate subkey of the corresponding join key in 'joinkeys', + * i.e., the Nth path key must match its subkeys against the subkey of + * the Nth join key in 'joinkeys'. + * + * 'joinkeys' is the list of key pairs to which the path keys must be + * matched + * 'ordering' is the ordering of the(outer) path to which 'joinkeys' + * must correspond + * 'paths' is a list of(inner) paths which are to be matched against + * each join key in 'joinkeys' + * 'which-subkey' is a flag that selects the desired subkey of a join key + * in 'joinkeys' + * + * Returns the matching path node if one exists, nil otherwise. 
+ */ +static bool +every_func(List *joinkeys, List *pathkey, int which_subkey) +{ + JoinKey *xjoinkey; + Var *temp; + Var *tempkey = NULL; + bool found = false; + List *i = NIL; + List *j = NIL; + + foreach(i,joinkeys) { + xjoinkey = (JoinKey*)lfirst(i); + found = false; + foreach(j,pathkey) { + temp = (Var*)lfirst((List*)lfirst(j)); + if(temp == NULL) continue; + tempkey = extract_subkey(xjoinkey,which_subkey); + if(var_equal(tempkey, temp)) { + found = true; + break; + } + } + if(found == false) + return(false); + } + return(found); +} + + +/* + * match_paths_joinkeys - + * find the cheapest path that matches the join keys + */ +Path * +match_paths_joinkeys(List *joinkeys, + PathOrder *ordering, + List *paths, + int which_subkey) +{ + Path *matched_path = NULL ; + bool key_match = false; + List *i = NIL; + + foreach(i,paths) { + Path *path = (Path*)lfirst(i); + + key_match = every_func(joinkeys, path->keys, which_subkey); + + if (equal_path_path_ordering(ordering, + &path->p_ordering) && + length(joinkeys) == length(path->keys) && + key_match) { + + if (matched_path) { + if (path->path_cost < matched_path->path_cost) + matched_path = path; + } else { + matched_path = path; + } + } + } + return matched_path; +} + + + +/* + * extract-path-keys-- + * Builds a subkey list for a path by pulling one of the subkeys from + * a list of join keys 'joinkeys' and then finding the var node in the + * target list 'tlist' that corresponds to that subkey. + * + * 'joinkeys' is a list of join key pairs + * 'tlist' is a relation target list + * 'which-subkey' is a flag that selects the desired subkey of a join key + * in 'joinkeys' + * + * Returns a list of pathkeys: ((tlvar1)(tlvar2)...(tlvarN)). + * [I've no idea why they have to be list of lists. Should be fixed. -ay 12/94] + */ +List * +extract_path_keys(List *joinkeys, + List *tlist, + int which_subkey) +{ + List *pathkeys = NIL; + List *jk; + + foreach(jk, joinkeys) { + JoinKey *jkey = (JoinKey*)lfirst(jk); + Var *var, *key; + List *p; + + /* + * find the right Var in the target list for this key + */ + var = (Var*)extract_subkey(jkey, which_subkey); + key = (Var*)matching_tlvar(var, tlist); + + /* + * include it in the pathkeys list if we haven't already done so + */ + foreach(p, pathkeys) { + Var *pkey = lfirst((List*)lfirst(p)); /* XXX fix me */ + if (key == pkey) + break; + } + if (p!=NIL) + continue; /* key already in pathkeys */ + + pathkeys = + lappend(pathkeys, lcons(key,NIL)); + } + return(pathkeys); +} + + +/**************************************************************************** + * NEW PATHKEY FORMATION + ****************************************************************************/ + +/* + * new-join-pathkeys-- + * Find the path keys for a join relation by finding all vars in the list + * of join clauses 'joinclauses' such that: + * (1) the var corresponding to the outer join relation is a + * key on the outer path + * (2) the var appears in the target list of the join relation + * In other words, add to each outer path key the inner path keys that + * are required for qualification. + * + * 'outer-pathkeys' is the list of the outer path's path keys + * 'join-rel-tlist' is the target list of the join relation + * 'joinclauses' is the list of restricting join clauses + * + * Returns the list of new path keys. 
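match_paths_joinkeys above walks the inner relation's path list and keeps the cheapest path whose ordering equals the required merge ordering and whose keys cover every join key. A reduced sketch with the two match conditions precomputed as booleans; ToyPath is a made-up struct, not the planner's Path node.

#include <stdio.h>
#include <stdbool.h>

typedef struct {
    double cost;
    bool   ordering_matches;   /* path ordering equals the merge ordering */
    bool   keys_match;         /* every join key matched a path key */
} ToyPath;

/* Return the cheapest path satisfying both match conditions, or NULL,
 * mirroring the scan in match_paths_joinkeys. */
static const ToyPath *cheapest_matching(const ToyPath *paths, int n)
{
    const ToyPath *best = NULL;

    for (int i = 0; i < n; i++) {
        if (!paths[i].ordering_matches || !paths[i].keys_match)
            continue;
        if (best == NULL || paths[i].cost < best->cost)
            best = &paths[i];
    }
    return best;
}

int main(void)
{
    ToyPath paths[] = {
        { 120.0, true,  true  },
        {  80.0, false, true  },
        {  95.0, true,  true  },
    };
    const ToyPath *p = cheapest_matching(paths, 3);

    printf("best cost: %.1f\n", p ? p->cost : -1.0);
    return 0;
}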
+ * + */ +List * +new_join_pathkeys(List *outer_pathkeys, + List *join_rel_tlist, + List *joinclauses) +{ + List *outer_pathkey = NIL; + List *t_list = NIL; + List *x; + List *i = NIL; + + foreach(i, outer_pathkeys) { + outer_pathkey = lfirst(i); + x = new_join_pathkey(outer_pathkey, NIL, + join_rel_tlist,joinclauses); + if (x!=NIL) { + t_list = lappend(t_list, x); + } + } + return(t_list); +} + +/* + * new-join-pathkey-- + * Finds new vars that become subkeys due to qualification clauses that + * contain any previously considered subkeys. These new subkeys plus the + * subkeys from 'subkeys' form a new pathkey for the join relation. + * + * Note that each returned subkey is the var node found in + * 'join-rel-tlist' rather than the joinclause var node. + * + * 'subkeys' is a list of subkeys for which matching subkeys are to be + * found + * 'considered-subkeys' is the current list of all subkeys corresponding + * to a given pathkey + * + * Returns a new pathkey(list of subkeys). + * + */ +static List * +new_join_pathkey(List *subkeys, + List *considered_subkeys, + List *join_rel_tlist, + List *joinclauses) +{ + List *t_list = NIL; + Var *subkey; + List *i = NIL; + List *matched_subkeys = NIL; + Expr *tlist_key = (Expr*)NULL; + List *newly_considered_subkeys = NIL; + + foreach (i, subkeys) { + subkey = (Var *)lfirst(i); + if(subkey == NULL) + break; /* XXX something is wrong */ + matched_subkeys = + new_matching_subkeys(subkey,considered_subkeys, + join_rel_tlist,joinclauses); + tlist_key = matching_tlvar(subkey,join_rel_tlist); + newly_considered_subkeys = NIL; + + if (tlist_key) { + if(!member(tlist_key, matched_subkeys)) + newly_considered_subkeys = lcons(tlist_key, + matched_subkeys); + } + else { + newly_considered_subkeys = matched_subkeys; + } + + considered_subkeys = + append(considered_subkeys, newly_considered_subkeys); + + t_list = nconc(t_list,newly_considered_subkeys); + } + return(t_list); +} + +/* + * new-matching-subkeys-- + * Returns a list of new subkeys: + * (1) which are not listed in 'considered-subkeys' + * (2) for which the "other" variable in some clause in 'joinclauses' is + * 'subkey' + * (3) which are mentioned in 'join-rel-tlist' + * + * Note that each returned subkey is the var node found in + * 'join-rel-tlist' rather than the joinclause var node. + * + * 'subkey' is the var node for which we are trying to find matching + * clauses + * + * Returns a list of new subkeys. + * + */ +static List * +new_matching_subkeys(Var *subkey, + List *considered_subkeys, + List *join_rel_tlist, + List *joinclauses) +{ + Expr *joinclause = NULL; + List *t_list = NIL; + List *temp = NIL; + List *i = NIL; + Expr *tlist_other_var = (Expr *)NULL; + + foreach(i,joinclauses) { + joinclause = lfirst(i); + tlist_other_var = + matching_tlvar(other_join_clause_var(subkey,joinclause), + join_rel_tlist); + + if(tlist_other_var && + !(member(tlist_other_var,considered_subkeys))) { + + /* XXX was "push" function */ + considered_subkeys = lappend(considered_subkeys, + tlist_other_var); + + /* considered_subkeys = nreverse(considered_subkeys); + XXX -- I am not sure of this. 
*/ + + temp = lcons(tlist_other_var, NIL); + t_list = nconc(t_list,temp); + } + } + return(t_list); +} diff --git a/src/backend/optimizer/path/mergeutils.c b/src/backend/optimizer/path/mergeutils.c new file mode 100644 index 00000000000..d5f0fdcb65b --- /dev/null +++ b/src/backend/optimizer/path/mergeutils.c @@ -0,0 +1,122 @@ +/*------------------------------------------------------------------------- + * + * mergeutils.c-- + * Utilities for finding applicable merge clauses and pathkeys + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/Attic/mergeutils.c,v 1.1.1.1 1996/07/09 06:21:36 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" + +#include "optimizer/internal.h" +#include "optimizer/paths.h" +#include "optimizer/clauses.h" +#include "optimizer/ordering.h" + +/* + * group-clauses-by-order-- + * If a join clause node in 'clauseinfo-list' is mergesortable, store + * it within a mergeinfo node containing other clause nodes with the same + * mergesort ordering. + * + * 'clauseinfo-list' is the list of clauseinfo nodes + * 'inner-relid' is the relid of the inner join relation + * + * Returns the new list of mergeinfo nodes. + * + */ +List * +group_clauses_by_order(List *clauseinfo_list, + int inner_relid) +{ + List *mergeinfo_list = NIL; + List *xclauseinfo = NIL; + + foreach (xclauseinfo, clauseinfo_list) { + CInfo *clauseinfo = (CInfo *)lfirst(xclauseinfo); + MergeOrder *merge_ordering = clauseinfo->mergesortorder; + + if (merge_ordering) { + /* + * Create a new mergeinfo node and add it to + * 'mergeinfo-list' if one does not yet exist for this + * merge ordering. + */ + PathOrder p_ordering; + MInfo *xmergeinfo; + Expr *clause = clauseinfo->clause; + Var *leftop = get_leftop (clause); + Var *rightop = get_rightop (clause); + JoinKey *keys; + + p_ordering.ordtype = MERGE_ORDER; + p_ordering.ord.merge = merge_ordering; + xmergeinfo = + match_order_mergeinfo(&p_ordering, mergeinfo_list); + if (inner_relid == leftop->varno) { + keys = makeNode(JoinKey); + keys->outer = rightop; + keys->inner = leftop; + } else { + keys = makeNode(JoinKey); + keys->outer = leftop; + keys->inner = rightop; + } + + if (xmergeinfo==NULL) { + xmergeinfo = makeNode(MInfo); + + xmergeinfo->m_ordering = merge_ordering; + mergeinfo_list = lcons(xmergeinfo, + mergeinfo_list); + } + + ((JoinMethod *)xmergeinfo)->clauses = + lcons(clause, + ((JoinMethod *)xmergeinfo)->clauses); + ((JoinMethod *)xmergeinfo)->jmkeys = + lcons(keys, + ((JoinMethod *)xmergeinfo)->jmkeys); + } + } + return(mergeinfo_list); +} + + +/* + * match-order-mergeinfo-- + * Searches the list 'mergeinfo-list' for a mergeinfo node whose order + * field equals 'ordering'. + * + * Returns the node if it exists. 
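group_clauses_by_order above buckets the mergesortable clauses so that all clauses sharing one merge ordering end up on a single mergeinfo node, which is what later lets one sort serve several merge clauses. A reduced sketch of that bucketing, keyed on a plain integer standing in for the merge ordering:

#include <stdio.h>

#define MAX_GROUPS 8

typedef struct {
    int sortop;         /* stands in for the merge ordering */
    int nclauses;
} MergeGroup;

/* Add a clause with ordering 'sortop' to its group, creating the group
 * if none exists yet, the same shape as group_clauses_by_order. */
static void add_clause(MergeGroup *groups, int *ngroups, int sortop)
{
    for (int i = 0; i < *ngroups; i++) {
        if (groups[i].sortop == sortop) {
            groups[i].nclauses++;
            return;
        }
    }
    if (*ngroups < MAX_GROUPS) {
        groups[*ngroups].sortop = sortop;
        groups[*ngroups].nclauses = 1;
        (*ngroups)++;
    }
}

int main(void)
{
    MergeGroup groups[MAX_GROUPS];
    int ngroups = 0;
    int clause_orderings[] = { 97, 97, 66, 97 };

    for (int i = 0; i < 4; i++)
        add_clause(groups, &ngroups, clause_orderings[i]);
    for (int i = 0; i < ngroups; i++)
        printf("sortop %d: %d clause(s)\n", groups[i].sortop, groups[i].nclauses);
    return 0;
}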
+ * + */ +MInfo * +match_order_mergeinfo(PathOrder *ordering, List *mergeinfo_list) +{ + MergeOrder *xmergeorder; + List *xmergeinfo = NIL; + + foreach(xmergeinfo, mergeinfo_list) { + MInfo *mergeinfo = (MInfo*)lfirst(xmergeinfo); + + xmergeorder = mergeinfo->m_ordering; + + if ((ordering->ordtype==MERGE_ORDER && + equal_merge_merge_ordering(ordering->ord.merge, xmergeorder)) || + (ordering->ordtype==SORTOP_ORDER && + equal_path_merge_ordering(ordering->ord.sortop, xmergeorder))) { + + return (mergeinfo); + } + } + return((MInfo*) NIL); +} diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c new file mode 100644 index 00000000000..e040675e6ec --- /dev/null +++ b/src/backend/optimizer/path/orindxpath.c @@ -0,0 +1,271 @@ +/*------------------------------------------------------------------------- + * + * orindxpath.c-- + * Routines to find index paths that match a set of 'or' clauses + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/orindxpath.c,v 1.1.1.1 1996/07/09 06:21:36 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" +#include "nodes/primnodes.h" + +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" + +#include "optimizer/internal.h" +#include "optimizer/clauses.h" +#include "optimizer/clauseinfo.h" +#include "optimizer/paths.h" +#include "optimizer/cost.h" +#include "optimizer/plancat.h" +#include "optimizer/xfunc.h" + +#include "parser/parsetree.h" + + +static void best_or_subclause_indices(Query *root, Rel *rel, List *subclauses, + List *indices, List *examined_indexids, Cost subcost, List *selectivities, + List **indexids, Cost *cost, List **selecs); +static void best_or_subclause_index(Query *root, Rel *rel, Expr *subclause, + List *indices, int *indexid, Cost *cost, Cost *selec); + + +/* + * create-or-index-paths-- + * Creates index paths for indices that match 'or' clauses. + * + * 'rel' is the relation entry for which the paths are to be defined on + * 'clauses' is the list of available restriction clause nodes + * + * Returns a list of these index path nodes. + * + */ +List * +create_or_index_paths(Query *root, + Rel *rel, List *clauses) +{ + List *t_list = NIL; + + if (clauses != NIL) { + CInfo *clausenode = (CInfo *) (lfirst (clauses)); + + /* Check to see if this clause is an 'or' clause, and, if so, + * whether or not each of the subclauses within the 'or' clause has + * been matched by an index (the 'Index field was set in + * (match_or) if no index matches a given subclause, one of the + * lists of index nodes returned by (get_index) will be 'nil'). 
+ */ + if (valid_or_clause(clausenode) && + clausenode->indexids) { + List *temp = NIL; + List *index_list = NIL; + bool index_flag = true; + + index_list = clausenode->indexids; + foreach(temp,index_list) { + if (!temp) + index_flag = false; + } + if (index_flag) { /* used to be a lisp every function */ + IndexPath *pathnode = makeNode(IndexPath); + List *indexids; + Cost cost; + List *selecs; + + best_or_subclause_indices(root, + rel, + clausenode->clause->args, + clausenode->indexids, + NIL, + (Cost)0, + NIL, + &indexids, + &cost, + &selecs); + + pathnode->path.pathtype = T_IndexScan; + pathnode->path.parent = rel; + pathnode->indexqual = + lcons(clausenode,NIL); + pathnode->indexid = indexids; + pathnode->path.path_cost = cost; + + /* copy clauseinfo list into path for expensive + function processing -- JMH, 7/7/92 */ + pathnode->path.locclauseinfo = + set_difference(clauses, + copyObject((Node*) + rel->clauseinfo)); + +#if 0 /* fix xfunc */ + /* add in cost for expensive functions! -- JMH, 7/7/92 */ + if (XfuncMode != XFUNC_OFF) { + ((Path*)pathnode)->path_cost += + xfunc_get_path_cost((Path)pathnode); + } +#endif + clausenode->selectivity = (Cost)floatVal(selecs); + t_list = + lcons(pathnode, + create_or_index_paths(root, rel,lnext(clauses))); + } else { + t_list = create_or_index_paths(root, rel,lnext(clauses)); + } + } + } + + return(t_list); +} + +/* + * best-or-subclause-indices-- + * Determines the best index to be used in conjunction with each subclause + * of an 'or' clause and the cost of scanning a relation using these + * indices. The cost is the sum of the individual index costs. + * + * 'rel' is the node of the relation on which the index is defined + * 'subclauses' are the subclauses of the 'or' clause + * 'indices' are those index nodes that matched subclauses of the 'or' + * clause + * 'examined-indexids' is a list of those index ids to be used with + * subclauses that have already been examined + * 'subcost' is the cost of using the indices in 'examined-indexids' + * 'selectivities' is a list of the selectivities of subclauses that + * have already been examined + * + * Returns a list of the indexids, cost, and selectivities of each + * subclause, e.g., ((i1 i2 i3) cost (s1 s2 s3)), where 'i' is an OID, + * 'cost' is a flonum, and 's' is a flonum. + */ +static void +best_or_subclause_indices(Query *root, + Rel *rel, + List *subclauses, + List *indices, + List *examined_indexids, + Cost subcost, + List *selectivities, + List **indexids, /* return value */ + Cost *cost, /* return value */ + List **selecs) /* return value */ +{ + if (subclauses==NIL) { + *indexids = nreverse(examined_indexids); + *cost = subcost; + *selecs = nreverse(selectivities); + } else { + int best_indexid; + Cost best_cost; + Cost best_selec; + + best_or_subclause_index(root, rel, lfirst(subclauses), lfirst(indices), + &best_indexid, &best_cost, &best_selec); + + best_or_subclause_indices(root, + rel, + lnext(subclauses), + lnext(indices), + lconsi(best_indexid, examined_indexids), + subcost + best_cost, + lcons(makeFloat(best_selec), selectivities), + indexids, + cost, + selecs); + } + return; +} + +/* + * best-or-subclause-index-- + * Determines which is the best index to be used with a subclause of + * an 'or' clause by estimating the cost of using each index and selecting + * the least expensive. 
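best_or_subclause_indices above recurses down the list of OR subclauses, picking the cheapest usable index for each one and summing those costs into the cost of the whole OR scan. Below is a flat, iterative sketch of the same accumulation; the cost matrix is made up, and a negative entry marks "no usable index for this subclause".

#include <stdio.h>

/* Sum, over the subclauses of an OR clause, the cheapest index-scan
 * cost available for each one.  costs[i][j] is the cost of index j for
 * subclause i. */
static double or_clause_cost(int nsub, int nidx, const double costs[][3])
{
    double total = 0.0;

    for (int i = 0; i < nsub; i++) {
        double best = -1.0;

        for (int j = 0; j < nidx; j++) {
            if (costs[i][j] < 0.0)
                continue;               /* index j does not match */
            if (best < 0.0 || costs[i][j] < best)
                best = costs[i][j];
        }
        if (best >= 0.0)
            total += best;
    }
    return total;
}

int main(void)
{
    const double costs[2][3] = {
        { 12.0,  9.5, -1.0 },   /* subclause 0: two usable indices */
        { -1.0,  4.0,  6.0 },   /* subclause 1 */
    };

    printf("total OR scan cost: %.1f\n", or_clause_cost(2, 3, costs));
    return 0;
}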
+ * + * 'rel' is the node of the relation on which the index is defined + * 'subclause' is the subclause + * 'indices' is a list of index nodes that match the subclause + * + * Returns a list (index-id index-subcost index-selectivity) + * (a fixnum, a fixnum, and a flonum respectively). + * + */ +static void +best_or_subclause_index(Query *root, + Rel *rel, + Expr *subclause, + List *indices, + int *retIndexid, /* return value */ + Cost *retCost, /* return value */ + Cost *retSelec) /* return value */ +{ + if (indices != NIL) { + Datum value; + int flag = 0; + Cost subcost; + Rel *index = (Rel *)lfirst (indices); + AttrNumber attno = (get_leftop (subclause))->varattno ; + Oid opno = ((Oper*)subclause->oper)->opno; + bool constant_on_right = non_null((Expr*)get_rightop(subclause)); + float npages, selec; + int subclause_indexid; + Cost subclause_cost; + Cost subclause_selec; + + if(constant_on_right) { + value = ((Const*)get_rightop (subclause))->constvalue; + } else { + value = NameGetDatum(""); + } + if(constant_on_right) { + flag = (_SELEC_IS_CONSTANT_ ||_SELEC_CONSTANT_RIGHT_); + } else { + flag = _SELEC_CONSTANT_RIGHT_; + } + index_selectivity(lfirsti(index->relids), + index->classlist, + lconsi(opno,NIL), + getrelid(lfirsti(rel->relids), + root->rtable), + lconsi(attno,NIL), + lconsi(value,NIL), + lconsi(flag,NIL), + 1, + &npages, + &selec); + + subcost = cost_index((Oid) lfirsti(index->relids), + (int)npages, + (Cost)selec, + rel->pages, + rel->tuples, + index->pages, + index->tuples, + false); + best_or_subclause_index(root, + rel, + subclause, + lnext(indices), + &subclause_indexid, + &subclause_cost, + &subclause_selec); + + if (subclause_indexid==0 || subcost < subclause_cost) { + *retIndexid = lfirsti(index->relids); + *retCost = subcost; + *retSelec = selec; + } else { + *retIndexid = 0; + *retCost = 0.0; + *retSelec = 0.0; + } + } + return; +} diff --git a/src/backend/optimizer/path/predmig.c b/src/backend/optimizer/path/predmig.c new file mode 100644 index 00000000000..2d3b5c5767f --- /dev/null +++ b/src/backend/optimizer/path/predmig.c @@ -0,0 +1,773 @@ +/*------------------------------------------------------------------------- + * + * predmig.c-- + * + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/Attic/predmig.c,v 1.1.1.1 1996/07/09 06:21:36 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* +** DESCRIPTION +** Main Routines to handle Predicate Migration (i.e. correct optimization +** of queries with expensive functions.) +** +** The reasoning behind some of these algorithms is rather detailed. +** Have a look at Sequoia Tech Report 92/13 for more info. Also +** see Monma and Sidney's paper "Sequencing with Series-Parallel +** Precedence Constraints", in "Mathematics of Operations Research", +** volume 4 (1979), pp. 215-224. +** +** The main thing that this code does that wasn't handled in xfunc.c is +** it considers the possibility that two joins in a stream may not +** be ordered by ascending rank -- in such a scenario, it may be optimal +** to pullup more restrictions than we did via xfunc_try_pullup. +** +** This code in some sense generalizes xfunc_try_pullup; if you +** run postgres -x noprune, you'll turn off xfunc_try_pullup, and this +** code will do everything that xfunc_try_pullup would have, and maybe +** more. 
However, this results in no pruning, which may slow down the +** optimizer and/or cause the system to run out of memory. +** -- JMH, 11/13/92 +*/ + +#include "nodes/pg_list.h" +#include "nodes/nodes.h" +#include "nodes/primnodes.h" +#include "nodes/relation.h" +#include "utils/palloc.h" +#include "utils/elog.h" +#include "planner/xfunc.h" +#include "planner/pathnode.h" +#include "planner/internal.h" +#include "planner/cost.h" +#include "planner/keys.h" +#include "planner/tlist.h" +#include "lib/qsort.h" + +#define is_clause(node) (get_cinfo(node)) /* a stream node represents a + clause (not a join) iff it + has a non-NULL cinfo field */ + +static void xfunc_predmig(JoinPath pathnode, Stream streamroot, + Stream laststream, bool *progressp); +static bool xfunc_series_llel(Stream stream); +static bool xfunc_llel_chains(Stream root, Stream bottom); +static Stream xfunc_complete_stream(Stream stream); +static bool xfunc_prdmig_pullup(Stream origstream, Stream pullme, + JoinPath joinpath); +static void xfunc_form_groups(Stream root, Stream bottom); +static void xfunc_free_stream(Stream root); +static Stream xfunc_add_clauses(Stream current); +static void xfunc_setup_group(Stream node, Stream bottom); +static Stream xfunc_streaminsert(CInfo clauseinfo, Stream current, + int clausetype); +static int xfunc_num_relids(Stream node); +static StreamPtr xfunc_get_downjoin(Stream node); +static StreamPtr xfunc_get_upjoin(Stream node); +static Stream xfunc_stream_qsort(Stream root, Stream bottom); +static int xfunc_stream_compare(void *arg1, void *arg2); +static bool xfunc_check_stream(Stream node); +static bool xfunc_in_stream(Stream node, Stream stream); + +/* ----------------- MAIN FUNCTIONS ------------------------ */ +/* +** xfunc_do_predmig +** wrapper for Predicate Migration. It calls xfunc_predmig until no +** more progress is made. +** return value says if any changes were ever made. +*/ +bool xfunc_do_predmig(Path root) +{ + bool progress, changed = false; + + if (is_join(root)) + do + { + progress = false; + Assert(IsA(root,JoinPath)); + xfunc_predmig((JoinPath)root, (Stream)NULL, (Stream)NULL, + &progress); + if (changed && progress) + elog(DEBUG, "Needed to do a second round of predmig!\n"); + if (progress) changed = true; + } while (progress); + return(changed); +} + + +/* + ** xfunc_predmig + ** The main routine for Predicate Migration. It traverses a join tree, + ** and for each root-to-leaf path in the plan tree it constructs a + ** "Stream", which it passes to xfunc_series_llel for optimization. + ** Destructively modifies the join tree (via predicate pullup). + */ +static void +xfunc_predmig(JoinPath pathnode, /* root of the join tree */ + Stream streamroot, + Stream laststream, /* for recursive calls -- these are + the root of the stream under + construction, and the lowest node + created so far */ + bool *progressp) +{ + Stream newstream; + + /* + ** traverse the join tree dfs-style, constructing a stream as you go. + ** When you hit a scan node, pass the stream off to xfunc_series_llel. 
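xfunc_do_predmig above is a fixed-point driver: it keeps re-running the migration pass until a pass reports no progress, and remembers whether anything ever changed. A minimal sketch of that control shape, with one_pass() as a made-up stand-in for xfunc_predmig over the whole join tree:

#include <stdio.h>
#include <stdbool.h>

/* Pretend each pass fixes one outstanding problem. */
static bool one_pass(int *pending)
{
    if (*pending > 0) {
        (*pending)--;
        return true;
    }
    return false;
}

/* Re-run the pass until it makes no progress; report whether any pass
 * ever changed something, as xfunc_do_predmig does. */
static bool run_to_fixed_point(int *pending)
{
    bool changed = false;
    bool progress;

    do {
        progress = one_pass(pending);
        if (progress)
            changed = true;
    } while (progress);
    return changed;
}

int main(void)
{
    int pending = 3;

    printf("changed anything? %d\n", run_to_fixed_point(&pending));
    return 0;
}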
+ */ + + /* sanity check */ + if ((!streamroot && laststream) || + (streamroot && !laststream)) + elog(WARN, "called xfunc_predmig with bad inputs"); + if (streamroot) Assert(xfunc_check_stream(streamroot)); + + /* add path node to stream */ + newstream = RMakeStream(); + if (!streamroot) + streamroot = newstream; + set_upstream(newstream, (StreamPtr)laststream); + if (laststream) + set_downstream(laststream, (StreamPtr)newstream); + set_downstream(newstream, (StreamPtr)NULL); + set_pathptr(newstream, (pathPtr)pathnode); + set_cinfo(newstream, (CInfo)NULL); + set_clausetype(newstream, XFUNC_UNKNOWN); + + /* base case: we're at a leaf, call xfunc_series_llel */ + if (!is_join(pathnode)) + { + /* form a fleshed-out copy of the stream */ + Stream fullstream = xfunc_complete_stream(streamroot); + + /* sort it via series-llel */ + if (xfunc_series_llel(fullstream)) + *progressp = true; + + /* free up the copy */ + xfunc_free_stream(fullstream); + } + else + { + /* visit left child */ + xfunc_predmig((JoinPath)get_outerjoinpath(pathnode), + streamroot, newstream, progressp); + + /* visit right child */ + xfunc_predmig((JoinPath)get_innerjoinpath(pathnode), + streamroot, newstream, progressp); + } + + /* remove this node */ + if (get_upstream(newstream)) + set_downstream((Stream)get_upstream(newstream), (StreamPtr)NULL); + pfree(newstream); +} + +/* + ** xfunc_series_llel + ** A flavor of Monma and Sidney's Series-Parallel algorithm. + ** Traverse stream downwards. When you find a node with restrictions on it, + ** call xfunc_llel_chains on the substream from root to that node. + */ +static bool xfunc_series_llel(Stream stream) +{ + Stream temp, next; + bool progress = false; + + for (temp = stream; temp != (Stream)NULL; temp = next) + { + next = (Stream)xfunc_get_downjoin(temp); + /* + ** if there are restrictions/secondary join clauses above this + ** node, call xfunc_llel_chains + */ + if (get_upstream(temp) && is_clause((Stream)get_upstream(temp))) + if (xfunc_llel_chains(stream, temp)) + progress = true; + } + return(progress); +} + +/* + ** xfunc_llel_chains + ** A flavor of Monma and Sidney's Parallel Chains algorithm. + ** Given a stream which has been well-ordered except for its lowermost + ** restrictions/2-ary joins, pull up the restrictions/2-arys as appropriate. + ** What that means here is to form groups in the chain above the lowest + ** join node above bottom inclusive, and then take all the restrictions + ** following bottom, and try to pull them up as far as possible. + */ +static bool xfunc_llel_chains(Stream root, Stream bottom) +{ + bool progress = false; + Stream origstream; + Stream tmpstream, pathstream; + Stream rootcopy = root; + + Assert(xfunc_check_stream(root)); + + /* xfunc_prdmig_pullup will need an unmodified copy of the stream */ + origstream = (Stream)copyObject((Node)root); + + /* form groups among ill-ordered nodes */ + xfunc_form_groups(root, bottom); + + /* sort chain by rank */ + Assert(xfunc_in_stream(bottom, root)); + rootcopy = xfunc_stream_qsort(root, bottom); + + /* + ** traverse sorted stream -- if any restriction has moved above a join, + ** we must pull it up in the plan. That is, make plan tree + ** reflect order of sorted stream. 
+ */ + for (tmpstream = rootcopy, + pathstream = (Stream)xfunc_get_downjoin(rootcopy); + tmpstream != (Stream)NULL && pathstream != (Stream)NULL; + tmpstream = (Stream)get_downstream(tmpstream)) + { + if (is_clause(tmpstream) + && get_pathptr(pathstream) != get_pathptr(tmpstream)) + { + /* + ** If restriction moved above a Join after sort, we pull it + ** up in the join plan. + ** If restriction moved down, we ignore it. + ** This is because Joey's Sequoia paper proves that + ** restrictions should never move down. If this + ** one were moved down, it would violate "semantic correctness", + ** i.e. it would be lower than the attributes it references. + */ + Assert(xfunc_num_relids(pathstream)>xfunc_num_relids(tmpstream)); + progress = + xfunc_prdmig_pullup(origstream, tmpstream, + (JoinPath)get_pathptr(pathstream)); + } + if (get_downstream(tmpstream)) + pathstream = + (Stream)xfunc_get_downjoin((Stream)get_downstream(tmpstream)); + } + + /* free up origstream */ + xfunc_free_stream(origstream); + return(progress); +} + +/* + ** xfunc_complete_stream -- + ** Given a stream composed of join nodes only, make a copy containing the + ** join nodes along with the associated restriction nodes. + */ +static Stream xfunc_complete_stream(Stream stream) +{ + Stream tmpstream, copystream, curstream = (Stream)NULL; + + copystream = (Stream)copyObject((Node)stream); + Assert(xfunc_check_stream(copystream)); + + curstream = copystream; + Assert(!is_clause(curstream)); + + /* curstream = (Stream)xfunc_get_downjoin(curstream); */ + + while(curstream != (Stream)NULL) + { + xfunc_add_clauses(curstream); + curstream = (Stream)xfunc_get_downjoin(curstream); + } + + /* find top of stream and return it */ + for (tmpstream = copystream; get_upstream(tmpstream) != (StreamPtr)NULL; + tmpstream = (Stream)get_upstream(tmpstream)) + /* no body in for loop */; + + return(tmpstream); +} + +/* + ** xfunc_prdmig_pullup + ** pullup a clause in a path above joinpath. Since the JoinPath tree + ** doesn't have upward pointers, it's difficult to deal with. Thus we + ** require the original stream, which maintains pointers to all the path + ** nodes. We use the original stream to find out what joins are + ** above the clause. 
+ */ +static bool +xfunc_prdmig_pullup(Stream origstream, Stream pullme, JoinPath joinpath) +{ + CInfo clauseinfo = get_cinfo(pullme); + bool progress = false; + Stream upjoin, orignode, temp; + int whichchild; + + /* find node in origstream that contains clause */ + for (orignode = origstream; + orignode != (Stream) NULL + && get_cinfo(orignode) != clauseinfo; + orignode = (Stream)get_downstream(orignode)) + /* empty body in for loop */ ; + if (!orignode) + elog(WARN, "Didn't find matching node in original stream"); + + + /* pull up this node as far as it should go */ + for (upjoin = (Stream)xfunc_get_upjoin(orignode); + upjoin != (Stream)NULL + && (JoinPath)get_pathptr((Stream)xfunc_get_downjoin(upjoin)) + != joinpath; + upjoin = (Stream)xfunc_get_upjoin(upjoin)) + { +#ifdef DEBUG + elog(DEBUG, "pulling up in xfunc_predmig_pullup!"); +#endif + /* move clause up in path */ + if (get_pathptr((Stream)get_downstream(upjoin)) + == (pathPtr)get_outerjoinpath((JoinPath)get_pathptr(upjoin))) + whichchild = OUTER; + else whichchild = INNER; + clauseinfo = xfunc_pullup((Path)get_pathptr((Stream)get_downstream(upjoin)), + (JoinPath)get_pathptr(upjoin), + clauseinfo, + whichchild, + get_clausetype(orignode)); + set_pathptr(pullme, get_pathptr(upjoin)); + /* pullme has been moved into locclauseinfo */ + set_clausetype(pullme, XFUNC_LOCPRD); + + /* + ** xfunc_pullup makes new path nodes for children of + ** get_pathptr(current). We must modify the stream nodes to point + ** to these path nodes + */ + if (whichchild == OUTER) + { + for(temp = (Stream)get_downstream(upjoin); is_clause(temp); + temp = (Stream)get_downstream(temp)) + set_pathptr + (temp, (pathPtr) + get_outerjoinpath((JoinPath)get_pathptr(upjoin))); + set_pathptr + (temp, + (pathPtr)get_outerjoinpath((JoinPath)get_pathptr(upjoin))); + } + else + { + for(temp = (Stream)get_downstream(upjoin); is_clause(temp); + temp = (Stream)get_downstream(temp)) + set_pathptr + (temp, (pathPtr) + get_innerjoinpath((JoinPath)get_pathptr(upjoin))); + set_pathptr + (temp, (pathPtr) + get_innerjoinpath((JoinPath)get_pathptr(upjoin))); + } + progress = true; + } + if (!progress) + elog(DEBUG, "didn't succeed in pulling up in xfunc_prdmig_pullup"); + return(progress); +} + +/* + ** xfunc_form_groups -- + ** A group is a pair of stream nodes a,b such that a is constrained to + ** precede b (for instance if a and b are both joins), but rank(a) > rank(b). + ** In such a situation, Monma and Sidney prove that no clauses should end + ** up between a and b, and therefore we may treat them as a group, with + ** selectivity equal to the product of their selectivities, and cost + ** equal to the cost of the first plus the selectivity of the first times the + ** cost of the second. We define each node to be in a group by itself, + ** and then repeatedly find adjacent groups which are ordered by descending + ** rank, and make larger groups. You know that two adjacent nodes are in a + ** group together if the lower has groupup set to true. They will both have + ** the same groupcost and groupsel (since they're in the same group!) 
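The xfunc_form_groups comment above gives the Monma and Sidney combine rule: when two adjacent nodes are forced to stay together, the pair behaves like a single module with cost c1 + s1*c2 and selectivity s1*s2. The sketch below applies that rule when the ranks are out of order; the (selectivity - 1)/cost rank is an assumption borrowed from the usual predicate-migration formulation, since the real grouprank accessor is defined elsewhere in xfunc.c.

#include <stdio.h>

typedef struct {
    double cost;    /* expected cost of evaluating the module */
    double selec;   /* expected selectivity of the module */
} Module;

/* Assumed rank metric: (selectivity - 1) / cost. */
static double rank(Module m)
{
    return (m.selec - 1.0) / m.cost;
}

/* Combine two modules constrained to stay adjacent, using the rule
 * quoted in the comment above: cost = c1 + s1*c2, selec = s1*s2. */
static Module combine(Module first, Module second)
{
    Module g;

    g.cost = first.cost + first.selec * second.cost;
    g.selec = first.selec * second.selec;
    return g;
}

int main(void)
{
    Module a = { 10.0, 0.5 };   /* runs first */
    Module b = {  4.0, 0.2 };   /* constrained to follow a */

    if (rank(a) > rank(b)) {    /* out of rank order: form a group */
        Module g = combine(a, b);
        printf("group: cost %.1f selec %.2f rank %.3f\n",
               g.cost, g.selec, rank(g));
    }
    return 0;
}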
+ */ +static void xfunc_form_groups(Query* queryInfo, Stream root, Stream bottom) +{ + Stream temp, parent; + int lowest = xfunc_num_relids((Stream)xfunc_get_upjoin(bottom)); + bool progress; + LispValue primjoin; + int whichchild; + + if (!lowest) return; /* no joins in stream, so no groups */ + + /* initialize groups to be single nodes */ + for (temp = root; + temp != (Stream)NULL && temp != bottom; + temp = (Stream)get_downstream(temp)) + { + /* if a Join node */ + if (!is_clause(temp)) + { + if (get_pathptr((Stream)get_downstream(temp)) + == (pathPtr)get_outerjoinpath((JoinPath)get_pathptr(temp))) + whichchild = OUTER; + else whichchild = INNER; + set_groupcost(temp, + xfunc_join_expense((JoinPath)get_pathptr(temp), + whichchild)); + if (primjoin = xfunc_primary_join((JoinPath)get_pathptr(temp))) + { + set_groupsel(temp, + compute_clause_selec(queryInfo, + primjoin, NIL)); + } + else + { + set_groupsel(temp,1.0); + } + } + else /* a restriction, or 2-ary join pred */ + { + set_groupcost(temp, + xfunc_expense(queryInfo, + get_clause(get_cinfo(temp)))); + set_groupsel(temp, + compute_clause_selec(queryInfo, + get_clause(get_cinfo(temp)), + NIL)); + } + set_groupup(temp,false); + } + + /* make passes upwards, forming groups */ + do + { + progress = false; + for (temp = (Stream)get_upstream(bottom); + temp != (Stream)NULL; + temp = (Stream)get_upstream(temp)) + { + /* check for grouping with node upstream */ + if (!get_groupup(temp) && /* not already grouped */ + (parent = (Stream)get_upstream(temp)) != (Stream)NULL && + /* temp is a join or temp is the top of a group */ + (is_join((Path)get_pathptr(temp)) || + get_downstream(temp) && + get_groupup((Stream)get_downstream(temp))) && + get_grouprank(parent) < get_grouprank(temp)) + { + progress = true; /* we formed a new group */ + set_groupup(temp,true); + set_groupcost(temp, + get_groupcost(temp) + + get_groupsel(temp) * get_groupcost(parent)); + set_groupsel(temp,get_groupsel(temp) * get_groupsel(parent)); + + /* fix costs and sels of all members of group */ + xfunc_setup_group(temp, bottom); + } + } + } while(progress); +} + + +/* ------------------- UTILITY FUNCTIONS ------------------------- */ + +/* + ** xfunc_free_stream -- + ** walk down a stream and pfree it + */ +static void xfunc_free_stream(Stream root) +{ + Stream cur, next; + + Assert(xfunc_check_stream(root)); + + if (root != (Stream)NULL) + for (cur = root; cur != (Stream)NULL; cur = next) + { + next = (Stream)get_downstream(cur); + pfree(cur); + } +} + +/* + ** xfunc_add<_clauses + ** find any clauses above current, and insert them into stream as + ** appropriate. Return uppermost clause inserted, or current if none. 
+ */ +static Stream xfunc_add_clauses(Stream current) +{ + Stream topnode = current; + LispValue temp; + LispValue primjoin; + + /* first add in the local clauses */ + foreach(temp, get_locclauseinfo((Path)get_pathptr(current))) + { + topnode = + xfunc_streaminsert((CInfo)lfirst(temp), topnode, + XFUNC_LOCPRD); + } + + /* and add in the join clauses */ + if (IsA(get_pathptr(current),JoinPath)) + { + primjoin = xfunc_primary_join((JoinPath)get_pathptr(current)); + foreach(temp, get_pathclauseinfo((JoinPath)get_pathptr(current))) + { + if (!equal(get_clause((CInfo)lfirst(temp)), primjoin)) + topnode = + xfunc_streaminsert((CInfo)lfirst(temp), topnode, + XFUNC_JOINPRD); + } + } + return(topnode); +} + + +/* + ** xfunc_setup_group + ** find all elements of stream that are grouped with node and are above + ** bottom, and set their groupcost and groupsel to be the same as node's. + */ +static void xfunc_setup_group(Stream node, Stream bottom) +{ + Stream temp; + + if (node != bottom) + /* traverse downwards */ + for (temp = (Stream)get_downstream(node); + temp != (Stream)NULL && temp != bottom; + temp = (Stream)get_downstream(temp)) + { + if (!get_groupup(temp)) break; + else + { + set_groupcost(temp, get_groupcost(node)); + set_groupsel(temp, get_groupsel(node)); + } + } + + /* traverse upwards */ + for (temp = (Stream)get_upstream(node); temp != (Stream)NULL; + temp = (Stream)get_upstream(temp)) + { + if (!get_groupup((Stream)get_downstream(temp))) break; + else + { + set_groupcost(temp, get_groupcost(node)); + set_groupsel(temp, get_groupsel(node)); + } + } +} + + +/* + ** xfunc_streaminsert + ** Make a new Stream node to hold clause, and insert it above current. + ** Return new node. + */ +static Stream +xfunc_streaminsert(CInfo clauseinfo, + Stream current, + int clausetype) /* XFUNC_LOCPRD or XFUNC_JOINPRD */ +{ + Stream newstream = RMakeStream(); + set_upstream(newstream, get_upstream(current)); + if (get_upstream(current)) + set_downstream((Stream)(get_upstream(current)), (StreamPtr)newstream); + set_upstream(current, (StreamPtr)newstream); + set_downstream(newstream, (StreamPtr)current); + set_pathptr(newstream, get_pathptr(current)); + set_cinfo(newstream, clauseinfo); + set_clausetype(newstream, clausetype); + return(newstream); +} + +/* + ** Given a Stream node, find the number of relids referenced in the pathnode + ** associated with the stream node. The number of relids gives a unique + ** ordering on the joins in a stream, which we use to compare the height of + ** join nodes. + */ +static int xfunc_num_relids(Stream node) +{ + if (!node || !IsA(get_pathptr(node),JoinPath)) + return(0); + else return(length + (get_relids(get_parent((JoinPath)get_pathptr(node))))); +} + +/* + ** xfunc_get_downjoin -- + ** Given a stream node, find the next lowest node which points to a + ** join predicate or a scan node. + */ +static StreamPtr xfunc_get_downjoin(Stream node) +{ + Stream temp; + + if (!is_clause(node)) /* if this is a join */ + node = (Stream)get_downstream(node); + for (temp = node; temp && is_clause(temp); + temp = (Stream)get_downstream(temp)) + /* empty body in for loop */ ; + + return((StreamPtr)temp); +} + +/* + ** xfunc_get_upjoin -- + ** same as above, but upwards. 
+ */ +static StreamPtr xfunc_get_upjoin(Stream node) +{ + Stream temp; + + if (!is_clause(node)) /* if this is a join */ + node = (Stream)get_upstream(node); + for (temp = node; temp && is_clause(temp); + temp = (Stream)get_upstream(temp)) + /* empty body in for loop */ ; + + return((StreamPtr)temp); +} + +/* + ** xfunc_stream_qsort -- + ** Given a stream, sort by group rank the elements in the stream from the + ** node "bottom" up. DESTRUCTIVELY MODIFIES STREAM! Returns new root. + */ +static Stream xfunc_stream_qsort(Stream root, Stream bottom) +{ + int i; + size_t num; + Stream *nodearray, output; + Stream tmp; + + /* find size of list */ + for (num = 0, tmp = root; tmp != bottom; + tmp = (Stream)get_downstream(tmp)) + num ++; + if (num <= 1) return (root); + + /* copy elements of the list into an array */ + nodearray = (Stream *) palloc(num * sizeof(Stream)); + + for (tmp = root, i = 0; tmp != bottom; + tmp = (Stream)get_downstream(tmp), i++) + nodearray[i] = tmp; + + /* sort the array */ + pg_qsort(nodearray, num, sizeof(LispValue), xfunc_stream_compare); + + /* paste together the array elements */ + output = nodearray[num - 1]; + set_upstream(output, (StreamPtr)NULL); + for (i = num - 2; i >= 0; i--) + { + set_downstream(nodearray[i+1], (StreamPtr)nodearray[i]); + set_upstream(nodearray[i], (StreamPtr)nodearray[i+1]); + } + set_downstream(nodearray[0], (StreamPtr)bottom); + if (bottom) + set_upstream(bottom, (StreamPtr)nodearray[0]); + + Assert(xfunc_check_stream(output)); + return(output); +} + +/* + ** xfunc_stream_compare + ** comparison function for xfunc_stream_qsort. + ** Compare nodes by group rank. If group ranks are equal, ensure that + ** join nodes appear in same order as in plan tree. + */ +static int xfunc_stream_compare(void *arg1, void *arg2) +{ + Stream stream1 = *(Stream *) arg1; + Stream stream2 = *(Stream *) arg2; + Cost rank1, rank2; + + rank1 = get_grouprank(stream1); + rank2 = get_grouprank(stream2); + + if (rank1 > rank2) return(1); + else if (rank1 < rank2) return(-1); + else + { + if (is_clause(stream1) && is_clause(stream2)) + return(0); /* doesn't matter what order if both are restrictions */ + else if (!is_clause(stream1) && !is_clause(stream2)) + { + if (xfunc_num_relids(stream1) < xfunc_num_relids(stream2)) + return(-1); + else return(1); + } + else if (is_clause(stream1) && !is_clause(stream2)) + { + if (xfunc_num_relids(stream1) == xfunc_num_relids(stream2)) + /* stream1 is a restriction over stream2 */ + return(1); + else return(-1); + } + else if (!is_clause(stream1) && is_clause(stream2)) + { + /* stream2 is a restriction over stream1: never push down */ + return(-1); + } + } +} + +/* ------------------ DEBUGGING ROUTINES ---------------------------- */ + +/* + ** Make sure all pointers in stream make sense. Make sure no joins are + ** out of order. 
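xfunc_stream_qsort sorts a chain of stream nodes by copying the node pointers into an array, handing the array to a qsort, and re-linking the up/down pointers. A simplified standalone version of the same pattern, with an invented Node type, a rank field as the sort key, and standard qsort() in place of pg_qsort:

#include <stdlib.h>

typedef struct Node {             /* invented list node          */
    struct Node *up, *down;
    double       rank;            /* sort key (group rank)       */
} Node;

/* comparator over an array of Node pointers */
static int cmp_rank(const void *a, const void *b)
{
    const Node *n1 = *(Node *const *) a;
    const Node *n2 = *(Node *const *) b;

    if (n1->rank < n2->rank) return -1;
    if (n1->rank > n2->rank) return 1;
    return 0;
}

/* sort the chain from 'root' down to (but excluding) 'bottom',
 * returning the new topmost node */
Node *sort_chain(Node *root, Node *bottom)
{
    size_t num = 0, i;
    Node  *tmp, **arr, *top;

    for (tmp = root; tmp != bottom; tmp = tmp->down)
        num++;
    if (num <= 1)
        return root;

    arr = malloc(num * sizeof(Node *));
    if (arr == NULL)
        return root;
    for (tmp = root, i = 0; tmp != bottom; tmp = tmp->down, i++)
        arr[i] = tmp;

    /* note the element size is the size of a pointer, not of a node */
    qsort(arr, num, sizeof(Node *), cmp_rank);

    /* highest rank ends up on top; rebuild the up/down links */
    top = arr[num - 1];
    top->up = NULL;
    for (i = num - 1; i > 0; i--) {
        arr[i]->down   = arr[i - 1];
        arr[i - 1]->up = arr[i];
    }
    arr[0]->down = bottom;
    if (bottom)
        bottom->up = arr[0];

    free(arr);
    return top;
}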
+ */ +static bool xfunc_check_stream(Stream node) +{ + Stream temp; + int numrelids, tmp; + + /* set numrelids higher than max */ + if (!is_clause(node)) + numrelids = xfunc_num_relids(node) + 1; + else if (xfunc_get_downjoin(node)) + numrelids = xfunc_num_relids((Stream)xfunc_get_downjoin(node)) + 1; + else numrelids = 1; + + for (temp = node; get_downstream(temp); temp = (Stream)get_downstream(temp)) + { + if ((Stream)get_upstream((Stream)get_downstream(temp)) != temp) + { + elog(WARN, "bad pointers in stream"); + return(false); + } + if (!is_clause(temp)) + { + if ((tmp = xfunc_num_relids(temp)) >= numrelids) + { + elog(WARN, "Joins got reordered!"); + return(false); + } + numrelids = tmp; + } + } + + return(true); +} + +/* + ** xfunc_in_stream + ** check if node is in stream + */ +static bool xfunc_in_stream(Stream node, Stream stream) +{ + Stream temp; + + for (temp = stream; temp; temp = (Stream)get_downstream(temp)) + if (temp == node) return(1); + return(0); +} diff --git a/src/backend/optimizer/path/prune.c b/src/backend/optimizer/path/prune.c new file mode 100644 index 00000000000..70f9b209e0c --- /dev/null +++ b/src/backend/optimizer/path/prune.c @@ -0,0 +1,203 @@ +/*------------------------------------------------------------------------- + * + * prune.c-- + * Routines to prune redundant paths and relations + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/Attic/prune.c,v 1.1.1.1 1996/07/09 06:21:36 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" + +#include "optimizer/internal.h" +#include "optimizer/cost.h" +#include "optimizer/paths.h" +#include "optimizer/pathnode.h" + +#include "utils/elog.h" + + +static List *prune_joinrel(Rel *rel, List *other_rels); + +/* + * prune-joinrels-- + * Removes any redundant relation entries from a list of rel nodes + * 'rel-list'. + * + * Returns the resulting list. + * + */ +List *prune_joinrels(List *rel_list) +{ + List *temp_list = NIL; + + if (rel_list != NIL) { + temp_list = lcons(lfirst(rel_list), + prune_joinrels(prune_joinrel((Rel*)lfirst(rel_list), + lnext(rel_list)))); + } + return(temp_list); +} + +/* + * prune-joinrel-- + * Prunes those relations from 'other-rels' that are redundant with + * 'rel'. A relation is redundant if it is built up of the same + * relations as 'rel'. Paths for the redundant relation are merged into + * the pathlist of 'rel'. + * + * Returns a list of non-redundant relations, and sets the pathlist field + * of 'rel' appropriately. + * + */ +static List * +prune_joinrel(Rel *rel, List *other_rels) +{ + List *i = NIL; + List *t_list = NIL; + List *temp_node = NIL; + Rel *other_rel = (Rel *)NULL; + + foreach(i, other_rels) { + other_rel = (Rel*)lfirst(i); + if(same(rel->relids, other_rel->relids)) { + rel->pathlist = add_pathlist(rel, + rel->pathlist, + other_rel->pathlist); + t_list = nconc(t_list, NIL); /* XXX is this right ? */ + } else { + temp_node = lcons(other_rel, NIL); + t_list = nconc(t_list,temp_node); + } + } + return(t_list); +} + +/* + * prune-rel-paths-- + * For each relation entry in 'rel-list' (which corresponds to a join + * relation), set pointers to the unordered path and cheapest paths + * (if the unordered path isn't the cheapest, it is pruned), and + * reset the relation's size field to reflect the join. + * + * Returns nothing of interest. 
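prune_joinrel above treats two join relations as redundant when they are built from exactly the same set of base relations, and folds the loser's paths into the survivor. A toy sketch of that test, with relid sets represented as invented bitmasks instead of the relid lists compared by same():

#include <stdbool.h>
#include <stdio.h>

/* invented join relation: relids as a bitmask, plus a path count */
typedef struct {
    unsigned relids;    /* bit i set => base relation i is included */
    int      npaths;
} JoinRel;

/* redundant iff built from exactly the same base relations */
static bool same_relids(const JoinRel *a, const JoinRel *b)
{
    return a->relids == b->relids;
}

/* fold b's paths into a, as prune_joinrel folds pathlists together */
static void merge_paths(JoinRel *a, const JoinRel *b)
{
    a->npaths += b->npaths;
}

int main(void)
{
    JoinRel ab1 = { (1u << 1) | (1u << 2), 3 };  /* {A,B} joined one way   */
    JoinRel ab2 = { (1u << 1) | (1u << 2), 2 };  /* {A,B} joined another   */

    if (same_relids(&ab1, &ab2))
        merge_paths(&ab1, &ab2);     /* keep one entry, all its paths */
    printf("kept rel has %d candidate paths\n", ab1.npaths);
    return 0;
}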
+ * + */ +void +prune_rel_paths(List *rel_list) +{ + List *x = NIL; + List *y = NIL; + Path *path; + Rel *rel = (Rel*)NULL; + JoinPath *cheapest = (JoinPath*)NULL; + + foreach(x, rel_list) { + rel = (Rel*)lfirst(x); + foreach(y, rel->pathlist) { + path = (Path*)lfirst(y); + + if(!path->p_ordering.ord.sortop) { + break; + } + } + cheapest = (JoinPath*)prune_rel_path(rel, path); + if (IsA_JoinPath(cheapest)) + { + rel->size = compute_joinrel_size(cheapest); + } + else + elog(WARN, "non JoinPath called"); + } +} + + +/* + * prune-rel-path-- + * Compares the unordered path for a relation with the cheapest path. If + * the unordered path is not cheapest, it is pruned. + * + * Resets the pointers in 'rel' for unordered and cheapest paths. + * + * Returns the cheapest path. + * + */ +Path * +prune_rel_path(Rel *rel, Path *unorderedpath) +{ + Path *cheapest = set_cheapest(rel, rel->pathlist); + + /* don't prune if not pruneable -- JMH, 11/23/92 */ + if(unorderedpath != cheapest + && rel->pruneable) { + + rel->unorderedpath = (Path *)NULL; + rel->pathlist = lremove(unorderedpath, rel->pathlist); + } else { + rel->unorderedpath = (Path *)unorderedpath; + } + + return(cheapest); +} + +/* + * merge-joinrels-- + * Given two lists of rel nodes that are already + * pruned, merge them into one pruned rel node list + * + * 'rel-list1' and + * 'rel-list2' are the rel node lists + * + * Returns one pruned rel node list + */ +List * +merge_joinrels(List *rel_list1, List *rel_list2) +{ + List *xrel = NIL; + + foreach(xrel,rel_list1) { + Rel *rel = (Rel*)lfirst(xrel); + rel_list2 = prune_joinrel(rel,rel_list2); + } + return(append(rel_list1, rel_list2)); +} + +/* + * prune_oldrels-- + * If all the joininfo's in a rel node are inactive, + * that means that this node has been joined into + * other nodes in all possible ways, therefore + * this node can be discarded. If not, it will cause + * extra complexity of the optimizer. + * + * old_rels is a list of rel nodes + * + * Returns a new list of rel nodes + */ +List *prune_oldrels(List *old_rels) +{ + Rel *rel; + List *joininfo_list, *xjoininfo; + + if(old_rels == NIL) + return(NIL); + + rel = (Rel*)lfirst(old_rels); + joininfo_list = rel->joininfo; + if(joininfo_list == NIL) + return (lcons(rel, prune_oldrels(lnext(old_rels)))); + + foreach(xjoininfo, joininfo_list) { + JInfo *joininfo = (JInfo*)lfirst(xjoininfo); + if(!joininfo->inactive) + return (lcons(rel, prune_oldrels(lnext(old_rels)))); + } + return(prune_oldrels(lnext(old_rels))); +} diff --git a/src/backend/optimizer/path/xfunc.c b/src/backend/optimizer/path/xfunc.c new file mode 100644 index 00000000000..405b3b77f00 --- /dev/null +++ b/src/backend/optimizer/path/xfunc.c @@ -0,0 +1,1360 @@ +/*------------------------------------------------------------------------- + * + * xfunc.c-- + * Utility routines to handle expensive function optimization. + * Includes xfunc_trypullup(), which attempts early pullup of predicates + * to allow for maximal pruning. 
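prune_rel_path above keeps the relation's cheapest path and drops the unordered path when something else beats it (provided the rel is pruneable). A small sketch of that selection with an invented PathInfo array standing in for the pathlist:

#include <stdio.h>

/* invented path record: a cost plus a flag marking the unordered
 * (sequential) path */
typedef struct {
    double cost;
    int    unordered;
} PathInfo;

static int cheapest(const PathInfo *paths, int n)
{
    int i, best = 0;

    for (i = 1; i < n; i++)
        if (paths[i].cost < paths[best].cost)
            best = i;
    return best;
}

int main(void)
{
    PathInfo paths[] = {
        { 120.0, 1 },             /* unordered scan       */
        {  80.0, 0 },             /* ordered (index) path */
    };
    int    n = 2;
    int    best = cheapest(paths, n);
    double best_cost = paths[best].cost;

    /* the unordered path lost, so prune it (assuming a pruneable rel) */
    if (!paths[best].unordered) {
        int i, kept = 0;

        for (i = 0; i < n; i++)
            if (!paths[i].unordered)
                paths[kept++] = paths[i];
        n = kept;
    }
    printf("%d path(s) kept, cheapest costs %.1f\n", n, best_cost);
    return 0;
}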
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/path/Attic/xfunc.c,v 1.1.1.1 1996/07/09 06:21:36 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef WIN32 +#include /* for MAXFLOAT on most systems */ +#else +#include +#define MAXFLOAT DBL_MAX +#endif /* WIN32 */ + +#include /* for MAXFLOAT on SunOS */ +#include + +#include "postgres.h" +#include "nodes/pg_list.h" +#include "nodes/nodes.h" +#include "nodes/primnodes.h" +#include "nodes/relation.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/syscache.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_type.h" +#include "utils/syscache.h" +#include "catalog/pg_language.h" +#include "planner/xfunc.h" +#include "planner/clauses.h" +#include "planner/pathnode.h" +#include "planner/internal.h" +#include "planner/cost.h" +#include "planner/keys.h" +#include "planner/tlist.h" +#include "lib/lispsort.h" +#include "access/heapam.h" +#include "tcop/dest.h" +#include "storage/buf_internals.h" /* for NBuffers */ +#include "optimizer/tlist.h" /* for get_expr */ + +#define ever ; 1 ; + +/* local funcs */ +static int xfunc_card_unreferenced(Query *queryInfo, + Expr *clause, Relid referenced); */ + +/* +** xfunc_trypullup -- +** Preliminary pullup of predicates, to allow for maximal pruning. +** Given a relation, check each of its paths and see if you can +** pullup clauses from its inner and outer. +*/ + +void xfunc_trypullup(Rel rel) +{ + LispValue y; /* list ptr */ + CInfo maxcinfo; /* The CInfo to pull up, as calculated by + xfunc_shouldpull() */ + JoinPath curpath; /* current path in list */ + int progress; /* has progress been made this time through? */ + int clausetype; + + do { + progress = false; /* no progress yet in this iteration */ + foreach(y, get_pathlist(rel)) { + curpath = (JoinPath)lfirst(y); + + /* + ** for each operand, attempt to pullup predicates until first + ** failure. + */ + for(ever) { + /* No, the following should NOT be '==' !! */ + if (clausetype = + xfunc_shouldpull((Path)get_innerjoinpath(curpath), + curpath, INNER, &maxcinfo)) { + + xfunc_pullup((Path)get_innerjoinpath(curpath), + curpath, maxcinfo, INNER, clausetype); + progress = true; + }else + break; + } + for(ever) { + + /* No, the following should NOT be '==' !! */ + if (clausetype = + xfunc_shouldpull((Path)get_outerjoinpath(curpath), + curpath, OUTER, &maxcinfo)) { + + xfunc_pullup((Path)get_outerjoinpath(curpath), + curpath, maxcinfo, OUTER, clausetype); + progress = true; + }else + break; + } + + /* + ** make sure the unpruneable flag bubbles up, i.e. + ** if anywhere below us in the path pruneable is false, + ** then pruneable should be false here + */ + if (get_pruneable(get_parent(curpath)) && + (!get_pruneable(get_parent + ((Path)get_innerjoinpath(curpath))) || + !get_pruneable(get_parent((Path) + get_outerjoinpath(curpath))))) { + + set_pruneable(get_parent(curpath),false); + progress = true; + } + } + } while(progress); +} + +/* + ** xfunc_shouldpull -- + ** find clause with highest rank, and decide whether to pull it up + ** from child to parent. Currently we only pullup secondary join clauses + ** that are in the pathclauseinfo. Secondary hash and sort clauses are + ** left where they are. + ** If we find an expensive function but decide *not* to pull it up, + ** we'd better set the unpruneable flag. 
-- JMH, 11/11/92 + ** + ** Returns: 0 if nothing left to pullup + ** XFUNC_LOCPRD if a local predicate is to be pulled up + ** XFUNC_JOINPRD if a secondary join predicate is to be pulled up + */ +int xfunc_shouldpull(Query* queryInfo, + Path childpath, + JoinPath parentpath, + int whichchild, + CInfo *maxcinfopt) /* Out: pointer to clause to pullup */ +{ + LispValue clauselist, tmplist; /* lists of clauses */ + CInfo maxcinfo; /* clause to pullup */ + LispValue primjoinclause /* primary join clause */ + = xfunc_primary_join(parentpath); + Cost tmprank, maxrank = (-1 * MAXFLOAT); /* ranks of clauses */ + Cost joinselec = 0; /* selectivity of the join predicate */ + Cost joincost = 0; /* join cost + primjoinclause cost */ + int retval = XFUNC_LOCPRD; + + clauselist = get_locclauseinfo(childpath); + + if (clauselist != LispNil) { + /* find local predicate with maximum rank */ + for (tmplist = clauselist, + maxcinfo = (CInfo) lfirst(tmplist), + maxrank = xfunc_rank(get_clause(maxcinfo)); + tmplist != LispNil; + tmplist = lnext(tmplist)) { + + if ((tmprank = xfunc_rank(get_clause((CInfo)lfirst(tmplist)))) + > maxrank) { + maxcinfo = (CInfo) lfirst(tmplist); + maxrank = tmprank; + } + } + } + + /* + ** If child is a join path, and there are multiple join clauses, + ** see if any join clause has even higher rank than the highest + ** local predicate + */ + if (is_join(childpath) && xfunc_num_join_clauses((JoinPath)childpath) > 1) + for (tmplist = get_pathclauseinfo((JoinPath)childpath); + tmplist != LispNil; + tmplist = lnext(tmplist)) { + + if (tmplist != LispNil && + (tmprank = xfunc_rank(get_clause((CInfo) lfirst(tmplist)))) + > maxrank) { + maxcinfo = (CInfo) lfirst(tmplist); + maxrank = tmprank; + retval = XFUNC_JOINPRD; + } + } + if (maxrank == (-1 * MAXFLOAT)) /* no expensive clauses */ + return(0); + + /* + ** Pullup over join if clause is higher rank than join, or if + ** join is nested loop and current path is inner child (note that + ** restrictions on the inner of a nested loop don't buy you anything -- + ** you still have to scan the entire inner relation each time). + ** Note that the cost of a secondary join clause is only what's + ** calculated by xfunc_expense(), since the actual joining + ** (i.e. the usual path_cost) is paid for by the primary join clause. + */ + if (primjoinclause != LispNil) { + joinselec = compute_clause_selec(queryInfo, primjoinclause, LispNil); + joincost = xfunc_join_expense(parentpath, whichchild); + + if (XfuncMode == XFUNC_PULLALL || + (XfuncMode != XFUNC_WAIT && + ((joincost != 0 && + (maxrank = xfunc_rank(get_clause(maxcinfo))) > + ((joinselec - 1.0) / joincost)) + || (joincost == 0 && joinselec < 1) + || (!is_join(childpath) + && (whichchild == INNER) + && IsA(parentpath,JoinPath) + && !IsA(parentpath,HashPath) + && !IsA(parentpath,MergePath))))) { + + *maxcinfopt = maxcinfo; + return(retval); + + }else if (maxrank != -(MAXFLOAT)) { + /* + ** we've left an expensive restriction below a join. Since + ** we may pullup this restriction in predmig.c, we'd best + ** set the Rel of this join to be unpruneable + */ + set_pruneable(get_parent(parentpath), false); + /* and fall through */ + } + } + return(0); +} + + +/* + ** xfunc_pullup -- + ** move clause from child pathnode to parent pathnode. This operation + ** makes the child pathnode produce a larger relation than it used to. 
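Stripped of the nested-loop and zero-cost special cases, the test in xfunc_shouldpull compares the candidate clause's rank against the rank of the primary join clause, both measured as (selectivity - 1)/cost. A sketch of that core comparison with invented helper names and HUGE_VAL standing in for the MAXFLOAT sentinels:

#include <math.h>
#include <stdbool.h>
#include <stdio.h>

/* rank = (selectivity - 1) / cost: more negative means "run earlier" */
static double rank_of(double selec, double cost)
{
    if (cost == 0.0)
        return (selec > 1.0) ? HUGE_VAL : -HUGE_VAL;
    return (selec - 1.0) / cost;
}

/* pull the restriction above the join iff its rank exceeds the join's */
static bool should_pull_up(double clause_selec, double clause_cost,
                           double join_selec, double join_cost_per_tuple)
{
    return rank_of(clause_selec, clause_cost)
         > rank_of(join_selec, join_cost_per_tuple);
}

int main(void)
{
    /* an expensive, barely selective predicate vs. a selective join:
     * the join filters far more per unit cost, so run it first */
    printf("pull up? %s\n",
           should_pull_up(0.9, 50.0, 0.01, 2.0) ? "yes" : "no");
    return 0;
}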
+ ** This means that we must construct a new Rel just for the childpath, + ** although this Rel will not be added to the list of Rels to be joined up + ** in the query; it's merely a parent for the new childpath. + ** We also have to fix up the path costs of the child and parent. + ** + ** Now returns a pointer to the new pulled-up CInfo. -- JMH, 11/18/92 + */ +CInfo xfunc_pullup(Query* queryInfo, + Path childpath, + JoinPath parentpath, + CInfo cinfo, /* clause to pull up */ + int whichchild,/* whether child is INNER or OUTER of join */ + int clausetype)/* whether clause to pull is join or local */ +{ + Path newkid; + Rel newrel; + Cost pulled_selec; + Cost cost; + CInfo newinfo; + + /* remove clause from childpath */ + newkid = (Path)copyObject((Node)childpath); + if (clausetype == XFUNC_LOCPRD) { + set_locclauseinfo(newkid, + xfunc_LispRemove((LispValue)cinfo, + (List)get_locclauseinfo(newkid))); + }else { + set_pathclauseinfo + ((JoinPath)newkid, + xfunc_LispRemove((LispValue)cinfo, + (List)get_pathclauseinfo((JoinPath)newkid))); + } + + /* + ** give the new child path its own Rel node that reflects the + ** lack of the pulled-up predicate + */ + pulled_selec = compute_clause_selec(queryInfo, + get_clause(cinfo), LispNil); + xfunc_copyrel(get_parent(newkid), &newrel); + set_parent(newkid, newrel); + set_pathlist(newrel, lcons(newkid, NIL)); + set_unorderedpath(newrel, (PathPtr)newkid); + set_cheapestpath(newrel, (PathPtr)newkid); + set_size(newrel, + (Count)((Cost)get_size(get_parent(childpath)) / pulled_selec)); + + /* + ** fix up path cost of newkid. To do this we subtract away all the + ** xfunc_costs of childpath, then recompute the xfunc_costs of newkid + */ + cost = get_path_cost(newkid) - xfunc_get_path_cost(childpath); + Assert(cost >= 0); + set_path_cost(newkid, cost); + cost = get_path_cost(newkid) + xfunc_get_path_cost(newkid); + set_path_cost(newkid, cost); + + /* + ** We copy the cinfo, since it may appear in other plans, and we're going + ** to munge it. -- JMH, 7/22/92 + */ + newinfo = (CInfo)copyObject((Node)cinfo); + + /* + ** Fix all vars in the clause + ** to point to the right varno and varattno in parentpath + */ + xfunc_fixvars(get_clause(newinfo), newrel, whichchild); + + /* add clause to parentpath, and fix up its cost. */ + set_locclauseinfo(parentpath, + lispCons((LispValue)newinfo, + (LispValue)get_locclauseinfo(parentpath))); + /* put new childpath into the path tree */ + if (whichchild == INNER) { + set_innerjoinpath(parentpath, (pathPtr)newkid); + }else { + set_outerjoinpath(parentpath, (pathPtr)newkid); + } + + /* + ** recompute parentpath cost from scratch -- the cost + ** of the join method has changed + */ + cost = xfunc_total_path_cost(parentpath); + set_path_cost(parentpath, cost); + + return(newinfo); +} + +/* + ** calculate (selectivity-1)/cost. + */ +Cost xfunc_rank(Query *queryInfo,LispValue clause) +{ + Cost selec = compute_clause_selec(queryInfo, clause, LispNil); + Cost cost = xfunc_expense(queryInfo,clause); + + if (cost == 0) + if (selec > 1) return(MAXFLOAT); + else return(-(MAXFLOAT)); + return((selec - 1)/cost); +} + +/* + ** Find the "global" expense of a clause; i.e. the local expense divided + ** by the cardinalities of all the base relations of the query that are *not* + ** referenced in the clause. 
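xfunc_rank above implements the (selectivity - 1)/cost metric. A short worked example with made-up numbers showing why ordering two restrictions by ascending rank minimizes the expected per-tuple cost (swapping the order changes only which clause filters tuples for the other):

#include <stdio.h>

/* expected per-tuple cost of evaluating p1 then p2:
 * p2 only runs on the fraction of tuples p1 passes */
static double order_cost(double c1, double s1, double c2)
{
    return c1 + s1 * c2;
}

static double rank_of(double s, double c)
{
    return (s - 1.0) / c;
}

int main(void)
{
    /* invented predicates: p1 cheap and selective, p2 expensive and loose */
    double c1 = 1.0,  s1 = 0.10;
    double c2 = 20.0, s2 = 0.90;

    printf("rank(p1)=%.3f rank(p2)=%.3f\n", rank_of(s1, c1), rank_of(s2, c2));
    printf("p1 first: %.2f per tuple\n", order_cost(c1, s1, c2));
    printf("p2 first: %.2f per tuple\n", order_cost(c2, s2, c1));
    /* lower rank first wins: 1 + 0.1*20 = 3.0 vs. 20 + 0.9*1 = 20.9 */
    return 0;
}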
+ */ +Cost xfunc_expense(Query* queryInfo, clause) + LispValue clause; +{ + Cost cost = xfunc_local_expense(clause); + + if (cost) + { + Count card = xfunc_card_unreferenced(queryInfo, clause, LispNil); + if (card) + cost /= card; + } + + return(cost); +} + +/* + ** xfunc_join_expense -- + ** Find global expense of a join clause + */ +Cost xfunc_join_expense(Query *queryInfo, JoinPath path, int whichchild) +{ + LispValue primjoinclause = xfunc_primary_join(path); + + /* + ** the second argument to xfunc_card_unreferenced reflects all the + ** relations involved in the join clause, i.e. all the relids in the Rel + ** of the join clause + */ + Count card = 0; + Cost cost = xfunc_expense_per_tuple(path, whichchild); + + card = xfunc_card_unreferenced(queryInfo, + primjoinclause, + get_relids(get_parent(path))); + if (primjoinclause) + cost += xfunc_local_expense(primjoinclause); + + if (card) cost /= card; + + return(cost); +} + +/* + ** Recursively find the per-tuple expense of a clause. See + ** xfunc_func_expense for more discussion. + */ +Cost xfunc_local_expense(LispValue clause) +{ + Cost cost = 0; /* running expense */ + LispValue tmpclause; + + /* First handle the base case */ + if (IsA(clause,Const) || IsA(clause,Var) || IsA(clause,Param)) + return(0); + /* now other stuff */ + else if (IsA(clause,Iter)) + /* Too low. Should multiply by the expected number of iterations. */ + return(xfunc_local_expense(get_iterexpr((Iter)clause))); + else if (IsA(clause,ArrayRef)) + return(xfunc_local_expense(get_refexpr((ArrayRef)clause))); + else if (fast_is_clause(clause)) + return(xfunc_func_expense((LispValue)get_op(clause), + (LispValue)get_opargs(clause))); + else if (fast_is_funcclause(clause)) + return(xfunc_func_expense((LispValue)get_function(clause), + (LispValue)get_funcargs(clause))); + else if (fast_not_clause(clause)) + return(xfunc_local_expense(lsecond(clause))); + else if (fast_or_clause(clause)) { + /* find cost of evaluating each disjunct */ + for (tmpclause = lnext(clause); tmpclause != LispNil; + tmpclause = lnext(tmpclause)) + cost += xfunc_local_expense(lfirst(tmpclause)); + return(cost); + }else { + elog(WARN, "Clause node of undetermined type"); + return(-1); + } +} + +/* + ** xfunc_func_expense -- + ** given a Func or Oper and its args, find its expense. + ** Note: in Stonebraker's SIGMOD '91 paper, he uses a more complicated metric + ** than the one here. We can ignore the expected number of tuples for + ** our calculations; we just need the per-tuple expense. But he also + ** proposes components to take into account the costs of accessing disk and + ** archive. We didn't adopt that scheme here; eventually the vacuum + ** cleaner should be able to tell us what percentage of bytes to find on + ** which storage level, and that should be multiplied in appropriately + ** in the cost function below. Right now we don't model the cost of + ** accessing secondary or tertiary storage, since we don't have sufficient + ** stats to do it right. + */ +Cost xfunc_func_expense(LispValue node, LispValue args) +{ + HeapTuple tupl; /* the pg_proc tuple for each function */ + Form_pg_proc proc; /* a data structure to hold the pg_proc tuple */ + int width = 0; /* byte width of the field referenced by each clause */ + RegProcedure funcid; /* ID of function associate with node */ + Cost cost = 0; /* running expense */ + LispValue tmpclause; + LispValue operand; /* one operand of an operator */ + + if (IsA(node,Oper)) { + /* don't trust the opid in the Oper node. Use the opno. 
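xfunc_local_expense above is a recursive walk that charges nothing for constants, Vars and Params, adds a function's own cost plus that of its arguments, and sums the branches of an OR. A self-contained sketch of the same walk over an invented, much smaller expression node type (the real code additionally charges a per-byte CPU cost scaled by argument width, as xfunc_func_expense shows below):

#include <stdio.h>

/* invented expression node kinds, much reduced from the real tree */
typedef enum { E_CONST, E_VAR, E_FUNC, E_OR } ExprKind;

typedef struct Expr {
    ExprKind      kind;
    double        percall_cost;   /* only meaningful for E_FUNC    */
    int           nargs;
    struct Expr **args;           /* FUNC arguments or OR branches */
} Expr;

/* per-tuple expense: leaves are free, a function adds its own cost
 * plus its arguments', and every branch of an OR may be evaluated */
static double local_expense(const Expr *e)
{
    double cost = (e->kind == E_FUNC) ? e->percall_cost : 0.0;
    int    i;

    if (e->kind == E_CONST || e->kind == E_VAR)
        return 0.0;
    for (i = 0; i < e->nargs; i++)
        cost += local_expense(e->args[i]);
    return cost;
}

int main(void)
{
    Expr  x    = { E_VAR, 0.0, 0, NULL };
    Expr *fa[] = { &x };
    Expr  f    = { E_FUNC, 5.0, 1, fa };   /* f(x): 5 units per call */
    Expr  g    = { E_FUNC, 2.0, 1, fa };   /* g(x): 2 units per call */
    Expr *ob[] = { &f, &g };
    Expr  o    = { E_OR, 0.0, 2, ob };     /* f(x) OR g(x)           */

    printf("expense = %.1f\n", local_expense(&o));   /* 7.0 */
    return 0;
}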
*/ + if (!(funcid = get_opcode(get_opno((Oper)node)))) + elog(WARN, "Oper's function is undefined"); + }else { + funcid = get_funcid((Func)node); + } + + /* look up tuple in cache */ + tupl = SearchSysCacheTuple(PROOID, ObjectIdGetDatum(funcid),0,0,0); + if (!HeapTupleIsValid(tupl)) + elog(WARN, "Cache lookup failed for procedure %d", funcid); + proc = (Form_pg_proc) GETSTRUCT(tupl); + + /* + ** if it's a Postquel function, its cost is stored in the + ** associated plan. + */ + if (proc->prolang == SQLlanguageId) { + LispValue tmpplan; + List planlist; + + if (IsA(node,Oper) || get_func_planlist((Func)node) == LispNil) { + Oid *argOidVect; /* vector of argtypes */ + char *pq_src; /* text of PQ function */ + int nargs; /* num args to PQ function */ + QueryTreeList *queryTree_list; /* dummy variable */ + + /* + ** plan the function, storing it in the Func node for later + ** use by the executor. + */ + pq_src = (char *) textout(&(proc->prosrc)); + nargs = proc->pronargs; + if (nargs > 0) + argOidVect = proc->proargtypes; + planlist = (List)pg_plan(pq_src, argOidVect, nargs, + &parseTree_list, None); + if (IsA(node,Func)) + set_func_planlist((Func)node, planlist); + + }else {/* plan has been cached inside the Func node already */ + planlist = get_func_planlist((Func)node); + } + + /* + ** Return the sum of the costs of the plans (the PQ function + ** may have many queries in its body). + */ + foreach(tmpplan, planlist) + cost += get_cost((Plan)lfirst(tmpplan)); + return(cost); + }else { /* it's a C function */ + /* + ** find the cost of evaluating the function's arguments + ** and the width of the operands + */ + for (tmpclause = args; tmpclause != LispNil; + tmpclause = lnext(tmpclause)) { + + if ((operand = lfirst(tmpclause)) != LispNil) { + cost += xfunc_local_expense(operand); + width += xfunc_width(operand); + } + } + + /* + ** when stats become available, add in cost of accessing secondary + ** and tertiary storage here. + */ + return(cost + + (Cost)proc->propercall_cpu + + (Cost)proc->properbyte_cpu * (Cost)proc->probyte_pct/100.00 * + (Cost)width + /* + * Pct_of_obj_in_mem + DISK_COST * proc->probyte_pct/100.00 * width + * Pct_of_obj_on_disk + + ARCH_COST * proc->probyte_pct/100.00 * width + * Pct_of_obj_on_arch + */ + ); + } +} + +/* + ** xfunc_width -- + ** recursively find the width of a expression + */ + +int xfunc_width(LispValue clause) +{ + Relation rd; /* Relation Descriptor */ + HeapTuple tupl; /* structure to hold a cached tuple */ + TypeTupleForm type; /* structure to hold a type tuple */ + int retval = 0; + + if (IsA(clause,Const)) { + /* base case: width is the width of this constant */ + retval = get_constlen((Const) clause); + goto exit; + }else if (IsA(clause,ArrayRef)) { + /* base case: width is width of the refelem within the array */ + retval = get_refelemlength((ArrayRef)clause); + goto exit; + }else if (IsA(clause,Var)) { + /* base case: width is width of this attribute */ + tupl = SearchSysCacheTuple(TYPOID, + PointerGetDatum(get_vartype((Var)clause)), + 0,0,0); + if (!HeapTupleIsValid(tupl)) + elog(WARN, "Cache lookup failed for type %d", + get_vartype((Var)clause)); + type = (TypeTupleForm) GETSTRUCT(tupl); + if (get_varattno((Var)clause) == 0) { + /* clause is a tuple. 
Get its width */ + rd = heap_open(type->typrelid); + retval = xfunc_tuple_width(rd); + heap_close(rd); + }else { + /* attribute is a base type */ + retval = type->typlen; + } + goto exit; + }else if (IsA(clause,Param)) { + if (typeid_get_relid(get_paramtype((Param)clause))) { + /* Param node returns a tuple. Find its width */ + rd = heap_open(typeid_get_relid(get_paramtype((Param)clause))); + retval = xfunc_tuple_width(rd); + heap_close(rd); + }else if (get_param_tlist((Param)clause) != LispNil) { + /* Param node projects a complex type */ + Assert(length(get_param_tlist((Param)clause)) == 1); /* sanity */ + retval = + xfunc_width((LispValue) + get_expr(lfirst(get_param_tlist((Param)clause)))); + }else { + /* Param node returns a base type */ + retval = tlen(get_id_type(get_paramtype((Param)clause))); + } + goto exit; + }else if (IsA(clause,Iter)) { + /* + ** An Iter returns a setof things, so return the width of a single + ** thing. + ** Note: THIS MAY NOT WORK RIGHT WHEN AGGS GET FIXED, + ** SINCE AGG FUNCTIONS CHEW ON THE WHOLE SETOF THINGS!!!! + ** This whole Iter business is bogus, anyway. + */ + retval = xfunc_width(get_iterexpr((Iter)clause)); + goto exit; + }else if (fast_is_clause(clause)) { + /* + ** get function associated with this Oper, and treat this as + ** a Func + */ + tupl = SearchSysCacheTuple(OPROID, + ObjectIdGetDatum(get_opno((Oper)get_op(clause))), + 0,0,0); + if (!HeapTupleIsValid(tupl)) + elog(WARN, "Cache lookup failed for procedure %d", + get_opno((Oper)get_op(clause))); + return(xfunc_func_width + ((RegProcedure)(((OperatorTupleForm)(GETSTRUCT(tupl)))->oprcode), + (LispValue)get_opargs(clause))); + }else if (fast_is_funcclause(clause)) { + Func func = (Func)get_function(clause); + if (get_func_tlist(func) != LispNil) { + /* this function has a projection on it. Get the length + of the projected attribute */ + Assert(length(get_func_tlist(func)) == 1); /* sanity */ + retval = + xfunc_width((LispValue) + get_expr(lfirst(get_func_tlist(func)))); + goto exit; + }else { + return(xfunc_func_width((RegProcedure)get_funcid(func), + (LispValue)get_funcargs(clause))); + } + }else { + elog(WARN, "Clause node of undetermined type"); + return(-1); + } + + exit: + if (retval == -1) + retval = VARLEN_DEFAULT; + return(retval); +} + +/* + ** xfunc_card_unreferenced: + ** find all relations not referenced in clause, and multiply their + ** cardinalities. Ignore relation of cardinality 0. + ** User may pass in referenced list, if they know it (useful + ** for joins). + */ +static Count +xfunc_card_unreferenced(Query *queryInfo, + LispValue clause, Relid referenced) +{ + Relid unreferenced, allrelids = LispNil; + LispValue temp; + + /* find all relids of base relations referenced in query */ + foreach (temp,queryInfo->base_relation_list_) + { + Assert(lnext(get_relids((Rel)lfirst(temp))) == LispNil); + allrelids = lappend(allrelids, + lfirst(get_relids((Rel)lfirst(temp)))); + } + + /* find all relids referenced in query but not in clause */ + if (!referenced) + referenced = xfunc_find_references(clause); + unreferenced = set_difference(allrelids, referenced); + + return(xfunc_card_product(unreferenced)); +} + +/* + ** xfunc_card_product + ** multiple together cardinalities of a list relations. 
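As the comments above describe, a clause's "global" expense is its local expense divided by the product of the cardinalities of the base relations it does not reference. A simplified sketch of that product, skipping empty relations and never returning zero so callers can divide safely (the real xfunc_card_product also folds in the selectivity of zero-cost clauses on each relation); names and numbers are invented:

#include <stdio.h>

/* product of the cardinalities of a set of relations */
static double card_product(const double *tuples, int nrels)
{
    double product = 0.0;
    int    i;

    for (i = 0; i < nrels; i++) {
        if (tuples[i] == 0.0)        /* ignore relations of cardinality 0 */
            continue;
        product = (product == 0.0) ? tuples[i] : product * tuples[i];
    }
    return (product == 0.0) ? 1.0 : product;
}

int main(void)
{
    double cards[] = { 1000.0, 0.0, 50.0 };

    printf("product = %.0f\n", card_product(cards, 3));   /* 50000 */
    return 0;
}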
+ */ +Count xfunc_card_product(Query *queryInfo, Relid relids) +{ + LispValue cinfonode; + LispValue temp; + Rel currel; + Cost tuples; + Count retval = 0; + + foreach(temp,relids) { + currel = get_rel(lfirst(temp)); + tuples = get_tuples(currel); + + if (tuples) { /* not of cardinality 0 */ + /* factor in the selectivity of all zero-cost clauses */ + foreach (cinfonode, get_clauseinfo(currel)) { + if (!xfunc_expense(queryInfo,get_clause((CInfo)lfirst(cinfonode)))) + tuples *= + compute_clause_selec(queryInfo, + get_clause((CInfo)lfirst(cinfonode)), + LispNil); + } + + if (retval == 0) retval = tuples; + else retval *= tuples; + } + } + if (retval == 0) retval = 1; /* saves caller from dividing by zero */ + return(retval); +} + + +/* + ** xfunc_find_references: + ** Traverse a clause and find all relids referenced in the clause. + */ +List xfunc_find_references(LispValue clause) +{ + List retval = (List)LispNil; + LispValue tmpclause; + + /* Base cases */ + if (IsA(clause,Var)) + return(lispCons(lfirst(get_varid((Var)clause)), LispNil)); + else if (IsA(clause,Const) || IsA(clause,Param)) + return((List)LispNil); + + /* recursion */ + else if (IsA(clause,Iter)) + /* Too low. Should multiply by the expected number of iterations. maybe */ + return(xfunc_find_references(get_iterexpr((Iter)clause))); + else if (IsA(clause,ArrayRef)) + return(xfunc_find_references(get_refexpr((ArrayRef)clause))); + else if (fast_is_clause(clause)) { + /* string together result of all operands of Oper */ + for (tmpclause = (LispValue)get_opargs(clause); tmpclause != LispNil; + tmpclause = lnext(tmpclause)) + retval = nconc(retval, xfunc_find_references(lfirst(tmpclause))); + return(retval); + }else if (fast_is_funcclause(clause)) { + /* string together result of all args of Func */ + for (tmpclause = (LispValue)get_funcargs(clause); + tmpclause != LispNil; + tmpclause = lnext(tmpclause)) + retval = nconc(retval, xfunc_find_references(lfirst(tmpclause))); + return(retval); + }else if (fast_not_clause(clause)) + return(xfunc_find_references(lsecond(clause))); + else if (fast_or_clause(clause)) { + /* string together result of all operands of OR */ + for (tmpclause = lnext(clause); tmpclause != LispNil; + tmpclause = lnext(tmpclause)) + retval = nconc(retval, xfunc_find_references(lfirst(tmpclause))); + return(retval); + }else { + elog(WARN, "Clause node of undetermined type"); + return((List)LispNil); + } +} + +/* + ** xfunc_primary_join: + ** Find the primary join clause: for Hash and Merge Joins, this is the + ** min rank Hash or Merge clause, while for Nested Loop it's the + ** min rank pathclause + */ +LispValue xfunc_primary_join(JoinPath pathnode) +{ + LispValue joinclauselist = get_pathclauseinfo(pathnode); + CInfo mincinfo; + LispValue tmplist; + LispValue minclause = LispNil; + Cost minrank, tmprank; + + if (IsA(pathnode,MergePath)) + { + for(tmplist = get_path_mergeclauses((MergePath)pathnode), + minclause = lfirst(tmplist), + minrank = xfunc_rank(minclause); + tmplist != LispNil; + tmplist = lnext(tmplist)) + if ((tmprank = xfunc_rank(lfirst(tmplist))) + < minrank) + { + minrank = tmprank; + minclause = lfirst(tmplist); + } + return(minclause); + } + else if (IsA(pathnode,HashPath)) + { + for(tmplist = get_path_hashclauses((HashPath)pathnode), + minclause = lfirst(tmplist), + minrank = xfunc_rank(minclause); + tmplist != LispNil; + tmplist = lnext(tmplist)) + if ((tmprank = xfunc_rank(lfirst(tmplist))) + < minrank) + { + minrank = tmprank; + minclause = lfirst(tmplist); + } + return(minclause); + } + + 
/* if we drop through, it's nested loop join */ + if (joinclauselist == LispNil) + return(LispNil); + + for(tmplist = joinclauselist, mincinfo = (CInfo) lfirst(joinclauselist), + minrank = xfunc_rank(get_clause((CInfo) lfirst(tmplist))); + tmplist != LispNil; + tmplist = lnext(tmplist)) + if ((tmprank = xfunc_rank(get_clause((CInfo) lfirst(tmplist)))) + < minrank) + { + minrank = tmprank; + mincinfo = (CInfo) lfirst(tmplist); + } + return((LispValue)get_clause(mincinfo)); +} + +/* + ** xfunc_get_path_cost + ** get the expensive function costs of the path + */ +Cost xfunc_get_path_cost(Query *queryInfo, Path pathnode) +{ + Cost cost = 0; + LispValue tmplist; + Cost selec = 1.0; + + /* + ** first add in the expensive local function costs. + ** We ensure that the clauses are sorted by rank, so that we + ** know (via selectivities) the number of tuples that will be checked + ** by each function. If we're not doing any optimization of expensive + ** functions, we don't sort. + */ + if (XfuncMode != XFUNC_OFF) + set_locclauseinfo(pathnode, lisp_qsort(get_locclauseinfo(pathnode), + xfunc_cinfo_compare)); + for(tmplist = get_locclauseinfo(pathnode), selec = 1.0; + tmplist != LispNil; + tmplist = lnext(tmplist)) + { + cost += (Cost)(xfunc_local_expense(get_clause((CInfo)lfirst(tmplist))) + * (Cost)get_tuples(get_parent(pathnode)) * selec); + selec *= compute_clause_selec(queryInfo, + get_clause((CInfo)lfirst(tmplist)), + LispNil); + } + + /* + ** Now add in any node-specific expensive function costs. + ** Again, we must ensure that the clauses are sorted by rank. + */ + if (IsA(pathnode,JoinPath)) + { + if (XfuncMode != XFUNC_OFF) + set_pathclauseinfo((JoinPath)pathnode, lisp_qsort + (get_pathclauseinfo((JoinPath)pathnode), + xfunc_cinfo_compare)); + for(tmplist = get_pathclauseinfo((JoinPath)pathnode), selec = 1.0; + tmplist != LispNil; + tmplist = lnext(tmplist)) + { + cost += (Cost)(xfunc_local_expense(get_clause((CInfo)lfirst(tmplist))) + * (Cost)get_tuples(get_parent(pathnode)) * selec); + selec *= compute_clause_selec(queryInfo, + get_clause((CInfo)lfirst(tmplist)), + LispNil); + } + } + if (IsA(pathnode,HashPath)) + { + if (XfuncMode != XFUNC_OFF) + set_path_hashclauses + ((HashPath)pathnode, + lisp_qsort(get_path_hashclauses((HashPath)pathnode), + xfunc_clause_compare)); + for(tmplist = get_path_hashclauses((HashPath)pathnode), selec = 1.0; + tmplist != LispNil; + tmplist = lnext(tmplist)) + { + cost += (Cost)(xfunc_local_expense(lfirst(tmplist)) + * (Cost)get_tuples(get_parent(pathnode)) * selec); + selec *= compute_clause_selec(queryInfo, + lfirst(tmplist), LispNil); + } + } + if (IsA(pathnode,MergePath)) + { + if (XfuncMode != XFUNC_OFF) + set_path_mergeclauses + ((MergePath)pathnode, + lisp_qsort(get_path_mergeclauses((MergePath)pathnode), + xfunc_clause_compare)); + for(tmplist = get_path_mergeclauses((MergePath)pathnode), selec = 1.0; + tmplist != LispNil; + tmplist = lnext(tmplist)) + { + cost += (Cost)(xfunc_local_expense(lfirst(tmplist)) + * (Cost)get_tuples(get_parent(pathnode)) * selec); + selec *= compute_clause_selec(queryInfo, + lfirst(tmplist), LispNil); + } + } + Assert(cost >= 0); + return(cost); +} + +/* + ** Recalculate the cost of a path node. This includes the basic cost of the + ** node, as well as the cost of its expensive functions. + ** We need to do this to the parent after pulling a clause from a child into a + ** parent. Thus we should only be calling this function on JoinPaths. 
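The loops in xfunc_get_path_cost all follow the same shape: with the clauses sorted by rank, each clause is charged only for the tuples that survived the clauses ahead of it, tracked by a running selectivity product. A standalone sketch of that accumulation with an invented Clause struct:

#include <stdio.h>

/* invented restriction clause: per-tuple expense and selectivity */
typedef struct {
    double expense;
    double selec;
} Clause;

/* total expensive-function cost of applying rank-sorted clauses to a
 * relation of 'tuples' rows */
static double path_function_cost(const Clause *cl, int n, double tuples)
{
    double cost = 0.0, selec = 1.0;
    int    i;

    for (i = 0; i < n; i++) {
        cost  += cl[i].expense * tuples * selec;
        selec *= cl[i].selec;
    }
    return cost;
}

int main(void)
{
    Clause cl[] = { { 1.0, 0.1 }, { 20.0, 0.5 } };  /* already rank-sorted */

    printf("cost = %.1f\n", path_function_cost(cl, 2, 1000.0));
    /* 1*1000 + 20*1000*0.1 = 3000.0 */
    return 0;
}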
+ */ +Cost xfunc_total_path_cost(JoinPath pathnode) +{ + Cost cost = xfunc_get_path_cost((Path)pathnode); + + Assert(IsA(pathnode,JoinPath)); + if (IsA(pathnode,MergePath)) + { + MergePath mrgnode = (MergePath)pathnode; + cost += cost_mergesort(get_path_cost((Path)get_outerjoinpath(mrgnode)), + get_path_cost((Path)get_innerjoinpath(mrgnode)), + get_outersortkeys(mrgnode), + get_innersortkeys(mrgnode), + get_tuples(get_parent((Path)get_outerjoinpath + (mrgnode))), + get_tuples(get_parent((Path)get_innerjoinpath + (mrgnode))), + get_width(get_parent((Path)get_outerjoinpath + (mrgnode))), + get_width(get_parent((Path)get_innerjoinpath + (mrgnode)))); + Assert(cost >= 0); + return(cost); + } + else if (IsA(pathnode,HashPath)) + { + HashPath hashnode = (HashPath)pathnode; + cost += cost_hashjoin(get_path_cost((Path)get_outerjoinpath(hashnode)), + get_path_cost((Path)get_innerjoinpath(hashnode)), + get_outerhashkeys(hashnode), + get_innerhashkeys(hashnode), + get_tuples(get_parent((Path)get_outerjoinpath + (hashnode))), + get_tuples(get_parent((Path)get_innerjoinpath + (hashnode))), + get_width(get_parent((Path)get_outerjoinpath + (hashnode))), + get_width(get_parent((Path)get_innerjoinpath + (hashnode)))); + Assert (cost >= 0); + return(cost); + } + else /* Nested Loop Join */ + { + cost += cost_nestloop(get_path_cost((Path)get_outerjoinpath(pathnode)), + get_path_cost((Path)get_innerjoinpath(pathnode)), + get_tuples(get_parent((Path)get_outerjoinpath + (pathnode))), + get_tuples(get_parent((Path)get_innerjoinpath + (pathnode))), + get_pages(get_parent((Path)get_outerjoinpath + (pathnode))), + IsA(get_innerjoinpath(pathnode),IndexPath)); + Assert(cost >= 0); + return(cost); + } +} + + +/* + ** xfunc_expense_per_tuple -- + ** return the expense of the join *per-tuple* of the input relation. + ** The cost model here is that a join costs + ** k*card(outer)*card(inner) + l*card(outer) + m*card(inner) + n + ** + ** We treat the l and m terms by considering them to be like restrictions + ** constrained to be right under the join. Thus the cost per inner and + ** cost per outer of the join is different, reflecting these virtual nodes. + ** + ** The cost per tuple of outer is k + l/referenced(inner). Cost per tuple + ** of inner is k + m/referenced(outer). + ** The constants k, l, m and n depend on the join method. Measures here are + ** based on the costs in costsize.c, with fudging for HashJoin and Sorts to + ** make it fit our model (the 'q' in HashJoin results in a + ** card(outer)/card(inner) term, and sorting results in a log term. 
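The comment above models a join as k*card(outer)*card(inner) + l*card(outer) + m*card(inner) + n and charges pulled-up predicates per tuple of the cross product. A sketch of the resulting per-tuple charges; the constants here are made up, since their real values depend on the join method as computed in xfunc_expense_per_tuple below:

#include <stdio.h>

/* share of the join cost attributed to a predicate on the outer input */
static double expense_per_outer_tuple(double k, double l, double Ni)
{
    return k + l / Ni;
}

/* share of the join cost attributed to a predicate on the inner input */
static double expense_per_inner_tuple(double k, double m, double No)
{
    return k + m / No;
}

int main(void)
{
    double k = 0.01, l = 1.0, m = 2.0;   /* invented constants  */
    double No = 1000.0, Ni = 200.0;      /* child cardinalities */

    printf("per outer tuple: %.4f\n", expense_per_outer_tuple(k, l, Ni));
    printf("per inner tuple: %.4f\n", expense_per_inner_tuple(k, m, No));
    return 0;
}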
+ + */ +Cost xfunc_expense_per_tuple(JoinPath joinnode, int whichchild) +{ + Rel outerrel = get_parent((Path)get_outerjoinpath(joinnode)); + Rel innerrel = get_parent((Path)get_innerjoinpath(joinnode)); + Count outerwidth = get_width(outerrel); + Count outers_per_page = ceil(BLCKSZ/(outerwidth + sizeof(HeapTupleData))); + + if (IsA(joinnode,HashPath)) + { + if (whichchild == INNER) + return((1 + _CPU_PAGE_WEIGHT_)*outers_per_page/NBuffers); + else + return(((1 + _CPU_PAGE_WEIGHT_)*outers_per_page/NBuffers) + + _CPU_PAGE_WEIGHT_ + / xfunc_card_product(get_relids(innerrel))); + } + else if (IsA(joinnode,MergePath)) + { + /* assumes sort exists, and costs one (I/O + CPU) per tuple */ + if (whichchild == INNER) + return((2*_CPU_PAGE_WEIGHT_ + 1) + / xfunc_card_product(get_relids(outerrel))); + else + return((2*_CPU_PAGE_WEIGHT_ + 1) + / xfunc_card_product(get_relids(innerrel))); + } + else /* nestloop */ + { + Assert(IsA(joinnode,JoinPath)); + return(_CPU_PAGE_WEIGHT_); + } +} + +/* + ** xfunc_fixvars -- + ** After pulling up a clause, we must walk its expression tree, fixing Var + ** nodes to point to the correct varno (either INNER or OUTER, depending + ** on which child the clause was pulled from), and the right varattno in the + ** target list of the child's former relation. If the target list of the + ** child Rel does not contain the attribute we need, we add it. + */ +void xfunc_fixvars(LispValue clause, /* clause being pulled up */ + Rel rel, /* rel it's being pulled from */ + int varno) /* whether rel is INNER or OUTER of join */ +{ + LispValue tmpclause; /* temporary variable */ + TargetEntry *tle; /* tlist member corresponding to var */ + + + if (IsA(clause,Const) || IsA(clause,Param)) return; + else if (IsA(clause,Var)) + { + /* here's the meat */ + tle = tlistentry_member((Var)clause, get_targetlist(rel)); + if (tle == LispNil) + { + /* + ** The attribute we need is not in the target list, + ** so we have to add it. + ** + */ + add_tl_element(rel, (Var)clause); + tle = tlistentry_member((Var)clause, get_targetlist(rel)); + } + set_varno(((Var)clause), varno); + set_varattno(((Var)clause), get_resno(get_resdom(get_entry(tle)))); + } + else if (IsA(clause,Iter)) + xfunc_fixvars(get_iterexpr((Iter)clause), rel, varno); + else if (fast_is_clause(clause)) + { + xfunc_fixvars(lfirst(lnext(clause)), rel, varno); + xfunc_fixvars(lfirst(lnext(lnext(clause))), rel, varno); + } + else if (fast_is_funcclause(clause)) + for (tmpclause = lnext(clause); tmpclause != LispNil; + tmpclause = lnext(tmpclause)) + xfunc_fixvars(lfirst(tmpclause), rel, varno); + else if (fast_not_clause(clause)) + xfunc_fixvars(lsecond(clause), rel, varno); + else if (fast_or_clause(clause)) + for (tmpclause = lnext(clause); tmpclause != LispNil; + tmpclause = lnext(tmpclause)) + xfunc_fixvars(lfirst(tmpclause), rel, varno); + else + { + elog(WARN, "Clause node of undetermined type"); + } +} + + +/* + ** Comparison function for lisp_qsort() on a list of CInfo's. + ** arg1 and arg2 should really be of type (CInfo *). + */ +int xfunc_cinfo_compare(void *arg1, void *arg2) +{ + CInfo info1 = *(CInfo *) arg1; + CInfo info2 = *(CInfo *) arg2; + + LispValue clause1 = (LispValue) get_clause(info1), + clause2 = (LispValue) get_clause(info2); + + return(xfunc_clause_compare((void *) &clause1, (void *) &clause2)); +} + +/* + ** xfunc_clause_compare: comparison function for lisp_qsort() that compares two + ** clauses based on expense/(1 - selectivity) + ** arg1 and arg2 are really pointers to clauses. 
+ */ +int xfunc_clause_compare(void *arg1, void *arg2) +{ + LispValue clause1 = *(LispValue *) arg1; + LispValue clause2 = *(LispValue *) arg2; + Cost rank1, /* total xfunc rank of clause1 */ + rank2; /* total xfunc rank of clause2 */ + + rank1 = xfunc_rank(clause1); + rank2 = xfunc_rank(clause2); + + if ( rank1 < rank2) + return(-1); + else if (rank1 == rank2) + return(0); + else return(1); +} + +/* + ** xfunc_disjunct_sort -- + ** given a list of clauses, for each clause sort the disjuncts by cost + ** (this assumes the predicates have been converted to Conjunctive NF) + ** Modifies the clause list! + */ +void xfunc_disjunct_sort(LispValue clause_list) +{ + LispValue temp; + + foreach(temp, clause_list) + if(or_clause(lfirst(temp))) + lnext(lfirst(temp)) = + lisp_qsort(lnext(lfirst(temp)), xfunc_disjunct_compare); +} + + +/* + ** xfunc_disjunct_compare: comparison function for qsort() that compares two + ** disjuncts based on cost/selec. + ** arg1 and arg2 are really pointers to disjuncts + */ +int xfunc_disjunct_compare(Query* queryInfo, void *arg1, void *arg2) +{ + LispValue disjunct1 = *(LispValue *) arg1; + LispValue disjunct2 = *(LispValue *) arg2; + Cost cost1, /* total cost of disjunct1 */ + cost2, /* total cost of disjunct2 */ + selec1, + selec2; + Cost rank1, rank2; + + cost1 = xfunc_expense(queryInfo, disjunct1); + cost2 = xfunc_expense(queryInfo, disjunct2); + selec1 = compute_clause_selec(queryInfo, + disjunct1, LispNil); + selec2 = compute_clause_selec(queryInfo, + disjunct2, LispNil); + + if (selec1 == 0) + rank1 = MAXFLOAT; + else if (cost1 == 0) + rank1 = 0; + else + rank1 = cost1/selec1; + + if (selec2 == 0) + rank2 = MAXFLOAT; + else if (cost2 == 0) + rank2 = 0; + else + rank2 = cost2/selec2; + + if ( rank1 < rank2) + return(-1); + else if (rank1 == rank2) + return(0); + else return(1); +} + +/* ------------------------ UTILITY FUNCTIONS ------------------------------- */ +/* + ** xfunc_func_width -- + ** Given a function OID and operands, find the width of the return value. 
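xfunc_disjunct_compare above ranks the branches of an OR by cost/selectivity. A worked example with invented numbers showing why evaluating the lower-ranked disjunct first minimizes the expected short-circuit cost:

#include <stdio.h>

/* expected short-circuit cost of evaluating disjunct 1 then disjunct 2:
 * the second only runs when the first came up false */
static double or_cost(double c1, double s1, double c2)
{
    return c1 + (1.0 - s1) * c2;
}

int main(void)
{
    /* invented disjuncts: a is cheap and often true, b is expensive */
    double ca = 1.0,  sa = 0.6;     /* rank = cost/selec = 1.67 */
    double cb = 30.0, sb = 0.9;     /* rank = 33.3              */

    printf("a first: %.2f per tuple\n", or_cost(ca, sa, cb));  /* 13.00 */
    printf("b first: %.2f per tuple\n", or_cost(cb, sb, ca));  /* 30.10 */
    /* ascending cost/selectivity (a before b) gives the cheaper order */
    return 0;
}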
+ */ +int xfunc_func_width(RegProcedure funcid, LispValue args) +{ + Relation rd; /* Relation Descriptor */ + HeapTuple tupl; /* structure to hold a cached tuple */ + Form_pg_proc proc; /* structure to hold the pg_proc tuple */ + TypeTupleForm type; /* structure to hold the pg_type tuple */ + LispValue tmpclause; + int retval; + + /* lookup function and find its return type */ + Assert(RegProcedureIsValid(funcid)); + tupl = SearchSysCacheTuple(PROOID, ObjectIdGetDatum(funcid), 0,0,0); + if (!HeapTupleIsValid(tupl)) + elog(WARN, "Cache lookup failed for procedure %d", funcid); + proc = (Form_pg_proc) GETSTRUCT(tupl); + + /* if function returns a tuple, get the width of that */ + if (typeid_get_relid(proc->prorettype)) + { + rd = heap_open(typeid_get_relid(proc->prorettype)); + retval = xfunc_tuple_width(rd); + heap_close(rd); + goto exit; + } + else /* function returns a base type */ + { + tupl = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(proc->prorettype), + 0,0,0); + if (!HeapTupleIsValid(tupl)) + elog(WARN, "Cache lookup failed for type %d", proc->prorettype); + type = (TypeTupleForm) GETSTRUCT(tupl); + /* if the type length is known, return that */ + if (type->typlen != -1) + { + retval = type->typlen; + goto exit; + } + else /* estimate the return size */ + { + /* find width of the function's arguments */ + for (tmpclause = args; tmpclause != LispNil; + tmpclause = lnext(tmpclause)) + retval += xfunc_width(lfirst(tmpclause)); + /* multiply by outin_ratio */ + retval = (int)(proc->prooutin_ratio/100.0 * retval); + goto exit; + } + } + exit: + return(retval); +} + +/* + ** xfunc_tuple_width -- + ** Return the sum of the lengths of all the attributes of a given relation + */ +int xfunc_tuple_width(Relation rd) +{ + int i; + int retval = 0; + TupleDesc tdesc = RelationGetTupleDescriptor(rd); + + for (i = 0; i < tdesc->natts; i++) + { + if (tdesc->attrs[i]->attlen != -1) + retval += tdesc->attrs[i]->attlen; + else retval += VARLEN_DEFAULT; + } + + return(retval); +} + +/* + ** xfunc_num_join_clauses -- + ** Find the number of join clauses associated with this join path + */ +int xfunc_num_join_clauses(JoinPath path) +{ + int num = length(get_pathclauseinfo(path)); + if (IsA(path,MergePath)) + return(num + length(get_path_mergeclauses((MergePath)path))); + else if (IsA(path,HashPath)) + return(num + length(get_path_hashclauses((HashPath)path))); + else return(num); +} + +/* + ** xfunc_LispRemove -- + ** Just like LispRemove, but it whines if the item to be removed ain't there + */ +LispValue xfunc_LispRemove(LispValue foo, List bar) +{ + LispValue temp = LispNil; + LispValue result = LispNil; + int sanity = false; + + for (temp = bar; !null(temp); temp = lnext(temp)) + if (! equal((Node)(foo),(Node)(lfirst(temp))) ) + { + result = lappend(result,lfirst(temp)); + } + else sanity = true; /* found a matching item to remove! 
*/ + + if (!sanity) + elog(WARN, "xfunc_LispRemove: didn't find a match!"); + + return(result); +} + +#define Node_Copy(a, b, c, d) \ + if (NodeCopy((Node)((a)->d), (Node*)&((b)->d), c) != true) { \ + return false; \ + } + +/* + ** xfunc_copyrel -- + ** Just like _copyRel, but doesn't copy the paths + */ +bool xfunc_copyrel(Rel from, Rel *to) +{ + Rel newnode; + Pointer (*alloc)() = palloc; + + /* COPY_CHECKARGS() */ + if (to == NULL) + { + return false; + } + + /* COPY_CHECKNULL() */ + if (from == NULL) + { + (*to) = NULL; + return true; + } + + /* COPY_NEW(c) */ + newnode = (Rel)(*alloc)(classSize(Rel)); + if (newnode == NULL) + { + return false; + } + + /* ---------------- + * copy node superclass fields + * ---------------- + */ + CopyNodeFields((Node)from, (Node)newnode, alloc); + + /* ---------------- + * copy remainder of node + * ---------------- + */ + Node_Copy(from, newnode, alloc, relids); + + newnode->indexed = from->indexed; + newnode->pages = from->pages; + newnode->tuples = from->tuples; + newnode->size = from->size; + newnode->width = from->width; + + Node_Copy(from, newnode, alloc, targetlist); + /* No!!!! Node_Copy(from, newnode, alloc, pathlist); + Node_Copy(from, newnode, alloc, unorderedpath); + Node_Copy(from, newnode, alloc, cheapestpath); */ +#if 0 /* can't use Node_copy now. 2/95 -ay */ + Node_Copy(from, newnode, alloc, classlist); + Node_Copy(from, newnode, alloc, indexkeys); + Node_Copy(from, newnode, alloc, ordering); +#endif + Node_Copy(from, newnode, alloc, clauseinfo); + Node_Copy(from, newnode, alloc, joininfo); + Node_Copy(from, newnode, alloc, innerjoin); + Node_Copy(from, newnode, alloc, superrels); + + (*to) = newnode; + return true; +} diff --git a/src/backend/optimizer/pathnode.h b/src/backend/optimizer/pathnode.h new file mode 100644 index 00000000000..0617600d4eb --- /dev/null +++ b/src/backend/optimizer/pathnode.h @@ -0,0 +1,50 @@ +/*------------------------------------------------------------------------- + * + * pathnode.h-- + * prototypes for pathnode.c, indexnode.c, relnode.c. 
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: pathnode.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PATHNODE_H +#define PATHNODE_H + +/* + * prototypes for pathnode.c + */ +extern bool path_is_cheaper(Path *path1, Path *path2); +extern Path *set_cheapest(Rel *parent_rel, List *pathlist); +extern List *add_pathlist(Rel *parent_rel, List *unique_paths, + List *new_paths); +extern Path *create_seqscan_path(Rel *rel); +extern IndexPath *create_index_path(Query *root, Rel *rel, Rel *index, + List *restriction_clauses, bool is_join_scan); +extern JoinPath *create_nestloop_path(Rel *joinrel, Rel *outer_rel, + Path *outer_path, Path *inner_path, List *keys); +extern MergePath *create_mergesort_path(Rel *joinrel, int outersize, + int innersize, int outerwidth, int innerwidth, Path *outer_path, + Path *inner_path, List *keys, MergeOrder *order, + List *mergeclauses, List *outersortkeys, List *innersortkeys); + +extern HashPath *create_hashjoin_path(Rel *joinrel, int outersize, + int innersize, int outerwidth, int innerwidth, Path *outer_path, + Path *inner_path, List *keys, Oid operator, List *hashclauses, + List *outerkeys, List *innerkeys); + +/* + * prototypes for rel.c + */ +extern Rel *rel_member(List *relid, List *rels); +extern Rel *get_base_rel(Query* root, int relid); +extern Rel *get_join_rel(Query* root, List *relid); + +/* + * prototypes for indexnode.h + */ +extern List *find_relation_indices(Query *root,Rel *rel); + +#endif /* PATHNODE_H */ diff --git a/src/backend/optimizer/paths.h b/src/backend/optimizer/paths.h new file mode 100644 index 00000000000..62468041cfd --- /dev/null +++ b/src/backend/optimizer/paths.h @@ -0,0 +1,89 @@ +/*------------------------------------------------------------------------- + * + * paths.h-- + * prototypes for various files in optimizer/paths (were separate + * header files + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: paths.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PATHS_H +#define PATHS_H + +/* + * allpaths.h + */ +extern List *find_paths(Query *root, List *rels); + +/* + * indxpath.h + * routines to generate index paths + */ +extern List *find_index_paths(Query *root, Rel *rel, List *indices, + List *clauseinfo_list, + List *joininfo_list); + +/* + * joinpath.h + * routines to create join paths + */ +extern void find_all_join_paths(Query *root, List *joinrels); + + +/* + * orindxpath.h + */ +extern List *create_or_index_paths(Query *root, Rel *rel, List *clauses); + +/* + * hashutils.h + * routines to deal with hash keys and clauses + */ +extern List *group_clauses_by_hashop(List *clauseinfo_list, + int inner_relid); + +/* + * joinutils.h + * generic join method key/clause routines + */ +extern List *match_pathkeys_joinkeys(List *pathkeys, + List *joinkeys, List *joinclauses, int which_subkey, + List **matchedJoinClausesPtr); +extern List *extract_path_keys(List *joinkeys, List *tlist, + int which_subkey); +extern Path *match_paths_joinkeys(List *joinkeys, PathOrder *ordering, + List *paths, int which_subkey); +extern List *new_join_pathkeys(List *outer_pathkeys, + List *join_rel_tlist, List *joinclauses); + +/* + * mergeutils.h + * routines to deal with merge keys and clauses + */ +extern List *group_clauses_by_order(List *clauseinfo_list, + int inner_relid); +extern MInfo 
*match_order_mergeinfo(PathOrder *ordering, + List *mergeinfo_list); + +/* + * joinrels.h + * routines to determine which relations to join + */ +extern List *find_join_rels(Query *root, List *outer_rels); +extern void add_new_joininfos(Query *root, List *joinrels, List *outerrels); +extern List *final_join_rels(List *join_rel_list); + +/* + * prototypes for path/prune.c + */ +extern List *prune_joinrels(List *rel_list); +extern void prune_rel_paths(List *rel_list); +extern Path *prune_rel_path(Rel *rel, Path *unorderedpath); +extern List *merge_joinrels(List *rel_list1, List *rel_list2); +extern List *prune_oldrels(List *old_rels); + +#endif /* PATHS_H */ diff --git a/src/backend/optimizer/plan/Makefile.inc b/src/backend/optimizer/plan/Makefile.inc new file mode 100644 index 00000000000..eccd412e9ff --- /dev/null +++ b/src/backend/optimizer/plan/Makefile.inc @@ -0,0 +1,15 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for optimizer/plan +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/optimizer/plan/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:37 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= createplan.c initsplan.c planmain.c planner.c \ + setrefs.c diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c new file mode 100644 index 00000000000..70a5e6461bd --- /dev/null +++ b/src/backend/optimizer/plan/createplan.c @@ -0,0 +1,1097 @@ +/*------------------------------------------------------------------------- + * + * createplan.c-- + * Routines to create the desired plan for processing a query + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/createplan.c,v 1.1.1.1 1996/07/09 06:21:37 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "c.h" + +#include "nodes/execnodes.h" +#include "nodes/plannodes.h" +#include "nodes/relation.h" +#include "nodes/primnodes.h" +#include "nodes/nodeFuncs.h" + +#include "nodes/makefuncs.h" + +#include "utils/elog.h" +#include "utils/lsyscache.h" +#include "utils/palloc.h" +#include "utils/builtins.h" + +#include "parser/parse_query.h" +#include "optimizer/clauseinfo.h" +#include "optimizer/clauses.h" +#include "optimizer/planmain.h" +#include "optimizer/tlist.h" +#include "optimizer/planner.h" +#include "optimizer/xfunc.h" +#include "optimizer/internal.h" + + +#define TEMP_SORT 1 +#define TEMP_MATERIAL 2 + +static List *switch_outer(List *clauses); +static Scan *create_scan_node(Path *best_path, List *tlist); +static Join *create_join_node(JoinPath *best_path, List *tlist); +static SeqScan *create_seqscan_node(Path *best_path, List *tlist, + List *scan_clauses); +static IndexScan *create_indexscan_node(IndexPath *best_path, List *tlist, + List *scan_clauses); +static NestLoop *create_nestloop_node(JoinPath *best_path, List *tlist, + List *clauses, Plan *outer_node, List *outer_tlist, + Plan *inner_node, List *inner_tlist); +static MergeJoin *create_mergejoin_node(MergePath *best_path, List *tlist, + List *clauses, Plan *outer_node, List *outer_tlist, + Plan *inner_node, List *inner_tlist); +static HashJoin *create_hashjoin_node(HashPath *best_path, List *tlist, + List *clauses, Plan *outer_node, List *outer_tlist, + Plan *inner_node, List *inner_tlist); +static 
Node *fix_indxqual_references(Node *clause, Path *index_path); +static Temp *make_temp(List *tlist, List *keys, Oid *operators, + Plan *plan_node, int temptype); +static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid, + List *indxid, List *indxqual); +static NestLoop *make_nestloop(List *qptlist, List *qpqual, Plan *lefttree, + Plan *righttree); +static HashJoin *make_hashjoin(List *tlist, List *qpqual, + List *hashclauses, Plan *lefttree, Plan *righttree); +static Hash *make_hash(List *tlist, Var *hashkey, Plan *lefttree); +static MergeJoin *make_mergesort(List * tlist, List *qpqual, + List *mergeclauses, Oid opcode, Oid *rightorder, + Oid *leftorder, Plan *righttree, Plan *lefttree); +static Material *make_material(List *tlist, Oid tempid, Plan *lefttree, + int keycount); + +/* + * create_plan-- + * Creates the access plan for a query by tracing backwards through the + * desired chain of pathnodes, starting at the node 'best-path'. For + * every pathnode found: + * (1) Create a corresponding plan node containing appropriate id, + * target list, and qualification information. + * (2) Modify ALL clauses so that attributes are referenced using + * relative values. + * (3) Target lists are not modified, but will be in another routine. + * + * best-path is the best access path + * + * Returns the optimal(?) access plan. + */ +Plan * +create_plan(Path *best_path) +{ + List *tlist; + Plan *plan_node = (Plan*)NULL; + Rel *parent_rel; + int size; + int width; + int pages; + int tuples; + + parent_rel = best_path->parent; + tlist = get_actual_tlist(parent_rel->targetlist); + size = parent_rel->size; + width = parent_rel->width; + pages = parent_rel->pages; + tuples = parent_rel->tuples; + + switch(best_path->pathtype) { + case T_IndexScan : + case T_SeqScan : + plan_node = (Plan*)create_scan_node(best_path, tlist); + break; + case T_HashJoin : + case T_MergeJoin : + case T_NestLoop: + plan_node = (Plan*)create_join_node((JoinPath*)best_path, tlist); + break; + default: + /* do nothing */ + break; + } + + plan_node->plan_size = size; + plan_node->plan_width = width; + if (pages == 0) pages = 1; + plan_node->plan_tupperpage = tuples/pages; + +#if 0 /* fix xfunc */ + /* sort clauses by cost/(1-selectivity) -- JMH 2/26/92 */ + if (XfuncMode != XFUNC_OFF) + { + set_qpqual((Plan) plan_node, + lisp_qsort( get_qpqual((Plan) plan_node), + xfunc_clause_compare)); + if (XfuncMode != XFUNC_NOR) + /* sort the disjuncts within each clause by cost -- JMH 3/4/92 */ + xfunc_disjunct_sort(plan_node->qpqual); + } +#endif + + return(plan_node); +} + +/* + * create_scan_node-- + * Create a scan path for the parent relation of 'best-path'. + * + * tlist is the targetlist for the base relation scanned by 'best-path' + * + * Returns the scan node. + */ +static Scan * +create_scan_node(Path *best_path, List *tlist) +{ + + Scan *node; + List *scan_clauses; + + /* + * Extract the relevant clauses from the parent relation and replace the + * operator OIDs with the corresponding regproc ids. + * + * now that local predicate clauses are copied into paths in + * find_rel_paths() and then (possibly) pulled up in xfunc_trypullup(), + * we get the relevant clauses from the path itself, not its parent + * relation. 
--- JMH, 6/15/92 + */ + scan_clauses = fix_opids(get_actual_clauses(best_path->locclauseinfo)); + + switch(best_path->pathtype) { + case T_SeqScan : + node = (Scan*)create_seqscan_node(best_path, tlist, scan_clauses); + break; + + case T_IndexScan: + node = (Scan*)create_indexscan_node((IndexPath*)best_path, + tlist, + scan_clauses); + break; + + default : + elog(WARN, "create_scan_node: unknown node type", + best_path->pathtype); + break; + } + + return node; +} + +/* + * create_join_node -- + * Create a join path for 'best-path' and(recursively) paths for its + * inner and outer paths. + * + * 'tlist' is the targetlist for the join relation corresponding to + * 'best-path' + * + * Returns the join node. + */ +static Join * +create_join_node(JoinPath *best_path, List *tlist) +{ + Plan *outer_node; + List *outer_tlist; + Plan *inner_node; + List *inner_tlist; + List *clauses; + Join *retval; + + outer_node = create_plan((Path*)best_path->outerjoinpath); + outer_tlist = outer_node->targetlist; + + inner_node = create_plan((Path*)best_path->innerjoinpath); + inner_tlist = inner_node->targetlist; + + clauses = get_actual_clauses(best_path->pathclauseinfo); + + switch(best_path->path.pathtype) { + case T_MergeJoin: + retval = (Join*)create_mergejoin_node((MergePath*)best_path, + tlist, + clauses, + outer_node, + outer_tlist, + inner_node, + inner_tlist); + break; + case T_HashJoin: + retval = (Join*)create_hashjoin_node((HashPath*)best_path, + tlist, + clauses, + outer_node, + outer_tlist, + inner_node, + inner_tlist); + break; + case T_NestLoop: + retval = (Join*)create_nestloop_node((JoinPath*)best_path, + tlist, + clauses, + outer_node, + outer_tlist, + inner_node, + inner_tlist); + break; + default: + /* do nothing */ + elog(WARN, "create_join_node: unknown node type", + best_path->path.pathtype); + } + +#if 0 + /* + ** Expensive function pullups may have pulled local predicates + ** into this path node. Put them in the qpqual of the plan node. + ** -- JMH, 6/15/92 + */ + if (get_locclauseinfo(best_path) != NIL) + set_qpqual((Plan)retval, + nconc(get_qpqual((Plan) retval), + fix_opids(get_actual_clauses + (get_locclauseinfo(best_path))))); +#endif + + return(retval); +} + +/***************************************************************************** + * + * BASE-RELATION SCAN METHODS + * + *****************************************************************************/ + + +/* + * create_seqscan_node-- + * Returns a seqscan node for the base relation scanned by 'best-path' + * with restriction clauses 'scan-clauses' and targetlist 'tlist'. + */ +static SeqScan * +create_seqscan_node(Path *best_path, List *tlist, List *scan_clauses) +{ + SeqScan *scan_node = (SeqScan*)NULL; + Index scan_relid = -1; + List *temp; + + temp = best_path->parent->relids; + if(temp == NULL) + elog(WARN,"scanrelid is empty"); + else + scan_relid = (Index)lfirst(temp); /* ??? who takes care of lnext? - ay */ + scan_node = make_seqscan(tlist, + scan_clauses, + scan_relid, + (Plan*)NULL); + + scan_node->plan.cost = best_path->path_cost; + + return(scan_node); +} + +/* + * create_indexscan_node-- + * Returns a indexscan node for the base relation scanned by 'best-path' + * with restriction clauses 'scan-clauses' and targetlist 'tlist'. + */ +static IndexScan * +create_indexscan_node(IndexPath *best_path, + List *tlist, + List *scan_clauses) +{ + /* + * Extract the(first if conjunct, only if disjunct) clause from the + * clauseinfo list. 
+ */ + Expr *index_clause = (Expr*)NULL; + List *indxqual = NIL; + List *qpqual = NIL; + List *fixed_indxqual = NIL; + IndexScan *scan_node = (IndexScan*)NULL; + + + /* + * If an 'or' clause is to be used with this index, the indxqual + * field will contain a list of the 'or' clause arguments, e.g., the + * clause(OR a b c) will generate: ((a) (b) (c)). Otherwise, the + * indxqual will simply contain one conjunctive qualification: ((a)). + */ + if (best_path->indexqual != NULL) + /* added call to fix_opids, JMH 6/23/92 */ + index_clause = (Expr*) + lfirst(fix_opids(get_actual_clauses(best_path->indexqual))); + + if (or_clause((Node*)index_clause)) { + List *temp = NIL; + + foreach(temp, index_clause->args) + indxqual = lappend(indxqual, lcons(lfirst(temp), NIL)); + } else { + indxqual = lcons(get_actual_clauses(best_path->indexqual), + NIL); + } + + /* + * The qpqual field contains all restrictions except the indxqual. + */ + if(or_clause((Node*)index_clause)) + qpqual = set_difference(scan_clauses, + lcons(index_clause,NIL)); + else + qpqual = set_difference(scan_clauses, lfirst(indxqual)); + + fixed_indxqual = + (List*)fix_indxqual_references((Node*)indxqual,(Path*)best_path); + + scan_node = + make_indexscan(tlist, + qpqual, + lfirsti(best_path->path.parent->relids), + best_path->indexid, + fixed_indxqual); + + scan_node->scan.plan.cost = best_path->path.path_cost; + + return(scan_node); +} + +/***************************************************************************** + * + * JOIN METHODS + * + *****************************************************************************/ + +static NestLoop * +create_nestloop_node(JoinPath *best_path, + List *tlist, + List *clauses, + Plan *outer_node, + List *outer_tlist, + Plan *inner_node, + List *inner_tlist) +{ + NestLoop *join_node = (NestLoop*)NULL; + + if (IsA(inner_node,IndexScan)) { + /* An index is being used to reduce the number of tuples scanned in + * the inner relation. + * There will never be more than one index used in the inner + * scan path, so we need only consider the first set of + * qualifications in indxqual. + */ + + List *inner_indxqual = lfirst(((IndexScan*)inner_node)->indxqual); + List *inner_qual = (inner_indxqual == NULL)? NULL:lfirst(inner_indxqual); + + /* If we have in fact found a join index qualification, remove these + * index clauses from the nestloop's join clauses and reset the + * inner(index) scan's qualification so that the var nodes refer to + * the proper outer join relation attributes. 
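 * (Why the surgery is worth it: once a join clause has been pushed into the
 *  inner index scan it gets evaluated there on every rescan, with the
 *  current outer tuple's values supplied through the outer-tlist references
 *  produced by index_outerjoin_references() below; leaving the same clause
 *  in the nestloop's own join qual would only re-test it redundantly.)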
+ */ + if (!(qual_clause_p((Node*)inner_qual))) { + List *new_inner_qual = NIL; + + clauses = set_difference(clauses,inner_indxqual); + new_inner_qual = + index_outerjoin_references(inner_indxqual, + outer_node->targetlist, + ((Scan*)inner_node)->scanrelid); + ((IndexScan*)inner_node)->indxqual = + lcons(new_inner_qual,NIL); + } + }else if (IsA_Join(inner_node)) { + inner_node = (Plan*)make_temp(inner_tlist, + NIL, + NULL, + inner_node, + TEMP_MATERIAL); + } + + join_node = make_nestloop(tlist, + join_references(clauses, + outer_tlist, + inner_tlist), + outer_node, + inner_node); + + join_node->join.cost = best_path->path.path_cost; + + return(join_node); +} + +static MergeJoin * +create_mergejoin_node(MergePath *best_path, + List *tlist, + List *clauses, + Plan *outer_node, + List *outer_tlist, + Plan *inner_node, + List *inner_tlist) +{ + List *qpqual, *mergeclauses; + RegProcedure opcode; + Oid *outer_order, *inner_order; + MergeJoin *join_node; + + + /* Separate the mergeclauses from the other join qualification + * clauses and set those clauses to contain references to lower + * attributes. + */ + qpqual = join_references(set_difference(clauses, + best_path->path_mergeclauses), + outer_tlist, + inner_tlist); + + /* Now set the references in the mergeclauses and rearrange them so + * that the outer variable is always on the left. + */ + mergeclauses = switch_outer(join_references(best_path->path_mergeclauses, + outer_tlist, + inner_tlist)); + + opcode = + get_opcode((best_path->jpath.path.p_ordering.ord.merge)->join_operator); + + outer_order = (Oid *)palloc(sizeof(Oid)*2); + outer_order[0] = + (best_path->jpath.path.p_ordering.ord.merge)->left_operator; + outer_order[1] = 0; + + inner_order = (Oid *)palloc(sizeof(Oid)*2); + inner_order[0] = + (best_path->jpath.path.p_ordering.ord.merge)->right_operator; + inner_order[1] = 0; + + /* Create explicit sort paths for the outer and inner join paths if + * necessary. The sort cost was already accounted for in the path. + */ + if (best_path->outersortkeys) { + Temp *sorted_outer_node = make_temp(outer_tlist, + best_path->outersortkeys, + outer_order, + outer_node, + TEMP_SORT); + sorted_outer_node->plan.cost = outer_node->cost; + outer_node = (Plan*)sorted_outer_node; + } + + if (best_path->innersortkeys) { + Temp *sorted_inner_node = make_temp(inner_tlist, + best_path->innersortkeys, + inner_order, + inner_node, + TEMP_SORT); + sorted_inner_node->plan.cost = outer_node->cost; + inner_node = (Plan*)sorted_inner_node; + } + + join_node = make_mergesort(tlist, + qpqual, + mergeclauses, + opcode, + inner_order, + outer_order, + inner_node, + outer_node); + + join_node->join.cost = best_path->jpath.path.path_cost; + + return(join_node); +} + +/* + * create_hashjoin_node-- XXX HASH + * + * Returns a new hashjoin node. + * + * XXX hash join ops are totally bogus -- how the hell do we choose + * these?? at runtime? what about a hash index? + */ +static HashJoin * +create_hashjoin_node(HashPath *best_path, + List *tlist, + List *clauses, + Plan *outer_node, + List *outer_tlist, + Plan *inner_node, + List *inner_tlist) +{ + List *qpqual; + List *hashclauses; + HashJoin *join_node; + Hash *hash_node; + Var *innerhashkey; + + /* Separate the hashclauses from the other join qualification clauses + * and set those clauses to contain references to lower attributes. 
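 * (Division of labour: only the clauses in path_hashclauses feed the hash
 *  table -- after switch_outer() the outer var sits on the left, so
 *  get_rightop() of the first hash clause is the inner hash key the Hash
 *  node builds its table on -- while everything else ends up in qpqual and
 *  is only checked once a probe has found a matching inner tuple.)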
+ */ + qpqual = + join_references(set_difference(clauses, + best_path->path_hashclauses), + outer_tlist, + inner_tlist); + + /* Now set the references in the hashclauses and rearrange them so + * that the outer variable is always on the left. + */ + hashclauses = + switch_outer(join_references(best_path->path_hashclauses, + outer_tlist, + inner_tlist)); + + innerhashkey = get_rightop(lfirst(hashclauses)); + + hash_node = make_hash(inner_tlist, innerhashkey, inner_node); + join_node = make_hashjoin(tlist, + qpqual, + hashclauses, + outer_node, + (Plan*)hash_node); + join_node->join.cost = best_path->jpath.path.path_cost; + + return(join_node); +} + + +/***************************************************************************** + * + * SUPPORTING ROUTINES + * + *****************************************************************************/ + +static Node * +fix_indxqual_references(Node *clause, Path *index_path) +{ + Node *newclause; + + if (IsA(clause,Var)) { + if (lfirsti(index_path->parent->relids) == ((Var*)clause)->varno) { + int pos = 0; + int varatt = ((Var*)clause)->varattno; + int *indexkeys = index_path->parent->indexkeys; + + if (indexkeys) { + while (indexkeys[pos] != 0) { + if(varatt == indexkeys[pos]) { + break; + } + pos++; + } + } + newclause = copyObject((Node*)clause); + ((Var*)newclause)->varattno = pos + 1; + return (newclause); + } else { + return (clause); + } + } else if(IsA(clause,Const)) { + return(clause); + } else if(is_opclause(clause) && + is_funcclause((Node*)get_leftop((Expr*)clause)) && + ((Func*)((Expr*)get_leftop((Expr*)clause))->oper)->funcisindex){ + Var *newvar = + makeVar((Index)lfirst(index_path->parent->relids), + 1, /* func indices have one key */ + ((Func*)((Expr*)clause)->oper)->functype, + (Index)lfirst(index_path->parent->relids), + 0); + + return + ((Node*)make_opclause((Oper*)((Expr*)clause)->oper, + newvar, + get_rightop((Expr*)clause))); + + } else if (IsA(clause,Expr)) { + Expr *expr = (Expr*)clause; + List *new_subclauses = NIL; + Node *subclause = NULL; + List *i = NIL; + + foreach(i, expr->args) { + subclause = lfirst(i); + if(subclause) + new_subclauses = + lappend(new_subclauses, + fix_indxqual_references(subclause, + index_path)); + + } + + /* XXX new_subclauses should be a list of the form: + * ( (var var) (var const) ...) ? + */ + if(new_subclauses) { + return (Node*) + make_clause(expr->opType, expr->oper, new_subclauses); + } else { + return(clause); + } + } else { + List *oldclauses = (List*)clause; + List *new_subclauses = NIL; + Node *subclause = NULL; + List *i = NIL; + + foreach(i, oldclauses) { + subclause = lfirst(i); + if(subclause) + new_subclauses = + lappend(new_subclauses, + fix_indxqual_references(subclause, + index_path)); + + } + + /* XXX new_subclauses should be a list of the form: + * ( (var var) (var const) ...) ? + */ + if(new_subclauses) { + return (Node*)new_subclauses; + } else { + return (clause); + } + } +} + + +/* + * switch_outer-- + * Given a list of merge clauses, rearranges the elements within the + * clauses so the outer join variable is on the left and the inner is on + * the right. + * + * Returns the rearranged list ? + * + * XXX Shouldn't the operator be commuted?! 
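 * A rough, standalone sketch of the operand swap (toy types and names such
 * as ToyClause and switch_outer_toy are illustrative stand-ins, not the
 * backend's Expr/Var nodes; a non-symmetric operator would indeed also need
 * to be commuted, which is why '=' is the safe case):
 *
 *     #include <stdio.h>
 *
 *     // Toy stand-in for an operator clause; the real routine walks backend
 *     // Expr nodes and tests var_is_outer() on the right operand.
 *     typedef struct {
 *         const char *op;         // operator name, e.g. "="
 *         const char *left;
 *         const char *right;
 *         int right_is_outer;     // is the outer relation's var on the right?
 *     } ToyClause;
 *
 *     static ToyClause switch_outer_toy(ToyClause c)
 *     {
 *         if (c.right_is_outer) {
 *             const char *tmp = c.left;
 *             c.left = c.right;   // outer operand moves to the left
 *             c.right = tmp;
 *             c.right_is_outer = 0;
 *         }
 *         return c;
 *     }
 *
 *     int main(void)
 *     {
 *         ToyClause c = { "=", "inner.y", "outer.x", 1 };
 *         c = switch_outer_toy(c);
 *         printf("%s %s %s\n", c.left, c.op, c.right);   // outer.x = inner.y
 *         return 0;
 *     }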
+ */ +static List * +switch_outer(List *clauses) +{ + List *t_list = NIL; + Expr *temp = NULL; + List *i = NIL; + Expr *clause; + + foreach(i,clauses) { + clause = lfirst(i); + if(var_is_outer(get_rightop(clause))) { + temp = make_clause(clause->opType, clause->oper, + lcons(get_rightop(clause), + lcons(get_leftop(clause), + NIL))); + t_list = lappend(t_list,temp); + } + else + t_list = lappend(t_list,clause); + } + return(t_list); +} + +/* + * set-temp-tlist-operators-- + * Sets the key and keyop fields of resdom nodes in a target list. + * + * 'tlist' is the target list + * 'pathkeys' is a list of N keys in the form((key1) (key2)...(keyn)), + * corresponding to vars in the target list that are to + * be sorted or hashed + * 'operators' is the corresponding list of N sort or hash operators + * 'keyno' is the first key number + * XXX - keyno ? doesn't exist - jeff + * + * Returns the modified target list. + */ +static List * +set_temp_tlist_operators(List *tlist, List *pathkeys, Oid *operators) +{ + Node *keys = NULL; + int keyno = 1; + Resdom *resdom = (Resdom*)NULL ; + List *i = NIL; + + foreach(i, pathkeys) { + keys = lfirst((List*)lfirst(i)); + resdom = tlist_member((Var*)keys, tlist); + if (resdom) { + + /* Order the resdom keys and replace the operator OID for each + * key with the regproc OID. + * + * XXX Note that the optimizer only generates merge joins + * with 1 operator (see create_mergejoin_node) - ay 2/95 + */ + resdom->reskey = keyno; + resdom->reskeyop = get_opcode(operators[0]); + } + keyno += 1; + } + return(tlist); +} + +/***************************************************************************** + * + * + *****************************************************************************/ + +/* + * make_temp-- + * Create plan nodes to sort or materialize relations into temporaries. The + * result returned for a sort will look like (SEQSCAN(SORT(plan-node))) + * or (SEQSCAN(MATERIAL(plan-node))) + * + * 'tlist' is the target list of the scan to be sorted or hashed + * 'keys' is the list of keys which the sort or hash will be done on + * 'operators' is the operators with which the sort or hash is to be done + * (a list of operator OIDs) + * 'plan-node' is the node which yields tuples for the sort + * 'temptype' indicates which operation(sort or hash) to perform + */ +static Temp * +make_temp(List *tlist, + List *keys, + Oid *operators, + Plan *plan_node, + int temptype) +{ + List *temp_tlist; + Temp *retval; + + /* Create a new target list for the temporary, with keys set. 
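 * A minimal sketch of the nesting built here, using toy stand-ins
 * (ToyPlan and make_temp_toy are illustrative, not the real plan nodes):
 *
 *     #include <stdio.h>
 *
 *     // Toy stand-ins for SeqScan/Sort/Material; illustrative only.
 *     typedef enum { TOY_SCAN, TOY_SORT, TOY_MATERIAL, TOY_SUBPLAN } ToyTag;
 *     typedef struct ToyPlan { ToyTag tag; struct ToyPlan *lefttree; } ToyPlan;
 *
 *     // Same shape as make_temp(): the temp node goes over the subplan and a
 *     // sequential scan of the temp relation is stacked on top of that.
 *     static ToyPlan *make_temp_toy(ToyPlan *subplan, ToyTag temptype,
 *                                   ToyPlan storage[2])
 *     {
 *         storage[0].tag = temptype;  storage[0].lefttree = subplan;
 *         storage[1].tag = TOY_SCAN;  storage[1].lefttree = &storage[0];
 *         return &storage[1];
 *     }
 *
 *     int main(void)
 *     {
 *         ToyPlan subplan = { TOY_SUBPLAN, NULL };
 *         ToyPlan mem[2];
 *         ToyPlan *p = make_temp_toy(&subplan, TOY_SORT, mem);
 *         for (; p != NULL; p = p->lefttree)
 *             printf("%d ", (int)p->tag);   // prints 0 1 3: SCAN, SORT, SUBPLAN
 *         printf("\n");
 *         return 0;
 *     }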
*/ + temp_tlist = set_temp_tlist_operators(new_unsorted_tlist(tlist), + keys, + operators); + switch(temptype) { + case TEMP_SORT : + retval = (Temp*)make_seqscan(tlist, + NIL, + _TEMP_RELATION_ID_, + (Plan*)make_sort(temp_tlist, + _TEMP_RELATION_ID_, + plan_node, + length(keys))); + break; + + case TEMP_MATERIAL : + retval = (Temp*)make_seqscan(tlist, + NIL, + _TEMP_RELATION_ID_, + (Plan*)make_material(temp_tlist, + _TEMP_RELATION_ID_, + plan_node, + length(keys))); + break; + + default: + elog(WARN,"make_temp: unknown temp type %d", temptype); + + } + return(retval); +} + + +SeqScan * +make_seqscan(List *qptlist, + List *qpqual, + Index scanrelid, + Plan *lefttree) +{ + SeqScan *node = makeNode(SeqScan); + Plan *plan = &node->plan; + + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = qptlist; + plan->qual = qpqual; + plan->lefttree = lefttree; + plan->righttree = NULL; + node->scanrelid = scanrelid; + node->scanstate = (CommonScanState *)NULL; + + return(node); +} + +static IndexScan * +make_indexscan(List *qptlist, + List *qpqual, + Index scanrelid, + List *indxid, + List *indxqual) +{ + IndexScan *node = makeNode(IndexScan); + Plan *plan = &node->scan.plan; + + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = qptlist; + plan->qual = qpqual; + plan->lefttree = NULL; + plan->righttree = NULL; + node->scan.scanrelid = scanrelid; + node->indxid = indxid; + node->indxqual = indxqual; + node->scan.scanstate = (CommonScanState *)NULL; + + return(node); +} + + +static NestLoop * +make_nestloop(List *qptlist, + List *qpqual, + Plan *lefttree, + Plan *righttree) +{ + NestLoop *node = makeNode(NestLoop); + Plan *plan = &node->join; + + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = qptlist; + plan->qual = qpqual; + plan->lefttree = lefttree; + plan->righttree = righttree; + node->nlstate = (NestLoopState*)NULL; + + return(node); +} + +static HashJoin * +make_hashjoin(List *tlist, + List *qpqual, + List *hashclauses, + Plan *lefttree, + Plan *righttree) +{ + HashJoin *node = makeNode(HashJoin); + Plan *plan = &node->join; + + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = tlist; + plan->qual = qpqual; + plan->lefttree = lefttree; + plan->righttree = righttree; + node->hashclauses = hashclauses; + node->hashjointable = NULL; + node->hashjointablekey = 0; + node->hashjointablesize = 0; + node->hashdone = false; + + return(node); +} + +static Hash * +make_hash(List *tlist, Var *hashkey, Plan *lefttree) +{ + Hash *node = makeNode(Hash); + Plan *plan = &node->plan; + + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = tlist; + plan->qual = NULL; + plan->lefttree = lefttree; + plan->righttree = NULL; + node->hashkey = hashkey; + node->hashtable = NULL; + node->hashtablekey = 0; + node->hashtablesize = 0; + + return(node); +} + +static MergeJoin * +make_mergesort(List *tlist, + List *qpqual, + List *mergeclauses, + Oid opcode, + Oid *rightorder, + Oid *leftorder, + Plan *righttree, + Plan *lefttree) +{ + MergeJoin *node = makeNode(MergeJoin); + Plan *plan = &node->join; + + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = tlist; + plan->qual = qpqual; + plan->lefttree = lefttree; + plan->righttree = righttree; + node->mergeclauses = mergeclauses; + node->mergesortop = opcode; + node->mergerightorder = rightorder; + node->mergeleftorder = leftorder; + + return(node); +} + +Sort * +make_sort(List *tlist, Oid tempid, Plan *lefttree, int keycount) +{ + Sort *node = 
makeNode(Sort); + Plan *plan = &node->plan; + + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = tlist; + plan->qual = NIL; + plan->lefttree = lefttree; + plan->righttree = NULL; + node->tempid = tempid; + node->keycount = keycount; + + return(node); +} + +static Material * +make_material(List *tlist, + Oid tempid, + Plan *lefttree, + int keycount) +{ + Material *node = makeNode(Material); + Plan *plan = &node->plan; + + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = tlist; + plan->qual = NIL; + plan->lefttree = lefttree; + plan->righttree = NULL; + node->tempid = tempid; + node->keycount = keycount; + + return(node); +} + +Agg * +make_agg(List *tlist, int nagg, Aggreg **aggs) +{ + Agg *node = makeNode(Agg); + + node->plan.cost = 0.0; + node->plan.state = (EState*)NULL; + node->plan.qual = NULL; + node->plan.targetlist = tlist; + node->plan.lefttree = (Plan*)NULL; + node->plan.righttree = (Plan*)NULL; + node->numAgg = nagg; + node->aggs = aggs; + + return(node); +} + +Group * +make_group(List *tlist, + bool tuplePerGroup, + int ngrp, + AttrNumber *grpColIdx, + Sort *lefttree) +{ + Group *node = makeNode(Group); + + node->plan.cost = 0.0; + node->plan.state = (EState*)NULL; + node->plan.qual = NULL; + node->plan.targetlist = tlist; + node->plan.lefttree = (Plan*)lefttree; + node->plan.righttree = (Plan*)NULL; + node->tuplePerGroup = tuplePerGroup; + node->numCols = ngrp; + node->grpColIdx = grpColIdx; + + return(node); +} + +/* + * A unique node always has a SORT node in the lefttree. + * + * the uniqueAttr argument must be a null-terminated string, + * either the name of the attribute to select unique on + * or "*" + */ + +Unique * +make_unique(List *tlist, Plan *lefttree, char* uniqueAttr) +{ + Unique *node = makeNode(Unique); + Plan *plan = &node->plan; + + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = tlist; + plan->qual = NIL; + plan->lefttree = lefttree; + plan->righttree = NULL; + node->tempid = _TEMP_RELATION_ID_; + node->keycount = 0; + if (strcmp(uniqueAttr,"*") == 0) + node->uniqueAttr = NULL; + else + { + node->uniqueAttr=pstrdup(uniqueAttr); + } + return(node); +} + +List *generate_fjoin(List *tlist) +{ +#if 0 + List tlistP; + List newTlist = NIL; + List fjoinList = NIL; + int nIters = 0; + + /* + * Break the target list into elements with Iter nodes, + * and those without them. + */ + foreach(tlistP, tlist) { + List tlistElem; + + tlistElem = lfirst(tlistP); + if (IsA(lsecond(tlistElem),Iter)) { + nIters++; + fjoinList = lappend(fjoinList, tlistElem); + } else { + newTlist = lappend(newTlist, tlistElem); + } + } + + /* + * if we have an Iter node then we need to flatten. 
+ */ + if (nIters > 0) { + List *inner; + List *tempList; + Fjoin *fjoinNode; + DatumPtr results = (DatumPtr)palloc(nIters*sizeof(Datum)); + BoolPtr alwaysDone = (BoolPtr)palloc(nIters*sizeof(bool)); + + inner = lfirst(fjoinList); + fjoinList = lnext(fjoinList); + fjoinNode = (Fjoin)MakeFjoin(false, + nIters, + inner, + results, + alwaysDone); + tempList = lcons(fjoinNode, NIL); + tempList = nconc(tempList, fjoinList); + newTlist = lappend(newTlist, tempList); + } + return newTlist; +#endif + return tlist; /* do nothing for now - ay 10/94 */ +} diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c new file mode 100644 index 00000000000..fc80402a050 --- /dev/null +++ b/src/backend/optimizer/plan/initsplan.c @@ -0,0 +1,391 @@ +/*------------------------------------------------------------------------- + * + * initsplan.c-- + * Target list, qualification, joininfo initialization routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/initsplan.c,v 1.1.1.1 1996/07/09 06:21:37 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/plannodes.h" +#include "nodes/parsenodes.h" +#include "nodes/relation.h" +#include "nodes/makefuncs.h" + +#include "utils/lsyscache.h" +#include "utils/palloc.h" + +#include "optimizer/internal.h" +#include "optimizer/planmain.h" +#include "optimizer/joininfo.h" +#include "optimizer/pathnode.h" +#include "optimizer/tlist.h" +#include "optimizer/var.h" +#include "optimizer/clauses.h" +#include "optimizer/cost.h" + +extern int Quiet; + +static void add_clause_to_rels(Query *root, List *clause); +static void add_join_clause_info_to_rels(Query *root, CInfo *clauseinfo, + List *join_relids); +static void add_vars_to_rels(Query *root, List *vars, List *join_relids); + +static MergeOrder *mergesortop(Expr *clause); +static Oid hashjoinop(Expr *clause); + + +/***************************************************************************** + * + * TARGET LISTS + * + *****************************************************************************/ + +/* + * initialize_rel_nodes-- + * Creates rel nodes for every relation mentioned in the target list + * 'tlist' (if a node hasn't already been created) and adds them to + * *query-relation-list*. Creates targetlist entries for each member of + * 'tlist' and adds them to the tlist field of the appropriate rel node. + * + * Returns nothing. + */ +void +initialize_base_rels_list(Query *root, List *tlist) +{ + List *tlist_vars = NIL; + List *l = NIL; + List *tvar = NIL; + + foreach (l, tlist) { + TargetEntry *entry = (TargetEntry *) lfirst(l); + + tlist_vars = append(tlist_vars, pull_var_clause(entry->expr)); + } + + /* now, the target list only contains Var nodes */ + foreach (tvar, tlist_vars) { + Var *var; + Index varno; + Rel *result; + + var = (Var*)lfirst(tvar); + varno = var->varno; + result = get_base_rel(root, varno); + + add_tl_element(result, var); + } +} + +/* + * add_missing_variables_to_base_rels - + * If we have range variable(s) in the FROM clause that does not appear + * in the target list nor qualifications, we add it to the base relation + * list. For instance, "select f.x from foo f, foo f2" is a join of f and + * f2. Note that if we have "select foo.x from foo f", it also gets turned + * into a join. 
+ */ +void +add_missing_vars_to_base_rels(Query *root, List *tlist) +{ + List *l; + int varno; + + varno = 1; + foreach (l, root->rtable) { + RangeTblEntry *rte = (RangeTblEntry *)lfirst(l); + List *relids; + Rel *result; + Var *var; + + relids = lconsi(varno, NIL); + if (rte->inFromCl && + !rel_member(relids, root->base_relation_list_)) { + + var = makeVar(varno, -2 , 26, varno, -2); + /* add it to base_relation_list_ */ + result = get_base_rel(root, varno); + add_tl_element(result, var); + } + pfree(relids); + varno++; + } + + return; +} + +/***************************************************************************** + * + * QUALIFICATIONS + * + *****************************************************************************/ + + + +/* + * initialize-qualification-- + * Initializes ClauseInfo and JoinInfo fields of relation entries for all + * relations appearing within clauses. Creates new relation entries if + * necessary, adding them to *query-relation-list*. + * + * Returns nothing of interest. + */ +void +initialize_base_rels_jinfo(Query *root, List *clauses) +{ + List *clause; + + foreach (clause, clauses) { + add_clause_to_rels(root, lfirst(clause)); + } + return; +} + +/* + * add-clause-to-rels-- + * Add clause information to either the 'ClauseInfo' or 'JoinInfo' field + * of a relation entry(depending on whether or not the clause is a join) + * by creating a new ClauseInfo node and setting appropriate fields + * within the nodes. + * + * Returns nothing of interest. + */ +static void +add_clause_to_rels(Query *root, List *clause) +{ + List *relids; + List *vars; + CInfo *clauseinfo = makeNode(CInfo); + + /* + * Retrieve all relids and vars contained within the clause. + */ + clause_relids_vars((Node*)clause, &relids, &vars); + + + clauseinfo->clause = (Expr*)clause; + clauseinfo->notclause = contains_not((Node*)clause); + clauseinfo->selectivity = 0; + clauseinfo->indexids = NIL; + clauseinfo->mergesortorder = (MergeOrder*)NULL; + clauseinfo->hashjoinoperator = (Oid)0; + + + + if(length(relids) == 1) { + Rel *rel = get_base_rel(root, lfirsti(relids)); + + /* + * There is only one relation participating in 'clause', + * so 'clause' must be a restriction clause. + */ + + /* the selectivity of the clause must be computed + regardless of whether it's a restriction or a join clause */ + if (is_funcclause((Node*)clause)) + { + /* + * XXX If we have a func clause set selectivity to 1/3, + * really need a true selectivity function. + */ + clauseinfo->selectivity = (Cost)0.3333333; + } + else + { + clauseinfo->selectivity = + compute_clause_selec(root, (Node*)clause, + NIL); + } + rel->clauseinfo = lcons(clauseinfo, + rel->clauseinfo); + } else { + /* + * 'clause' is a join clause, since there is more than one + * atom in the relid list. + */ + + if (is_funcclause((Node*)clause)) + { + /* + * XXX If we have a func clause set selectivity to 1/3, + * really need a true selectivity function. + */ + clauseinfo->selectivity = (Cost)0.3333333; + } + else + { + clauseinfo->selectivity = + compute_clause_selec(root, (Node*)clause, + NIL); + } + add_join_clause_info_to_rels(root, clauseinfo, relids); + add_vars_to_rels(root,vars, relids); + } +} + +/* + * add-join-clause-info-to-rels-- + * For every relation participating in a join clause, add 'clauseinfo' to + * the appropriate joininfo node(creating a new one and adding it to the + * appropriate rel node if necessary). 
+ * + * 'clauseinfo' describes the join clause + * 'join-relids' is the list of relations participating in the join clause + * + * Returns nothing. + * + */ +static void +add_join_clause_info_to_rels(Query *root, CInfo *clauseinfo, List *join_relids) +{ + List *join_relid; + + foreach (join_relid, join_relids) { + JInfo *joininfo = + find_joininfo_node(get_base_rel(root, lfirsti(join_relid)), + intLispRemove((int)lfirst(join_relid), + join_relids)); + joininfo->jinfoclauseinfo = + lcons(clauseinfo, joininfo->jinfoclauseinfo); + + } +} + +/* + * add-vars-to-rels-- + * For each variable appearing in a clause, + * (1) If a targetlist entry for the variable is not already present in + * the appropriate relation's target list, add one. + * (2) If a targetlist entry is already present, but the var is part of a + * join clause, add the relids of the join relations to the JoinList + * entry of the targetlist entry. + * + * 'vars' is the list of var nodes + * 'join-relids' is the list of relids appearing in the join clause + * (if this is a join clause) + * + * Returns nothing. + */ +static void +add_vars_to_rels(Query *root, List *vars, List *join_relids) +{ + Var *var; + List *temp = NIL; + Rel *rel = (Rel*)NULL; + TargetEntry *tlistentry; + + foreach (temp, vars) { + var = (Var*)lfirst(temp); + rel = get_base_rel(root, var->varno); + tlistentry = tlistentry_member(var, rel->targetlist); + if(tlistentry==NULL) + /* add a new entry */ + add_tl_element(rel, var); + } +} + +/***************************************************************************** + * + * JOININFO + * + *****************************************************************************/ + +/* + * initialize-join-clause-info-- + * Set the MergeSortable or HashJoinable field for every joininfo node + * (within a rel node) and the MergeSortOrder or HashJoinOp field for + * each clauseinfo node(within a joininfo node) for all relations in a + * query. + * + * Returns nothing. + */ +void +initialize_join_clause_info(List *rel_list) +{ + List *x, *y, *z; + Rel *rel; + JInfo *joininfo; + CInfo *clauseinfo; + Expr *clause; + + foreach (x, rel_list) { + rel = (Rel*)lfirst(x); + foreach (y, rel->joininfo) { + joininfo = (JInfo*)lfirst(y); + foreach (z, joininfo->jinfoclauseinfo) { + clauseinfo = (CInfo*)lfirst(z); + clause = clauseinfo->clause; + if(join_clause_p((Node*)clause)) { + MergeOrder *sortop = (MergeOrder*)NULL; + Oid hashop = (Oid)NULL; + + if (_enable_mergesort_) + sortop = mergesortop(clause); + if (_enable_hashjoin_) + hashop = hashjoinop(clause); + + if (sortop) { + clauseinfo->mergesortorder = sortop; + joininfo->mergesortable = true; + } + if (hashop) { + clauseinfo->hashjoinoperator = hashop; + joininfo->hashjoinable = true; + } + } + } + } + } +} + +/* + * mergesortop-- + * Returns the mergesort operator of an operator iff 'clause' is + * mergesortable, i.e., both operands are single vars and the operator is + * a mergesortable operator. 
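 * (Both this routine and hashjoinop() below simply ask the system caches --
 *  op_mergesortable()/op_hashjoinable() from lsyscache -- whether the
 *  clause's operator supports the strategy for the two operand types; the
 *  flags and operators recorded back into the JInfo/CInfo nodes are what
 *  later lets the path-generation phase consider merge and hash joins for
 *  this clause at all.)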
+ */ +static MergeOrder * +mergesortop(Expr *clause) +{ + Oid leftOp, rightOp; + bool sortable; + + sortable = op_mergesortable(((Oper*)clause->oper)->opno, + (get_leftop(clause))->vartype, + (get_rightop(clause))->vartype, + &leftOp, + &rightOp); + + if (sortable) { + MergeOrder *morder = makeNode(MergeOrder); + + morder->join_operator = ((Oper*)clause->oper)->opno; + morder->left_operator = leftOp; + morder->right_operator = rightOp; + morder->left_type = (get_leftop(clause))->vartype; + morder->right_type = (get_rightop(clause))->vartype; + return (morder); + } else + return(NULL); +} + +/* + * hashjoinop-- + * Returns the hashjoin operator of an operator iff 'clause' is + * hashjoinable, i.e., both operands are single vars and the operator is + * a hashjoinable operator. + */ +static Oid +hashjoinop(Expr *clause) +{ + return(op_hashjoinable(((Oper*)clause->oper)->opno, + (get_leftop(clause))->vartype, + (get_rightop(clause))->vartype)); +} diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c new file mode 100644 index 00000000000..5740b83a2d3 --- /dev/null +++ b/src/backend/optimizer/plan/planmain.c @@ -0,0 +1,422 @@ +/*------------------------------------------------------------------------- + * + * planmain.c-- + * Routines to plan a single query + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planmain.c,v 1.1.1.1 1996/07/09 06:21:37 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/plannodes.h" +#include "nodes/parsenodes.h" +#include "nodes/relation.h" + +#include "optimizer/planmain.h" +#include "optimizer/internal.h" +#include "optimizer/paths.h" +#include "optimizer/clauses.h" +#include "optimizer/keys.h" +#include "optimizer/tlist.h" +#include "optimizer/xfunc.h" +#include "optimizer/cost.h" + +#include "tcop/dest.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "nodes/memnodes.h" +#include "utils/mcxt.h" +#include "utils/lsyscache.h" + +static Plan *subplanner(Query *root, List *flat_tlist, List *qual); +static Result *make_result(List *tlist, Node *resconstantqual, Plan *subplan); + +static Plan *make_groupPlan(List *tlist, bool tuplePerGroup, + List *groupClause, Plan *subplan); + +/* + * query_planner-- + * Routine to create a query plan. It does so by first creating a + * subplan for the topmost level of attributes in the query. Then, + * it modifies all target list and qualifications to consider the next + * level of nesting and creates a plan for this modified query by + * recursively calling itself. The two pieces are then merged together + * by creating a result node that indicates which attributes should + * be placed where and any relation level qualifications to be + * satisfied. + * + * command-type is the query command, e.g., retrieve, delete, etc. + * tlist is the target list of the query + * qual is the qualification of the query + * + * Returns a query plan. + */ +Plan * +query_planner(Query *root, + int command_type, + List *tlist, + List *qual) +{ + List *constant_qual = NIL; + List *flattened_tlist = NIL; + List *level_tlist = NIL; + Plan *subplan = (Plan*)NULL; + Agg *aggplan = NULL; + + /* + * A command without a target list or qualification is an error, + * except for "delete foo". + */ + if (tlist==NIL && qual==NULL) { + if (command_type == CMD_DELETE || + /* Total hack here. 
I don't know how to handle + statements like notify in action bodies. + Notify doesn't return anything but + scans a system table. */ + command_type == CMD_NOTIFY) { + return ((Plan*)make_seqscan(NIL, + NIL, + root->resultRelation, + (Plan*)NULL)); + } else + return((Plan*)NULL); + } + + /* + * Pull out any non-variable qualifications so these can be put in + * the topmost result node. The opids for the remaining + * qualifications will be changed to regprocs later. + */ + qual = pull_constant_clauses(qual, &constant_qual); + fix_opids(constant_qual); + + /* + * Create a target list that consists solely of (resdom var) target + * list entries, i.e., contains no arbitrary expressions. + */ + flattened_tlist = flatten_tlist(tlist); + if (flattened_tlist) { + level_tlist = flattened_tlist; + } else { + /* from old code. the logic is beyond me. - ay 2/95 */ + level_tlist = tlist; + } + + /* + * Needs to add the group attribute(s) to the target list so that they + * are available to either the Group node or the Agg node. (The target + * list may not contain the group attribute(s).) + */ + if (root->groupClause) { + AddGroupAttrToTlist(level_tlist, root->groupClause); + } + + if (root->qry_aggs) { + aggplan = make_agg(tlist, root->qry_numAgg, root->qry_aggs); + tlist = level_tlist; + } + + /* + * A query may have a non-variable target list and a non-variable + * qualification only under certain conditions: + * - the query creates all-new tuples, or + * - the query is a replace (a scan must still be done in this case). + */ + if (flattened_tlist==NULL && qual==NULL) { + + switch (command_type) { + case CMD_SELECT: + case CMD_INSERT: + return ((Plan*)make_result(tlist, + (Node*)constant_qual, + (Plan*)NULL)); + break; + + case CMD_DELETE: + case CMD_UPDATE: + { + SeqScan *scan = make_seqscan(tlist, + (List *)NULL, + root->resultRelation, + (Plan*)NULL); + if (constant_qual!=NULL) { + return ((Plan*)make_result(tlist, + (Node*)constant_qual, + (Plan*)scan)); + } else { + return ((Plan*)scan); + } + } + break; + + default: + return ((Plan*)NULL); + } + } + + /* + * Find the subplan (access path) and destructively modify the + * target list of the newly created subplan to contain the appropriate + * join references. + */ + subplan = subplanner(root, level_tlist, qual); + + set_tlist_references(subplan); + + /* + * If we have a GROUP BY clause, insert a group node (with the appropriate + * sort node.) + */ + if (root->groupClause != NULL) { + bool tuplePerGroup; + + /* + * decide whether how many tuples per group the Group node needs + * to return. (Needs only one tuple per group if no aggregate is + * present. Otherwise, need every tuple from the group to do the + * aggregation.) + */ + tuplePerGroup = (aggplan == NULL) ? FALSE : TRUE; + + subplan = + make_groupPlan(tlist, tuplePerGroup, root->groupClause, subplan); + + /* XXX fake it: this works for the Group node too! very very ugly, + please change me -ay 2/95 */ + set_agg_tlist_references((Agg*)subplan); + } + + /* + * If aggregate is present, insert the agg node + */ + if (aggplan != NULL) { + aggplan->plan.lefttree = subplan; + subplan = (Plan*)aggplan; + + /* + * set the varno/attno entries to the appropriate references to + * the result tuple of the subplans. (We need to set those in the + * array of aggreg's in the Agg node also. Even though they're + * pointers, after a few dozen's of copying, they're not the same as + * those in the target list.) 
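 * For orientation, the overall nesting query_planner() produces when all the
 * optional pieces are present (a toy sketch with string labels rather than
 * real plan nodes; which layers appear depends on the query):
 *
 *     #include <stdio.h>
 *
 *     int main(void)
 *     {
 *         int have_group = 1, have_agg = 1, have_const_qual = 1;
 *         const char *plan = "SubPlan(scans/joins)";
 *         static char buf[3][64];
 *         int n = 0;
 *
 *         if (have_group)      { sprintf(buf[n], "Group(Sort(%s))", plan); plan = buf[n++]; }
 *         if (have_agg)        { sprintf(buf[n], "Agg(%s)", plan);         plan = buf[n++]; }
 *         if (have_const_qual) { sprintf(buf[n], "Result(%s)", plan);      plan = buf[n++]; }
 *
 *         // Result(Agg(Group(Sort(SubPlan(scans/joins)))))
 *         printf("%s\n", plan);
 *         return 0;
 *     }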
+ */ + set_agg_tlist_references((Agg*)subplan); + set_agg_agglist_references((Agg*)subplan); + + tlist = aggplan->plan.targetlist; + } + + /* + * Build a result node linking the plan if we have constant quals + */ + if (constant_qual) { + Plan *plan; + + plan = (Plan*)make_result(tlist, + (Node*)constant_qual, + subplan); + /* + * Change all varno's of the Result's node target list. + */ + set_result_tlist_references((Result*)plan); + + return (plan); + } + + /* + * fix up the flattened target list of the plan root node so that + * expressions are evaluated. this forces expression evaluations + * that may involve expensive function calls to be delayed to + * the very last stage of query execution. this could be bad. + * but it is joey's responsibility to optimally push these + * expressions down the plan tree. -- Wei + */ + subplan->targetlist = flatten_tlist_vars(tlist, + subplan->targetlist); + + /* + * Destructively modify the query plan's targetlist to add fjoin + * lists to flatten functions that return sets of base types + */ + subplan->targetlist = generate_fjoin(subplan->targetlist); + + return (subplan); +} + +/* + * subplanner + * + * Subplanner creates an entire plan consisting of joins and scans + * for processing a single level of attributes. + * + * flat-tlist is the flattened target list + * qual is the qualification to be satisfied + * + * Returns a subplan. + * + */ +static Plan * +subplanner(Query *root, + List *flat_tlist, + List *qual) +{ + Rel *final_relation; + List *final_relation_list; + + /* Initialize the targetlist and qualification, adding entries to + * *query-relation-list* as relation references are found (e.g., in the + * qualification, the targetlist, etc.) + */ + root->base_relation_list_ = NIL; + root->join_relation_list_ = NIL; + initialize_base_rels_list(root, flat_tlist); + initialize_base_rels_jinfo(root, qual); + add_missing_vars_to_base_rels(root, flat_tlist); + + /* Find all possible scan and join paths. + * Mark all the clauses and relations that can be processed using special + * join methods, then do the exhaustive path search. + */ + initialize_join_clause_info(root->base_relation_list_); + final_relation_list = find_paths(root, + root->base_relation_list_); + + if (final_relation_list) + final_relation = (Rel*)lfirst (final_relation_list); + else + final_relation = (Rel*)NIL; + +#if 0 /* fix xfunc */ + /* + * Perform Predicate Migration on each path, to optimize and correctly + * assess the cost of each before choosing the cheapest one. + * -- JMH, 11/16/92 + * + * Needn't do so if the top rel is pruneable: that means there's no + * expensive functions left to pull up. -- JMH, 11/22/92 + */ + if (XfuncMode != XFUNC_OFF && XfuncMode != XFUNC_NOPM && + XfuncMode != XFUNC_NOPULL && !final_relation->pruneable) + { + List *pathnode; + foreach(pathnode, final_relation->pathlist) + { + if (xfunc_do_predmig((Path*)lfirst(pathnode))) + set_cheapest(final_relation, final_relation->pathlist); + } + } +#endif + + /* + * Determine the cheapest path and create a subplan corresponding to it. 
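 * "Cheapest" here simply means minimum estimated cost; the choice itself was
 * already made while the paths were generated (cf. the set_cheapest() call
 * in the disabled xfunc block above), so this routine only reads the rel's
 * cheapestpath field. A toy illustration of the selection criterion
 * (ToyPath and the sample costs are made up for the example):
 *
 *     #include <stdio.h>
 *
 *     typedef struct { const char *kind; double cost; } ToyPath;
 *
 *     int main(void)
 *     {
 *         ToyPath pathlist[] = { { "seqscan + sort", 140.0 },
 *                                { "indexscan",      55.0 },
 *                                { "hashjoin",       90.0 } };
 *         int i, best = 0;
 *
 *         for (i = 1; i < 3; i++)
 *             if (pathlist[i].cost < pathlist[best].cost)
 *                 best = i;
 *         printf("cheapest: %s (cost %.1f)\n",
 *                pathlist[best].kind, pathlist[best].cost);
 *         return 0;
 *     }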
+ */ + if (final_relation) { + return (create_plan ((Path*)final_relation->cheapestpath)); + }else { + elog(NOTICE, "final relation is nil"); + return(create_plan ((Path*)NULL)); + } + +} + +/***************************************************************************** + * + *****************************************************************************/ + +static Result * +make_result(List *tlist, + Node *resconstantqual, + Plan *subplan) +{ + Result *node = makeNode(Result); + Plan *plan = &node->plan; + + tlist = generate_fjoin(tlist); + plan->cost = 0.0; + plan->state = (EState *)NULL; + plan->targetlist = tlist; + plan->lefttree = subplan; + plan->righttree = NULL; + node->resconstantqual = resconstantqual; + node->resstate = NULL; + + return(node); +} + +/***************************************************************************** + * + *****************************************************************************/ + +static Plan * +make_groupPlan(List *tlist, + bool tuplePerGroup, + List *groupClause, + Plan *subplan) +{ + List *sort_tlist; + List *gl; + int keyno; + Sort *sortplan; + Group *grpplan; + int numCols; + AttrNumber *grpColIdx; + + numCols = length(groupClause); + grpColIdx = (AttrNumber *)palloc(sizeof(AttrNumber)*numCols); + + /* + * first, make a sort node. Group node expects the tuples it gets + * from the subplan is in the order as specified by the group columns. + */ + keyno = 1; + sort_tlist = new_unsorted_tlist(subplan->targetlist); + + { + /* if this is a mergejoin node, varno could be OUTER/INNER */ + List *l; + foreach(l, sort_tlist) { + TargetEntry *tle; + tle = lfirst(l); + ((Var*)tle->expr)->varno = 1; + } + } + + foreach (gl, groupClause) { + GroupClause *grpcl = (GroupClause*)lfirst(gl); + TargetEntry *tle; + + tle = match_varid(grpcl->grpAttr, sort_tlist); + /* + * the parser should have checked to make sure the group attribute + * is valid but the optimizer might have screwed up and hence we + * check again. + */ + if (tle==NULL) { + elog(WARN, "group attribute disappeared from target list"); + } + tle->resdom->reskey = keyno; + tle->resdom->reskeyop = get_opcode(grpcl->grpOpoid); + + grpColIdx[keyno-1] = tle->resdom->resno; + keyno++; + } + sortplan = make_sort(sort_tlist, + _TEMP_RELATION_ID_, + subplan, + numCols); + sortplan->plan.cost = subplan->cost; /* XXX assume no cost */ + + /* + * make the Group node + */ + tlist = copyObject(tlist); /* make a copy */ + grpplan = make_group(tlist, tuplePerGroup, numCols, grpColIdx, sortplan); + + return (Plan*)grpplan; +} diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c new file mode 100644 index 00000000000..4e57af8aa27 --- /dev/null +++ b/src/backend/optimizer/plan/planner.c @@ -0,0 +1,408 @@ +/*------------------------------------------------------------------------- + * + * planner.c-- + * The query optimizer external interface. 
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/planner.c,v 1.1.1.1 1996/07/09 06:21:37 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/plannodes.h" +#include "nodes/parsenodes.h" +#include "nodes/relation.h" + +#include "parser/catalog_utils.h" +#include "parser/parse_query.h" +#include "utils/elog.h" +#include "utils/lsyscache.h" +#include "access/heapam.h" + +#include "optimizer/internal.h" +#include "optimizer/planner.h" +#include "optimizer/plancat.h" +#include "optimizer/prep.h" +#include "optimizer/planmain.h" +#include "optimizer/paths.h" +#include "optimizer/cost.h" + +/* DATA STRUCTURE CREATION/MANIPULATION ROUTINES */ +#include "nodes/relation.h" +#include "optimizer/clauseinfo.h" +#include "optimizer/joininfo.h" +#include "optimizer/keys.h" +#include "optimizer/ordering.h" +#include "optimizer/pathnode.h" +#include "optimizer/clauses.h" +#include "optimizer/tlist.h" +#include "optimizer/var.h" + +#include "executor/executor.h" + +static Plan *make_sortplan(List *tlist, List *sortcls, Plan *plannode); +static Plan *init_query_planner(Query *parse); +static Existential *make_existential(Plan *left, Plan *right); + +/***************************************************************************** + * + * Query optimizer entry point + * + *****************************************************************************/ + + +/* + * planner-- + * Main query optimizer routine. + * + * Invokes the planner on union queries if there are any left, + * recursing if necessary to get them all, then processes normal plans. + * + * Returns a query plan. + * + */ +Plan* +planner(Query *parse) +{ + List *tlist = parse->targetList; + List *rangetable = parse->rtable; + char* uniqueflag = parse->uniqueFlag; + List *sortclause = parse->sortClause; + Plan *special_plans = (Plan*)NULL; + + Plan *result_plan = (Plan*) NULL; + + int rt_index; + + /* + * plan inheritance + */ + rt_index = first_matching_rt_entry(rangetable, INHERITS_FLAG); + if (rt_index != -1) { + special_plans = (Plan *)plan_union_queries((Index)rt_index, + parse, + INHERITS_FLAG); + } + + /* + * plan archive queries + */ + rt_index = first_matching_rt_entry(rangetable, ARCHIVE_FLAG); + if (rt_index != -1) { + special_plans = (Plan *)plan_union_queries((Index)rt_index, + parse, + ARCHIVE_FLAG); + } + + if (special_plans) + result_plan = special_plans; + else + result_plan = init_query_planner(parse); /* regular plans */ + + /* + * For now, before we hand back the plan, check to see if there + * is a user-specified sort that needs to be done. Eventually, this + * will be moved into the guts of the planner s.t. user specified + * sorts will be considered as part of the planning process. + * Since we can only make use of user-specified sorts in + * special cases, we can do the optimization step later. + */ + + if (uniqueflag) { + Plan *sortplan = make_sortplan(tlist, sortclause, result_plan); + + return((Plan*)make_unique(tlist,sortplan,uniqueflag)); + } else { + if (sortclause) + return(make_sortplan(tlist,sortclause,result_plan)); + else + return((Plan*)result_plan); + } + +} + +/* + * make_sortplan-- + * Returns a sortplan which is basically a SORT node attached to the + * top of the plan returned from the planner. It also adds the + * cost of sorting into the plan. + * + * sortkeys: ( resdom1 resdom2 resdom3 ...) 
+ * sortops: (sortop1 sortop2 sortop3 ...) + */ +static Plan * +make_sortplan(List *tlist, List *sortcls, Plan *plannode) +{ + Plan *sortplan = (Plan*)NULL; + List *temp_tlist = NIL; + List *i = NIL; + Resdom *resnode = (Resdom*)NULL; + Resdom *resdom = (Resdom*)NULL; + int keyno =1; + + /* First make a copy of the tlist so that we don't corrupt the + * the original . + */ + + temp_tlist = new_unsorted_tlist(tlist); + + foreach (i, sortcls) { + SortClause *sortcl = (SortClause*)lfirst(i); + + resnode = sortcl->resdom; + resdom = tlist_resdom(temp_tlist, resnode); + + /* Order the resdom keys and replace the operator OID for each + * key with the regproc OID. + */ + resdom->reskey = keyno; + resdom->reskeyop = get_opcode(sortcl->opoid); + keyno += 1; + } + + sortplan = (Plan*)make_sort(temp_tlist, + _TEMP_RELATION_ID_, + (Plan*)plannode, + length(sortcls)); + + /* + * XXX Assuming that an internal sort has no. cost. + * This is wrong, but given that at this point, we don't + * know the no. of tuples returned, etc, we can't do + * better than to add a constant cost. + * This will be fixed once we move the sort further into the planner, + * but for now ... functionality.... + */ + + sortplan->cost = plannode->cost; + + return(sortplan); +} + + +/* + * init-query-planner-- + * Deals with all non-union preprocessing, including existential + * qualifications and CNFifying the qualifications. + * + * Returns a query plan. + * MODIFIES: tlist,qual + * + */ +static Plan * +init_query_planner(Query *root) +{ + List *primary_qual; + List *existential_qual; + Existential *exist_plan; + List *tlist = root->targetList; + + tlist = preprocess_targetlist(tlist, + root->commandType, + root->resultRelation, + root->rtable); + + primary_qual = + preprocess_qualification((Expr*)root->qual, + tlist, + &existential_qual); + + if(existential_qual==NULL) { + return(query_planner(root, + root->commandType, + tlist, + primary_qual)); + } else { + int temp = root->commandType; + Plan *existential_plan; + + root->commandType = CMD_SELECT; + existential_plan = query_planner(root, + temp, + NIL, + existential_qual); + + exist_plan = make_existential(existential_plan, + query_planner(root, + root->commandType, + tlist, + primary_qual)); + return((Plan*)exist_plan); + } +} + +/* + * make_existential-- + * Instantiates an existential plan node and fills in + * the left and right subtree slots. + */ +static Existential * +make_existential(Plan *left, Plan *right) +{ + Existential *node = makeNode(Existential); + + node->lefttree = left; + node->righttree = left; + return(node); +} + +/* + * pg_checkretval() -- check return value of a list of sql parse + * trees. + * + * The return value of a sql function is the value returned by + * the final query in the function. We do some ad-hoc define-time + * type checking here to be sure that the user is returning the + * type he claims. + */ +void +pg_checkretval(Oid rettype, QueryTreeList *queryTreeList) +{ + Query *parse; + List *tlist; + List *rt; + int cmd; + Type typ; + Resdom *resnode; + Relation reln; + Oid relid; + Oid tletype; + int relnatts; + int i; + + /* find the final query */ + parse = queryTreeList->qtrees[queryTreeList->len - 1]; + + /* + * test 1: if the last query is a utility invocation, then there + * had better not be a return value declared. 
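 * A compact sketch of the shape of the checks that follow, over simplified
 * inputs (check_retval_toy and the ToyCmd values are illustrative only; the
 * real code works on Query nodes, consults the catalogs, and reports
 * problems with elog(WARN)):
 *
 *     #include <stdio.h>
 *
 *     typedef enum { TOY_UTILITY, TOY_SELECT, TOY_OTHER } ToyCmd;
 *
 *     // Returns NULL if the declared return type is consistent with the
 *     // final query, otherwise a complaint.
 *     static const char *check_retval_toy(int has_rettype, ToyCmd final_cmd)
 *     {
 *         if (final_cmd == TOY_UTILITY)
 *             return has_rettype ? "utility query cannot return a value" : NULL;
 *         if (!has_rettype)
 *             return final_cmd == TOY_SELECT ? "retrieve needs a return type" : NULL;
 *         if (final_cmd != TOY_SELECT)
 *             return "declared return type but final query is not a retrieve";
 *         return NULL;   // type and arity checks against the tlist would follow
 *     }
 *
 *     int main(void)
 *     {
 *         const char *msg = check_retval_toy(1, TOY_OTHER);
 *         printf("%s\n", msg ? msg : "ok");
 *         return 0;
 *     }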
+ */ + if (parse->commandType == CMD_UTILITY) { + if (rettype == InvalidOid) + return; + else + elog(WARN, "return type mismatch in function decl: final query is a catalog utility"); + } + + /* okay, it's an ordinary query */ + tlist = parse->targetList; + rt = parse->rtable; + cmd = parse->commandType; + + /* + * test 2: if the function is declared to return no value, then the + * final query had better not be a retrieve. + */ + if (rettype == InvalidOid) { + if (cmd == CMD_SELECT) + elog(WARN, + "function declared with no return type, but final query is a retrieve"); + else + return; + } + + /* by here, the function is declared to return some type */ + if ((typ = (Type)get_id_type(rettype)) == NULL) + elog(WARN, "can't find return type %d for function\n", rettype); + + /* + * test 3: if the function is declared to return a value, then the + * final query had better be a retrieve. + */ + if (cmd != CMD_SELECT) + elog(WARN, "function declared to return type %s, but final query is not a retrieve", tname(typ)); + + /* + * test 4: for base type returns, the target list should have exactly + * one entry, and its type should agree with what the user declared. + */ + + if (get_typrelid(typ) == InvalidOid) { + if (exec_tlist_length(tlist) > 1) + elog(WARN, "function declared to return %s returns multiple values in final retrieve", tname(typ)); + + resnode = (Resdom*) ((TargetEntry*)lfirst(tlist))->resdom; + if (resnode->restype != rettype) + elog(WARN, "return type mismatch in function: declared to return %s, returns %s", tname(typ), tname(get_id_type(resnode->restype))); + + /* by here, base return types match */ + return; + } + + /* + * If the target list is of length 1, and the type of the varnode + * in the target list is the same as the declared return type, this + * is okay. This can happen, for example, where the body of the + * function is 'retrieve (x = func2())', where func2 has the same + * return type as the function that's calling it. + */ + if (exec_tlist_length(tlist) == 1) { + resnode = (Resdom*) ((TargetEntry*)lfirst(tlist))->resdom; + if (resnode->restype == rettype) + return; + } + + /* + * By here, the procedure returns a (set of) tuples. This part of + * the typechecking is a hack. We look up the relation that is + * the declared return type, and be sure that attributes 1 .. n + * in the target list match the declared types. + */ + reln = heap_open(get_typrelid(typ)); + + if (!RelationIsValid(reln)) + elog(WARN, "cannot open relation relid %d", get_typrelid(typ)); + + relid = reln->rd_id; + relnatts = reln->rd_rel->relnatts; + + if (exec_tlist_length(tlist) != relnatts) + elog(WARN, "function declared to return type %s does not retrieve (%s.*)", tname(typ), tname(typ)); + + /* expect attributes 1 .. 
n in order */ + for (i = 1; i <= relnatts; i++) { + TargetEntry *tle = lfirst(tlist); + Node *thenode = tle->expr; + + tlist = lnext(tlist); + tletype = exprType(thenode); + +#if 0 /* fix me */ + /* this is tedious */ + if (IsA(thenode,Var)) + tletype = (Oid) ((Var*)thenode)->vartype; + else if (IsA(thenode,Const)) + tletype = (Oid) ((Const*)thenode)->consttype; + else if (IsA(thenode,Param)) { + tletype = (Oid) ((Param*)thenode)->paramtype; + else if (IsA(thenode,Expr)) { + tletype = Expr + } + } else if (IsA(thenode,LispList)) { + thenode = lfirst(thenode); + if (IsA(thenode,Oper)) + tletype = (Oid) get_opresulttype((Oper*)thenode); + else if (IsA(thenode,Func)) + tletype = (Oid) get_functype((Func*)thenode); + else + elog(WARN, "function declared to return type %s does not retrieve (%s.all)", tname(typ), tname(typ)); +#endif +/* + } else + elog(WARN, "function declared to return type %s does not retrieve (%s.all)", tname(typ), tname(typ)); +*/ + /* reach right in there, why don't you? */ + if (tletype != reln->rd_att->attrs[i-1]->atttypid) + elog(WARN, "function declared to return type %s does not retrieve (%s.all)", tname(typ), tname(typ)); + } + + heap_close(reln); + + /* success */ + return; +} diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c new file mode 100644 index 00000000000..e698c930e1d --- /dev/null +++ b/src/backend/optimizer/plan/setrefs.c @@ -0,0 +1,706 @@ +/*------------------------------------------------------------------------- + * + * setrefs.c-- + * Routines to change varno/attno entries to contain references + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/plan/setrefs.c,v 1.1.1.1 1996/07/09 06:21:37 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/plannodes.h" +#include "nodes/primnodes.h" +#include "nodes/relation.h" + +#include "utils/elog.h" +#include "nodes/nodeFuncs.h" +#include "nodes/makefuncs.h" + +#include "optimizer/internal.h" +#include "optimizer/clauses.h" +#include "optimizer/clauseinfo.h" +#include "optimizer/keys.h" +#include "optimizer/planmain.h" +#include "optimizer/tlist.h" +#include "optimizer/var.h" +#include "optimizer/tlist.h" + +static void set_join_tlist_references(Join *join); +static void set_tempscan_tlist_references(SeqScan *tempscan); +static void set_temp_tlist_references(Temp *temp); +static List *replace_clause_joinvar_refs(Expr *clause, + List *outer_tlist, List *inner_tlist); +static List *replace_subclause_joinvar_refs(List *clauses, + List *outer_tlist, List *inner_tlist); +static Var *replace_joinvar_refs(Var *var, List *outer_tlist, List *inner_tlist); +static List *tlist_temp_references(Oid tempid, List *tlist); +static void replace_result_clause(List *clause, List *subplanTargetList); +static bool OperandIsInner(Node *opnd, int inner_relid); +static void replace_agg_clause(Node *expr, List *targetlist); + +/***************************************************************************** + * + * SUBPLAN REFERENCES + * + *****************************************************************************/ + +/* + * set-tlist-references-- + * Modifies the target list of nodes in a plan to reference target lists + * at lower levels. 
+ * + * 'plan' is the plan whose target list and children's target lists will + * be modified + * + * Returns nothing of interest, but modifies internal fields of nodes. + * + */ +void +set_tlist_references(Plan *plan) +{ + if(plan==NULL) + return; + + if (IsA_Join(plan)) { + set_join_tlist_references((Join*)plan); + } else if (IsA(plan,SeqScan) && plan->lefttree && + IsA_Temp(plan->lefttree)) { + set_tempscan_tlist_references((SeqScan*)plan); + } else if (IsA(plan,Sort)) { + set_temp_tlist_references ((Temp*)plan); + } else if (IsA(plan,Result)) { + set_result_tlist_references((Result*)plan); + } else if (IsA(plan,Hash)) { + set_tlist_references(plan->lefttree); + } else if (IsA(plan,Choose)) { + List *x; + foreach (x, ((Choose*)plan)->chooseplanlist) { + set_tlist_references((Plan*)lfirst(x)); + } + } +} + +/* + * set-join-tlist-references-- + * Modifies the target list of a join node by setting the varnos and + * varattnos to reference the target list of the outer and inner join + * relations. + * + * Creates a target list for a join node to contain references by setting + * varno values to OUTER or INNER and setting attno values to the + * result domain number of either the corresponding outer or inner join + * tuple. + * + * 'join' is a join plan node + * + * Returns nothing of interest, but modifies internal fields of nodes. + * + */ +static void +set_join_tlist_references(Join *join) +{ + Plan *outer = ((Plan*)join)->lefttree; + Plan *inner = ((Plan*)join)->righttree; + List *new_join_targetlist = NIL; + TargetEntry *temp = (TargetEntry *)NULL; + List *entry = NIL; + List *inner_tlist = NULL; + List *outer_tlist = NULL; + TargetEntry *xtl = (TargetEntry *)NULL; + List *qptlist = ((Plan*)join)->targetlist; + + foreach(entry, qptlist) { + List *joinvar; + + xtl = (TargetEntry *)lfirst(entry); + inner_tlist = ((inner==NULL) ? NIL : inner->targetlist); + outer_tlist = ((outer==NULL) ? NIL : outer->targetlist); + joinvar = replace_clause_joinvar_refs((Expr*)get_expr(xtl), + outer_tlist, + inner_tlist); + + temp = MakeTLE(xtl->resdom, (Node*)joinvar); + new_join_targetlist = lappend(new_join_targetlist,temp); + } + + ((Plan*)join)->targetlist = new_join_targetlist; + if (outer!=NULL) + set_tlist_references(outer); + if (inner!=NULL) + set_tlist_references(inner); +} + +/* + * set-tempscan-tlist-references-- + * Modifies the target list of a node that scans a temp relation (i.e., a + * sort or hash node) so that the varnos refer to the child temporary. + * + * 'tempscan' is a seqscan node + * + * Returns nothing of interest, but modifies internal fields of nodes. + * + */ +static void +set_tempscan_tlist_references(SeqScan *tempscan) +{ + Temp *temp = (Temp*)((Plan*)tempscan)->lefttree; + + ((Plan*)tempscan)->targetlist = + tlist_temp_references(temp->tempid, + ((Plan*)tempscan)->targetlist); + set_temp_tlist_references(temp); +} + +/* + * set-temp-tlist-references-- + * The temp's vars are made consistent with (actually, identical to) the + * modified version of the target list of the node from which temp node + * receives its tuples. + * + * 'temp' is a temp (e.g., sort, hash) plan node + * + * Returns nothing of interest, but modifies internal fields of nodes. 
+ * + */ +static void +set_temp_tlist_references(Temp *temp) +{ + Plan *source = ((Plan*)temp)->lefttree; + + if (source!=NULL) { + set_tlist_references(source); + ((Plan*)temp)->targetlist = + copy_vars(((Plan*)temp)->targetlist , + (source)->targetlist); + } else { + elog(WARN, "calling set_temp_tlist_references with empty lefttree"); + } +} + +/* + * join-references-- + * Creates a new set of join clauses by replacing the varno/varattno + * values of variables in the clauses to reference target list values + * from the outer and inner join relation target lists. + * + * 'clauses' is the list of join clauses + * 'outer-tlist' is the target list of the outer join relation + * 'inner-tlist' is the target list of the inner join relation + * + * Returns the new join clauses. + * + */ +List * +join_references(List *clauses, + List *outer_tlist, + List *inner_tlist) +{ + return (replace_subclause_joinvar_refs(clauses, + outer_tlist, + inner_tlist)); +} + +/* + * index-outerjoin-references-- + * Given a list of join clauses, replace the operand corresponding to the + * outer relation in the join with references to the corresponding target + * list element in 'outer-tlist' (the outer is rather obscurely + * identified as the side that doesn't contain a var whose varno equals + * 'inner-relid'). + * + * As a side effect, the operator is replaced by the regproc id. + * + * 'inner-indxqual' is the list of join clauses (so-called because they + * are used as qualifications for the inner (inbex) scan of a nestloop) + * + * Returns the new list of clauses. + * + */ +List * +index_outerjoin_references(List *inner_indxqual, + List *outer_tlist, + Index inner_relid) +{ + List *t_list = NIL; + Expr *temp = NULL; + List *t_clause = NIL; + Expr *clause = NULL; + + foreach (t_clause,inner_indxqual) { + clause = lfirst(t_clause); + /* + * if inner scan on the right. + */ + if (OperandIsInner((Node*)get_rightop(clause), inner_relid)) { + Var *joinvar = (Var*) + replace_clause_joinvar_refs((Expr*)get_leftop(clause), + outer_tlist, + NIL); + temp = make_opclause(replace_opid((Oper*)((Expr*)clause)->oper), + joinvar, + get_rightop(clause)); + t_list = lappend(t_list,temp); + } else { + /* inner scan on left */ + Var *joinvar = (Var*) + replace_clause_joinvar_refs((Expr*)get_rightop(clause), + outer_tlist, + NIL); + temp = make_opclause(replace_opid((Oper*)((Expr*)clause)->oper), + joinvar, + get_leftop(clause)); + t_list = lappend(t_list,temp); + } + + } + return(t_list); +} + +/* + * replace-clause-joinvar-refs + * replace-subclause-joinvar-refs + * replace-joinvar-refs + * + * Replaces all variables within a join clause with a new var node + * whose varno/varattno fields contain a reference to a target list + * element from either the outer or inner join relation. + * + * 'clause' is the join clause + * 'outer-tlist' is the target list of the outer join relation + * 'inner-tlist' is the target list of the inner join relation + * + * Returns the new join clause. 
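+ *
+ * For example (hypothetical resnos): given the join clause
+ * (= emp.deptno dept.dno), with emp.deptno at resno 3 of the outer
+ * target list and dept.dno at resno 1 of the inner one, the rewritten
+ * clause is (= (Var OUTER 3) (Var INNER 1)), and the operator is
+ * replaced by its regproc id via replace_opid().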
+ * + */ +static List * +replace_clause_joinvar_refs(Expr *clause, + List *outer_tlist, + List *inner_tlist) +{ + List *temp = NULL; + + if(IsA (clause,Var)) { + temp = (List*)replace_joinvar_refs((Var*)clause, + outer_tlist,inner_tlist); + if(temp) + return(temp); + else + if (clause != NULL) + return((List*)clause); + else + return(NIL); + } else if (single_node((Node*)clause)) { + return ((List*)clause); + } else if (or_clause((Node*)clause)) { + List *orclause = + replace_subclause_joinvar_refs(((Expr*)clause)->args, + outer_tlist, + inner_tlist); + return ((List*)make_orclause(orclause)); + } else if (IsA(clause,ArrayRef)) { + ArrayRef *aref = (ArrayRef *)clause; + + temp = replace_subclause_joinvar_refs(aref->refupperindexpr, + outer_tlist, + inner_tlist); + aref->refupperindexpr = (List*)temp; + temp = replace_subclause_joinvar_refs(aref->reflowerindexpr, + outer_tlist, + inner_tlist); + aref->reflowerindexpr = (List*)temp; + temp = replace_clause_joinvar_refs((Expr*)aref->refexpr, + outer_tlist, + inner_tlist); + aref->refexpr = (Node*)temp; + + /* + * no need to set refassgnexpr. we only set that in the + * target list on replaces, and this is an array reference + * in the qualification. if we got this far, it's 0x0 in + * the ArrayRef structure 'clause'. + */ + + return((List*)clause); + } else if (is_funcclause((Node*)clause)) { + List *funcclause = + replace_subclause_joinvar_refs(((Expr*)clause)->args, + outer_tlist, + inner_tlist); + return ((List*)make_funcclause((Func*)((Expr*)clause)->oper, + funcclause)); + } else if (not_clause((Node*)clause)) { + List *notclause = + replace_clause_joinvar_refs(get_notclausearg(clause), + outer_tlist, + inner_tlist); + return ((List*)make_notclause((Expr*)notclause)); + } else if (is_opclause((Node*)clause)) { + Var *leftvar = + (Var*)replace_clause_joinvar_refs((Expr*)get_leftop(clause), + outer_tlist, + inner_tlist); + Var *rightvar = + (Var*)replace_clause_joinvar_refs((Expr*)get_rightop(clause), + outer_tlist, + inner_tlist); + return ((List*)make_opclause(replace_opid((Oper*)((Expr*)clause)->oper), + leftvar, + rightvar)); + } + /* shouldn't reach here */ + return NULL; +} + +static List * +replace_subclause_joinvar_refs(List *clauses, + List *outer_tlist, + List *inner_tlist) +{ + List *t_list = NIL; + List *temp = NIL; + List *clause = NIL; + + foreach (clause,clauses) { + temp = replace_clause_joinvar_refs(lfirst(clause), + outer_tlist, + inner_tlist); + t_list = lappend(t_list,temp); + } + return(t_list); +} + +static Var * +replace_joinvar_refs(Var *var, List *outer_tlist, List *inner_tlist) +{ + Resdom *outer_resdom =(Resdom*)NULL; + + outer_resdom= tlist_member(var,outer_tlist); + + if (outer_resdom!=NULL && IsA (outer_resdom,Resdom) ) { + return (makeVar (OUTER, + outer_resdom->resno, + var->vartype, + var->varnoold, + var->varoattno)); + } else { + Resdom *inner_resdom; + inner_resdom = tlist_member(var,inner_tlist); + if ( inner_resdom!=NULL && IsA (inner_resdom,Resdom) ) { + return (makeVar (INNER, + inner_resdom->resno, + var->vartype, + var->varnoold, + var->varoattno)); + } + } + return (Var*)NULL; +} + +/* + * tlist-temp-references-- + * Creates a new target list for a node that scans a temp relation, + * setting the varnos to the id of the temp relation and setting varids + * if necessary (varids are only needed if this is a targetlist internal + * to the tree, in which case the targetlist entry always contains a var + * node, so we can just copy it from the temp). 
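+ *
+ * For example, a target list entry whose resdom has resno 2 and type
+ * (say) int4 becomes the Var (tempid, 2, int4, tempid, oattno), i.e.
+ * "attribute 2 of the temp relation", where oattno is carried over
+ * from the original Var, or 0 if the expression was not a Var.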
+ * + * 'tempid' is the id of the temp relation + * 'tlist' is the target list to be modified + * + * Returns new target list + * + */ +static List * +tlist_temp_references(Oid tempid, + List *tlist) +{ + List *t_list = NIL; + TargetEntry *temp = (TargetEntry *)NULL; + TargetEntry *xtl = NULL; + List *entry; + + foreach (entry, tlist) { + AttrNumber oattno; + + xtl = lfirst(entry); + if (IsA(get_expr(xtl), Var)) + oattno = ((Var*)xtl->expr)->varoattno; + else + oattno = 0; + + temp = MakeTLE(xtl->resdom, + (Node*)makeVar(tempid, + xtl->resdom->resno, + xtl->resdom->restype, + tempid, + oattno)); + + t_list = lappend(t_list,temp); + } + return(t_list); +} + +/*--------------------------------------------------------- + * + * set_result_tlist_references + * + * Change the target list of a Result node, so that it correctly + * addresses the tuples returned by its left tree subplan. + * + * NOTE: + * 1) we ignore the right tree! (in the current implementation + * it is always nil + * 2) this routine will probably *NOT* work with nested dot + * fields.... + */ +void +set_result_tlist_references(Result *resultNode) +{ + Plan *subplan; + List *resultTargetList; + List *subplanTargetList; + List *t; + TargetEntry *entry; + Expr *expr; + + resultTargetList= ((Plan*)resultNode)->targetlist; + + /* + * NOTE: we only consider the left tree subplan. + * This is usually a seq scan. + */ + subplan = ((Plan*)resultNode)->lefttree; + if (subplan != NULL) { + subplanTargetList = subplan->targetlist; + } else { + subplanTargetList = NIL; + } + + /* + * now for traverse all the entris of the target list. + * These should be of the form (Resdom_Node Expression). + * For every expression clause, call "replace_result_clause()" + * to appropriatelly change all the Var nodes. + */ + foreach (t, resultTargetList) { + entry = (TargetEntry *)lfirst(t); + expr = (Expr*) get_expr(entry); + replace_result_clause((List*)expr, subplanTargetList); + } +} + +/*--------------------------------------------------------- + * + * replace_result_clause + * + * This routine is called from set_result_tlist_references(). + * and modifies the expressions of the target list of a Result + * node so that all Var nodes reference the target list of its subplan. + * + */ +static void +replace_result_clause(List *clause, + List *subplanTargetList) /* target list of the + subplan */ +{ + List *t; + List *subClause; + TargetEntry *subplanVar; + + if (IsA(clause,Var)) { + /* + * Ha! A Var node! + */ + subplanVar = match_varid((Var*)clause, subplanTargetList); + /* + * Change the varno & varattno fields of the + * var node. + * + */ + ((Var*)clause)->varno = (Index)OUTER; + ((Var*)clause)->varattno = subplanVar->resdom->resno; + } else if (is_funcclause((Node*)clause)) { + /* + * This is a function. Recursively call this routine + * for its arguments... + */ + subClause = ((Expr*)clause)->args; + foreach (t, subClause) { + replace_result_clause(lfirst(t),subplanTargetList); + } + } else if (IsA(clause,ArrayRef)) { + ArrayRef *aref = (ArrayRef *)clause; + /* + * This is an arrayref. Recursively call this routine + * for its expression and its index expression... 
+ */ + subClause = aref->refupperindexpr; + foreach (t, subClause) { + replace_result_clause(lfirst(t),subplanTargetList); + } + subClause = aref->reflowerindexpr; + foreach (t, subClause) { + replace_result_clause(lfirst(t),subplanTargetList); + } + replace_result_clause((List*)aref->refexpr, + subplanTargetList); + replace_result_clause((List*)aref->refassgnexpr, + subplanTargetList); + } else if (is_opclause((Node*)clause)) { + /* + * This is an operator. Recursively call this routine + * for both its left and right operands + */ + subClause = (List*)get_leftop((Expr*)clause); + replace_result_clause(subClause,subplanTargetList); + subClause = (List*)get_rightop((Expr*)clause); + replace_result_clause(subClause,subplanTargetList); + } else if (IsA(clause,Param) || IsA(clause,Const)) { + /* do nothing! */ + } else { + /* + * Ooops! we can not handle that! + */ + elog(WARN,"replace_result_clause: Can not handle this tlist!\n"); + } +} + +static +bool OperandIsInner(Node *opnd, int inner_relid) +{ + /* + * Can be the inner scan if its a varnode or a function and the + * inner_relid is equal to the varnode's var number or in the + * case of a function the first argument's var number (all args + * in a functional index are from the same relation). + */ + if ( IsA (opnd,Var) && + (inner_relid == ((Var*)opnd)->varno) ) + { + return true; + } + if (is_funcclause(opnd)) + { + List *firstArg = lfirst(((Expr*)opnd)->args); + + if ( IsA (firstArg,Var) && + (inner_relid == ((Var*)firstArg)->varno) ) + { + return true; + } + } + return false; +} + +/***************************************************************************** + * + *****************************************************************************/ + +/*--------------------------------------------------------- + * + * set_agg_tlist_references - + * changes the target list of an Agg node so that it points to + * the tuples returned by its left tree subplan. + * + */ +void +set_agg_tlist_references(Agg *aggNode) +{ + List *aggTargetList; + List *subplanTargetList; + List *tl; + + aggTargetList = aggNode->plan.targetlist; + subplanTargetList = aggNode->plan.lefttree->targetlist; + + foreach (tl, aggTargetList) { + TargetEntry *tle = lfirst(tl); + + replace_agg_clause(tle->expr, subplanTargetList); + } +} + +void +set_agg_agglist_references(Agg *aggNode) +{ + List *subplanTargetList; + Aggreg **aggs; + int i; + + aggs = aggNode->aggs; + subplanTargetList = aggNode->plan.lefttree->targetlist; + + for (i = 0; i < aggNode->numAgg; i++) { + replace_agg_clause(aggs[i]->target, subplanTargetList); + } +} + +static void +replace_agg_clause(Node *clause, List *subplanTargetList) +{ + List *t; + TargetEntry *subplanVar; + + if (IsA(clause,Var)) { + /* + * Ha! A Var node! + */ + subplanVar = match_varid((Var*)clause, subplanTargetList); + /* + * Change the varno & varattno fields of the + * var node. + * + */ + ((Var*)clause)->varattno = subplanVar->resdom->resno; + } else if (is_funcclause(clause)) { + /* + * This is a function. Recursively call this routine + * for its arguments... + */ + foreach (t, ((Expr*)clause)->args) { + replace_agg_clause(lfirst(t), subplanTargetList); + } + } else if (IsA(clause,Aggreg)) { + replace_agg_clause(((Aggreg*)clause)->target, subplanTargetList); + } else if (IsA(clause,ArrayRef)) { + ArrayRef *aref = (ArrayRef *)clause; + + /* + * This is an arrayref. Recursively call this routine + * for its expression and its index expression... 
+ */ + foreach (t, aref->refupperindexpr) { + replace_agg_clause(lfirst(t),subplanTargetList); + } + foreach (t, aref->reflowerindexpr) { + replace_agg_clause(lfirst(t),subplanTargetList); + } + replace_agg_clause(aref->refexpr, subplanTargetList); + replace_agg_clause(aref->refassgnexpr, subplanTargetList); + } else if (is_opclause(clause)) { + /* + * This is an operator. Recursively call this routine + * for both its left and right operands + */ + replace_agg_clause((Node*)get_leftop((Expr*)clause), + subplanTargetList); + replace_agg_clause((Node*)get_rightop((Expr*)clause), + subplanTargetList); + } else if (IsA(clause,Param) || IsA(clause,Const)) { + /* do nothing! */ + } else { + /* + * Ooops! we can not handle that! + */ + elog(WARN,"replace_agg_clause: Can not handle this tlist!\n"); + } + +} + + diff --git a/src/backend/optimizer/plancat.h b/src/backend/optimizer/plancat.h new file mode 100644 index 00000000000..426778577de --- /dev/null +++ b/src/backend/optimizer/plancat.h @@ -0,0 +1,65 @@ +/*------------------------------------------------------------------------- + * + * plancat.h-- + * prototypes for plancat.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: plancat.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PLANCAT_H +#define PLANCAT_H + +#include "c.h" + +/* + * transient data structure to hold return value of index_info. Note that + * indexkeys, orderOprs and classlist is "null-terminated". + */ +typedef struct IdxInfoRetval { + Oid relid; /* OID of the index relation (not the OID + * of the relation being indexed) + */ + Oid relam; /* OID of the pg_am of this index */ + int pages; /* number of pages in the index relation */ + int tuples; /* number of tuples in the index relation */ + int *indexkeys; /* keys over which we're indexing */ + Oid *orderOprs; /* operators used for ordering purposes */ + Oid *classlist; /* classes of AM operators */ + Oid indproc; + Node *indpred; +} IdxInfoRetval; + + +extern void relation_info(Query *root, + Oid relid, + bool *hashindex, int *pages, + int *tuples); + +extern bool index_info(Query *root, + bool first, int relid, IdxInfoRetval *info); + +extern Cost +restriction_selectivity(Oid functionObjectId, + Oid operatorObjectId, + Oid relationObjectId, + AttrNumber attributeNumber, + char *constValue, + int32 constFlag); + +extern void +index_selectivity(Oid indid, Oid *classes, List *opnos, + Oid relid, List *attnos, List *values, List *flags, + int32 nkeys, float *idxPages, float *idxSelec); + +extern Cost join_selectivity(Oid functionObjectId, Oid operatorObjectId, + Oid relationObjectId1, AttrNumber attributeNumber1, + Oid relationObjectId2, AttrNumber attributeNumber2); + +extern List *find_inheritance_children(Oid inhparent); +extern List *VersionGetParents(Oid verrelid); + +#endif /* PLANCAT_H */ diff --git a/src/backend/optimizer/planmain.h b/src/backend/optimizer/planmain.h new file mode 100644 index 00000000000..b224e89550e --- /dev/null +++ b/src/backend/optimizer/planmain.h @@ -0,0 +1,60 @@ +/*------------------------------------------------------------------------- + * + * planmain.h-- + * prototypes for various files in optimizer/plan + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: planmain.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PLANMAIN_H +#define 
PLANMAIN_H + + +/* + * prototypes for plan/planmain.c + */ +extern Plan *query_planner(Query *root, + int command_type, List *tlist, List *qual); + + +/* + * prototypes for plan/createplan.c + */ +extern Plan *create_plan(Path *best_path); +extern SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid, + Plan *lefttree); +extern Sort *make_sort(List *tlist, Oid tempid, Plan *lefttree, + int keycount); +extern Agg *make_agg(List *tlist, int nagg, Aggreg **aggs); +extern Group *make_group(List *tlist, bool tuplePerGroup, int ngrp, + AttrNumber *grpColIdx, Sort *lefttree); +extern Unique *make_unique(List *tlist, Plan *lefttree, char *uniqueAttr); +extern List *generate_fjoin(List *tlist); + + +/* + * prototypes for plan/initsplan.c + */ +extern void initialize_base_rels_list(Query *root, List *tlist); +extern void initialize_base_rels_jinfo(Query *root, List *clauses); +extern void initialize_join_clause_info(List *rel_list); +extern void add_missing_vars_to_base_rels(Query *root, List *tlist); + +/* + * prototypes for plan/setrefs.c + */ +extern void set_tlist_references(Plan *plan); +extern List *join_references(List *clauses, List *outer_tlist, + List *inner_tlist); +extern List *index_outerjoin_references(List *inner_indxqual, + List *outer_tlist, Index inner_relid); +extern void set_result_tlist_references(Result *resultNode); +extern void set_agg_tlist_references(Agg *aggNode); +extern void set_agg_agglist_references(Agg *aggNode); + + +#endif /* PLANMAIN_H */ diff --git a/src/backend/optimizer/planner.h b/src/backend/optimizer/planner.h new file mode 100644 index 00000000000..5f049494802 --- /dev/null +++ b/src/backend/optimizer/planner.h @@ -0,0 +1,24 @@ +/*------------------------------------------------------------------------- + * + * planner.h-- + * prototypes for planner.c. 
+ * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: planner.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PLANNER_H +#define PLANNER_H + +/* +#include "optimizer/internal.h" +#include "parser/parse_query.h" +*/ + +extern Plan *planner(Query *parse); +extern void pg_checkretval(Oid rettype, QueryTreeList *querytree_list); + +#endif /* PLANNER_H */ diff --git a/src/backend/optimizer/prep.h b/src/backend/optimizer/prep.h new file mode 100644 index 00000000000..679097641fe --- /dev/null +++ b/src/backend/optimizer/prep.h @@ -0,0 +1,51 @@ +/*------------------------------------------------------------------------- + * + * prep.h-- + * prototypes for files in prep.c + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: prep.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PREP_H +#define PREP_H + +#include "nodes/primnodes.h" +#include "nodes/plannodes.h" + +/* + * prototypes for archive.h + */ +extern void plan_archive(List *rt); +extern List *find_archive_rels(Oid relid); + +/* + * prototypes for prepqual.h + */ +extern List *preprocess_qualification(Expr *qual, List *tlist, + List **existentialQualPtr); +extern List *cnfify(Expr *qual, bool removeAndFlag); + +/* + * prototypes for preptlist.h + */ +extern List *preprocess_targetlist(List *tlist, int command_type, + Index result_relation, List *range_table); + +/* + * prototypes for prepunion.h + */ +typedef enum UnionFlag { + INHERITS_FLAG, ARCHIVE_FLAG, VERSION_FLAG +} UnionFlag; + +extern List *find_all_inheritors(List *unexamined_relids, + List *examined_relids); +extern int first_matching_rt_entry(List *rangetable, UnionFlag flag); +extern Append *plan_union_queries(Index rt_index, Query *parse, + UnionFlag flag); + +#endif /* PREP_H */ diff --git a/src/backend/optimizer/prep/Makefile.inc b/src/backend/optimizer/prep/Makefile.inc new file mode 100644 index 00000000000..40026716c9e --- /dev/null +++ b/src/backend/optimizer/prep/Makefile.inc @@ -0,0 +1,14 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for optimizer/prep +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/optimizer/prep/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:37 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= archive.c prepqual.c preptlist.c prepunion.c diff --git a/src/backend/optimizer/prep/archive.c b/src/backend/optimizer/prep/archive.c new file mode 100644 index 00000000000..0303eca70f1 --- /dev/null +++ b/src/backend/optimizer/prep/archive.c @@ -0,0 +1,66 @@ +/*------------------------------------------------------------------------- + * + * archive.c-- + * Support for planning scans on archived relations + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/Attic/archive.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include <stdio.h> /* for sprintf() */ +#include <sys/types.h> /* for u_int in relcache.h */ +#include "postgres.h" + +#include "utils/rel.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/relcache.h" +#include
"catalog/pg_class.h" +#include "nodes/pg_list.h" +#include "nodes/parsenodes.h" +#include "optimizer/prep.h" +#include "commands/creatinh.h" + +void +plan_archive(List *rt) +{ + List *rtitem; + RangeTblEntry *rte; + TimeRange *trange; + Relation r; + Oid reloid; + + foreach(rtitem, rt) { + rte = lfirst(rtitem); + trange = rte->timeRange; + if (trange) { + reloid = rte->relid; + r = RelationIdGetRelation(reloid); + if (r->rd_rel->relarch != 'n') { + rte->archive = true; + } + } + } +} + + +/* + * find_archive_rels -- Given a particular relid, find the archive + * relation's relid. + */ +List * +find_archive_rels(Oid relid) +{ + Relation arel; + char *arelName; + + arelName = MakeArchiveName(relid); + arel = RelationNameGetRelation(arelName); + pfree(arelName); + + return lconsi(arel->rd_id, lconsi(relid, NIL)); +} diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c new file mode 100644 index 00000000000..e1aafa7db1e --- /dev/null +++ b/src/backend/optimizer/prep/prepqual.c @@ -0,0 +1,582 @@ +/*------------------------------------------------------------------------- + * + * prepqual.c-- + * Routines for preprocessing the parse tree qualification + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepqual.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/makefuncs.h" + +#include "optimizer/internal.h" +#include "optimizer/clauses.h" +#include "optimizer/prep.h" + +#include "utils/lsyscache.h" + +static Expr *pull_args(Expr *qual); +static List *pull_ors(List *orlist); +static List *pull_ands(List *andlist); +static Expr *find_nots(Expr *qual); +static Expr *push_nots(Expr *qual); +static Expr *normalize(Expr *qual); +static List *or_normalize(List *orlist); +static List *distribute_args(List *item, List *args); +static List *qualcleanup(Expr *qual); +static List *remove_ands(Expr *qual); +static List *remove_duplicates(List *list); + +/* + * preprocess-qualification-- + * Driver routine for modifying the parse tree qualification. + * + * Returns the new base qualification and the existential qualification + * in existentialQualPtr. + * + * XXX right now, update_clauses() does nothing so + * preprocess-qualification simply converts the qual in conjunctive + * normal form (see cnfify() below ) + */ +List * +preprocess_qualification(Expr *qual, List *tlist, List **existentialQualPtr) +{ + List *cnf_qual = cnfify(qual, true); +/* + List *existential_qual = + update_clauses(intCons(_query_result_relation_, + update_relations(tlist)), + cnf_qual, + _query_command_type_); + if (existential_qual) { + *existentialQualPtr = existential_qual; + return set_difference(cnf_qual, existential_qual); + } else { + *existentialQualPtr = NIL; + return cnf_qual; + } +*/ + /* update_clauses() is not working right now */ + *existentialQualPtr = NIL; + return cnf_qual; + +} + +/***************************************************************************** + * + * CNF CONVERSION ROUTINES + * + * NOTES: + * The basic algorithms for normalizing the qualification are taken + * from ingres/source/qrymod/norml.c + * + * Remember that the initial qualification may consist of ARBITRARY + * combinations of clauses. In addition, before this routine is called, + * the qualification will contain explicit "AND"s. 
+ * + *****************************************************************************/ + + +/* + * cnfify-- + * Convert a qualification to conjunctive normal form by applying + * successive normalizations. + * + * Returns the modified qualification with an extra level of nesting. + * + * If 'removeAndFlag' is true then it removes the explicit ANDs. + * + * NOTE: this routine is called by the planner (removeAndFlag = true) + * and from the rule manager (removeAndFlag = false). + * + */ +List * +cnfify(Expr *qual, bool removeAndFlag) +{ + Expr *newqual = NULL; + + if (qual != NULL) { + newqual = find_nots(pull_args(qual)); + newqual = normalize(pull_args(newqual)); + newqual = (Expr*)qualcleanup(pull_args(newqual)); + newqual = pull_args(newqual);; + + if (removeAndFlag) { + if(and_clause((Node*)newqual)) + newqual=(Expr*)remove_ands(newqual); + else + newqual=(Expr*)remove_ands(make_andclause(lcons(newqual,NIL))); + } + } + else if (qual!=NULL) + newqual = (Expr*)lcons(qual, NIL); + + return (List*)(newqual); +} + +/* + * pull-args-- + * Given a qualification, eliminate nested 'and' and 'or' clauses. + * + * Returns the modified qualification. + * + */ +static Expr * +pull_args(Expr *qual) +{ + if (qual==NULL) + return (NULL); + + if (is_opclause((Node*)qual)) { + return(make_clause(qual->opType, qual->oper, + lcons(pull_args((Expr*)get_leftop(qual)), + lcons(pull_args((Expr*)get_rightop(qual)), + NIL)))); + } else if (and_clause((Node*)qual)) { + List *temp = NIL; + List *t_list = NIL; + + foreach (temp, qual->args) + t_list = lappend (t_list, pull_args(lfirst(temp))); + return (make_andclause (pull_ands (t_list))); + }else if (or_clause((Node*)qual)) { + List *temp = NIL; + List *t_list = NIL; + + foreach (temp, qual->args) + t_list = lappend (t_list, pull_args(lfirst(temp))); + return (make_orclause (pull_ors (t_list))); + } else if (not_clause((Node*)qual)) { + return (make_notclause (pull_args (get_notclausearg (qual)))); + } else { + return (qual); + } +} + +/* + * pull-ors-- + * Pull the arguments of an 'or' clause nested within another 'or' + * clause up into the argument list of the parent. + * + * Returns the modified list. + */ +static List * +pull_ors(List *orlist) +{ + if (orlist==NIL) + return (NIL); + + if (or_clause(lfirst(orlist))) { + List *args = ((Expr*)lfirst(orlist))->args; + return (pull_ors(nconc(copyObject((Node*)args), + copyObject((Node*)lnext(orlist))))); + } else { + return (lcons(lfirst(orlist), pull_ors(lnext(orlist)))); + } +} + +/* + * pull-ands-- + * Pull the arguments of an 'and' clause nested within another 'and' + * clause up into the argument list of the parent. + * + * Returns the modified list. + */ +static List * +pull_ands(List *andlist) +{ + if (andlist==NIL) + return (NIL); + + if (and_clause (lfirst(andlist))) { + List *args = ((Expr*)lfirst(andlist))->args; + return (pull_ands(nconc(copyObject((Node*)args), + copyObject((Node*)lnext(andlist))))); + } else { + return (lcons(lfirst(andlist), pull_ands(lnext(andlist)))); + } +} + +/* + * find-nots-- + * Traverse the qualification, looking for 'not's to take care of. + * For 'not' clauses, remove the 'not' and push it down to the clauses' + * descendants. + * For all other clause types, simply recurse. + * + * Returns the modified qualification. 
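+ *
+ * For example, a qual containing ("NOT" ("OR" B C)) comes back with
+ * that subtree replaced by ("AND" ("NOT" B) ("NOT" C)), or with the
+ * "NOT"s folded into the operators when negators exist; see
+ * push_nots() below.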
+ * + */ +static Expr * +find_nots(Expr *qual) +{ + if (qual==NULL) + return (NULL); + + if (is_opclause((Node*)qual)) { + return (make_clause(qual->opType, qual->oper, + lcons(find_nots((Expr*)get_leftop(qual)), + lcons(find_nots((Expr*)get_rightop(qual)), + NIL)))); + } else if (and_clause ((Node*)qual)) { + List *temp = NIL; + List *t_list = NIL; + + foreach (temp, qual->args) { + t_list = lappend(t_list,find_nots(lfirst(temp))); + } + + return (make_andclause(t_list)); + } else if (or_clause((Node*)qual)) { + List *temp = NIL; + List *t_list = NIL; + + foreach (temp, qual->args) { + t_list = lappend(t_list,find_nots(lfirst(temp))); + } + return (make_orclause (t_list)); + } else if (not_clause((Node*)qual)) + return (push_nots(get_notclausearg (qual))); + else + return (qual); +} + +/* + * push-nots-- + * Negate the descendants of a 'not' clause. + * + * Returns the modified qualification. + * + */ +static Expr * +push_nots(Expr *qual) +{ + if (qual==NULL) + return (NULL); + + /* + * Negate an operator clause if possible: + * ("NOT" (< A B)) => (> A B) + * Otherwise, retain the clause as it is (the 'not' can't be pushed + * down any farther). + */ + if (is_opclause((Node*)qual)) { + Oper *oper = (Oper*)((Expr*)qual)->oper; + Oid negator = get_negator(oper->opno); + + if(negator) { + Oper *op = (Oper*) makeOper(negator, + InvalidOid, + oper->opresulttype, + 0, NULL); + op->op_fcache = (FunctionCache *) NULL; + return + (make_opclause(op, get_leftop(qual), get_rightop(qual))); + } else { + return (make_notclause(qual)); + } + } else if (and_clause((Node*)qual)) { + /* Apply DeMorgan's Laws: + * ("NOT" ("AND" A B)) => ("OR" ("NOT" A) ("NOT" B)) + * ("NOT" ("OR" A B)) => ("AND" ("NOT" A) ("NOT" B)) + * i.e., continue negating down through the clause's descendants. + */ + List *temp = NIL; + List *t_list = NIL; + + foreach(temp, qual->args) { + t_list = lappend(t_list,push_nots(lfirst(temp))); + } + return (make_orclause (t_list)); + } else if (or_clause((Node*)qual)) { + List *temp = NIL; + List *t_list = NIL; + + foreach(temp, qual->args) { + t_list = lappend(t_list,push_nots(lfirst(temp))); + } + return (make_andclause (t_list)); + } else if (not_clause((Node*)qual)) + /* Another 'not' cancels this 'not', so eliminate the 'not' and + * stop negating this branch. + */ + return (find_nots (get_notclausearg (qual))); + else + /* We don't know how to negate anything else, place a 'not' at this + * level. + */ + return (make_notclause (qual)); +} + +/* + * normalize-- + * Given a qualification tree with the 'not's pushed down, convert it + * to a tree in CNF by repeatedly applying the rule: + * ("OR" A ("AND" B C)) => ("AND" ("OR" A B) ("OR" A C)) + * bottom-up. + * Note that 'or' clauses will always be turned into 'and' clauses. + * + * Returns the modified qualification. 
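+ *
+ * For example, repeated application of the rule eventually rewrites
+ * ("OR" ("AND" A B) ("AND" C D)) as the conjunction of
+ * ("OR" A C), ("OR" A D), ("OR" B C) and ("OR" B D).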
+ * + */ +static Expr * +normalize(Expr *qual) +{ + if (qual==NULL) + return (NULL); + + if (is_opclause((Node*)qual)) { + Expr *expr = (Expr*)qual; + return (make_clause(expr->opType, expr->oper, + lcons(normalize((Expr*)get_leftop(qual)), + lcons(normalize((Expr*)get_rightop(qual)), + NIL)))); + } else if (and_clause((Node*)qual)) { + List *temp = NIL; + List *t_list = NIL; + + foreach (temp, qual->args) { + t_list = lappend(t_list,normalize(lfirst(temp))); + } + return (make_andclause (t_list)); + } else if (or_clause((Node*)qual)) { + /* XXX - let form, maybe incorrect */ + List *orlist = NIL; + List *temp = NIL; + bool has_andclause = FALSE; + + foreach(temp, qual->args) { + orlist = lappend(orlist,normalize(lfirst(temp))); + } + foreach (temp, orlist) { + if (and_clause (lfirst(temp))) { + has_andclause = TRUE; + break; + } + } + if (has_andclause == TRUE) + return (make_andclause(or_normalize(orlist))); + else + return (make_orclause(orlist)); + + } else if (not_clause((Node*)qual)) + return (make_notclause (normalize (get_notclausearg (qual)))); + else + return (qual); +} + +/* + * or-normalize-- + * Given a list of exprs which are 'or'ed together, distribute any + * 'and' clauses. + * + * Returns the modified list. + * + */ +static List * +or_normalize(List *orlist) +{ + List *distributable = NIL; + List *new_orlist = NIL; + List *temp = NIL; + + if (orlist==NIL) + return NIL; + + foreach(temp, orlist) { + if (and_clause(lfirst(temp))) + distributable = lfirst(temp); + } + if (distributable) + new_orlist = LispRemove(distributable,orlist); + + if(new_orlist) { + return + (or_normalize(lcons(distribute_args(lfirst(new_orlist), + ((Expr*)distributable)->args), + lnext(new_orlist)))); + }else { + return (orlist); + } +} + +/* + * distribute-args-- + * Create new 'or' clauses by or'ing 'item' with each element of 'args'. + * E.g.: (distribute-args A ("AND" B C)) => ("AND" ("OR" A B) ("OR" A C)) + * + * Returns an 'and' clause. + * + */ +static List * +distribute_args(List *item, List *args) +{ + List *or_list = NIL; + List *n_list = NIL; + List *temp = NIL; + List *t_list = NIL; + + if (args==NULL) + return (item); + + foreach (temp,args) { + n_list = or_normalize(pull_ors(lcons(item, + lcons(lfirst(temp),NIL)))); + or_list = (List*)make_orclause(n_list); + t_list = lappend(t_list,or_list); + } + return ((List*)make_andclause(t_list)); +} + +/* + * qualcleanup-- + * Fix up a qualification by removing duplicate entries (left over from + * normalization), and by removing 'and' and 'or' clauses which have only + * one valid expr (e.g., ("AND" A) => A). + * + * Returns the modified qualfication. 
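+ *
+ * For example, ("AND" A A B) becomes ("AND" A B), and a one-armed
+ * clause such as ("OR" A) collapses to just A.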
+ * + */ +static List * +qualcleanup(Expr *qual) +{ + if (qual==NULL) + return (NIL); + + if (is_opclause((Node*)qual)) { + return ((List*)make_clause(qual->opType, qual->oper, + lcons(qualcleanup((Expr*)get_leftop(qual)), + lcons(qualcleanup((Expr*)get_rightop(qual)), + NIL)))); + } else if (and_clause((Node*)qual)) { + List *temp = NIL; + List *t_list = NIL; + List *new_and_args = NIL; + + foreach(temp, qual->args) + t_list = lappend(t_list,qualcleanup(lfirst(temp))); + + new_and_args = remove_duplicates(t_list); + + if(length (new_and_args) > 1) + return ((List*)make_andclause(new_and_args)); + else + return (lfirst(new_and_args)); + } + else if (or_clause((Node*)qual)) { + List *temp = NIL; + List *t_list = NIL; + List *new_or_args = NIL; + + foreach (temp, qual->args) + t_list = lappend(t_list,qualcleanup(lfirst(temp))); + + new_or_args = remove_duplicates(t_list); + + + if(length (new_or_args) > 1) + return ((List*)make_orclause (new_or_args)); + else + return (lfirst (new_or_args)); + } else if (not_clause((Node*)qual)) + return ((List*)make_notclause((Expr*)qualcleanup((Expr*)get_notclausearg(qual)))); + + else + return ((List*)qual); +} + +/* + * remove-ands-- + * Remove the explicit "AND"s from the qualification: + * ("AND" A B) => (A B) + * + * RETURNS : qual + * MODIFIES: qual + */ +static List * +remove_ands(Expr *qual) +{ + List *t_list = NIL; + + if (qual==NULL) + return (NIL); + if (is_opclause((Node*)qual)) { + return ((List*)make_clause(qual->opType, qual->oper, + lcons(remove_ands((Expr*)get_leftop(qual)), + lcons(remove_ands((Expr*)get_rightop(qual)), + NIL)))); + } else if (and_clause((Node*)qual)) { + List *temp = NIL; + foreach (temp, qual->args) + t_list = lappend(t_list,remove_ands(lfirst(temp))); + return(t_list); + } else if (or_clause((Node*)qual)) { + List *temp = NIL; + foreach (temp, qual->args) + t_list = lappend(t_list,remove_ands(lfirst(temp))); + return ((List*)make_orclause((List*)t_list)); + } else if (not_clause((Node*)qual)) { + return ((List*)make_notclause((Expr*)remove_ands((Expr*)get_notclausearg (qual)))); + } else { + return ((List*)qual); + } +} + +/***************************************************************************** + * + * EXISTENTIAL QUALIFICATIONS + * + *****************************************************************************/ + +/* + * update-relations-- + * Returns the range table indices (i.e., varnos) for all relations which + * are referenced in the target list. 
+ * + */ +#if 0 +static List * +update_relations(List *tlist) +{ + return(NIL); +} +#endif + +/***************************************************************************** + * + * + * + *****************************************************************************/ + +static List * +remove_duplicates(List *list) +{ + List *i; + List *j; + List *result = NIL; + bool there_exists_duplicate = false; + + if (length(list) == 1) + return(list); + + foreach (i, list) { + if (i != NIL) { + foreach (j, lnext(i)) { + if (equal(lfirst(i), lfirst(j))) + there_exists_duplicate = true; + } + if (!there_exists_duplicate) + result = lappend(result, lfirst(i)); + + there_exists_duplicate = false; + } + } + return(result); +} diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c new file mode 100644 index 00000000000..fe1c2c92ba2 --- /dev/null +++ b/src/backend/optimizer/prep/preptlist.c @@ -0,0 +1,322 @@ +/*------------------------------------------------------------------------- + * + * preptlist.c-- + * Routines to preprocess the parse tree target list + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/preptlist.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/relation.h" +#include "nodes/primnodes.h" +#include "nodes/parsenodes.h" + +#include "nodes/makefuncs.h" + +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "utils/palloc.h" + +#include "parser/parsetree.h" /* for getrelid() */ +#include "parser/catalog_utils.h" + +#include "optimizer/internal.h" +#include "optimizer/prep.h" +#include "optimizer/clauses.h" +#include "optimizer/tlist.h" + +static List *expand_targetlist(List *tlist, Oid relid, int command_type, + Index result_relation); +static List *replace_matching_resname(List *new_tlist, + List *old_tlist); +static List *new_relation_targetlist(Oid relid, Index rt_index, + NodeTag node_type); + + +/* + * preprocess-targetlist-- + * Driver for preprocessing the parse tree targetlist. + * + * 1. Deal with appends and replaces by filling missing attributes + * in the target list. + * 2. Reset operator OIDs to the appropriate regproc ids. + * + * Returns the new targetlist. + */ +List * +preprocess_targetlist(List *tlist, + int command_type, + Index result_relation, + List *range_table) +{ + List *expanded_tlist = NIL; + Oid relid = InvalidOid; + List *t_list = NIL; + List *temp = NIL; + + if (result_relation>=1 && command_type != CMD_SELECT) { + relid = getrelid(result_relation, range_table); + } + + /* + * for heap_formtuple to work, the targetlist must match the exact + * order of the attributes. We also need to fill in the missing + * attributes here. -ay 10/94 + */ + expanded_tlist = + expand_targetlist(tlist, relid, command_type, result_relation); + + /* XXX should the fix-opids be this early?? */ + /* was mapCAR */ + foreach (temp,expanded_tlist) { + TargetEntry *tle = lfirst(temp); + if (tle->expr) + fix_opid(tle->expr); + } + t_list = copyObject(expanded_tlist); + + /* ------------------ + * for "replace" or "delete" queries, add ctid of the result + * relation into the target list so that the ctid can get + * propogate through the execution and in the end ExecReplace() + * will find the right tuple to replace or delete. This + * extra field will be removed in ExecReplace(). 
+ * For convinient, we append this extra field to the end of + * the target list. + * ------------------ + */ + if (command_type == CMD_UPDATE || command_type == CMD_DELETE) { + TargetEntry *ctid; + Resdom *resdom; + Var *var; + + resdom = makeResdom(length(t_list) + 1, + 27, + 6, + "ctid", + 0, + 0, + 1); + + var = makeVar(result_relation, -1, 27, result_relation, -1); + + ctid = makeNode(TargetEntry); + ctid->resdom = resdom; + ctid->expr = (Node *)var; + t_list = lappend(t_list, ctid); + } + + return(t_list); +} + +/***************************************************************************** + * + * TARGETLIST EXPANSION + * + *****************************************************************************/ + +/* + * expand-targetlist-- + * Given a target list as generated by the parser and a result relation, + * add targetlist entries for the attributes which have not been used. + * + * XXX This code is only supposed to work with unnested relations. + * + * 'tlist' is the original target list + * 'relid' is the relid of the result relation + * 'command' is the update command + * + * Returns the expanded target list, sorted in resno order. + */ +static List * +expand_targetlist(List *tlist, + Oid relid, + int command_type, + Index result_relation) +{ + NodeTag node_type = T_Invalid; + + switch (command_type) { + case CMD_INSERT: + node_type = (NodeTag)T_Const; + break; + case CMD_UPDATE: + node_type = (NodeTag)T_Var; + break; + } + + if(node_type != T_Invalid) { + List *ntlist = new_relation_targetlist(relid, + result_relation, + node_type); + + return (replace_matching_resname(ntlist, tlist)); + } else { + return (tlist); + } + +} + + +static List * +replace_matching_resname(List *new_tlist, List *old_tlist) +{ + List *temp, *i; + List *t_list = NIL; + + foreach (i,new_tlist) { + TargetEntry *new_tle = (TargetEntry *)lfirst(i); + TargetEntry *matching_old_tl = NULL; + + foreach (temp, old_tlist) { + TargetEntry *old_tle = (TargetEntry *)lfirst(temp); + + old_tle = lfirst(temp); + if (!strcmp(old_tle->resdom->resname, + new_tle->resdom->resname)) { + matching_old_tl = old_tle; + break; + } + } + + if(matching_old_tl) { + matching_old_tl->resdom->resno = + new_tle->resdom->resno; + t_list = lappend(t_list, matching_old_tl); + } + else { + t_list = lappend(t_list, new_tle); + } + } + + /* + * It is possible that 'old_tlist' has some negative + * attributes (i.e. negative resnos). This only happens + * if this is a replace/append command and we explicitly + * specify a system attribute. Of course this is not a very good + * idea if this is a user query, but on the other hand the rule + * manager uses this mechanism to replace rule locks. + * + * So, copy all these entries to the end of the target list + * and set their 'resjunk' value to 1 to show that these are + * special attributes and have to be treated specially by the + * executor! + */ + foreach (temp, old_tlist) { + TargetEntry *old_tle, *new_tl; + Resdom *newresno; + + old_tle = lfirst(temp); + if (old_tle->resdom->resno < 0) { + newresno = (Resdom*) copyObject((Node*)old_tle->resdom); + newresno->resno = length(t_list) +1; + newresno->resjunk = 1; + new_tl = MakeTLE(newresno, old_tle->expr); + t_list = lappend(t_list, new_tl); + } + } + + return (t_list); +} + +/* + * new-relation-targetlist-- + * Generate a targetlist for the relation with relation OID 'relid' + * and rangetable index 'rt-index'. + * + * Returns the new targetlist. 
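+ *
+ * For an append (node_type = T_Const) each attribute is filled with a
+ * Const holding the type's default value, or a NULL constant when the
+ * type has no default; for a replace (node_type = T_Var) each
+ * attribute is filled with a Var referencing that same attribute of
+ * the result relation, so columns not mentioned in the query keep
+ * their old values.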
+ */ +static List * +new_relation_targetlist(Oid relid, Index rt_index, NodeTag node_type) +{ + AttrNumber attno; + List *t_list = NIL; + char *attname; + Oid atttype = 0; + int16 typlen = 0; + bool attisset = false; +/* Oid type_id; */ +/* type_id = RelationIdGetTypeId(relid); */ + + for(attno=1; attno <= get_relnatts(relid); attno++) { + attname = get_attname(/*type_id,*/ relid, attno); + atttype = get_atttype(/*type_id,*/ relid, attno); + /* + * Since this is an append or replace, the size of any set + * attribute is the size of the OID used to represent it. + */ + attisset = get_attisset(/* type_id,*/ relid, attname); + if (attisset) { + typlen = tlen(type("oid")); + } else { + typlen = get_typlen(atttype); + } + + switch (node_type) { + case T_Const: + { + struct varlena *typedefault = get_typdefault(atttype); + int temp = 0; + Const *temp2 = (Const*)NULL; + TargetEntry *temp3 = (TargetEntry *)NULL; + + if (typedefault==NULL) + temp = 0; + else + temp = typlen; + + temp2 = makeConst (atttype, + temp, + (Datum)typedefault, + (typedefault == (struct varlena *)NULL), + /* XXX this is bullshit */ + false, + false /* not a set */); + + temp3 = MakeTLE (makeResdom(attno, + atttype, + typlen, + attname, + 0, + (Oid)0, + 0), + (Node*)temp2); + t_list = lappend(t_list,temp3); + break; + } + case T_Var: + { + Var *temp_var = (Var*)NULL; + TargetEntry *temp_list = NULL; + + temp_var = + makeVar(rt_index, attno, atttype, rt_index, attno); + + temp_list = MakeTLE(makeResdom(attno, + atttype, + typlen, + attname, + 0, + (Oid)0, + 0), + (Node*)temp_var); + t_list = lappend(t_list,temp_list); + break; + } + default: /* do nothing */ + break; + } + } + + return(t_list); +} + + diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c new file mode 100644 index 00000000000..8e1491940db --- /dev/null +++ b/src/backend/optimizer/prep/prepunion.c @@ -0,0 +1,400 @@ +/*------------------------------------------------------------------------- + * + * prepunion.c-- + * Routines to plan archive, inheritance, union, and version queries + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/prep/prepunion.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/nodes.h" +#include "nodes/pg_list.h" +#include "nodes/execnodes.h" +#include "nodes/plannodes.h" +#include "nodes/relation.h" + +#include "parser/parse_query.h" +#include "parser/parsetree.h" + +#include "utils/elog.h" +#include "utils/lsyscache.h" + +#include "optimizer/internal.h" +#include "optimizer/prep.h" +#include "optimizer/plancat.h" +#include "optimizer/planner.h" +#include "optimizer/prep.h" + +static List *plan_union_query(List *relids, Index rt_index, + RangeTblEntry *rt_entry, Query *parse, UnionFlag flag, + List **union_rtentriesPtr); +static RangeTblEntry *new_rangetable_entry(Oid new_relid, + RangeTblEntry *old_entry); +static Query *subst_rangetable(Query *root, Index index, + RangeTblEntry *new_entry); +static void fix_parsetree_attnums(Index rt_index, Oid old_relid, + Oid new_relid, Query *parsetree); +static Append *make_append(List *unionplans, Index rt_index, + List *union_rt_entries, List *tlist); + + +/* + * find-all-inheritors - + * Returns a list of relids corresponding to relations that inherit + * attributes from any relations listed in either of the argument relid + * lists. 
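+ *
+ * For example (hypothetical hierarchy): if mgr and peon inherit from
+ * emp, and temp_peon inherits from peon, then starting from (emp) the
+ * result is the transitive closure emp, mgr, peon, temp_peon, i.e.
+ * the relation itself plus every direct and indirect child.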
+ */ +List * +find_all_inheritors(List *unexamined_relids, + List *examined_relids) +{ + List *new_inheritors = NIL; + List *new_examined_relids = NIL; + List *new_unexamined_relids = NIL; + + /* Find all relations which inherit from members of + * 'unexamined-relids' and store them in 'new-inheritors'. + */ + List *rels = NIL; + List *newrels = NIL; + + foreach(rels,unexamined_relids) { + newrels = (List*)LispUnioni(find_inheritance_children(lfirsti(rels)), + newrels); + } + new_inheritors = newrels; + + new_examined_relids = (List*)LispUnioni(examined_relids,unexamined_relids); + new_unexamined_relids = set_differencei(new_inheritors, + new_examined_relids); + + if (new_unexamined_relids==NULL) { + return(new_examined_relids); + } else { + return (find_all_inheritors (new_unexamined_relids, + new_examined_relids)); + } +} + +/* + * first-matching-rt-entry - + * Given a rangetable, find the first rangetable entry that represents + * the appropriate special case. + * + * Returns a rangetable index., Returns -1 if no matches + */ +int +first_matching_rt_entry (List *rangetable, UnionFlag flag) +{ + int count = 0; + List *temp = NIL; + + foreach(temp, rangetable) { + RangeTblEntry *rt_entry = lfirst(temp); + + switch(flag) { + case INHERITS_FLAG: + if (rt_entry->inh) + return count+1; + break; + case ARCHIVE_FLAG: + if (rt_entry->archive) + return count+1; + break; + default: + break; + } + count++; + } + + return(-1); +} + + +/* + * plan-union-queries-- + * + * Plans the queries for a given parent relation. + * + * Returns a list containing a list of plans and a list of rangetable + * entries to be inserted into an APPEND node. + * XXX - what exactly does this mean, look for make_append + */ +Append * +plan_union_queries(Index rt_index, + Query *parse, + UnionFlag flag) +{ + List *rangetable = parse->rtable; + RangeTblEntry *rt_entry = rt_fetch(rt_index,rangetable); + List *union_relids = NIL; + List *union_plans = NIL; + List *union_rt_entries = NIL; + + switch (flag) { + case INHERITS_FLAG: + union_relids = + find_all_inheritors(lconsi(rt_entry->relid, + NIL), + NIL); + break; + +#if 0 + case UNION_FLAG: + { + Index rt_index = 0; + union_plans = handleunion(root,rangetable,tlist,qual); + return (make_append (union_plans, + rt_index, rangetable, + ((Plan*)lfirst(union_plans))->targetlist )); + } + break; +#endif + + case VERSION_FLAG: + union_relids = VersionGetParents(rt_entry->relid); + break; + + case ARCHIVE_FLAG: + union_relids = find_archive_rels(rt_entry->relid); + break; + + default: + /* do nothing */ + break; + } + + /* + * Remove the flag for this relation, since we're about to handle it + * (do it before recursing!). + * XXX destructive parse tree change + */ + switch(flag) { + case INHERITS_FLAG: + rt_fetch(rt_index,rangetable)->inh = false; + break; + case ARCHIVE_FLAG: + rt_fetch(rt_index,rangetable)->archive = false; + break; + default: + break; + } + + /* XXX - can't find any reason to sort union-relids + * as paul did, so we're leaving it out for now + * (maybe forever) - jeff & lp + * + * [maybe so. btw, jeff & lp did the lisp conversion, according to Paul. + * -- ay 10/94.] + */ + union_plans = plan_union_query(union_relids, rt_index, rt_entry, + parse, flag, &union_rt_entries); + + return (make_append(union_plans, + rt_index, + union_rt_entries, + ((Plan*)lfirst(union_plans))->targetlist)); +} + + +/* + * plan-union-query-- + * Returns a list of plans for 'relids' and a list of range table entries + * in union_rtentries. 
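+ *
+ * Each relid gets its own copy of the query tree in which the
+ * rt_index'th range table entry is replaced by an entry for that
+ * relation and (except in the archive case) the attribute numbers are
+ * remapped; each copy is then planned independently with planner(),
+ * and plan_union_queries() wraps the resulting plans in an Append.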
+ */ +static List * +plan_union_query(List *relids, + Index rt_index, + RangeTblEntry *rt_entry, + Query *root, + UnionFlag flag, + List **union_rtentriesPtr) +{ + List *i; + List *union_plans = NIL; + List *union_rtentries = NIL; + + foreach (i, relids) { + int relid = lfirsti(i); + RangeTblEntry *new_rt_entry = new_rangetable_entry(relid, + rt_entry); + Query *new_root = subst_rangetable(root, + rt_index, + new_rt_entry); + + /* reset the uniqueflag and sortclause in parse tree root, so that + * sorting will only be done once after append + */ +/* new_root->uniqueFlag = false; */ + new_root->uniqueFlag = NULL; + new_root->sortClause = NULL; + if (flag == ARCHIVE_FLAG) { + /* + * the entire union query uses the same (most recent) schema. + * to do otherwise would require either ragged tuples or careful + * archiving and interpretation of pg_attribute... + */ + } else { + fix_parsetree_attnums(rt_index, + rt_entry->relid, + relid, + new_root); + } + + union_plans = lappend(union_plans, planner(new_root)); + union_rtentries = lappend(union_rtentries, new_rt_entry); + } + + *union_rtentriesPtr = union_rtentries; + return(union_plans); +} + +/* + * new-rangetable-entry - + * Replaces the name and relid of 'old-entry' with the values for + * 'new-relid'. + * + * Returns a copy of 'old-entry' with the parameters substituted. + */ +static RangeTblEntry * +new_rangetable_entry(Oid new_relid, RangeTblEntry *old_entry) +{ + RangeTblEntry *new_entry = copyObject(old_entry); + + /* ??? someone tell me what the following is doing! - ay 11/94 */ + if (!strcmp(new_entry->refname, "*CURRENT*") || + !strcmp(new_entry->refname, "*NEW*")) + new_entry->refname = get_rel_name(new_relid); + else + new_entry->relname = get_rel_name(new_relid); + + new_entry->relid = new_relid; + return(new_entry); +} + +/* + * subst-rangetable-- + * Replaces the 'index'th rangetable entry in 'root' with 'new-entry'. + * + * Returns a new copy of 'root'. + */ +static Query * +subst_rangetable(Query *root, Index index, RangeTblEntry *new_entry) +{ + Query *new_root = copyObject(root); + List *temp = NIL; + int i = 0; + + for(temp = new_root->rtable,i =1; i < index; temp =lnext(temp),i++) + ; + lfirst(temp) = new_entry; + + return (new_root); +} + +static void +fix_parsetree_attnums_nodes(Index rt_index, + Oid old_relid, + Oid new_relid, + Node *node) +{ + if (node==NULL) + return; + + switch(nodeTag(node)) { + case T_TargetEntry: + { + TargetEntry *tle = (TargetEntry *)node; + + fix_parsetree_attnums_nodes(rt_index, old_relid, new_relid, + tle->expr); + } + break; + case T_Expr: + { + Expr *expr = (Expr *)node; + fix_parsetree_attnums_nodes(rt_index, old_relid, new_relid, + (Node*)expr->args); + } + break; + case T_Var: + { + Var *var = (Var *)node; + Oid old_typeid, new_typeid; + +/* old_typeid = RelationIdGetTypeId(old_relid);*/ +/* new_typeid = RelationIdGetTypeId(new_relid);*/ + old_typeid = old_relid; + new_typeid = new_relid; + + if (var->varno == rt_index && var->varattno != 0) { + var->varattno = + get_attnum(new_typeid, + get_attname(old_typeid, var->varattno)); + } + } + break; + case T_List: + { + List *l; + foreach(l, (List*)node) { + fix_parsetree_attnums_nodes(rt_index, old_relid, new_relid, + (Node*)lfirst(l)); + } + } + break; + default: + break; + } +} + +/* + * fix-parsetree-attnums-- + * Replaces attribute numbers from the relation represented by + * 'old-relid' in 'parsetree' with the attribute numbers from + * 'new-relid'. + * + * Returns the destructively-modified parsetree. 
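+ *
+ * For example (hypothetical attribute numbers): if "salary" is
+ * attribute 3 in the old relation but attribute 5 in the new one,
+ * every Var with varno == rt_index and varattno == 3 has its varattno
+ * rewritten to 5; the mapping is done by attribute name via
+ * get_attname()/get_attnum().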
+ * + */ +static void +fix_parsetree_attnums(Index rt_index, + Oid old_relid, + Oid new_relid, + Query *parsetree) +{ + if (old_relid == new_relid) + return; + + fix_parsetree_attnums_nodes(rt_index, old_relid, new_relid, + (Node*)parsetree->targetList); + fix_parsetree_attnums_nodes(rt_index, old_relid, new_relid, + parsetree->qual); +} + +static Append * +make_append(List *unionplans, + Index rt_index, + List *union_rt_entries, + List *tlist) +{ + Append *node = makeNode(Append); + + node->unionplans = unionplans; + node->unionrelid = rt_index; + node->unionrtentries = union_rt_entries; + node->plan.cost = 0.0; + node->plan.state = (EState*)NULL; + node->plan.targetlist = tlist; + node->plan.qual = NIL; + node->plan.lefttree = (Plan*)NULL; + node->plan.righttree = (Plan*)NULL; + + return(node); +} diff --git a/src/backend/optimizer/tlist.h b/src/backend/optimizer/tlist.h new file mode 100644 index 00000000000..8906460de91 --- /dev/null +++ b/src/backend/optimizer/tlist.h @@ -0,0 +1,36 @@ +/*------------------------------------------------------------------------- + * + * tlist.h-- + * prototypes for tlist.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: tlist.h,v 1.1.1.1 1996/07/09 06:21:34 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef TLIST_H +#define TLIST_H + +extern int exec_tlist_length(List *targelist); +extern TargetEntry *tlistentry_member(Var *var, List *targetlist); +extern Expr *matching_tlvar(Var *var, List *targetlist); +extern void add_tl_element(Rel *rel, Var *var); +extern TargetEntry *create_tl_element(Var *var, int resdomno); +extern List *get_actual_tlist(List *tlist); +extern Resdom *tlist_member(Var *var, List *tlist); +extern Resdom *tlist_resdom(List *tlist, Resdom *resnode); + +extern TargetEntry *MakeTLE(Resdom *resdom, Node *expr); +extern Var *get_expr(TargetEntry *tle); + +extern TargetEntry *match_varid(Var *test_var, List *tlist); +extern List *new_unsorted_tlist(List *targetlist); +extern List *copy_vars(List *target, List *source); +extern List *flatten_tlist(List *tlist); +extern List *flatten_tlist_vars(List *full_tlist, + List *flat_tlist); +extern void AddGroupAttrToTlist(List *tlist, List *grpCl); + +#endif /* TLIST_H */ diff --git a/src/backend/optimizer/util/Makefile.inc b/src/backend/optimizer/util/Makefile.inc new file mode 100644 index 00000000000..18955d282c8 --- /dev/null +++ b/src/backend/optimizer/util/Makefile.inc @@ -0,0 +1,15 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for optimizer/util +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/optimizer/util/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ +# +#------------------------------------------------------------------------- + +SUBSRCS+= clauseinfo.c clauses.c indexnode.c internal.c plancat.c \ + joininfo.c keys.c ordering.c pathnode.c relnode.c tlist.c var.c diff --git a/src/backend/optimizer/util/clauseinfo.c b/src/backend/optimizer/util/clauseinfo.c new file mode 100644 index 00000000000..1ab747ee176 --- /dev/null +++ b/src/backend/optimizer/util/clauseinfo.c @@ -0,0 +1,187 @@ +/*------------------------------------------------------------------------- + * + * clauseinfo.c-- + * ClauseInfo node manipulation routines. 
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/Attic/clauseinfo.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/relation.h" +#include "nodes/nodeFuncs.h" + +#include "optimizer/internal.h" +#include "optimizer/clauses.h" +#include "optimizer/clauseinfo.h" + +/* + * valid-or-clause-- + * + * Returns t iff the clauseinfo node contains a 'normal' 'or' clause. + * + */ +bool +valid_or_clause(CInfo *clauseinfo) +{ + if (clauseinfo != NULL && + !single_node((Node*)clauseinfo->clause) && + !clauseinfo->notclause && + or_clause((Node*)clauseinfo->clause)) + return(true); + else + return(false); +} + +/* + * get-actual-clauses-- + * + * Returns a list containing the clauses from 'clauseinfo-list'. + * + */ +List * +get_actual_clauses(List *clauseinfo_list) +{ + List *temp = NIL; + List *result = NIL; + CInfo *clause = (CInfo *)NULL; + + foreach(temp,clauseinfo_list) { + clause = (CInfo *)lfirst(temp); + result = lappend(result,clause->clause); + } + return(result); +} + +/* + * XXX NOTE: + * The following routines must return their contents in the same order + * (e.g., the first clause's info should be first, and so on) or else + * get_index_sel() won't work. + * + */ + +/* + * get_relattvals-- + * For each member of a list of clauseinfo nodes to be used with an + * index, create a vectori-long specifying: + * the attnos, + * the values of the clause constants, and + * flags indicating the type and location of the constant within + * each clause. + * Each clause is of the form (op var some_type_of_constant), thus the + * flag indicating whether the constant is on the left or right should + * always be *SELEC-CONSTANT-RIGHT*. + * + * 'clauseinfo-list' is a list of clauseinfo nodes + * + * Returns a list of vectori-longs. + * + */ +void +get_relattvals(List *clauseinfo_list, + List **attnos, + List **values, + List **flags) +{ + List *result1 = NIL; + List *result2 = NIL; + List *result3 = NIL; + CInfo *temp = (CInfo *)NULL; + List *i = NIL; + + foreach (i,clauseinfo_list) { + int dummy; + AttrNumber attno; + Datum constval; + int flag; + + temp = (CInfo *)lfirst(i); + get_relattval((Node*)temp->clause, &dummy, &attno, &constval, &flag); + result1 = lappendi(result1, attno); + result2 = lappendi(result2, constval); + result3 = lappendi(result3, flag); + } + + *attnos = result1; + *values = result2; + *flags = result3; + return; +} + +/* + * get_joinvars -- + * Given a list of join clauseinfo nodes to be used with the index + * of an inner join relation, return three lists consisting of: + * the attributes corresponding to the inner join relation + * the value of the inner var clause (always "") + * whether the attribute appears on the left or right side of + * the operator. 
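+ *
+ *	For example (hypothetical relations): for a clause of the form
+ *	(inner.x = outer.y) where 'inner' is the relation identified by
+ *	'relid', the attno of inner.x is collected together with the flag
+ *	_SELEC_CONSTANT_RIGHT_; if the inner var instead appears on the
+ *	right of the operator, the flag is _SELEC_CONSTANT_LEFT_.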
+ * + * 'relid' is the inner join relation + * 'clauseinfo-list' is a list of qualification clauses to be used with + * 'rel' + * + */ +void +get_joinvars(Oid relid, + List *clauseinfo_list, + List **attnos, + List **values, + List **flags) +{ + List *result1 = NIL; + List *result2 = NIL; + List *result3 = NIL; + List *temp; + + foreach(temp, clauseinfo_list) { + CInfo *clauseinfo = lfirst(temp); + Expr *clause = clauseinfo->clause; + + if( IsA (get_leftop(clause),Var) && + (relid == (get_leftop(clause))->varno)) { + + result1 = lappendi(result1, (get_leftop(clause))->varattno); + result2 = lappend(result2, ""); + result3 = lappendi(result3, _SELEC_CONSTANT_RIGHT_); + } else { + result1 = lappendi(result1, (get_rightop(clause))->varattno); + result2 = lappend(result2, ""); + result3 = lappendi(result3, _SELEC_CONSTANT_LEFT_); + } + } + *attnos = result1; + *values = result2; + *flags = result3; + return; +} + +/* + * get_opnos-- + * Create and return a list containing the clause operators of each member + * of a list of clauseinfo nodes to be used with an index. + * + */ +List * +get_opnos(List *clauseinfo_list) +{ + CInfo *temp = (CInfo *)NULL; + List *result = NIL; + List *i = NIL; + + foreach(i,clauseinfo_list) { + temp = (CInfo *)lfirst(i); + result = + lappendi(result, + (((Oper*)temp->clause->oper)->opno)); + } + return(result); +} + + diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c new file mode 100644 index 00000000000..daba4d8fdb0 --- /dev/null +++ b/src/backend/optimizer/util/clauses.c @@ -0,0 +1,736 @@ +/*------------------------------------------------------------------------- + * + * clauses.c-- + * routines to manipulate qualification clauses + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/clauses.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + * HISTORY + * AUTHOR DATE MAJOR EVENT + * Andrew Yu Nov 3, 1994 clause.c and clauses.c combined + * + *------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "nodes/relation.h" +#include "nodes/parsenodes.h" +#include "nodes/makefuncs.h" +#include "nodes/nodeFuncs.h" + +#include "catalog/pg_aggregate.h" + +#include "utils/elog.h" +#include "utils/syscache.h" +#include "utils/lsyscache.h" + +#include "optimizer/clauses.h" +#include "optimizer/internal.h" +#include "optimizer/var.h" + + +Expr * +make_clause(int type, Node *oper, List *args) +{ + if (type == AND_EXPR || type == OR_EXPR || type == NOT_EXPR || + type == OP_EXPR || type == FUNC_EXPR) { + Expr *expr = makeNode(Expr); + + /* + * assume type checking already done and we don't need the type of + * the expr any more. + */ + expr->typeOid = InvalidOid; + expr->opType = type; + expr->oper = oper; /* ignored for AND, OR, NOT */ + expr->args = args; + return expr; + }else { + /* will this ever happen? translated from lispy C code - ay 10/94 */ + return((Expr*)args); + } +} + + +/***************************************************************************** + * OPERATOR clause functions + *****************************************************************************/ + + +/* + * is_opclause-- + * + * Returns t iff the clause is an operator clause: + * (op expr expr) or (op expr). + * + * [historical note: is_clause has the exact functionality and is used + * throughout the code. They're renamed to is_opclause for clarity. 
+ * - ay 10/94.] + */ +bool +is_opclause(Node *clause) +{ + return + (clause!=NULL && + nodeTag(clause)==T_Expr && ((Expr*)clause)->opType==OP_EXPR); +} + +/* + * make_opclause-- + * Creates a clause given its operator left operand and right + * operand (if it is non-null). + * + */ +Expr * +make_opclause(Oper *op, Var *leftop, Var *rightop) +{ + Expr *expr = makeNode(Expr); + + expr->typeOid = InvalidOid; /* assume type checking done */ + expr->opType = OP_EXPR; + expr->oper = (Node*)op; + expr->args = makeList(leftop, rightop, -1); + return expr; +} + +/* + * get_leftop-- + * + * Returns the left operand of a clause of the form (op expr expr) + * or (op expr) + * NB: it is assumed (for now) that all expr must be Var nodes + */ +Var * +get_leftop(Expr *clause) +{ + if (clause->args!=NULL) + return(lfirst(clause->args)); + else + return NULL; +} + +/* + * get_rightop + * + * Returns the right operand in a clause of the form (op expr expr). + * + */ +Var * +get_rightop(Expr *clause) +{ + if (clause->args!=NULL && lnext(clause->args)!=NULL) + return (lfirst(lnext(clause->args))); + else + return NULL; +} + +/***************************************************************************** + * AGG clause functions + *****************************************************************************/ + +bool +agg_clause(Node *clause) +{ + return + (clause!=NULL && nodeTag(clause)==T_Aggreg); +} + +/***************************************************************************** + * FUNC clause functions + *****************************************************************************/ + +/* + * is_funcclause-- + * + * Returns t iff the clause is a function clause: (func { expr }). + * + */ +bool +is_funcclause(Node *clause) +{ + return + (clause!=NULL && + nodeTag(clause)==T_Expr && ((Expr*)clause)->opType==FUNC_EXPR); +} + +/* + * make_funcclause-- + * + * Creates a function clause given the FUNC node and the functional + * arguments. + * + */ +Expr * +make_funcclause(Func *func, List *funcargs) +{ + Expr *expr = makeNode(Expr); + + expr->typeOid = InvalidOid; /* assume type checking done */ + expr->opType = FUNC_EXPR; + expr->oper = (Node*)func; + expr->args = funcargs; + return expr; +} + +/***************************************************************************** + * OR clause functions + *****************************************************************************/ + +/* + * or_clause-- + * + * Returns t iff the clause is an 'or' clause: (OR { expr }). + * + */ +bool +or_clause(Node *clause) +{ + return + (clause!=NULL && + nodeTag(clause)==T_Expr && ((Expr*)clause)->opType==OR_EXPR); +} + +/* + * make_orclause-- + * + * Creates an 'or' clause given a list of its subclauses. + * + */ +Expr * +make_orclause(List *orclauses) +{ + Expr *expr = makeNode(Expr); + + expr->typeOid = InvalidOid; /* assume type checking done */ + expr->opType = OR_EXPR; + expr->oper = NULL; + expr->args = orclauses; + return expr; +} + +/***************************************************************************** + * NOT clause functions + *****************************************************************************/ + +/* + * not_clause-- + * + * Returns t iff this is a 'not' clause: (NOT expr). + * + */ +bool +not_clause(Node *clause) +{ + return + (clause!=NULL && + nodeTag(clause)==T_Expr && ((Expr*)clause)->opType == NOT_EXPR); +} + +/* + * make_notclause-- + * + * Create a 'not' clause given the expression to be negated. 
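+ *
+ *	A minimal sketch (eq_op, var_a and var_b stand for already-built
+ *	Oper and Var nodes):
+ *
+ *		Expr *eq  = make_opclause(eq_op, var_a, var_b);
+ *		Expr *neg = make_notclause(eq);
+ *
+ *	builds the clause (NOT (var_a eq_op var_b)).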
+ * + */ +Expr * +make_notclause(Expr *notclause) +{ + Expr *expr = makeNode(Expr); + + expr->typeOid = InvalidOid; /* assume type checking done */ + expr->opType = NOT_EXPR; + expr->oper = NULL; + expr->args = lcons(notclause, NIL); + return expr; +} + +/* + * get_notclausearg-- + * + * Retrieve the clause within a 'not' clause + * + */ +Expr * +get_notclausearg(Expr *notclause) +{ + return(lfirst(notclause->args)); +} + +/***************************************************************************** + * AND clause functions + *****************************************************************************/ + + +/* + * and_clause-- + * + * Returns t iff its argument is an 'and' clause: (AND { expr }). + * + */ +bool +and_clause(Node *clause) +{ + return + (clause!=NULL && + nodeTag(clause)==T_Expr && ((Expr*)clause)->opType == AND_EXPR); +} +/* + * make_andclause-- + * + * Create an 'and' clause given its arguments in a list. + * + */ +Expr * +make_andclause(List *andclauses) +{ + Expr *expr = makeNode(Expr); + + expr->typeOid = InvalidOid; /* assume type checking done */ + expr->opType = AND_EXPR; + expr->oper = NULL; + expr->args = andclauses; + return expr; +} + +/***************************************************************************** + * * + * * + * * + *****************************************************************************/ + + +/* + * pull-constant-clauses-- + * Scans through a list of qualifications and find those that + * contain no variables. + * + * Returns a list of the constant clauses in constantQual and the remaining + * quals as the return value. + * + */ +List * +pull_constant_clauses(List *quals, List **constantQual) +{ + List *q; + List *constqual=NIL; + List *restqual=NIL; + + foreach(q, quals) { + if (!contain_var_clause(lfirst(q))) { + constqual = lcons(lfirst(q), constqual); + }else { + restqual = lcons(lfirst(q), restqual); + } + } + freeList(quals); + *constantQual = constqual; + return restqual; +} + +/* + * clause-relids-vars-- + * Retrieves relids and vars appearing within a clause. + * Returns ((relid1 relid2 ... relidn) (var1 var2 ... varm)) where + * vars appear in the clause this is done by recursively searching + * through the left and right operands of a clause. + * + * Returns the list of relids and vars. + * + * XXX take the nreverse's out later + * + */ +void +clause_relids_vars(Node *clause, List **relids, List **vars) +{ + List *clvars = pull_var_clause(clause); + List *var_list = NIL; + List *varno_list = NIL; + List *i = NIL; + + foreach (i, clvars) { + Var *var = (Var *)lfirst(i); + + if (!intMember(var->varno, varno_list)) { + varno_list = lappendi(varno_list, var->varno); + var_list = lappend(var_list, var); + } + } + + *relids = varno_list; + *vars = var_list; + return; +} + +/* + * NumRelids-- + * (formerly clause-relids) + * + * Returns the number of different relations referenced in 'clause'. + */ +int +NumRelids(Node *clause) +{ + List *vars = pull_var_clause(clause); + List *i = NIL; + List *var_list = NIL; + + foreach (i, vars) { + Var *var = (Var *)lfirst(i); + + if (!intMember(var->varno, var_list)) { + var_list = lconsi(var->varno, var_list); + } + } + + return(length(var_list)); +} + +/* + * contains-not-- + * + * Returns t iff the clause is a 'not' clause or if any of the + * subclauses within an 'or' clause contain 'not's. 
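+ *
+ *	E.g. (NOT (a = 1)) and (OR (a = 1) (NOT (b = 2))) both return true,
+ *	while a bare (a = 1) returns false.  Note that, as coded below, the
+ *	subclauses of an 'and' clause are not examined.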
+ * + */ +bool +contains_not(Node *clause) +{ + if (single_node(clause)) + return (false); + + if (not_clause(clause)) + return (true); + + if (or_clause(clause)) { + List *a; + foreach(a, ((Expr*)clause)->args) { + if (contains_not(lfirst(a))) + return (true); + } + } + + return(false); +} + +/* + * join-clause-p-- + * + * Returns t iff 'clause' is a valid join clause. + * + */ +bool +join_clause_p(Node *clause) +{ + Node *leftop, *rightop; + + if (!is_opclause(clause)) + return false; + + leftop = (Node*)get_leftop((Expr*)clause); + rightop = (Node*)get_rightop((Expr*)clause); + + /* + * One side of the clause (i.e. left or right operands) + * must either be a var node ... + */ + if (IsA(leftop,Var) || IsA(rightop,Var)) + return true; + + /* + * ... or a func node. + */ + if (is_funcclause(leftop) || is_funcclause(rightop)) + return(true); + + return(false); +} + +/* + * qual-clause-p-- + * + * Returns t iff 'clause' is a valid qualification clause. + * + */ +bool +qual_clause_p(Node *clause) +{ + if (!is_opclause(clause)) + return false; + + if (IsA (get_leftop((Expr*)clause),Var) && + IsA (get_rightop((Expr*)clause),Const)) + { + return(true); + } + else if (IsA (get_rightop((Expr*)clause),Var) && + IsA (get_leftop((Expr*)clause),Const)) + { + return(true); + } + return(false); +} + +/* + * fix-opid-- + * Calculate the opfid from the opno... + * + * Returns nothing. + * + */ +void +fix_opid(Node *clause) +{ + if (clause==NULL || single_node(clause)) { + ; + } + else if (or_clause (clause)) { + fix_opids(((Expr*)clause)->args); + } + else if (is_funcclause (clause)) { + fix_opids(((Expr*)clause)->args); + } + else if (IsA(clause,ArrayRef)) { + ArrayRef *aref = (ArrayRef *)clause; + + fix_opids(aref->refupperindexpr); + fix_opids(aref->reflowerindexpr); + fix_opid(aref->refexpr); + fix_opid(aref->refassgnexpr); + } + else if (not_clause(clause)) { + fix_opid((Node*)get_notclausearg((Expr*)clause)); + } + else if (is_opclause (clause)) { + replace_opid((Oper*)((Expr*)clause)->oper); + fix_opid((Node*)get_leftop((Expr*)clause)); + fix_opid((Node*)get_rightop((Expr*)clause)); + } + +} + +/* + * fix-opids-- + * Calculate the opfid from the opno for all the clauses... + * + * Returns its argument. + * + */ +List * +fix_opids(List *clauses) +{ + List *clause; + + foreach(clause, clauses) + fix_opid(lfirst(clause)); + + return(clauses); +} + +/* + * get_relattval-- + * For a non-join clause, returns a list consisting of the + * relid, + * attno, + * value of the CONST node (if any), and a + * flag indicating whether the value appears on the left or right + * of the operator and whether the value varied. + * + * OLD OBSOLETE COMMENT FOLLOWS: + * If 'clause' is not of the format (op var node) or (op node var), + * or if the var refers to a nested attribute, then -1's are returned for + * everything but the value a blank string "" (pointer to \0) is + * returned for the value if it is unknown or null. + * END OF OLD OBSOLETE COMMENT. + * NEW COMMENT: + * when defining rules one of the attibutes of the operator can + * be a Param node (which is supposed to be treated as a constant). + * However as there is no value specified for a parameter until run time + * this routine used to return "" as value, which made 'compute_selec' + * to bomb (because it was expecting a lisp integer and got back a lisp + * string). Now the code returns a plain old good "lispInteger(0)". 
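+ *
+ *	For a hypothetical clause (emp.age > 30), with 'emp' at varno 1 and
+ *	'age' at attno 4, the outputs are:
+ *		*relid    = 1
+ *		*attno    = 4
+ *		*constval = 30
+ *		*flag     = (_SELEC_CONSTANT_RIGHT_ | _SELEC_IS_CONSTANT_)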
+ * + */ +void +get_relattval(Node *clause, + int *relid, + AttrNumber *attno, + Datum *constval, + int *flag) +{ + Var *left = get_leftop((Expr*)clause); + Var *right = get_rightop((Expr*)clause); + + if(is_opclause(clause) && IsA(left,Var) && + IsA(right,Const)) { + + if(right!=NULL) { + + *relid = left->varno; + *attno = left->varattno; + *constval = ((Const *)right)->constvalue; + *flag = (_SELEC_CONSTANT_RIGHT_ | _SELEC_IS_CONSTANT_); + + } else { + + *relid = left->varno; + *attno = left->varattno; + *constval = 0; + *flag = (_SELEC_CONSTANT_RIGHT_ | _SELEC_NOT_CONSTANT_); + + } + }else if (is_opclause(clause) && + is_funcclause((Node*)left) && + IsA(right,Const)) { + List *args = ((Expr*)left)->args; + + + *relid = ((Var*)lfirst(args))->varno; + *attno = InvalidAttrNumber; + *constval = ((Const*)right)->constvalue; + *flag = (_SELEC_CONSTANT_RIGHT_ | _SELEC_IS_CONSTANT_); + + /* + * XXX both of these func clause handling if's seem wrong to me. + * they assume that the first argument is the Var. It could + * not handle (for example) f(1, emp.name). I think I may have + * been assuming no constants in functional index scans when I + * implemented this originally (still currently true). + * -mer 10 Aug 1992 + */ + } else if (is_opclause(clause) && + is_funcclause((Node*)right) && + IsA(left,Const)) { + List *args = ((Expr*)right)->args; + + *relid = ((Var*)lfirst(args))->varno; + *attno = InvalidAttrNumber; + *constval = ((Const*)left)->constvalue; + *flag = ( _SELEC_IS_CONSTANT_); + + } else if (is_opclause (clause) && IsA (right,Var) && + IsA (left,Const)) { + if (left!=NULL) { + + *relid = right->varno; + *attno = right->varattno; + *constval = ((Const*)left)->constvalue; + *flag = (_SELEC_IS_CONSTANT_); + } else { + + *relid = right->varno; + *attno = right->varattno; + *constval = 0; + *flag = (_SELEC_NOT_CONSTANT_); + } + } else { + /* One or more of the operands are expressions + * (e.g., oper clauses) + */ + *relid = _SELEC_VALUE_UNKNOWN_; + *attno = _SELEC_VALUE_UNKNOWN_; + *constval = 0; + *flag = (_SELEC_NOT_CONSTANT_); + } +} + +/* + * get_relsatts-- + * + * Returns a list + * ( relid1 attno1 relid2 attno2 ) + * for a joinclause. + * + * If the clause is not of the form (op var var) or if any of the vars + * refer to nested attributes, then -1's are returned. 
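+ *
+ *	E.g. for a hypothetical joinclause (t1.a = t2.b), the outputs are
+ *	t1's varno / a's attno and t2's varno / b's attno; if one side is a
+ *	var-containing expression such as (t1.a = f(t2.b)), that side comes
+ *	back as _SELEC_VALUE_UNKNOWN_.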
+ * + */ +void +get_rels_atts(Node *clause, + int *relid1, + AttrNumber *attno1, + int *relid2, + AttrNumber *attno2) +{ + Var *left = get_leftop((Expr*)clause); + Var *right = get_rightop((Expr*)clause); + bool var_left = (IsA(left,Var)); + bool var_right = (IsA(right,Var)); + bool varexpr_left = (bool)((IsA(left,Func) || IsA (left,Oper)) && + contain_var_clause((Node*)left)); + bool varexpr_right = (bool)(( IsA(right,Func) || IsA (right,Oper)) && + contain_var_clause((Node*)right)); + + if (is_opclause(clause)) { + if(var_left && var_right) { + + *relid1 = left->varno; + *attno1 = left->varoattno; + *relid2 = right->varno; + *attno2 = right->varoattno; + return; + } else if (var_left && varexpr_right ) { + + *relid1 = left->varno; + *attno1 = left->varoattno; + *relid2 = _SELEC_VALUE_UNKNOWN_; + *attno2 = _SELEC_VALUE_UNKNOWN_; + return; + } else if (varexpr_left && var_right) { + + *relid1 = _SELEC_VALUE_UNKNOWN_; + *attno1 = _SELEC_VALUE_UNKNOWN_; + *relid2 = right->varno; + *attno2 = right->varoattno; + return; + } + } + + *relid1 = _SELEC_VALUE_UNKNOWN_; + *attno1 = _SELEC_VALUE_UNKNOWN_; + *relid2 = _SELEC_VALUE_UNKNOWN_; + *attno2 = _SELEC_VALUE_UNKNOWN_; + return; +} + +void +CommuteClause(Node *clause) +{ + Node *temp; + Oper *commu; + OperatorTupleForm commuTup; + HeapTuple heapTup; + + if (!is_opclause(clause)) + return; + + heapTup = (HeapTuple) + get_operator_tuple(get_commutator(((Oper*)((Expr*)clause)->oper)->opno)); + + if (heapTup == (HeapTuple)NULL) + return; + + commuTup = (OperatorTupleForm)GETSTRUCT(heapTup); + + commu = makeOper(heapTup->t_oid, + InvalidOid, + commuTup->oprresult, + ((Oper*)((Expr*)clause)->oper)->opsize, + NULL); + + /* + * reform the clause -> (operator func/var constant) + */ + ((Expr*)clause)->oper = (Node*)commu; + temp = lfirst(((Expr*)clause)->args); + lfirst(((Expr*)clause)->args) = lsecond(((Expr*)clause)->args); + lsecond(((Expr*)clause)->args) = temp; +} + + diff --git a/src/backend/optimizer/util/indexnode.c b/src/backend/optimizer/util/indexnode.c new file mode 100644 index 00000000000..7fd74889202 --- /dev/null +++ b/src/backend/optimizer/util/indexnode.c @@ -0,0 +1,92 @@ +/*------------------------------------------------------------------------- + * + * indexnode.c-- + * Routines to find all indices on a relation + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/Attic/indexnode.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/plannodes.h" +#include "nodes/parsenodes.h" +#include "nodes/relation.h" + +#include "optimizer/internal.h" +#include "optimizer/plancat.h" +#include "optimizer/pathnode.h" /* where the decls go */ + + +static List *find_secondary_index(Query *root, Oid relid); + +/* + * find-relation-indices-- + * Returns a list of index nodes containing appropriate information for + * each (secondary) index defined on a relation. + * + */ +List * +find_relation_indices(Query *root, Rel *rel) +{ + if (rel->indexed) { + return (find_secondary_index(root, lfirsti(rel->relids))); + } else { + return (NIL); + } +} + +/* + * find-secondary-index-- + * Creates a list of index path nodes containing information for each + * secondary index defined on a relation by searching through the index + * catalog. 
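+ *
+ *	The catalog is walked through repeated calls to index_info(), in
+ *	outline:
+ *
+ *		first = TRUE;
+ *		while (index_info(root, first, relid, &indexinfo)) {
+ *			... copy indexinfo into a new Rel node ...
+ *			first = FALSE;
+ *		}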
+ * + * 'relid' is the OID of the relation for which indices are being located + * + * Returns a list of new index nodes. + * + */ +static List * +find_secondary_index(Query *root, Oid relid) +{ + IdxInfoRetval indexinfo; + List *indexes = NIL; + bool first = TRUE; + + while (index_info(root, first, relid,&indexinfo)) { + Rel *indexnode = makeNode(Rel); + + indexnode->relids = lconsi(indexinfo.relid,NIL); + indexnode->relam = indexinfo.relam; + indexnode->pages = indexinfo.pages; + indexnode->tuples = indexinfo.tuples; + indexnode->indexkeys = indexinfo.indexkeys; + indexnode->ordering = indexinfo.orderOprs; + indexnode->classlist = indexinfo.classlist; + indexnode->indproc= indexinfo.indproc; + indexnode->indpred = (List*)indexinfo.indpred; + + indexnode->indexed= false; /* not indexed itself */ + indexnode->size = 0; + indexnode->width= 0; + indexnode->targetlist= NIL; + indexnode->pathlist= NIL; + indexnode->unorderedpath= NULL; + indexnode->cheapestpath= NULL; + indexnode->pruneable= true; + indexnode->clauseinfo= NIL; + indexnode->joininfo= NIL; + indexnode->innerjoin= NIL; + + indexes = lcons(indexnode, indexes); + first = FALSE; + } + + return indexes; +} + diff --git a/src/backend/optimizer/util/internal.c b/src/backend/optimizer/util/internal.c new file mode 100644 index 00000000000..1db22f2b949 --- /dev/null +++ b/src/backend/optimizer/util/internal.c @@ -0,0 +1,61 @@ +/*------------------------------------------------------------------------- + * + * internal.c-- + * Definitions required throughout the query optimizer. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/Attic/internal.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +/* + * ---------- SHARED MACROS + * + * Macros common to modules for creating, accessing, and modifying + * query tree and query plan components. + * Shared with the executor. 
+ * + */ + + +#include "optimizer/internal.h" + +#include "nodes/relation.h" +#include "nodes/plannodes.h" +#include "nodes/primnodes.h" +#include "utils/elog.h" +#include "utils/palloc.h" + +#if 0 +/***************************************************************************** + * + *****************************************************************************/ + +/* the following should probably be moved elsewhere -ay */ + +TargetEntry * +MakeTLE(Resdom *resdom, Node *expr) +{ + TargetEntry *rt = makeNode(TargetEntry); + rt->resdom = resdom; + rt->expr = expr; + return rt; +} + +Var * +get_expr(TargetEntry *tle) +{ + Assert(tle!=NULL); + Assert(tle->expr!=NULL); + + return ((Var *)tle->expr); +} + +#endif /* 0 */ + + + diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c new file mode 100644 index 00000000000..85416db8b33 --- /dev/null +++ b/src/backend/optimizer/util/joininfo.c @@ -0,0 +1,107 @@ +/*------------------------------------------------------------------------- + * + * joininfo.c-- + * JoinInfo node manipulation routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/joininfo.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/relation.h" + +#include "optimizer/internal.h" +#include "optimizer/var.h" +#include "optimizer/clauses.h" + + +/* + * joininfo-member-- + * Determines whether a node has already been created for a join + * between a set of join relations and the relation described by + * 'joininfo-list'. + * + * 'join-relids' is a list of relids corresponding to the join relation + * 'joininfo-list' is the list of joininfo nodes against which this is + * checked + * + * Returns the corresponding node in 'joininfo-list' if such a node + * exists. + * + */ +JInfo * +joininfo_member(List *join_relids, List *joininfo_list) +{ + List *i = NIL; + List *other_rels = NIL; + + foreach(i,joininfo_list) { + other_rels = lfirst(i); + if(same(join_relids, ((JInfo*)other_rels)->otherrels)) + return((JInfo*)other_rels); + } + return((JInfo*)NULL); +} + + +/* + * find-joininfo-node-- + * Find the joininfo node within a relation entry corresponding + * to a join between 'this_rel' and the relations in 'join-relids'. A + * new node is created and added to the relation entry's joininfo + * field if the desired one can't be found. + * + * Returns a joininfo node. + * + */ +JInfo * +find_joininfo_node(Rel *this_rel, List *join_relids) +{ + JInfo *joininfo = joininfo_member(join_relids, + this_rel->joininfo); + if( joininfo == NULL ) { + joininfo = makeNode(JInfo); + joininfo->otherrels = join_relids; + joininfo->jinfoclauseinfo = NIL; + joininfo->mergesortable = false; + joininfo->hashjoinable = false; + joininfo->inactive = false; + this_rel->joininfo = lcons(joininfo, this_rel->joininfo); + } + return(joininfo); +} + +/* + * other-join-clause-var-- + * Determines whether a var node is contained within a joinclause + * of the form(op var var). + * + * Returns the other var node in the joinclause if it is, nil if not. 
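+ *
+ *	E.g. for a hypothetical joinclause (t1.a = t2.b),
+ *	other_join_clause_var(t1.a, clause) returns the var for t2.b, and
+ *	passing a var that appears on neither side returns NULL.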
+ * + */ +Var * +other_join_clause_var(Var *var, Expr *clause) +{ + Var *retval; + Var *l, *r; + + retval = (Var*) NULL; + + if( var != NULL && join_clause_p((Node*)clause)) { + l = (Var *) get_leftop(clause); + r = (Var *) get_rightop(clause); + + if(var_equal(var, l)) { + retval = r; + } else if(var_equal(var, r)) { + retval = l; + } + } + + return(retval); +} diff --git a/src/backend/optimizer/util/keys.c b/src/backend/optimizer/util/keys.c new file mode 100644 index 00000000000..ac0915b9096 --- /dev/null +++ b/src/backend/optimizer/util/keys.c @@ -0,0 +1,193 @@ +/*------------------------------------------------------------------------- + * + * keys.c-- + * Key manipulation routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/Attic/keys.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" +#include "nodes/pg_list.h" +#include "nodes/nodes.h" +#include "nodes/relation.h" +#include "utils/elog.h" + +#include "optimizer/internal.h" +#include "optimizer/keys.h" +#include "optimizer/tlist.h" + + +static Expr *matching2_tlvar(int var, List *tlist, bool (*test)()); + +/* + * 1. index key + * one of: + * attnum + * (attnum arrayindex) + * 2. path key + * (subkey1 ... subkeyN) + * where subkeyI is a var node + * note that the 'Keys field is a list of these + * 3. join key + * (outer-subkey inner-subkey) + * where each subkey is a var node + * 4. sort key + * one of: + * SortKey node + * number + * nil + * (may also refer to the 'SortKey field of a SortKey node, + * which looks exactly like an index key) + * + */ + +/* + * match-indexkey-operand-- + * Returns t iff an index key 'index-key' matches the given clause + * operand. + * + */ +bool +match_indexkey_operand(int indexkey, Var *operand, Rel *rel) +{ + if (IsA (operand,Var) && + (lfirsti(rel->relids) == operand->varno) && + equal_indexkey_var(indexkey,operand)) + return(true); + else + return(false); +} + +/* + * equal_indexkey_var-- + * Returns t iff an index key 'index-key' matches the corresponding + * fields of var node 'var'. + * + */ +bool +equal_indexkey_var(int index_key, Var *var) +{ + if (index_key == var->varattno) + return(true); + else + return(false); +} + +/* + * extract-subkey-- + * Returns the subkey in a join key corresponding to the outer or inner + * lelation. + * + */ +Var * +extract_subkey(JoinKey *jk, int which_subkey) +{ + Var *retval; + + switch (which_subkey) { + case OUTER: + retval = jk->outer; + break; + case INNER: + retval = jk->inner; + break; + default: /* do nothing */ + elog(DEBUG,"extract_subkey with neither INNER or OUTER"); + retval = NULL; + } + return(retval); +} + +/* + * samekeys-- + * Returns t iff two sets of path keys are equivalent. They are + * equivalent if the first subkey (var node) within each sublist of + * list 'keys1' is contained within the corresponding sublist of 'keys2'. + * + * XXX It isn't necessary to check that each sublist exactly contain + * the same elements because if the routine that built these + * sublists together is correct, having one element in common + * implies having all elements in common. 
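+ *
+ *	Writing vars symbolically: keys1 = ((x)) and keys2 = ((x y) (z))
+ *	are treated as equivalent, since x appears in the corresponding
+ *	sublist of keys2 and keys2 is at least as long; keys1 = ((x) (w))
+ *	against keys2 = ((x y)) is not, because keys2 is shorter.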
+ * + */ +bool +samekeys(List *keys1, List *keys2) +{ + bool allmember = true; + List *key1, *key2; + + for(key1=keys1,key2=keys2 ; key1 != NIL && key2 !=NIL ; + key1=lnext(key1), key2=lnext(key2)) + if (!member(lfirst(key1), lfirst(key2))) + allmember = false; + + if ( (length (keys2) >= length (keys1)) && allmember) + return(true); + else + return(false); +} + +/* + * collect-index-pathkeys-- + * Creates a list of subkeys by retrieving var nodes corresponding to + * each index key in 'index-keys' from the relation's target list + * 'tlist'. If the key is not in the target list, the key is irrelevant + * and is thrown away. The returned subkey list is of the form: + * ((var1) (var2) ... (varn)) + * + * 'index-keys' is a list of index keys + * 'tlist' is a relation target list + * + * Returns the list of cons'd subkeys. + * + */ +/* This function is identical to matching_tlvar and tlistentry_member. + * They should be merged. + */ +static Expr * +matching2_tlvar(int var, List *tlist, bool (*test)()) +{ + TargetEntry *tlentry = NULL; + + if (var) { + List *temp; + foreach (temp,tlist) { + if ((*test)(var, get_expr(lfirst(temp)))) { + tlentry = lfirst(temp); + break; + } + } + } + + if (tlentry) + return((Expr*)get_expr(tlentry)); + else + return((Expr*)NULL); +} + + +List * +collect_index_pathkeys(int *index_keys, List *tlist) +{ + List *retval = NIL; + + Assert (index_keys != NULL); + + while(index_keys[0] != 0) { + Expr *mvar; + mvar = matching2_tlvar(index_keys[0], + tlist, + equal_indexkey_var); + if (mvar) + retval = nconc(retval,lcons(lcons(mvar,NIL), + NIL)); + index_keys++; + } + return(retval); +} + diff --git a/src/backend/optimizer/util/ordering.c b/src/backend/optimizer/util/ordering.c new file mode 100644 index 00000000000..3dffbff9f3c --- /dev/null +++ b/src/backend/optimizer/util/ordering.c @@ -0,0 +1,117 @@ +/*------------------------------------------------------------------------- + * + * ordering.c-- + * Routines to manipulate and compare merge and path orderings + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/Attic/ordering.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +#include "optimizer/internal.h" +#include "optimizer/ordering.h" + + +/* + * equal-path-path-ordering-- + * Returns t iff two path orderings are equal. + * + */ +bool +equal_path_path_ordering(PathOrder *path_ordering1, + PathOrder *path_ordering2) +{ + if (path_ordering1 == path_ordering2) + return true; + + if (!path_ordering1 || !path_ordering2) + return false; + + if (path_ordering1->ordtype == MERGE_ORDER && + path_ordering2->ordtype == MERGE_ORDER) { + + return equal(path_ordering1->ord.merge, path_ordering2->ord.merge); + + } else if (path_ordering1->ordtype == SORTOP_ORDER && + path_ordering2->ordtype == SORTOP_ORDER) { + + return + (equal_sortops_order(path_ordering1->ord.sortop, + path_ordering2->ord.sortop)); + } else if (path_ordering1->ordtype == MERGE_ORDER && + path_ordering2->ordtype == SORTOP_ORDER) { + + return (path_ordering2->ord.sortop && + (path_ordering1->ord.merge->left_operator == + path_ordering2->ord.sortop[0])); + } else { + + return (path_ordering1->ord.sortop && + (path_ordering1->ord.sortop[0] == + path_ordering2->ord.merge->left_operator)); + } +} + +/* + * equal-path-merge-ordering-- + * Returns t iff a path ordering is usable for ordering a merge join. 
+ * + * XXX Presently, this means that the first sortop of the path matches + * either of the merge sortops. Is there a "right" and "wrong" + * sortop to match? + * + */ +bool +equal_path_merge_ordering(Oid *path_ordering, + MergeOrder *merge_ordering) +{ + if (path_ordering == NULL || merge_ordering == NULL) + return(false); + + if (path_ordering[0] == merge_ordering->left_operator || + path_ordering[0] == merge_ordering->right_operator) + return(true); + else + return(false); +} + +/* + * equal-merge-merge-ordering-- + * Returns t iff two merge orderings are equal. + * + */ +bool +equal_merge_merge_ordering(MergeOrder *merge_ordering1, + MergeOrder *merge_ordering2) +{ + return (equal(merge_ordering1, merge_ordering2)); +} + +/***************************************************************************** + * + *****************************************************************************/ + +/* + * equal_sort_ops_order - + * Returns true iff the sort operators are in the same order. + */ +bool +equal_sortops_order(Oid *ordering1, Oid *ordering2) +{ + int i = 0; + + if (ordering1 == NULL || ordering2 == NULL) + return (ordering1==ordering2); + + while (ordering1[i]!=0 && ordering2[i]!=0) { + if (ordering1[i] != ordering2[i]) + break; + i++; + } + + return (ordering1[i]==0 && ordering2[i]==0); +} diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c new file mode 100644 index 00000000000..728ac9b422e --- /dev/null +++ b/src/backend/optimizer/util/pathnode.c @@ -0,0 +1,566 @@ +/*------------------------------------------------------------------------- + * + * pathnode.c-- + * Routines to manipulate pathlists and create path nodes + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/pathnode.c,v 1.1.1.1 1996/07/09 06:21:38 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include + +#include "postgres.h" + +#include "nodes/relation.h" +#include "utils/elog.h" + +#include "optimizer/internal.h" +#include "optimizer/pathnode.h" +#include "optimizer/clauseinfo.h" +#include "optimizer/plancat.h" +#include "optimizer/cost.h" +#include "optimizer/keys.h" +#include "optimizer/xfunc.h" +#include "optimizer/ordering.h" + +#include "parser/parsetree.h" /* for getrelid() */ + +static Path *better_path(Path *new_path, List *unique_paths, bool *noOther); + + +/***************************************************************************** + * MISC. PATH UTILITIES + *****************************************************************************/ + +/* + * path-is-cheaper-- + * Returns t iff 'path1' is cheaper than 'path2'. + * + */ +bool +path_is_cheaper(Path *path1, Path *path2) +{ + Cost cost1 = path1->path_cost; + Cost cost2 = path2->path_cost; + + return((bool)(cost1 < cost2)); +} + +/* + * set_cheapest-- + * Finds the minimum cost path from among a relation's paths. + * + * 'parent-rel' is the parent relation + * 'pathlist' is a list of path nodes corresponding to 'parent-rel' + * + * Returns and sets the relation entry field with the pathnode that + * is minimum. 
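+ *
+ *	E.g. given a pathlist whose members have path_cost 3.0, 1.5 and 2.0,
+ *	the path costing 1.5 is returned and also recorded in
+ *	parent_rel->cheapestpath.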
+ * + */ +Path * +set_cheapest(Rel *parent_rel, List *pathlist) +{ + List *p; + Path *cheapest_so_far; + + Assert(pathlist!=NIL); + Assert(IsA(parent_rel,Rel)); + + cheapest_so_far = (Path*)lfirst(pathlist); + + foreach (p, lnext(pathlist)) { + Path *path = (Path*)lfirst(p); + + if (path_is_cheaper(path, cheapest_so_far)) { + cheapest_so_far = path; + } + } + + parent_rel->cheapestpath = cheapest_so_far; + + return(cheapest_so_far); +} + +/* + * add_pathlist-- + * For each path in the list 'new-paths', add to the list 'unique-paths' + * only those paths that are unique (i.e., unique ordering and ordering + * keys). Should a conflict arise, the more expensive path is thrown out, + * thereby pruning the plan space. But we don't prune if xfunc + * told us not to. + * + * 'parent-rel' is the relation entry to which these paths correspond. + * + * Returns the list of unique pathnodes. + * + */ +List * +add_pathlist(Rel *parent_rel, List *unique_paths, List *new_paths) +{ + List *x; + Path *new_path; + Path *old_path; + bool noOther; + + foreach (x, new_paths) { + new_path = (Path*)lfirst(x); + if (member(new_path, unique_paths)) + continue; + old_path = better_path(new_path,unique_paths,&noOther); + + if (noOther) { + /* Is a brand new path. */ + new_path->parent = parent_rel; + unique_paths = lcons(new_path, unique_paths); + } else if (old_path==NULL) { + ; /* do nothing if path is not cheaper */ + } else if (old_path != NULL) { /* (IsA(old_path,Path)) { */ + new_path->parent = parent_rel; + if (!parent_rel->pruneable) { + unique_paths = lcons(new_path, unique_paths); + }else + unique_paths = lcons(new_path, + LispRemove(old_path,unique_paths)); + } + } + return(unique_paths); +} + +/* + * better_path-- + * Determines whether 'new-path' has the same ordering and keys as some + * path in the list 'unique-paths'. If there is a redundant path, + * eliminate the more expensive path. + * + * Returns: + * The old path - if 'new-path' matches some path in 'unique-paths' and is + * cheaper + * nil - if 'new-path' matches but isn't cheaper + * t - if there is no path in the list with the same ordering and keys + * + */ +static Path * +better_path(Path *new_path, List *unique_paths, bool *noOther) +{ + Path *old_path = (Path*)NULL; + Path *path = (Path*)NULL; + List *temp = NIL; + Path *retval = NULL; + + /* XXX - added the following two lines which weren't int + * the lisp planner, but otherwise, doesn't seem to work + * for the case where new_path is 'nil + */ + foreach (temp,unique_paths) { + path = (Path*) lfirst(temp); + + if ((equal_path_path_ordering(&new_path->p_ordering, + &path->p_ordering) && + samekeys(new_path->keys, path->keys))) { + old_path = path; + break; + } + } + + if (old_path==NULL) { + *noOther = true; + } else { + *noOther = false; + if (path_is_cheaper(new_path,old_path)) { + retval = old_path; + } + } + + return(retval); +} + + + +/***************************************************************************** + * PATH NODE CREATION ROUTINES + *****************************************************************************/ + +/* + * create_seqscan_path-- + * Creates a path corresponding to a sequential scan, returning the + * pathnode. 
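+ *
+ *	A rough usage sketch (the exact call sites live elsewhere in the
+ *	planner): a caller can register the path with add_pathlist() above,
+ *	e.g.
+ *
+ *		rel->pathlist = add_pathlist(rel, rel->pathlist,
+ *					     lcons(create_seqscan_path(rel), NIL));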
+ * + */ +Path * +create_seqscan_path(Rel *rel) +{ + int relid=0; + + Path *pathnode = makeNode(Path); + + pathnode->pathtype = T_SeqScan; + pathnode->parent = rel; + pathnode->path_cost = 0.0; + pathnode->p_ordering.ordtype = SORTOP_ORDER; + pathnode->p_ordering.ord.sortop = NULL; + pathnode->keys = NIL; + /* copy clauseinfo list into path for expensive function processing + * -- JMH, 7/7/92 + */ + pathnode->locclauseinfo= + (List*)copyObject((Node*)rel->clauseinfo); + + if (rel->relids !=NULL) + relid = lfirsti(rel->relids); + + pathnode->path_cost = cost_seqscan (relid, + rel->pages, rel->tuples); + /* add in expensive functions cost! -- JMH, 7/7/92 */ +#if 0 + if (XfuncMode != XFUNC_OFF) { + pathnode->path_cost += + xfunc_get_path_cost(pathnode)); + } +#endif + return (pathnode); +} + +/* + * create_index_path-- + * Creates a single path node for an index scan. + * + * 'rel' is the parent rel + * 'index' is the pathnode for the index on 'rel' + * 'restriction-clauses' is a list of restriction clause nodes. + * 'is-join-scan' is a flag indicating whether or not the index is being + * considered because of its sort order. + * + * Returns the new path node. + * + */ +IndexPath * +create_index_path(Query *root, + Rel *rel, + Rel *index, + List *restriction_clauses, + bool is_join_scan) +{ + IndexPath *pathnode = makeNode(IndexPath); + + pathnode->path.pathtype = T_IndexScan; + pathnode->path.parent = rel; + pathnode->indexid = index->relids; + + pathnode->path.p_ordering.ordtype = SORTOP_ORDER; + pathnode->path.p_ordering.ord.sortop = index->ordering; + pathnode->indexqual = NIL; + + /* copy clauseinfo list into path for expensive function processing + * -- JMH, 7/7/92 + */ + pathnode->path.locclauseinfo = + set_difference((List*) copyObject((Node*)rel->clauseinfo), + (List*) restriction_clauses); + + /* + * The index must have an ordering for the path to have (ordering) keys, + * and vice versa. + */ + if (pathnode->path.p_ordering.ord.sortop) { + pathnode->path.keys = collect_index_pathkeys(index->indexkeys, + rel->targetlist); + /* + * Check that the keys haven't 'disappeared', since they may + * no longer be in the target list (i.e., index keys that are not + * relevant to the scan are not applied to the scan path node, + * so if no index keys were found, we can't order the path). + */ + if (pathnode->path.keys==NULL) { + pathnode->path.p_ordering.ord.sortop = NULL; + } + } else { + pathnode->path.keys = NULL; + } + + if (is_join_scan || restriction_clauses==NULL) { + /* + * Indices used for joins or sorting result nodes don't + * restrict the result at all, they simply order it, + * so compute the scan cost + * accordingly -- use a selectivity of 1.0. + */ +/* is the statement above really true? what about IndexScan as the + inner of a join? */ + pathnode->path.path_cost = + cost_index (lfirsti(index->relids), + index->pages, + 1.0, + rel->pages, + rel->tuples, + index->pages, + index->tuples, + false); + /* add in expensive functions cost! -- JMH, 7/7/92 */ +#if 0 + if (XfuncMode != XFUNC_OFF) { + pathnode->path_cost = + (pathnode->path_cost + + xfunc_get_path_cost((Path*)pathnode)); + } +#endif + } else { + /* + * Compute scan cost for the case when 'index' is used with a + * restriction clause. 
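+	 *
+	 * The selectivity returned by index_selectivity() below is divided
+	 * equally among the restriction clauses: each clause gets
+	 * pow(selec, 1/nclauses); e.g. selec = 0.25 over two clauses gives
+	 * 0.5 per clause.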
+ */ + List *attnos; + List *values; + List *flags; + float npages; + float selec; + Cost clausesel; + + get_relattvals(restriction_clauses, + &attnos, + &values, + &flags); + index_selectivity(lfirsti(index->relids), + index->classlist, + get_opnos(restriction_clauses), + getrelid(lfirsti(rel->relids), + root->rtable), + attnos, + values, + flags, + length(restriction_clauses), + &npages, + &selec); + /* each clause gets an equal selectivity */ + clausesel = + pow(selec, + 1.0 / (double) length(restriction_clauses)); + + pathnode->indexqual = restriction_clauses; + pathnode->path.path_cost = + cost_index (lfirsti(index->relids), + (int)npages, + selec, + rel->pages, + rel->tuples, + index->pages, + index->tuples, + false); + +#if 0 + /* add in expensive functions cost! -- JMH, 7/7/92 */ + if (XfuncMode != XFUNC_OFF) { + pathnode->path_cost += + xfunc_get_path_cost((Path*)pathnode); + } +#endif + /* Set selectivities of clauses used with index to the selectivity + * of this index, subdividing the selectivity equally over each of + * the clauses. + */ + + /* XXX Can this divide the selectivities in a better way? */ + set_clause_selectivities(restriction_clauses, clausesel); + } + return(pathnode); +} + +/* + * create_nestloop_path-- + * Creates a pathnode corresponding to a nestloop join between two + * relations. + * + * 'joinrel' is the join relation. + * 'outer_rel' is the outer join relation + * 'outer_path' is the outer join path. + * 'inner_path' is the inner join path. + * 'keys' are the keys of the path + * + * Returns the resulting path node. + * + */ +JoinPath * +create_nestloop_path(Rel *joinrel, + Rel *outer_rel, + Path *outer_path, + Path *inner_path, + List *keys) +{ + JoinPath *pathnode = makeNode(JoinPath); + + pathnode->path.pathtype = T_NestLoop; + pathnode->path.parent = joinrel; + pathnode->outerjoinpath = outer_path; + pathnode->innerjoinpath = inner_path; + pathnode->pathclauseinfo = joinrel->clauseinfo; + pathnode->path.keys = keys; + pathnode->path.joinid = NIL; + pathnode->path.outerjoincost = (Cost)0.0; + pathnode->path.locclauseinfo = NIL; + + if (keys) { + pathnode->path.p_ordering.ordtype = + outer_path->p_ordering.ordtype; + if (outer_path->p_ordering.ordtype == SORTOP_ORDER) { + pathnode->path.p_ordering.ord.sortop = + outer_path->p_ordering.ord.sortop; + } else { + pathnode->path.p_ordering.ord.merge = + outer_path->p_ordering.ord.merge; + } + } else { + pathnode->path.p_ordering.ordtype = SORTOP_ORDER; + pathnode->path.p_ordering.ord.sortop = NULL; + } + + pathnode->path.path_cost = + cost_nestloop(outer_path->path_cost, + inner_path->path_cost, + outer_rel->size, + inner_path->parent->size, + page_size(outer_rel->size, + outer_rel->width), + IsA(inner_path,IndexPath)); + /* add in expensive function costs -- JMH 7/7/92 */ +#if 0 + if (XfuncMode != XFUNC_OFF) { + pathnode->path_cost += xfunc_get_path_cost((Path*)pathnode); + } +#endif + return(pathnode); +} + +/* + * create_mergesort_path-- + * Creates a pathnode corresponding to a mergesort join between + * two relations + * + * 'joinrel' is the join relation + * 'outersize' is the number of tuples in the outer relation + * 'innersize' is the number of tuples in the inner relation + * 'outerwidth' is the number of bytes per tuple in the outer relation + * 'innerwidth' is the number of bytes per tuple in the inner relation + * 'outer_path' is the outer path + * 'inner_path' is the inner path + * 'keys' are the new keys of the join relation + * 'order' is the sort order required for the merge + * 
'mergeclauses' are the applicable join/restriction clauses + * 'outersortkeys' are the sort varkeys for the outer relation + * 'innersortkeys' are the sort varkeys for the inner relation + * + */ +MergePath * +create_mergesort_path(Rel *joinrel, + int outersize, + int innersize, + int outerwidth, + int innerwidth, + Path *outer_path, + Path *inner_path, + List *keys, + MergeOrder *order, + List *mergeclauses, + List *outersortkeys, + List *innersortkeys) +{ + MergePath *pathnode = makeNode(MergePath); + + pathnode->jpath.path.pathtype = T_MergeJoin; + pathnode->jpath.path.parent = joinrel; + pathnode->jpath.outerjoinpath = outer_path; + pathnode->jpath.innerjoinpath = inner_path; + pathnode->jpath.pathclauseinfo = joinrel->clauseinfo; + pathnode->jpath.path.keys = keys; + pathnode->jpath.path.p_ordering.ordtype = MERGE_ORDER; + pathnode->jpath.path.p_ordering.ord.merge = order; + pathnode->path_mergeclauses = mergeclauses; + pathnode->jpath.path.locclauseinfo = NIL; + pathnode->outersortkeys = outersortkeys; + pathnode->innersortkeys = innersortkeys; + pathnode->jpath.path.path_cost = + cost_mergesort(outer_path->path_cost, + inner_path->path_cost, + outersortkeys, + innersortkeys, + outersize, + innersize, + outerwidth, + innerwidth); + /* add in expensive function costs -- JMH 7/7/92 */ +#if 0 + if (XfuncMode != XFUNC_OFF) { + pathnode->path_cost += + xfunc_get_path_cost((Path*)pathnode); + } +#endif + return(pathnode); +} + +/* + * create_hashjoin_path-- XXX HASH + * Creates a pathnode corresponding to a hash join between two relations. + * + * 'joinrel' is the join relation + * 'outersize' is the number of tuples in the outer relation + * 'innersize' is the number of tuples in the inner relation + * 'outerwidth' is the number of bytes per tuple in the outer relation + * 'innerwidth' is the number of bytes per tuple in the inner relation + * 'outer_path' is the outer path + * 'inner_path' is the inner path + * 'keys' are the new keys of the join relation + * 'operator' is the hashjoin operator + * 'hashclauses' are the applicable join/restriction clauses + * 'outerkeys' are the sort varkeys for the outer relation + * 'innerkeys' are the sort varkeys for the inner relation + * + */ +HashPath * +create_hashjoin_path(Rel *joinrel, + int outersize, + int innersize, + int outerwidth, + int innerwidth, + Path *outer_path, + Path *inner_path, + List *keys, + Oid operator, + List *hashclauses, + List *outerkeys, + List *innerkeys) +{ + HashPath *pathnode = makeNode(HashPath); + + pathnode->jpath.path.pathtype = T_HashJoin; + pathnode->jpath.path.parent = joinrel; + pathnode->jpath.outerjoinpath = outer_path; + pathnode->jpath.innerjoinpath = inner_path; + pathnode->jpath.pathclauseinfo = joinrel->clauseinfo; + pathnode->jpath.path.locclauseinfo = NIL; + pathnode->jpath.path.keys = keys; + pathnode->jpath.path.p_ordering.ordtype = SORTOP_ORDER; + pathnode->jpath.path.p_ordering.ord.sortop = NULL; + pathnode->jpath.path.outerjoincost = (Cost)0.0; + pathnode->jpath.path.joinid = (Relid)NULL; + /* pathnode->hashjoinoperator = operator; */ + pathnode->path_hashclauses = hashclauses; + pathnode->outerhashkeys = outerkeys; + pathnode->innerhashkeys = innerkeys; + pathnode->jpath.path.path_cost = + cost_hashjoin(outer_path->path_cost, + inner_path->path_cost, + outerkeys, + innerkeys, + outersize,innersize, + outerwidth,innerwidth); + /* add in expensive function costs -- JMH 7/7/92 */ +#if 0 + if (XfuncMode != XFUNC_OFF) { + pathnode->path_cost += + xfunc_get_path_cost((Path*)pathnode); + } +#endif + 
return(pathnode); +} diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c new file mode 100644 index 00000000000..4dca017f95a --- /dev/null +++ b/src/backend/optimizer/util/plancat.c @@ -0,0 +1,582 @@ +/*------------------------------------------------------------------------- + * + * plancat.c-- + * routines for accessing the system catalogs + * + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/plancat.c,v 1.1.1.1 1996/07/09 06:21:39 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "postgres.h" + +#include "access/heapam.h" +#include "access/genam.h" +#include "access/htup.h" +#include "access/itup.h" + +#include "catalog/catname.h" +#include "catalog/pg_amop.h" +#include "catalog/pg_index.h" +#include "catalog/pg_inherits.h" +#include "catalog/pg_version.h" + +#include "nodes/pg_list.h" +#include "parser/parsetree.h" /* for getrelid() */ +#include "fmgr.h" + +#include "optimizer/internal.h" +#include "optimizer/plancat.h" + +#include "utils/tqual.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/syscache.h" + + +static void IndexSelectivity(Oid indexrelid, Oid indrelid, int32 nIndexKeys, + Oid AccessMethodOperatorClasses[], Oid operatorObjectIds[], + int32 varAttributeNumbers[], char *constValues[], int32 constFlags[], + float *idxPages, float *idxSelec); + + +/* + * relation-info - + * Retrieves catalog information for a given relation. Given the oid of + * the relation, return the following information: + * whether the relation has secondary indices + * number of pages + * number of tuples + */ +void +relation_info(Query *root, Index relid, + bool *hasindex, int *pages, int *tuples) +{ + HeapTuple relationTuple; + Form_pg_class relation; + Oid relationObjectId; + + relationObjectId = getrelid(relid, root->rtable); + relationTuple = SearchSysCacheTuple(RELOID, + ObjectIdGetDatum(relationObjectId), + 0,0,0); + if (HeapTupleIsValid(relationTuple)) { + relation = (Form_pg_class)GETSTRUCT(relationTuple); + + *hasindex = (relation->relhasindex) ? TRUE : FALSE; + *pages = relation->relpages; + *tuples = relation->reltuples; + } else { + elog(WARN, "RelationCatalogInformation: Relation %d not found", + relationObjectId); + } + + return; +} + + +/* + * index-info-- + * Retrieves catalog information on an index on a given relation. + * + * The index relation is opened on the first invocation. The current + * retrieves the next index relation within the catalog that has not + * already been retrieved by a previous call. The index catalog + * is closed when no more indices for 'relid' can be found. + * + * 'first' is 1 if this is the first call + * + * Returns true if successful and false otherwise. Index info is returned + * via the transient data structure 'info'. + * + */ +bool +index_info(Query *root, bool first, int relid, IdxInfoRetval *info) +{ + register i; + HeapTuple indexTuple, amopTuple; + IndexTupleForm index; + Relation indexRelation; + uint16 amstrategy; + Oid relam; + Oid indrelid; + + static Relation relation = (Relation) NULL; + static HeapScanDesc scan = (HeapScanDesc) NULL; + static ScanKeyData indexKey; + + + /* find the oid of the indexed relation */ + indrelid = getrelid(relid, root->rtable); + + memset(info, 0, sizeof(IdxInfoRetval)); + + /* + * the maximum number of elements in each of the following arrays is + * 8. 
We allocate one more for a terminating 0 to indicate the end + * of the array. + */ + info->indexkeys = (int *)palloc(sizeof(int)*9); + memset(info->indexkeys, 0, sizeof(int)*9); + info->orderOprs = (Oid *)palloc(sizeof(Oid)*9); + memset(info->orderOprs, 0, sizeof(Oid)*9); + info->classlist = (Oid *)palloc(sizeof(Oid)*9); + memset(info->classlist, 0, sizeof(Oid)*9); + + /* Find an index on the given relation */ + if (first) { + if (RelationIsValid(relation)) + heap_close(relation); + if (HeapScanIsValid(scan)) + heap_endscan(scan); + + ScanKeyEntryInitialize(&indexKey, 0, + Anum_pg_index_indrelid, + F_OIDEQ, + ObjectIdGetDatum(indrelid)); + + relation = heap_openr(IndexRelationName); + scan = heap_beginscan(relation, 0, NowTimeQual, + 1, &indexKey); + } + if (!HeapScanIsValid(scan)) + elog(WARN, "index_info: scan not started"); + indexTuple = heap_getnext(scan, 0, (Buffer *) NULL); + if (!HeapTupleIsValid(indexTuple)) { + heap_endscan(scan); + heap_close(relation); + scan = (HeapScanDesc) NULL; + relation = (Relation) NULL; + return(0); + } + + /* Extract info from the index tuple */ + index = (IndexTupleForm)GETSTRUCT(indexTuple); + info->relid = index->indexrelid; /* index relation */ + for (i = 0; i < 8; i++) + info->indexkeys[i] = index->indkey[i]; + for (i = 0; i < 8; i++) + info->classlist[i] = index->indclass[i]; + + info->indproc = index->indproc; /* functional index ?? */ + + /* partial index ?? */ + if (VARSIZE(&index->indpred) != 0) { + /* + * The memory allocated here for the predicate (in lispReadString) + * only needs to stay around until it's used in find_index_paths, + * which is all within a command, so the automatic pfree at end + * of transaction should be ok. + */ + char *predString; + + predString = fmgr(F_TEXTOUT, &index->indpred); + info->indpred = (Node*)stringToNode(predString); + pfree(predString); + } + + /* Extract info from the relation descriptor for the index */ + indexRelation = index_open(index->indexrelid); +#ifdef notdef + /* XXX should iterate through strategies -- but how? use #1 for now */ + amstrategy = indexRelation->rd_am->amstrategies; +#endif /* notdef */ + amstrategy = 1; + relam = indexRelation->rd_rel->relam; + info->relam = relam; + info->pages = indexRelation->rd_rel->relpages; + info->tuples = indexRelation->rd_rel->reltuples; + heap_close(indexRelation); + + /* + * Find the index ordering keys + * + * Must use indclass to know when to stop looking since with + * functional indices there could be several keys (args) for + * one opclass. -mer 27 Sept 1991 + */ + for (i = 0; i < 8 && index->indclass[i]; ++i) { + amopTuple = SearchSysCacheTuple(AMOPSTRATEGY, + ObjectIdGetDatum(relam), + ObjectIdGetDatum(index->indclass[i]), + UInt16GetDatum(amstrategy), + 0); + if (!HeapTupleIsValid(amopTuple)) + elog(WARN, "index_info: no amop %d %d %d", + relam, index->indclass[i], amstrategy); + info->orderOprs[i] = + ((Form_pg_amop)GETSTRUCT(amopTuple))->amopopr; + } + return(TRUE); +} + +/* + * index-selectivity-- + * + * Call util/plancat.c:IndexSelectivity with the indicated arguments. 
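As an aside on the key arrays filled in by index_info() above: each is allocated with nine slots, eight for keys plus a trailing zero, so a caller can walk the array without a separate count. The standalone sketch below illustrates that convention with plain ints; count_index_keys and the sample attribute numbers are invented for the example and are not part of the patch.

#include <stdio.h>

#define MAX_INDEX_KEYS 8

static int count_index_keys(const int *indexkeys)
{
    int n = 0;

    /* one extra slot holds a terminating 0, so a plain scan suffices */
    while (n < MAX_INDEX_KEYS && indexkeys[n] != 0)
        n++;
    return n;
}

int main(void)
{
    int keys[MAX_INDEX_KEYS + 1] = {3, 1, 0};  /* attnos 3 and 1, then terminator */

    printf("index has %d key(s)\n", count_index_keys(keys));
    return 0;
}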
+ * + * 'indid' is the index OID + * 'classes' is a list of index key classes + * 'opnos' is a list of index key operator OIDs + * 'relid' is the OID of the relation indexed + * 'attnos' is a list of the relation attnos which the index keys over + * 'values' is a list of the values of the clause's constants + * 'flags' is a list of fixnums which describe the constants + * 'nkeys' is the number of index keys + * + * Returns two floats: index pages and index selectivity in 'idxPages' and + * 'idxSelec'. + * + */ +void +index_selectivity(Oid indid, + Oid *classes, + List *opnos, + Oid relid, + List *attnos, + List *values, + List *flags, + int32 nkeys, + float *idxPages, + float *idxSelec) +{ + Oid *opno_array; + int *attno_array, *flag_array; + char **value_array; + int i = 0; + List *xopno, *xattno, *value, *flag; + + if (length(opnos)!=nkeys || length(attnos)!=nkeys || + length(values)!=nkeys || length(flags)!=nkeys) { + + *idxPages = 0.0; + *idxSelec = 1.0; + return; + } + + opno_array = (Oid *)palloc(nkeys*sizeof(Oid)); + attno_array = (int *)palloc(nkeys*sizeof(int32)); + value_array = (char **)palloc(nkeys*sizeof(char *)); + flag_array = (int *)palloc(nkeys*sizeof(int32)); + + i = 0; + foreach(xopno, opnos) { + opno_array[i++] = (int)lfirst(xopno); + } + + i = 0; + foreach(xattno,attnos) { + attno_array[i++] = (int)lfirst(xattno); + } + + i = 0; + foreach(value, values) { + value_array[i++] = (char *)lfirst(value); + } + + i = 0; + foreach(flag,flags) { + flag_array[i++] = (int)lfirst(flag); + } + + IndexSelectivity(indid, + relid, + nkeys, + classes, /* not used */ + opno_array, + attno_array, + value_array, + flag_array, + idxPages, + idxSelec); + return; +} + +/* + * restriction_selectivity in lisp system.-- + * + * NOTE: The routine is now merged with RestrictionClauseSelectivity + * as defined in plancat.c + * + * Returns the selectivity of a specified operator. + * This code executes registered procedures stored in the + * operator relation, by calling the function manager. + * + * XXX The assumption in the selectivity procedures is that if the + * relation OIDs or attribute numbers are -1, then the clause + * isn't of the form (op var const). + */ +Cost +restriction_selectivity(Oid functionObjectId, + Oid operatorObjectId, + Oid relationObjectId, + AttrNumber attributeNumber, + char *constValue, + int32 constFlag) +{ + float64 result; + + result = (float64) fmgr(functionObjectId, + (char *) operatorObjectId, + (char *) relationObjectId, + (char *) attributeNumber, + (char *) constValue, + (char *) constFlag, + NULL); + if (!PointerIsValid(result)) + elog(WARN, "RestrictionClauseSelectivity: bad pointer"); + + if (*result < 0.0 || *result > 1.0) + elog(WARN, "RestrictionClauseSelectivity: bad value %lf", + *result); + + return ((Cost)*result); +} + +/* + * join_selectivity-- + * Similarly, this routine is merged with JoinClauseSelectivity in + * plancat.c + * + * Returns the selectivity of an operator, given the join clause + * information. + * + * XXX The assumption in the selectivity procedures is that if the + * relation OIDs or attribute numbers are -1, then the clause + * isn't of the form (op var var). 
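The selectivity routines in this file defend against out-of-range estimates: restriction_selectivity() above and join_selectivity() below reject anything outside [0.0, 1.0]. A minimal standalone illustration of that check follows; check_selectivity and the exit() call are stand-ins for the real elog(WARN) path, not part of the patch.

#include <stdio.h>
#include <stdlib.h>

static double check_selectivity(double s)
{
    if (s < 0.0 || s > 1.0) {
        fprintf(stderr, "bad selectivity value %f\n", s);
        exit(1);                 /* the real code raises elog(WARN, ...) instead */
    }
    return s;
}

int main(void)
{
    printf("%f\n", check_selectivity(0.25));   /* accepted */
    printf("%f\n", check_selectivity(1.50));   /* rejected: program exits */
    return 0;
}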
+ */ +Cost +join_selectivity (Oid functionObjectId, + Oid operatorObjectId, + Oid relationObjectId1, + AttrNumber attributeNumber1, + Oid relationObjectId2, + AttrNumber attributeNumber2) +{ + float64 result; + + result = (float64) fmgr(functionObjectId, + (char *) operatorObjectId, + (char *) relationObjectId1, + (char *) attributeNumber1, + (char *) relationObjectId2, + (char *) attributeNumber2, + NULL); + if (!PointerIsValid(result)) + elog(WARN, "JoinClauseSelectivity: bad pointer"); + + if (*result < 0.0 || *result > 1.0) + elog(WARN, "JoinClauseSelectivity: bad value %lf", + *result); + + return((Cost)*result); +} + +/* + * find_all_inheritors-- + * + * Returns a LISP list containing the OIDs of all relations which + * inherits from the relation with OID 'inhparent'. + */ +List * +find_inheritance_children(Oid inhparent) +{ + static ScanKeyData key[1] = { + { 0, Anum_pg_inherits_inhparent, F_OIDEQ } + }; + + HeapTuple inheritsTuple; + Relation relation; + HeapScanDesc scan; + List *list = NIL; + Oid inhrelid; + + fmgr_info(F_OIDEQ, &key[0].sk_func, &key[0].sk_nargs); + + key[0].sk_argument = ObjectIdGetDatum((Oid)inhparent); + relation = heap_openr(InheritsRelationName); + scan = heap_beginscan(relation, 0, NowTimeQual, 1, key); + while (HeapTupleIsValid(inheritsTuple = + heap_getnext(scan, 0, + (Buffer *) NULL))) { + inhrelid = ((InheritsTupleForm)GETSTRUCT(inheritsTuple))->inhrel; + list = lappendi(list, inhrelid); + } + heap_endscan(scan); + heap_close(relation); + return(list); +} + +/* + * VersionGetParents-- + * + * Returns a LISP list containing the OIDs of all relations which are + * base relations of the relation with OID 'verrelid'. + */ +List * +VersionGetParents(Oid verrelid) +{ + static ScanKeyData key[1] = { + { 0, Anum_pg_version_verrelid, F_OIDEQ } + }; + + HeapTuple versionTuple; + Relation relation; + HeapScanDesc scan; + Oid verbaseid; + List *list= NIL; + + fmgr_info(F_OIDEQ, &key[0].sk_func, &key[0].sk_nargs); + relation = heap_openr(VersionRelationName); + key[0].sk_argument = ObjectIdGetDatum(verrelid); + scan = heap_beginscan(relation, 0, NowTimeQual, 1, key); + for (;;) { + versionTuple = heap_getnext(scan, 0, + (Buffer *) NULL); + if (!HeapTupleIsValid(versionTuple)) + break; + verbaseid = ((VersionTupleForm) + GETSTRUCT(versionTuple))->verbaseid; + + list = lconsi(verbaseid, list); + + key[0].sk_argument = ObjectIdGetDatum(verbaseid); + heap_rescan(scan, 0, key); + } + heap_endscan(scan); + heap_close(relation); + return(list); +} + +/***************************************************************************** + * + *****************************************************************************/ + +/* + * IdexSelectivity-- + * + * Retrieves the 'amopnpages' and 'amopselect' parameters for each + * AM operator when a given index (specified by 'indexrelid') is used. + * These two parameters are returned by copying them to into an array of + * floats. + * + * Assumption: the attribute numbers and operator ObjectIds are in order + * WRT to each other (otherwise, you have no way of knowing which + * AM operator class or attribute number corresponds to which operator. + * + * 'varAttributeNumbers' contains attribute numbers for variables + * 'constValues' contains the constant values + * 'constFlags' describes how to treat the constants in each clause + * 'nIndexKeys' describes how many keys the index actually has + * + * Returns 'selectivityInfo' filled with the sum of all pages touched + * and the product of each clause's selectivity. 
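IndexSelectivity() below combines its per-key estimates by summing page counts and multiplying clause selectivities, which implicitly assumes the clauses are independent. The short program below shows only that combination step, with made-up numbers; none of the variable names correspond to the real code.

#include <stdio.h>

int main(void)
{
    double pages[] = {2.0, 1.0, 3.0};     /* per-key page estimates (made up) */
    double selec[] = {0.10, 0.50, 0.30};  /* per-key selectivities (made up)  */
    double npages = 0.0, select = 1.0;
    int    n;

    for (n = 0; n < 3; n++) {
        npages += pages[n];               /* pages touched add up            */
        select *= selec[n];               /* selectivities multiply together */
    }
    printf("pages=%.1f selectivity=%.4f\n", npages, select);
    return 0;
}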
+ * + */ +static void +IndexSelectivity(Oid indexrelid, + Oid indrelid, + int32 nIndexKeys, + Oid AccessMethodOperatorClasses[], /* XXX not used? */ + Oid operatorObjectIds[], + int32 varAttributeNumbers[], + char *constValues[], + int32 constFlags[], + float *idxPages, + float *idxSelec) +{ + register i, n; + HeapTuple indexTuple, amopTuple, indRel; + IndexTupleForm index; + Form_pg_amop amop; + Oid indclass; + float64data npages, select; + float64 amopnpages, amopselect; + Oid relam; + + indRel = SearchSysCacheTuple(RELOID, + ObjectIdGetDatum(indexrelid), + 0,0,0); + if (!HeapTupleIsValid(indRel)) + elog(WARN, "IndexSelectivity: index %d not found", + indexrelid); + relam = ((Form_pg_class)GETSTRUCT(indRel))->relam; + + indexTuple = SearchSysCacheTuple(INDEXRELID, + ObjectIdGetDatum(indexrelid), + 0,0,0); + if (!HeapTupleIsValid(indexTuple)) + elog(WARN, "IndexSelectivity: index %d not found", + indexrelid); + index = (IndexTupleForm)GETSTRUCT(indexTuple); + + npages = 0.0; + select = 1.0; + for (n = 0; n < nIndexKeys; ++n) { + /* + * Find the AM class for this key. + * + * If the first attribute number is invalid then we have a + * functional index, and AM class is the first one defined + * since functional indices have exactly one key. + */ + indclass = (varAttributeNumbers[0] == InvalidAttrNumber) ? + index->indclass[0] : InvalidOid; + i = 0; + while ((i < nIndexKeys) && (indclass == InvalidOid)) { + if (varAttributeNumbers[n] == index->indkey[i]) { + indclass = index->indclass[i]; + break; + } + i++; + } + if (!OidIsValid(indclass)) { + /* + * Presumably this means that we are using a functional + * index clause and so had no variable to match to + * the index key ... if not we are in trouble. + */ + elog(NOTICE, "IndexSelectivity: no key %d in index %d", + varAttributeNumbers[n], indexrelid); + continue; + } + + amopTuple = SearchSysCacheTuple(AMOPOPID, + ObjectIdGetDatum(indclass), + ObjectIdGetDatum(operatorObjectIds[n]), + ObjectIdGetDatum(relam), + 0); + if (!HeapTupleIsValid(amopTuple)) + elog(WARN, "IndexSelectivity: no amop %d %d", + indclass, operatorObjectIds[n]); + amop = (Form_pg_amop)GETSTRUCT(amopTuple); + amopnpages = (float64) fmgr(amop->amopnpages, + (char *) operatorObjectIds[n], + (char *) indrelid, + (char *) varAttributeNumbers[n], + (char *) constValues[n], + (char *) constFlags[n], + (char *) nIndexKeys, + (char *) indexrelid); + npages += PointerIsValid(amopnpages) ? *amopnpages : 0.0; + if ((i = npages) < npages) /* ceil(npages)? */ + npages += 1.0; + amopselect = (float64) fmgr(amop->amopselect, + (char *) operatorObjectIds[n], + (char *) indrelid, + (char *) varAttributeNumbers[n], + (char *) constValues[n], + (char *) constFlags[n], + (char *) nIndexKeys, + (char *) indexrelid); + select *= PointerIsValid(amopselect) ? 
*amopselect : 1.0; + } + *idxPages = npages; + *idxSelec = select; +} + diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c new file mode 100644 index 00000000000..351fb182107 --- /dev/null +++ b/src/backend/optimizer/util/relnode.c @@ -0,0 +1,123 @@ +/*------------------------------------------------------------------------- + * + * relnode.c-- + * Relation manipulation routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/relnode.c,v 1.1.1.1 1996/07/09 06:21:39 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/relation.h" + +#include "optimizer/internal.h" +#include "optimizer/pathnode.h" /* where the decls go */ +#include "optimizer/plancat.h" + + + +/* + * get_base_rel-- + * Returns relation entry corresponding to 'relid', creating a new one if + * necessary. This is for base relations. + * + */ +Rel *get_base_rel(Query* root, int relid) +{ + List *relids; + Rel *rel; + + relids = lconsi(relid, NIL); + rel = rel_member(relids, root->base_relation_list_); + if (rel==NULL) { + rel = makeNode(Rel); + rel->relids = relids; + rel->indexed = false; + rel->pages = 0; + rel->tuples = 0; + rel->width = 0; + rel->targetlist = NIL; + rel->pathlist = NIL; + rel->unorderedpath = (Path *)NULL; + rel->cheapestpath = (Path *)NULL; + rel->pruneable = true; + rel->classlist = NULL; + rel->ordering = NULL; + rel->relam = InvalidOid; + rel->clauseinfo = NIL; + rel->joininfo = NIL; + rel->innerjoin = NIL; + rel->superrels = NIL; + + root->base_relation_list_ = lcons(rel, + root->base_relation_list_); + + /* + * ??? the old lispy C code (get_rel) do a listp(relid) here but + * that can never happen since we already established relid is not + * a list. -ay 10/94 + */ + if(relid < 0) { + /* + * If the relation is a materialized relation, assume + * constants for sizes. + */ + rel->pages = _TEMP_RELATION_PAGES_; + rel->tuples = _TEMP_RELATION_TUPLES_; + + } else { + bool hasindex; + int pages, tuples; + + /* + * Otherwise, retrieve relation characteristics from the + * system catalogs. + */ + relation_info(root, relid, &hasindex, &pages, &tuples); + rel->indexed = hasindex; + rel->pages = pages; + rel->tuples = tuples; + } + } + return rel; +} + +/* + * get_join_rel-- + * Returns relation entry corresponding to 'relid' (a list of relids), + * creating a new one if necessary. This is for join relations. + * + */ +Rel *get_join_rel(Query *root, List *relid) +{ + return rel_member(relid, root->join_relation_list_); +} + +/* + * rel-member-- + * Determines whether a relation of id 'relid' is contained within a list + * 'rels'. + * + * Returns the corresponding entry in 'rels' if it is there. 
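rel_member() below identifies a (join) relation by the set of base relids it covers and uses same() to compare those lists. The standalone sketch that follows captures the idea with zero-terminated int arrays and an order-insensitive equality test; treating same() as exact set equality is an assumption of the sketch, and all names here are invented.

#include <stdbool.h>
#include <stdio.h>

static bool contains(const int *set, int id)
{
    for (; *set != 0; set++)
        if (*set == id)
            return true;
    return false;
}

static bool same_relids(const int *a, const int *b)
{
    const int *p;

    for (p = a; *p != 0; p++)      /* every member of a must be in b ... */
        if (!contains(b, *p))
            return false;
    for (p = b; *p != 0; p++)      /* ... and vice versa */
        if (!contains(a, *p))
            return false;
    return true;
}

int main(void)
{
    int joinrel[] = {1, 3, 0};
    int probe[]   = {3, 1, 0};

    printf("%s\n", same_relids(joinrel, probe) ? "match" : "no match");
    return 0;
}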
+ * + */ +Rel * +rel_member(List *relid, List *rels) +{ + List *temp = NIL; + List *temprelid = NIL; + + if (relid!=NIL && rels!=NIL) { + foreach(temp,rels) { + temprelid = ((Rel*)lfirst(temp))->relids; + if(same(temprelid, relid)) + return((Rel*)(lfirst(temp))); + } + } + return(NULL); +} diff --git a/src/backend/optimizer/util/tlist.c b/src/backend/optimizer/util/tlist.c new file mode 100644 index 00000000000..073c2a08231 --- /dev/null +++ b/src/backend/optimizer/util/tlist.c @@ -0,0 +1,577 @@ +/*------------------------------------------------------------------------- + * + * tlist.c-- + * Target list manipulation routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/tlist.c,v 1.1.1.1 1996/07/09 06:21:39 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "nodes/relation.h" +#include "nodes/primnodes.h" +#include "nodes/pg_list.h" +#include "nodes/nodeFuncs.h" +#include "utils/elog.h" +#include "utils/lsyscache.h" + +#include "optimizer/internal.h" +#include "optimizer/var.h" +#include "optimizer/tlist.h" +#include "optimizer/clauses.h" + +#include "nodes/makefuncs.h" +#include "parser/catalog_utils.h" + +static Node *flatten_tlistentry(Node *tlistentry, List *flat_tlist); + +/***************************************************************************** + * ---------- RELATION node target list routines ---------- + *****************************************************************************/ + +/* + * tlistentry-member-- + * + * RETURNS: the leftmost member of sequence "targetlist" that satisfies + * the predicate "var_equal" + * MODIFIES: nothing + * REQUIRES: test = function which can operate on a lispval union + * var = valid var-node + * targetlist = valid sequence + */ +TargetEntry * +tlistentry_member(Var *var, List *targetlist) +{ + if (var) { + List *temp = NIL; + + foreach (temp,targetlist) { + if (var_equal(var, + get_expr(lfirst(temp)))) + return((TargetEntry*)lfirst(temp)); + } + } + return (NULL); +} + +/* + * matching_tlvar-- + * + * RETURNS: var node in a target list which is var_equal to 'var', + * if one exists. + * REQUIRES: "test" operates on lispval unions, + * + */ +Expr * +matching_tlvar(Var *var, List *targetlist) +{ + TargetEntry *tlentry; + + tlentry = tlistentry_member(var,targetlist); + if (tlentry) + return((Expr*)get_expr (tlentry) ); + + return((Expr*) NULL); +} + +/* + * add_tl_element-- + * Creates a targetlist entry corresponding to the supplied var node + * + * 'var' and adds the new targetlist entry to the targetlist field of + * 'rel' + * + * RETURNS: nothing + * MODIFIES: vartype and varid fields of leftmost varnode that matches + * argument "var" (sometimes). + * CREATES: new var-node iff no matching var-node exists in targetlist + */ +void +add_tl_element(Rel *rel, Var *var) +{ + Expr *oldvar = (Expr *)NULL; + + oldvar = matching_tlvar(var, rel->targetlist); + + /* + * If 'var' is not already in 'rel's target list, add a new node. 
+ */ + if (oldvar==NULL) { + List *tlist = rel->targetlist; + Var *newvar = makeVar(var->varno, + var->varattno, + var->vartype, + var->varno, + var->varoattno); + + rel->targetlist = + lappend (tlist, + create_tl_element(newvar, + length(tlist) + 1)); + + } +} + +/* + * create_tl_element-- + * Creates a target list entry node and its associated (resdom var) pair + * with its resdom number equal to 'resdomno' and the joinlist field set + * to 'joinlist'. + * + * RETURNS: newly created tlist-entry + * CREATES: new targetlist entry (always). + */ +TargetEntry* +create_tl_element(Var *var, int resdomno) +{ + TargetEntry *tlelement= makeNode(TargetEntry); + + tlelement->resdom = + makeResdom(resdomno, + var->vartype, + get_typlen(var->vartype), + NULL, + (Index)0, + (Oid)0, + 0); + tlelement->expr = (Node*)var; + + return(tlelement); +} + +/* + * get-actual-tlist-- + * Returns the targetlist elements from a relation tlist. + * + */ +List * +get_actual_tlist(List *tlist) +{ + /* + * this function is not making sense. - ay 10/94 + */ +#if 0 + List *element = NIL; + List *result = NIL; + + if (tlist==NULL) { + elog(DEBUG,"calling get_actual_tlist with empty tlist"); + return(NIL); + } + /* XXX - it is unclear to me what exactly get_entry + should be doing, as it is unclear to me the exact + relationship between "TL" "TLE" and joinlists */ + + foreach(element,tlist) + result = lappend(result, lfirst((List*)lfirst(element))); + + return(result); +#endif + return tlist; +} + +/***************************************************************************** + * ---------- GENERAL target list routines ---------- + *****************************************************************************/ + +/* + * tlist-member-- + * Determines whether a var node is already contained within a + * target list. + * + * 'var' is the var node + * 'tlist' is the target list + * 'dots' is t if we must match dotfields to determine uniqueness + * + * Returns the resdom entry of the matching var node. + * + */ +Resdom * +tlist_member(Var *var, List *tlist) +{ + List *i = NIL; + TargetEntry *temp_tle = (TargetEntry *)NULL; + TargetEntry *tl_elt = (TargetEntry *)NULL; + + if (var) { + foreach (i,tlist) { + temp_tle = (TargetEntry *)lfirst(i); + if (var_equal(var, get_expr(temp_tle))) { + tl_elt = temp_tle; + break; + } + } + + if (tl_elt != NULL) + return(tl_elt->resdom); + else + return((Resdom*)NULL); + } + return ((Resdom*)NULL); +} + +/* + * Routine to get the resdom out of a targetlist. + */ +Resdom * +tlist_resdom(List *tlist, Resdom *resnode) +{ + Resdom *resdom = (Resdom*)NULL; + List *i = NIL; + TargetEntry *temp_tle = (TargetEntry *)NULL; + + foreach(i,tlist) { + temp_tle = (TargetEntry *)lfirst(i); + resdom = temp_tle->resdom; + /* Since resnos are supposed to be unique */ + if (resnode->resno == resdom->resno) + return(resdom); + } + return((Resdom*)NULL); +} + + +/* + * match_varid-- + * Searches a target list for an entry with some desired varid. + * + * 'varid' is the desired id + * 'tlist' is the target list that is searched + * + * Returns the target list entry (resdom var) of the matching var. + * + * Now checks to make sure array references (in addition to range + * table indices) are identical - retrieve (a.b[1],a.b[2]) should + * not be turned into retrieve (a.b[1],a.b[1]). + * + * [what used to be varid is now broken up into two fields varnoold and + * varoattno. Also, nested attnos are long gone. 
- ay 2/95] + */ +TargetEntry * +match_varid(Var *test_var, List *tlist) +{ + List *tl; + Oid type_var; + + type_var = (Oid) test_var->vartype; + + foreach (tl, tlist) { + TargetEntry *entry; + Var *tlvar; + + entry = lfirst(tl); + tlvar = get_expr(entry); + + /* + * we test the original varno (instead of varno which might + * be changed to INNER/OUTER. + */ + if (tlvar->varnoold == test_var->varnoold && + tlvar->varoattno == test_var->varoattno) { + + if (tlvar->vartype == type_var) + return(entry); + } + } + + return (NULL); +} + + +/* + * new-unsorted-tlist-- + * Creates a copy of a target list by creating new resdom nodes + * without sort information. + * + * 'targetlist' is the target list to be copied. + * + * Returns the resulting target list. + * + */ +List * +new_unsorted_tlist(List *targetlist) +{ + List *new_targetlist = (List*)copyObject ((Node*)targetlist); + List *x = NIL; + + foreach (x, new_targetlist) { + TargetEntry *tle = (TargetEntry *)lfirst(x); + tle->resdom->reskey = 0; + tle->resdom->reskeyop = (Oid)0; + } + return(new_targetlist); +} + +/* + * copy-vars-- + * Replaces the var nodes in the first target list with those from + * the second target list. The two target lists are assumed to be + * identical except their actual resdoms and vars are different. + * + * 'target' is the target list to be replaced + * 'source' is the target list to be copied + * + * Returns a new target list. + * + */ +List * +copy_vars(List *target, List *source) +{ + List *result = NIL; + List *src = NIL; + List *dest = NIL; + + for ( src = source, dest = target; src != NIL && + dest != NIL; src = lnext(src), dest = lnext(dest)) { + TargetEntry *temp = MakeTLE(((TargetEntry *)lfirst(dest))->resdom, + (Node*)get_expr(lfirst(src))); + result = lappend(result,temp); + } + return(result); +} + +/* + * flatten-tlist-- + * Create a target list that only contains unique variables. + * + * + * 'tlist' is the current target list + * + * Returns the "flattened" new target list. + * + */ +List * +flatten_tlist(List *tlist) +{ + int last_resdomno = 1; + List *new_tlist = NIL; + List *tlist_vars = NIL; + List *temp; + + foreach (temp, tlist) { + TargetEntry *temp_entry = NULL; + List *vars; + + temp_entry = lfirst(temp); + vars = pull_var_clause((Node*)get_expr(temp_entry)); + if(vars != NULL) { + tlist_vars = nconc(tlist_vars, vars); + } + } + + foreach (temp, tlist_vars) { + Var *var = lfirst(temp); + if (!(tlist_member(var, new_tlist))) { + Resdom *r; + + r = makeResdom(last_resdomno, + var->vartype, + get_typlen(var->vartype), + NULL, + (Index)0, + (Oid)0, + 0); + last_resdomno++; + new_tlist = lappend(new_tlist, MakeTLE (r, (Node*)var)); + } + } + + return new_tlist; +} + +/* + * flatten-tlist-vars-- + * Redoes the target list of a query with no nested attributes by + * replacing vars within computational expressions with vars from + * the 'flattened' target list of the query. + * + * 'full-tlist' is the actual target list + * 'flat-tlist' is the flattened (var-only) target list + * + * Returns the modified actual target list. + * + */ +List * +flatten_tlist_vars(List *full_tlist, List *flat_tlist) +{ + List *x = NIL; + List *result = NIL; + + foreach(x,full_tlist) { + TargetEntry *tle= lfirst(x); + result = + lappend(result, + MakeTLE(tle->resdom, + flatten_tlistentry((Node*)get_expr(tle), + flat_tlist))); + } + + return(result); +} + +/* + * flatten-tlistentry-- + * Replaces vars within a target list entry with vars from a flattened + * target list. 
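flatten_tlist() above boils a target list down to one entry per distinct variable, assigning consecutive resnos starting at 1. The toy program below shows that de-duplication step in isolation; the variables are reduced to plain ints and add_unique is invented for the example.

#include <stdio.h>

#define MAXVARS 16

static int add_unique(int *vars, int nvars, int v)
{
    int i;

    for (i = 0; i < nvars; i++)
        if (vars[i] == v)          /* already in the flattened list */
            return nvars;
    vars[nvars] = v;
    return nvars + 1;
}

int main(void)
{
    /* pretend the target list expressions referenced these vars */
    int pulled[] = {2, 7, 2, 4, 7};
    int flat[MAXVARS];
    int i, nflat = 0;

    for (i = 0; i < 5; i++)
        nflat = add_unique(flat, nflat, pulled[i]);

    for (i = 0; i < nflat; i++)
        printf("resno %d -> var %d\n", i + 1, flat[i]);
    return 0;
}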
+ * + * 'tlistentry' is the target list entry to be modified + * 'flat-tlist' is the flattened target list + * + * Returns the (modified) target_list entry from the target list. + * + */ +static Node * +flatten_tlistentry(Node *tlistentry, List *flat_tlist) +{ + if (tlistentry==NULL) { + + return NULL; + + } else if (IsA (tlistentry,Var)) { + + return + ((Node *)get_expr(match_varid((Var*)tlistentry, + flat_tlist))); + } else if (IsA (tlistentry,Iter)) { + + ((Iter*)tlistentry)->iterexpr = + flatten_tlistentry((Node*)((Iter*)tlistentry)->iterexpr, + flat_tlist); + return tlistentry; + + } else if (single_node(tlistentry)) { + + return tlistentry; + + } else if (is_funcclause (tlistentry)) { + Expr *expr = (Expr*)tlistentry; + List *temp_result = NIL; + List *elt = NIL; + + foreach(elt, expr->args) + temp_result = lappend(temp_result, + flatten_tlistentry(lfirst(elt),flat_tlist)); + + return + ((Node *)make_funcclause((Func*)expr->oper, temp_result)); + + } else if (IsA(tlistentry,Aggreg)) { + + return tlistentry; + + } else if (IsA(tlistentry,ArrayRef)) { + ArrayRef *aref = (ArrayRef *)tlistentry; + List *temp = NIL; + List *elt = NIL; + + foreach(elt, aref->refupperindexpr) + temp = lappend(temp, flatten_tlistentry(lfirst(elt), flat_tlist)); + aref->refupperindexpr = temp; + + temp = NIL; + foreach(elt, aref->reflowerindexpr) + temp = lappend(temp, flatten_tlistentry(lfirst(elt), flat_tlist)); + aref->reflowerindexpr = temp; + + aref->refexpr = + flatten_tlistentry(aref->refexpr, flat_tlist); + + aref->refassgnexpr = + flatten_tlistentry(aref->refassgnexpr, flat_tlist); + + return tlistentry; + } else { + Expr *expr = (Expr*)tlistentry; + Var *left = + (Var*)flatten_tlistentry((Node*)get_leftop(expr), + flat_tlist); + Var *right = + (Var*)flatten_tlistentry((Node*)get_rightop(expr), + flat_tlist); + + return((Node *) + make_opclause((Oper*)expr->oper, left, right)); + } +} + + +TargetEntry * +MakeTLE(Resdom *resdom, Node *expr) +{ + TargetEntry *rt = makeNode(TargetEntry); + + rt->resdom = resdom; + rt->expr = expr; + return rt; +} + +Var * +get_expr(TargetEntry *tle) +{ + Assert(tle!=NULL); + Assert(tle->expr!=NULL); + + return ((Var *)tle->expr); +} + + +/***************************************************************************** + * + *****************************************************************************/ + +/* + * AddGroupAttrToTlist - + * append the group attribute to the target list if it's not already + * in there. 
+ */ +void +AddGroupAttrToTlist(List *tlist, List *grpCl) +{ + List *gl; + int last_resdomno = length(tlist) + 1; + + foreach (gl, grpCl) { + GroupClause *gc = (GroupClause*)lfirst(gl); + Var *var = gc->grpAttr; + + if (!(tlist_member(var, tlist))) { + Resdom *r; + + r = makeResdom(last_resdomno, + var->vartype, + get_typlen(var->vartype), + NULL, + (Index)0, + (Oid)0, + 0); + last_resdomno++; + tlist = lappend(tlist, MakeTLE(r, (Node*)var)); + } + } +} + +/* was ExecTargetListLength() in execQual.c, + moved here to reduce dependencies on the executor module */ +int +exec_tlist_length(List *targetlist) +{ + int len; + List *tl; + TargetEntry *curTle; + + len = 0; + foreach (tl, targetlist) { + curTle = lfirst(tl); + + if (curTle->resdom != NULL) + len++; + } + return len; +} + + diff --git a/src/backend/optimizer/util/var.c b/src/backend/optimizer/util/var.c new file mode 100644 index 00000000000..b4f8436c775 --- /dev/null +++ b/src/backend/optimizer/util/var.c @@ -0,0 +1,189 @@ +/*------------------------------------------------------------------------- + * + * var.c-- + * Var node manipulation routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/optimizer/util/var.c,v 1.1.1.1 1996/07/09 06:21:39 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "nodes/primnodes.h" +#include "nodes/nodeFuncs.h" + +#include "optimizer/internal.h" +#include "optimizer/clauses.h" +#include "optimizer/var.h" + +#include "parser/parsetree.h" + +/* + * find_varnos + * + * Descends down part of a parsetree (qual or tlist), + * + * XXX assumes varno's are always integers, which shouldn't be true... + * (though it currently is, see primnodes.h) + */ +List * +pull_varnos(Node *me) +{ + List *i, *result = NIL; + + if (me == NULL) + return (NIL); + + switch (nodeTag(me)) { + case T_List: + foreach (i, (List*)me) { + result = nconc(result, pull_varnos(lfirst(i))); + } + break; + case T_ArrayRef: + foreach (i, ((ArrayRef*) me)->refupperindexpr) + result = nconc(result, pull_varnos(lfirst(i))); + foreach (i, ((ArrayRef*) me)->reflowerindexpr) + result = nconc(result, pull_varnos(lfirst(i))); + result = nconc(result, pull_varnos(((ArrayRef*) me)->refassgnexpr)); + break; + case T_Var: + result = lconsi(((Var*) me)->varno, NIL); + break; + default: + break; + } + return(result); +} + +/* + * contain_var_clause-- + * Recursively find var nodes from a clause by pulling vars from the + * left and right operands of the clause. + * + * Returns true if any varnode found. 
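contain_var_clause() and pull_var_clause() below are recursive walks over the expression tree, descending through operator, function and array nodes until a Var is found. The self-contained sketch that follows performs the same kind of walk over a toy three-tag expression type; the Expr struct and tags here are invented for the example and are not the real parser nodes.

#include <stdbool.h>
#include <stdio.h>

typedef enum { N_VAR, N_CONST, N_OP } NodeTag;

typedef struct Expr {
    NodeTag      tag;
    struct Expr *left;       /* used for N_OP */
    struct Expr *right;      /* used for N_OP */
} Expr;

static bool contain_var(const Expr *e)
{
    if (e == NULL)
        return false;
    switch (e->tag) {
    case N_VAR:
        return true;                          /* found a variable */
    case N_CONST:
        return false;
    case N_OP:                                /* recurse into both operands */
        return contain_var(e->left) || contain_var(e->right);
    }
    return false;
}

int main(void)
{
    Expr v  = {N_VAR, NULL, NULL};
    Expr c  = {N_CONST, NULL, NULL};
    Expr op = {N_OP, &c, &v};                 /* e.g. "1 + x" */

    printf("%s\n", contain_var(&op) ? "contains a var" : "no vars");
    return 0;
}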
+ */ +bool contain_var_clause(Node *clause) +{ + if (clause==NULL) + return FALSE; + else if (IsA(clause,Var)) + return TRUE; + else if (IsA(clause,Iter)) + return contain_var_clause(((Iter*)clause)->iterexpr); + else if (single_node(clause)) + return FALSE; + else if (or_clause(clause)) { + List *temp; + + foreach (temp, ((Expr*)clause)->args) { + if (contain_var_clause(lfirst(temp))) + return TRUE; + } + return FALSE; + } else if (is_funcclause (clause)) { + List *temp; + + foreach(temp, ((Expr *)clause)->args) { + if (contain_var_clause(lfirst(temp))) + return TRUE; + } + return FALSE; + } else if (IsA(clause,ArrayRef)) { + List *temp; + + foreach(temp, ((ArrayRef*)clause)->refupperindexpr) { + if (contain_var_clause(lfirst(temp))) + return TRUE; + } + foreach(temp, ((ArrayRef*)clause)->reflowerindexpr) { + if (contain_var_clause(lfirst(temp))) + return TRUE; + } + if (contain_var_clause(((ArrayRef*)clause)->refexpr)) + return TRUE; + if (contain_var_clause(((ArrayRef*)clause)->refassgnexpr)) + return TRUE; + return FALSE; + } else if (not_clause(clause)) + return contain_var_clause((Node*)get_notclausearg((Expr*)clause)); + else if (is_opclause(clause)) + return (contain_var_clause((Node*)get_leftop((Expr*)clause)) || + contain_var_clause((Node*)get_rightop((Expr*)clause))); + + return FALSE; +} + +/* + * pull_var_clause-- + * Recursively pulls all var nodes from a clause by pulling vars from the + * left and right operands of the clause. + * + * Returns list of varnodes found. + */ +List * +pull_var_clause(Node *clause) +{ + List *retval = NIL; + + if (clause==NULL) + return(NIL); + else if (IsA(clause,Var)) + retval = lcons(clause,NIL); + else if (IsA(clause,Iter)) + retval = pull_var_clause(((Iter*)clause)->iterexpr); + else if (single_node(clause)) + retval = NIL; + else if (or_clause(clause)) { + List *temp; + + foreach (temp, ((Expr*)clause)->args) + retval = nconc(retval, pull_var_clause(lfirst(temp))); + } else if (is_funcclause (clause)) { + List *temp; + + foreach(temp, ((Expr *)clause)->args) + retval = nconc (retval,pull_var_clause(lfirst(temp))); + } else if (IsA(clause,Aggreg)) { + retval = pull_var_clause(((Aggreg*)clause)->target); + } else if (IsA(clause,ArrayRef)) { + List *temp; + + foreach(temp, ((ArrayRef*)clause)->refupperindexpr) + retval = nconc (retval,pull_var_clause(lfirst(temp))); + foreach(temp, ((ArrayRef*)clause)->reflowerindexpr) + retval = nconc (retval,pull_var_clause(lfirst(temp))); + retval = nconc(retval, + pull_var_clause(((ArrayRef*)clause)->refexpr)); + retval = nconc(retval, + pull_var_clause(((ArrayRef*)clause)->refassgnexpr)); + } else if (not_clause(clause)) + retval = pull_var_clause((Node*)get_notclausearg((Expr*)clause)); + else if (is_opclause(clause)) + retval = nconc(pull_var_clause((Node*)get_leftop((Expr*)clause)), + pull_var_clause((Node*)get_rightop((Expr*)clause))); + else + retval = NIL; + + return (retval); +} + +/* + * var_equal + * + * Returns t iff two var nodes correspond to the same attribute. 
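var_equal() below simply compares the varno, vartype and varattno fields of the two nodes. A tiny standalone equivalent over an invented ToyVar struct, purely for illustration:

#include <stdbool.h>
#include <stdio.h>

typedef struct {
    int varno;       /* range table index */
    int varattno;    /* attribute number  */
    int vartype;     /* type OID          */
} ToyVar;

static bool toy_var_equal(const ToyVar *a, const ToyVar *b)
{
    return a->varno == b->varno &&
           a->vartype == b->vartype &&
           a->varattno == b->varattno;
}

int main(void)
{
    ToyVar x = {1, 2, 23};
    ToyVar y = {1, 2, 23};

    printf("%s\n", toy_var_equal(&x, &y) ? "equal" : "different");
    return 0;
}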
+ */ +bool +var_equal(Var *var1, Var *var2) +{ + if (IsA (var1,Var) && IsA (var2,Var) && + (((Var*)var1)->varno == ((Var*)var2)->varno) && + (((Var*)var1)->vartype == ((Var*)var2)->vartype) && + (((Var*)var1)->varattno == ((Var*)var2)->varattno)) { + + return(true); + } else + return(false); +} diff --git a/src/backend/optimizer/var.h b/src/backend/optimizer/var.h new file mode 100644 index 00000000000..fdcf1ea647b --- /dev/null +++ b/src/backend/optimizer/var.h @@ -0,0 +1,21 @@ +/*------------------------------------------------------------------------- + * + * var.h-- + * prototypes for var.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: var.h,v 1.1.1.1 1996/07/09 06:21:35 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef VAR_H +#define VAR_H + +extern List *pull_varnos(Node *me); +extern bool contain_var_clause(Node *clause); +extern List *pull_var_clause(Node *clause); +extern bool var_equal(Var *var1, Var *var2); + +#endif /* VAR_H */ diff --git a/src/backend/optimizer/xfunc.h b/src/backend/optimizer/xfunc.h new file mode 100644 index 00000000000..a3ee1b99cc2 --- /dev/null +++ b/src/backend/optimizer/xfunc.h @@ -0,0 +1,84 @@ +/*------------------------------------------------------------------------- + * + * xfunc.h-- + * prototypes for xfunc.c and predmig.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: xfunc.h,v 1.1.1.1 1996/07/09 06:21:35 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef XFUNC_H +#define XFUNC_H + +#include "nodes/relation.h" + +/* command line arg flags */ +#define XFUNC_OFF -1 /* do no optimization of expensive preds */ +#define XFUNC_NOR 2 /* do no optimization of OR clauses */ +#define XFUNC_NOPULL 4 /* never pull restrictions above joins */ +#define XFUNC_NOPM 8 /* don't do predicate migration */ +#define XFUNC_WAIT 16 /* don't do pullup until predicate migration */ +#define XFUNC_PULLALL 32 /* pull all expensive restrictions up, always */ + +/* constants for local and join predicates */ +#define XFUNC_LOCPRD 1 +#define XFUNC_JOINPRD 2 +#define XFUNC_UNKNOWN 0 + +extern int XfuncMode; /* defined in tcop/postgres.c */ + +/* defaults for function attributes used for expensive function calculations */ +#define BYTE_PCT 100 +#define PERBYTE_CPU 0 +#define PERCALL_CPU 0 +#define OUTIN_RATIO 100 + +/* default width assumed for variable length attributes */ +#define VARLEN_DEFAULT 128; + +/* Macro to get group rank out of group cost and group sel */ +#define get_grouprank(a) ((get_groupsel(a) - 1) / get_groupcost(a)) + +/* Macro to see if a path node is actually a Join */ +#define is_join(pathnode) (length(get_relids(get_parent(pathnode))) > 1 ? 
1 : 0) + +/* function prototypes from planner/path/xfunc.c */ +extern void xfunc_trypullup(Rel *rel); +extern int xfunc_shouldpull(Path *childpath, JoinPath *parentpath, + int whichchild, CInfo *maxcinfopt); +extern CInfo *xfunc_pullup(Path *childpath, JoinPath *parentpath, CInfo *cinfo, + int whichchild, int clausetype); +extern Cost xfunc_rank(Expr *clause); +extern Cost xfunc_expense(Query* queryInfo, Expr *clause); +extern Cost xfunc_join_expense(JoinPath *path, int whichchild); +extern Cost xfunc_local_expense(Expr *clause); +extern Cost xfunc_func_expense(Expr *node, List *args); +extern int xfunc_width(Expr *clause); +/* static, moved to xfunc.c */ +/* extern int xfunc_card_unreferenced(Expr *clause, Relid referenced); */ +extern int xfunc_card_product(Relid relids); +extern List *xfunc_find_references(List *clause); +extern List *xfunc_primary_join(JoinPath *pathnode); +extern Cost xfunc_get_path_cost(Path *pathnode); +extern Cost xfunc_total_path_cost(JoinPath *pathnode); +extern Cost xfunc_expense_per_tuple(JoinPath *joinnode, int whichchild); +extern void xfunc_fixvars(Expr *clause, Rel *rel, int varno); +extern int xfunc_cinfo_compare(void *arg1, void *arg2); +extern int xfunc_clause_compare(void *arg1, void *arg2); +extern void xfunc_disjunct_sort(List *clause_list); +extern int xfunc_disjunct_compare(void *arg1, void *arg2); +extern int xfunc_func_width(RegProcedure funcid, List *args); +extern int xfunc_tuple_width(Relation rd); +extern int xfunc_num_join_clauses(JoinPath *path); +extern List *xfunc_LispRemove(List *foo, List *bar); +extern bool xfunc_copyrel(Rel *from, Rel **to); + +/* + * function prototypes for path/predmig.c + */ +extern bool xfunc_do_predmig(Path root); + +#endif /* XFUNC_H */ diff --git a/src/backend/parser/Makefile.inc b/src/backend/parser/Makefile.inc new file mode 100644 index 00000000000..38607f6fb01 --- /dev/null +++ b/src/backend/parser/Makefile.inc @@ -0,0 +1,46 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the parser module +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/parser/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:39 scrappy Exp $ +# +#------------------------------------------------------------------------- + +VPATH:= $(VPATH):$(CURDIR)/parser + +#PARSEYACCS= gram.c parse.h +PARSEYACCS= gram.c + +$(PARSEYACCS): gram.y + cd $(objdir); \ + $(YACC) $(YFLAGS) $<; \ + mv y.tab.c gram.c; \ + mv y.tab.h parse.h + +$(objdir)/gram.o: gram.c + $(cc_inobjdir) + + +scan.c: scan.l + cd $(objdir); $(LEX) $<; mv lex.yy.c scan.c + +$(objdir)/scan.o: scan.c + $(cc_inobjdir) + + +SRCS_PARSER+= analyze.c catalog_utils.c dbcommands.c gram.c \ + keywords.c parser.c parse_query.c scan.c scansup.c + +CLEANFILES+= scan.c ${PARSEYACCS} + +POSTGRES_DEPEND+= scan.c $(PARSEYACCS) + +HEADERS+= catalog_utils.h io.h parse_query.h parsetree.h \ + dbcommands.h keywords.h + + diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c new file mode 100644 index 00000000000..504e557abee --- /dev/null +++ b/src/backend/parser/analyze.c @@ -0,0 +1,2467 @@ +/*------------------------------------------------------------------------- + * + * analyze.c-- + * transform the parse tree into a query tree + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.1.1.1 1996/07/09 06:21:39 scrappy Exp $ + * + 
*------------------------------------------------------------------------- + */ +#include +#include +#include +#include "postgres.h" +#include "nodes/nodes.h" +#include "nodes/primnodes.h" +#include "nodes/parsenodes.h" +#include "nodes/relation.h" +#include "parse.h" /* for AND, OR, etc. */ +#include "catalog/pg_type.h" /* for INT4OID, etc. */ +#include "utils/elog.h" +#include "utils/builtins.h" /* namecmp(), textout() */ +#include "utils/lsyscache.h" +#include "utils/palloc.h" +#include "utils/mcxt.h" +#include "parser/parse_query.h" +#include "parser/parse_state.h" +#include "nodes/makefuncs.h" /* for makeResdom(), etc. */ +#include "nodes/nodeFuncs.h" + +#include "optimizer/clauses.h" +#include "access/heapam.h" + +/* convert the parse tree into a query tree */ +static Query *transformStmt(ParseState *pstate, Node *stmt); + +static Query *transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt); +static Query *transformInsertStmt(ParseState *pstate, AppendStmt *stmt); +static Query *transformIndexStmt(ParseState *pstate, IndexStmt *stmt); +static Query *transformExtendStmt(ParseState *pstate, ExtendStmt *stmt); +static Query *transformRuleStmt(ParseState *query, RuleStmt *stmt); +static Query *transformSelectStmt(ParseState *pstate, RetrieveStmt *stmt); +static Query *transformUpdateStmt(ParseState *pstate, ReplaceStmt *stmt); +static Query *transformCursorStmt(ParseState *pstate, CursorStmt *stmt); +static Node *handleNestedDots(ParseState *pstate, Attr *attr, int *curr_resno); + +static Node *transformExpr(ParseState *pstate, Node *expr); + +static void makeRangeTable(ParseState *pstate, char *relname, List *frmList); +static List *expandAllTables(ParseState *pstate); +static char *figureColname(Node *expr, Node *resval); +static List *makeTargetList(ParseState *pstate, List *cols, List *exprs); +static List *transformTargetList(ParseState *pstate, + List *targetlist, bool isInsert, + bool isUpdate); +static TargetEntry *make_targetlist_expr(ParseState *pstate, + char *name, Node *expr, + List *arrayRef, + bool ResdomNoIsAttrNo); +static Node *transformWhereClause(ParseState *pstate, Node *a_expr); +static List *transformGroupClause(ParseState *pstate, List *grouplist); +static List *transformSortClause(List *orderlist, List *targetlist, + char* uniqueFlag); + +static void parseFromClause(ParseState *pstate, List *frmList); +static Node *ParseFunc(ParseState *pstate, char *funcname, + List *fargs, int *curr_resno); +static char *ParseColumnName(ParseState *pstate, char *name, bool *isRelName); +static List *setup_tlist(char *attname, Oid relid); +static List *setup_base_tlist(Oid typeid); +static void make_arguments(int nargs, List *fargs, Oid *input_typeids, Oid *function_typeids); +static void AddAggToParseState(ParseState *pstate, Aggreg *aggreg); +static void finalizeAggregates(ParseState *pstate, Query *qry); +static void parseCheckAggregates(ParseState *pstate, Query *qry); + +/***************************************************************************** + * + *****************************************************************************/ + +/* + * makeParseState() -- + * allocate and initialize a new ParseState. 
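As the surrounding comment notes, makeParseState() hands back freshly allocated memory that the caller must free, and parse_analyze() below follows the same pattern for its QueryTreeList. A minimal sketch of that allocate-initialize-and-let-the-caller-free convention, using an invented ToyParseState rather than the real struct:

#include <stdio.h>
#include <stdlib.h>

typedef struct { int p_last_resno; } ToyParseState;

static ToyParseState *make_toy_parse_state(void)
{
    ToyParseState *ps = malloc(sizeof(ToyParseState));

    if (ps == NULL)
        abort();
    ps->p_last_resno = 1;            /* resnos start at 1 */
    return ps;                       /* caller frees */
}

int main(void)
{
    ToyParseState *ps = make_toy_parse_state();

    printf("first resno: %d\n", ps->p_last_resno);
    free(ps);                        /* caller's responsibility */
    return 0;
}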
+ * the CALLERS is responsible for freeing the ParseState* returned + * + */ + +ParseState* +makeParseState() { + ParseState *pstate; + + pstate = malloc(sizeof(ParseState)); + pstate->p_last_resno = 1; + pstate->p_target_resnos = NIL; + pstate->p_rtable = NIL; + pstate->p_query_is_rule = 0; + pstate->p_numAgg = 0; + pstate->p_aggs = NULL; + + return (pstate); +} +/* + * parse_analyze - + * analyze a list of parse trees and transform them if necessary. + * + * Returns a list of transformed parse trees. Optimizable statements are + * all transformed to Query while the rest stays the same. + * + * CALLER is responsible for freeing the QueryTreeList* returned + */ +QueryTreeList * +parse_analyze(List *pl) +{ + QueryTreeList *result; + ParseState *pstate; + int i = 0; + + result = malloc(sizeof(QueryTreeList)); + result->len = length(pl); + result->qtrees = (Query**)malloc(result->len * sizeof(Query*)); + + while(pl!=NIL) { + pstate = makeParseState(); + result->qtrees[i++] = transformStmt(pstate, lfirst(pl)); + pl = lnext(pl); + free(pstate); + } + + return result; +} + +/* + * transformStmt - + * transform a Parse tree. If it is an optimizable statement, turn it + * into a Query tree. + */ +static Query * +transformStmt(ParseState* pstate, Node *parseTree) +{ + Query* result = NULL; + + switch(nodeTag(parseTree)) { + /*------------------------ + * Non-optimizable statements + *------------------------ + */ + case T_IndexStmt: + result = transformIndexStmt(pstate, (IndexStmt *)parseTree); + break; + + case T_ExtendStmt: + result = transformExtendStmt(pstate, (ExtendStmt *)parseTree); + break; + + case T_RuleStmt: + result = transformRuleStmt(pstate, (RuleStmt *)parseTree); + break; + + case T_ViewStmt: + { + ViewStmt *n = (ViewStmt *)parseTree; + n->query = (Query *)transformStmt(pstate, (Node*)n->query); + result = makeNode(Query); + result->commandType = CMD_UTILITY; + result->utilityStmt = (Node*)n; + } + break; + + case T_VacuumStmt: + { + MemoryContext oldcontext; + /* make sure that this Query is allocated in TopMemory context + because vacuum spans transactions and we don't want to lose + the vacuum Query due to end-of-transaction free'ing*/ + oldcontext = MemoryContextSwitchTo(TopMemoryContext); + result = makeNode(Query); + result->commandType = CMD_UTILITY; + result->utilityStmt = (Node*)parseTree; + MemoryContextSwitchTo(oldcontext); + break; + + } + case T_ExplainStmt: + { + ExplainStmt *n = (ExplainStmt *)parseTree; + result = makeNode(Query); + result->commandType = CMD_UTILITY; + n->query = transformStmt(pstate, (Node*)n->query); + result->utilityStmt = (Node*)parseTree; + } + break; + + /*------------------------ + * Optimizable statements + *------------------------ + */ + case T_AppendStmt: + result = transformInsertStmt(pstate, (AppendStmt *)parseTree); + break; + + case T_DeleteStmt: + result = transformDeleteStmt(pstate, (DeleteStmt *)parseTree); + break; + + case T_ReplaceStmt: + result = transformUpdateStmt(pstate, (ReplaceStmt *)parseTree); + break; + + case T_CursorStmt: + result = transformCursorStmt(pstate, (CursorStmt *)parseTree); + break; + + case T_RetrieveStmt: + result = transformSelectStmt(pstate, (RetrieveStmt *)parseTree); + break; + + default: + /* + * other statments don't require any transformation-- just + * return the original parsetree + */ + result = makeNode(Query); + result->commandType = CMD_UTILITY; + result->utilityStmt = (Node*)parseTree; + break; + } + return result; +} + +/* + * transformDeleteStmt - + * transforms a Delete Statement + */ 
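transformStmt() above is a single dispatch on the node tag: optimizable statements each get their own transform routine, and everything else is wrapped in a Query of type CMD_UTILITY. The compact sketch below mirrors only that shape; the tags and the one-field Query struct are stand-ins, not the real parsenodes.

#include <stdio.h>

typedef enum { T_SelectStmt, T_DeleteStmt, T_CreateStmt } StmtTag;
typedef enum { CMD_SELECT, CMD_DELETE, CMD_UTILITY } CmdType;

typedef struct { CmdType commandType; } Query;

static Query transform(StmtTag tag)
{
    Query q;

    switch (tag) {
    case T_SelectStmt: q.commandType = CMD_SELECT;  break;
    case T_DeleteStmt: q.commandType = CMD_DELETE;  break;
    default:           q.commandType = CMD_UTILITY; break;  /* pass through */
    }
    return q;
}

int main(void)
{
    printf("%d %d %d\n",
           transform(T_SelectStmt).commandType,
           transform(T_DeleteStmt).commandType,
           transform(T_CreateStmt).commandType);
    return 0;
}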
+static Query * +transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt) +{ + Query *qry = makeNode(Query); + + qry->commandType = CMD_DELETE; + + /* set up a range table */ + makeRangeTable(pstate, stmt->relname, NULL); + +/* qry->uniqueFlag = FALSE; */ + qry->uniqueFlag = NULL; + + /* fix where clause */ + qry->qual = transformWhereClause(pstate, stmt->whereClause); + + qry->rtable = pstate->p_rtable; + qry->resultRelation = RangeTablePosn(pstate->p_rtable, stmt->relname); + + /* make sure we don't have aggregates in the where clause */ + if (pstate->p_numAgg > 0) + parseCheckAggregates(pstate, qry); + + return (Query *)qry; +} + +/* + * transformInsertStmt - + * transform an Insert Statement + */ +static Query * +transformInsertStmt(ParseState *pstate, AppendStmt *stmt) +{ + Query *qry = makeNode(Query); /* make a new query tree */ + List *targetlist; + + qry->commandType = CMD_INSERT; + + /* set up a range table */ + makeRangeTable(pstate, stmt->relname, stmt->fromClause); + +/* qry->uniqueFlag = FALSE; */ + qry->uniqueFlag = NULL; + + /* fix the target list */ + targetlist = makeTargetList(pstate, stmt->cols, stmt->exprs); + qry->targetList = transformTargetList(pstate, + targetlist, + TRUE /* is insert */, + FALSE /*not update*/); + + /* fix where clause */ + qry->qual = transformWhereClause(pstate, stmt->whereClause); + + /* now the range table will not change */ + qry->rtable = pstate->p_rtable; + qry->resultRelation = RangeTablePosn(pstate->p_rtable, stmt->relname); + + if (pstate->p_numAgg > 0) + finalizeAggregates(pstate, qry); + + return (Query *)qry; +} + +/* + * transformIndexStmt - + * transforms the qualification of the index statement + */ +static Query * +transformIndexStmt(ParseState *pstate, IndexStmt *stmt) +{ + Query* q; + + q = makeNode(Query); + q->commandType = CMD_UTILITY; + + /* take care of the where clause */ + stmt->whereClause = transformWhereClause(pstate,stmt->whereClause); + stmt->rangetable = pstate->p_rtable; + + q->utilityStmt = (Node*)stmt; + + return q; +} + +/* + * transformExtendStmt - + * transform the qualifications of the Extend Index Statement + * + */ +static Query * +transformExtendStmt(ParseState *pstate, ExtendStmt *stmt) +{ + Query *q; + + q = makeNode(Query); + q->commandType = CMD_UTILITY; + + /* take care of the where clause */ + stmt->whereClause = transformWhereClause(pstate,stmt->whereClause); + stmt->rangetable = pstate->p_rtable; + + q->utilityStmt = (Node*)stmt; + return q; +} + +/* + * transformRuleStmt - + * transform a Create Rule Statement. The actions is a list of parse + * trees which is transformed into a list of query trees. + */ +static Query * +transformRuleStmt(ParseState *pstate, RuleStmt *stmt) +{ + Query *q; + List *actions; + + q = makeNode(Query); + q->commandType = CMD_UTILITY; + + actions = stmt->actions; + /* + * transform each statment, like parse_analyze() + */ + while (actions != NIL) { + RangeTblEntry *curEnt, *newEnt; + + /* + * NOTE: 'CURRENT' must always have a varno equal to 1 and 'NEW' + * equal to 2. 
+ */ + curEnt = makeRangeTableEntry(stmt->object->relname, FALSE, + NULL, "*CURRENT*"); + newEnt = makeRangeTableEntry(stmt->object->relname, FALSE, + NULL, "*NEW*"); + pstate->p_rtable = makeList(curEnt, newEnt, -1); + + pstate->p_last_resno = 1; + pstate->p_target_resnos = NIL; + pstate->p_query_is_rule = 1; /* for expand all */ + pstate->p_numAgg = 0; + pstate->p_aggs = NULL; + + lfirst(actions) = transformStmt(pstate, lfirst(actions)); + actions = lnext(actions); + } + + /* take care of the where clause */ + stmt->whereClause = transformWhereClause(pstate,stmt->whereClause); + + q->utilityStmt = (Node*)stmt; + return q; +} + + +/* + * transformSelectStmt - + * transforms a Select Statement + * + */ +static Query * +transformSelectStmt(ParseState *pstate, RetrieveStmt *stmt) +{ + Query *qry = makeNode(Query); + + qry->commandType = CMD_SELECT; + + /* set up a range table */ + makeRangeTable(pstate, NULL, stmt->fromClause); + + qry->uniqueFlag = stmt->unique; + + qry->into = stmt->into; + qry->isPortal = FALSE; + + /* fix the target list */ + qry->targetList = transformTargetList(pstate, + stmt->targetList, + FALSE, /*is insert */ + FALSE /*not update*/); + + /* fix where clause */ + qry->qual = transformWhereClause(pstate,stmt->whereClause); + + /* fix order clause */ + qry->sortClause = transformSortClause(stmt->orderClause, + qry->targetList, + qry->uniqueFlag); + + /* fix group by clause */ + qry->groupClause = transformGroupClause(pstate, + stmt->groupClause); + qry->rtable = pstate->p_rtable; + + if (pstate->p_numAgg > 0) + finalizeAggregates(pstate, qry); + + return (Query *)qry; +} + +/* + * transformUpdateStmt - + * transforms an update statement + * + */ +static Query * +transformUpdateStmt(ParseState *pstate, ReplaceStmt *stmt) +{ + Query *qry = makeNode(Query); + + qry->commandType = CMD_UPDATE; + + /* + * the FROM clause is non-standard SQL syntax. We used to be able to + * do this with REPLACE in POSTQUEL so we keep the feature. + */ + makeRangeTable(pstate, stmt->relname, stmt->fromClause); + + /* fix the target list */ + qry->targetList = transformTargetList(pstate, + stmt->targetList, + FALSE, /* not insert */ + TRUE /* is update */); + + /* fix where clause */ + qry->qual = transformWhereClause(pstate,stmt->whereClause); + + qry->rtable = pstate->p_rtable; + qry->resultRelation = RangeTablePosn(pstate->p_rtable, stmt->relname); + + /* make sure we don't have aggregates in the where clause */ + if (pstate->p_numAgg > 0) + parseCheckAggregates(pstate, qry); + + return (Query *)qry; +} + +/* + * transformCursorStmt - + * transform a Create Cursor Statement + * + */ +static Query * +transformCursorStmt(ParseState *pstate, CursorStmt *stmt) +{ + Query *qry = makeNode(Query); + + /* + * in the old days, a cursor statement is a 'retrieve into portal'; + * If you change the following, make sure you also go through the code + * in various places that tests the kind of operation. 
+ */ + qry->commandType = CMD_SELECT; + + /* set up a range table */ + makeRangeTable(pstate, NULL, stmt->fromClause); + + qry->uniqueFlag = stmt->unique; + + qry->into = stmt->portalname; + qry->isPortal = TRUE; + qry->isBinary = stmt->binary; /* internal portal */ + + /* fix the target list */ + qry->targetList = transformTargetList(pstate, + stmt->targetList, + FALSE, /*is insert */ + FALSE /*not update*/); + + /* fix where clause */ + qry->qual = transformWhereClause(pstate,stmt->whereClause); + + /* fix order clause */ + qry->sortClause = transformSortClause(stmt->orderClause, + qry->targetList, + qry->uniqueFlag); + qry->rtable = pstate->p_rtable; + + if (pstate->p_numAgg > 0) + finalizeAggregates(pstate, qry); + + return (Query *)qry; +} + +/***************************************************************************** + * + * Transform Exprs, Aggs, etc. + * + *****************************************************************************/ + +/* + * transformExpr - + * analyze and transform expressions. Type checking and type casting is + * done here. The optimizer and the executor cannot handle the original + * (raw) expressions collected by the parse tree. Hence the transformation + * here. + */ +static Node * +transformExpr(ParseState *pstate, Node *expr) +{ + Node *result; + + if (expr==NULL) + return NULL; + + switch(nodeTag(expr)) { + case T_Attr: { + Attr *att = (Attr *)expr; + Node *temp; + + /* what if att.attrs == "*"?? */ + temp = handleNestedDots(pstate, att, &pstate->p_last_resno); + if (att->indirection != NIL) { + List *idx = att->indirection; + while(idx!=NIL) { + A_Indices *ai = (A_Indices *)lfirst(idx); + Node *lexpr=NULL, *uexpr; + uexpr = transformExpr(pstate, ai->uidx); /* must exists */ + if (exprType(uexpr) != INT4OID) + elog(WARN, "array index expressions must be int4's"); + if (ai->lidx != NULL) { + lexpr = transformExpr(pstate, ai->lidx); + if (exprType(lexpr) != INT4OID) + elog(WARN, "array index expressions must be int4's"); + } +#if 0 + pfree(ai->uidx); + if (ai->lidx!=NULL) pfree(ai->lidx); +#endif + ai->lidx = lexpr; + ai->uidx = uexpr; + /* note we reuse the list of indices, make sure we don't free + them! 
Otherwise, make a new list here */ + idx = lnext(idx); + } + result = (Node*)make_array_ref(temp, att->indirection); + }else { + result = temp; + } + break; + } + case T_A_Const: { + A_Const *con= (A_Const *)expr; + Value *val = &con->val; + if (con->typename != NULL) { + result = parser_typecast(val, con->typename, -1); + }else { + result = (Node *)make_const(val); + } + break; + } + case T_ParamNo: { + ParamNo *pno = (ParamNo *)expr; + Oid toid; + int paramno; + Param *param; + + paramno = pno->number; + toid = param_type(paramno); + if (!OidIsValid(toid)) { + elog(WARN, "Parameter '$%d' is out of range", + paramno); + } + param = makeNode(Param); + param->paramkind = PARAM_NUM; + param->paramid = (AttrNumber) paramno; + param->paramname = ""; + param->paramtype = (Oid)toid; + param->param_tlist = (List*) NULL; + + result = (Node *)param; + break; + } + case T_A_Expr: { + A_Expr *a = (A_Expr *)expr; + + switch(a->oper) { + case OP: + { + Node *lexpr = transformExpr(pstate, a->lexpr); + Node *rexpr = transformExpr(pstate, a->rexpr); + result = (Node *)make_op(a->opname, lexpr, rexpr); + } + break; + case ISNULL: + { + Node *lexpr = transformExpr(pstate, a->lexpr); + result = ParseFunc(pstate, + "NullValue", lcons(lexpr, NIL), + &pstate->p_last_resno); + } + break; + case NOTNULL: + { + Node *lexpr = transformExpr(pstate, a->lexpr); + result = ParseFunc(pstate, + "NonNullValue", lcons(lexpr, NIL), + &pstate->p_last_resno); + } + break; + case AND: + { + Expr *expr = makeNode(Expr); + Node *lexpr = transformExpr(pstate, a->lexpr); + Node *rexpr = transformExpr(pstate, a->rexpr); + if (exprType(lexpr) != BOOLOID) + elog(WARN, + "left-hand side of AND is type '%s', not bool", + tname(get_id_type(exprType(lexpr)))); + if (exprType(rexpr) != BOOLOID) + elog(WARN, + "right-hand side of AND is type '%s', not bool", + tname(get_id_type(exprType(rexpr)))); + expr->typeOid = BOOLOID; + expr->opType = AND_EXPR; + expr->args = makeList(lexpr, rexpr, -1); + result = (Node *)expr; + } + break; + case OR: + { + Expr *expr = makeNode(Expr); + Node *lexpr = transformExpr(pstate, a->lexpr); + Node *rexpr = transformExpr(pstate, a->rexpr); + if (exprType(lexpr) != BOOLOID) + elog(WARN, + "left-hand side of OR is type '%s', not bool", + tname(get_id_type(exprType(lexpr)))); + if (exprType(rexpr) != BOOLOID) + elog(WARN, + "right-hand side of OR is type '%s', not bool", + tname(get_id_type(exprType(rexpr)))); + expr->typeOid = BOOLOID; + expr->opType = OR_EXPR; + expr->args = makeList(lexpr, rexpr, -1); + result = (Node *)expr; + } + break; + case NOT: + { + Expr *expr = makeNode(Expr); + Node *rexpr = transformExpr(pstate, a->rexpr); + if (exprType(rexpr) != BOOLOID) + elog(WARN, + "argument to NOT is type '%s', not bool", + tname(get_id_type(exprType(rexpr)))); + expr->typeOid = BOOLOID; + expr->opType = NOT_EXPR; + expr->args = makeList(rexpr, -1); + result = (Node *)expr; + } + break; + } + break; + } + case T_Ident: { + Ident *ident = (Ident*)expr; + bool isrel; + char *reln= ParseColumnName(pstate,ident->name, &isrel); + + /* could be a column name or a relation_name */ + if (reln==NULL) { + /* + * may be a relation_name + * + * ??? in fact, every ident left after transfromExpr() is called + * will be assumed to be a relation. 
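For the AND, OR and NOT cases above, transformExpr() insists that each operand already has boolean type before building the Expr node, raising an error otherwise. A standalone illustration of that operand check follows; TypeId and check_bool_operand are invented names, and exit() stands in for elog(WARN).

#include <stdio.h>
#include <stdlib.h>

typedef enum { TYPE_BOOL, TYPE_INT4, TYPE_TEXT } TypeId;

static void check_bool_operand(TypeId t, const char *side)
{
    if (t != TYPE_BOOL) {
        fprintf(stderr, "%s-hand side of AND is not bool\n", side);
        exit(1);                       /* the real code raises elog(WARN, ...) */
    }
}

int main(void)
{
    check_bool_operand(TYPE_BOOL, "left");     /* accepted */
    check_bool_operand(TYPE_INT4, "right");    /* rejected: program exits */
    return 0;
}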
+ */ + if (isrel) { + ident->isRel = TRUE; + result = (Node*)ident; + } else { + elog(WARN, "attribute \"%s\" not found", ident->name); + } + }else { + Attr *att = makeNode(Attr); + att->relname = reln; + att->attrs = lcons(makeString(ident->name), NIL); + /* + * a column name + */ + result = + (Node*)handleNestedDots(pstate, att, &pstate->p_last_resno); + } + break; + } + case T_FuncCall: { + FuncCall *fn = (FuncCall *)expr; + List *args; + + /* transform the list of arguments */ + foreach(args, fn->args) { + lfirst(args) = transformExpr(pstate, (Node*)lfirst(args)); + } + result = ParseFunc(pstate, + fn->funcname, fn->args, &pstate->p_last_resno); + break; + } + default: + /* should not reach here */ + elog(WARN, "transformExpr: does not know how to transform %d\n", + nodeTag(expr)); + break; + } + + return result; +} + +/***************************************************************************** + * + * From Clause + * + *****************************************************************************/ + +/* + * parseFromClause - + * turns the table references specified in the from-clause into a + * range table. The range table may grow as we transform the expressions + * in the target list. (Note that this happens because in POSTQUEL, we + * allow references to relations not specified in the from-clause. We + * also allow that in our POST-SQL) + * + */ +static void +parseFromClause(ParseState *pstate, List *frmList) +{ + List *fl= frmList; + + while(fl!=NIL) { + RangeVar *r = lfirst(fl); + RelExpr *baserel = r->relExpr; + RangeTblEntry *ent; + char *relname = baserel->relname; + char *refname = r->name; + + if (refname==NULL) { + refname = relname; + } else { + /* + * check whether refname exists already + */ + if (RangeTablePosn(pstate->p_rtable, refname) != 0) + elog(WARN, "parser: range variable \"%s\" duplicated", + refname); + } + + ent = makeRangeTableEntry(relname, baserel->inh, + baserel->timeRange, refname); + /* + * marks this entry to indicate it comes from the from clause. In + * SQL, the target list can only refer to range variables specified + * in the from clause but we follow the more powerful POSTQUEL + * semantics and automatically generate the range variable if not + * specified. However there are times we need to know whether the + * entries are legitimate. + * + * eg. select * from foo f where f.x = 1; will generate wrong answer + * if we expand * to foo.x. + */ + ent->inFromCl = true; + + pstate->p_rtable = lappend(pstate->p_rtable, ent); + fl= lnext(fl); + } +} + +/* + * makeRangeTable - + * make a range table with the specified relation (optional) and the + * from-clause. + */ +static void +makeRangeTable(ParseState *pstate, char *relname, List *frmList) +{ + int x; + + parseFromClause(pstate, frmList); + + if (relname == NULL) + return; + + if (RangeTablePosn(pstate->p_rtable, relname) < 1) { + RangeTblEntry *ent; + + ent = makeRangeTableEntry(relname, FALSE, NULL, relname); + pstate->p_rtable = lappend(pstate->p_rtable, ent); + } + x = RangeTablePosn(pstate->p_rtable, relname); + pstate->parser_current_rel = heap_openr(VarnoGetRelname(pstate,x)); + if (pstate->parser_current_rel == NULL) + elog(WARN,"invalid relation name"); +} + +/* + * exprType - + * returns the Oid of the type of the expression. (Used for typechecking.) 
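
parseFromClause above boils each FROM item down to one makeRangeTableEntry call, with the range variable name defaulting to the relation name. A condensed sketch of that per-item step (illustrative only; the RangeVar/RelExpr fields and helper calls are the ones used in the loop above):

    /* Hypothetical restatement of a single iteration of the from-list loop. */
    static void
    add_from_item(ParseState *pstate, RangeVar *r)
    {
        char *relname = r->relExpr->relname;
        char *refname = r->name ? r->name : relname;    /* default alias */
        RangeTblEntry *ent;

        if (r->name != NULL &&
            RangeTablePosn(pstate->p_rtable, refname) != 0)
            elog(WARN, "parser: range variable \"%s\" duplicated", refname);

        ent = makeRangeTableEntry(relname, r->relExpr->inh,
                                  r->relExpr->timeRange, refname);
        ent->inFromCl = true;       /* came from an explicit FROM clause */
        pstate->p_rtable = lappend(pstate->p_rtable, ent);
    }
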
+ */ +Oid +exprType(Node *expr) +{ + Oid type; + + switch(nodeTag(expr)) { + case T_Func: + type = ((Func*)expr)->functype; + break; + case T_Iter: + type = ((Iter*)expr)->itertype; + break; + case T_Var: + type = ((Var*)expr)->vartype; + break; + case T_Expr: + type = ((Expr*)expr)->typeOid; + break; + case T_Const: + type = ((Const*)expr)->consttype; + break; + case T_ArrayRef: + type = ((ArrayRef*)expr)->refelemtype; + break; + case T_Aggreg: + type = ((Aggreg*)expr)->aggtype; + break; + case T_Param: + type = ((Param*)expr)->paramtype; + break; + case T_Ident: + /* is this right? */ + type = UNKNOWNOID; + break; + default: + elog(WARN, "exprType: don't know how to get type for %d node", + nodeTag(expr)); + break; + } + return type; +} + +/* + * expandAllTables - + * turns '*' (in the target list) into a list of attributes (of all + * relations in the range table) + */ +static List * +expandAllTables(ParseState *pstate) +{ + List *target= NIL; + List *legit_rtable=NIL; + List *rt, *rtable; + + rtable = pstate->p_rtable; + if (pstate->p_query_is_rule) { + /* + * skip first two entries, "*new*" and "*current*" + */ + rtable = lnext(lnext(pstate->p_rtable)); + } + + /* this should not happen */ + if (rtable==NULL) + elog(WARN, "cannot expand: null p_rtable"); + + /* + * go through the range table and make a list of range table entries + * which we will expand. + */ + foreach(rt, rtable) { + RangeTblEntry *rte = lfirst(rt); + + /* + * we only expand those specify in the from clause. (This will + * also prevent us from using the wrong table in inserts: eg. tenk2 + * in "insert into tenk2 select * from tenk1;") + */ + if (!rte->inFromCl) + continue; + legit_rtable = lappend(legit_rtable, rte); + } + + foreach(rt, legit_rtable) { + RangeTblEntry *rte = lfirst(rt); + char *rt_name= rte->refname; /* use refname here so that we + refer to the right entry */ + List *temp = target; + + if(temp == NIL ) + target = expandAll(pstate, rt_name, &pstate->p_last_resno); + else { + while (temp != NIL && lnext(temp) != NIL) + temp = lnext(temp); + lnext(temp) = expandAll(pstate, rt_name, &pstate->p_last_resno); + } + } + return target; +} + + +/* + * figureColname - + * if the name of the resulting column is not specified in the target + * list, we have to guess. 
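
exprType above is the single hook the rest of this file uses for type checks; the WHERE-clause and boolean-operand checks all follow the same shape. A small hedged sketch of that shape, reusing exprType, tname and get_id_type exactly as the callers elsewhere in this file do:

    /* Hypothetical helper: complain unless an already-transformed
     * expression has the expected type. */
    static void
    check_expr_type(Node *expr, Oid expected, char *what)
    {
        if (exprType(expr) != expected)
            elog(WARN, "%s must be of type %s, not %s",
                 what,
                 tname(get_id_type(expected)),
                 tname(get_id_type(exprType(expr))));
    }
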
+ * + */ +static char * +figureColname(Node *expr, Node *resval) +{ + switch (nodeTag(expr)) { + case T_Aggreg: + return (char*) /* XXX */ + ((Aggreg *)expr)->aggname; + case T_Expr: + if (((Expr*)expr)->opType == FUNC_EXPR) { + if (nodeTag(resval)==T_FuncCall) + return ((FuncCall*)resval)->funcname; + } + break; + default: + break; + } + + return "?column?"; +} + +/***************************************************************************** + * + * Target list + * + *****************************************************************************/ + +/* + * makeTargetList - + * turn a list of column names and expressions (in the same order) into + * a target list (used exclusively for inserts) + */ +static List * +makeTargetList(ParseState *pstate, List *cols, List *exprs) +{ + List *tlist, *tl=NULL; + if (cols != NIL) { + /* has to transform colElem too (opt_indirection can be exprs) */ + while(cols!=NIL) { + ResTarget *res = makeNode(ResTarget); + Ident *id = lfirst(cols); + /* Id opt_indirection */ + res->name = id->name; + res->indirection = id->indirection; + if (exprs == NIL) { + elog(WARN, "insert: number of expressions less than columns"); + }else { + res->val = (Node *)lfirst(exprs); + } + if (tl==NIL) { + tlist = tl = lcons(res, NIL); + }else { + lnext(tl) = lcons(res,NIL); + tl = lnext(tl); + } + cols = lnext(cols); + exprs = lnext(exprs); + } + if (cols != NIL) { + elog(WARN, "insert: number of columns more than expressions"); + } + }else { + bool has_star = false; + + if (exprs==NIL) + return NIL; + if (IsA(lfirst(exprs),Attr)) { + Attr *att = lfirst(exprs); + + if ((att->relname!=NULL && !strcmp(att->relname,"*")) || + (att->attrs!=NIL && !strcmp(strVal(lfirst(att->attrs)),"*"))) + has_star = true; + } + if (has_star) { + /* + * right now, these better be 'relname.*' or '*' (this can happen + * in eg. insert into tenk2 values (tenk1.*); or + * insert into tenk2 select * from tenk1; + */ + while(exprs!=NIL) { + ResTarget *res = makeNode(ResTarget); + res->name = NULL; + res->indirection = NULL; + res->val = (Node *)lfirst(exprs); + if (tl==NIL) { + tlist = tl = lcons(res, NIL); + }else { + lnext(tl) = lcons(res,NIL); + tl = lnext(tl); + } + exprs = lnext(exprs); + } + } else { + Relation insertRel = pstate->parser_current_rel; + int numcol; + int i; + AttributeTupleForm *attr = insertRel->rd_att->attrs; + + numcol = Min(length(exprs), insertRel->rd_rel->relnatts); + for(i=0; i < numcol; i++) { + ResTarget *res = makeNode(ResTarget); + + res->name = palloc(NAMEDATALEN+1); + strncpy(res->name, attr[i]->attname.data, NAMEDATALEN); + res->name[NAMEDATALEN]='\0'; + res->indirection = NULL; + res->val = (Node *)lfirst(exprs); + if (tl==NIL) { + tlist = tl = lcons(res, NIL); + }else { + lnext(tl) = lcons(res,NIL); + tl = lnext(tl); + } + exprs = lnext(exprs); + } + } + } + return tlist; +} + +/* + * transformTargetList - + * turns a list of ResTarget's into a list of TargetEntry's + */ +static List * +transformTargetList(ParseState *pstate, + List *targetlist, + bool isInsert, + bool isUpdate) +{ + List *p_target= NIL; + List *temp = NIL; + + while(targetlist != NIL) { + ResTarget *res= (ResTarget *)lfirst(targetlist); + TargetEntry *tent = makeNode(TargetEntry); + + switch(nodeTag(res->val)) { + case T_Ident: { + Node *expr; + Oid type_id; + int type_len; + char *identname; + char *resname; + + identname = ((Ident*)res->val)->name; + expr = transformExpr(pstate, (Node*)res->val); + type_id = exprType(expr); + type_len = tlen(get_id_type(type_id)); + resname = (res->name) ? 
res->name : identname; + tent->resdom = makeResdom((AttrNumber)pstate->p_last_resno++, + (Oid)type_id, + (Size)type_len, + resname, + (Index)0, + (Oid)0, + 0); + + tent->expr = expr; + break; + } + case T_ParamNo: + case T_FuncCall: + case T_A_Const: + case T_A_Expr: { + Node *expr = transformExpr(pstate, (Node *)res->val); + + if (isInsert && res->name==NULL) + elog(WARN, "Sorry, have to specify the column list"); + + /* note indirection has not been transformed */ + if (isInsert && res->indirection!=NIL) { + /* this is an array assignment */ + char *val; + char *str, *save_str; + List *elt; + int i = 0, ndims; + int lindx[MAXDIM], uindx[MAXDIM]; + int resdomno; + Relation rd; + Value *constval; + + if (exprType(expr) != UNKNOWNOID || + !IsA(expr,Const)) + elog(WARN, "yyparse: string constant expected"); + + val = (char *) textout((struct varlena *) + ((Const *)expr)->constvalue); + str = save_str = (char*)palloc(strlen(val) + MAXDIM * 25 + 2); + foreach(elt, res->indirection) { + A_Indices *aind = (A_Indices *)lfirst(elt); + aind->uidx = transformExpr(pstate, aind->uidx); + if (!IsA(aind->uidx,Const)) + elog(WARN, + "Array Index for Append should be a constant"); + uindx[i] = ((Const *)aind->uidx)->constvalue; + if (aind->lidx!=NULL) { + aind->lidx = transformExpr(pstate, aind->lidx); + if (!IsA(aind->lidx,Const)) + elog(WARN, + "Array Index for Append should be a constant"); + lindx[i] = ((Const*)aind->lidx)->constvalue; + }else { + lindx[i] = 1; + } + if (lindx[i] > uindx[i]) + elog(WARN, "yyparse: lower index cannot be greater than upper index"); + sprintf(str, "[%d:%d]", lindx[i], uindx[i]); + str += strlen(str); + i++; + } + sprintf(str, "=%s", val); + rd = pstate->parser_current_rel; + Assert(rd != NULL); + resdomno = varattno(rd, res->name); + ndims = att_attnelems(rd, resdomno); + if (i != ndims) + elog(WARN, "yyparse: array dimensions do not match"); + constval = makeNode(Value); + constval->type = T_String; + constval->val.str = save_str; + tent = make_targetlist_expr(pstate, res->name, + (Node*)make_const(constval), + NULL, + (isInsert||isUpdate)); + pfree(save_str); + } else { + char *colname= res->name; + /* this is not an array assignment */ + if (colname==NULL) { + /* if you're wondering why this is here, look at + * the yacc grammar for why a name can be missing. -ay + */ + colname = figureColname(expr, res->val); + } + if (res->indirection) { + List *ilist = res->indirection; + while (ilist!=NIL) { + A_Indices *ind = lfirst(ilist); + ind->lidx = transformExpr(pstate, ind->lidx); + ind->uidx = transformExpr(pstate, ind->uidx); + ilist = lnext(ilist); + } + } + tent = make_targetlist_expr(pstate, colname, expr, + res->indirection, + (isInsert||isUpdate)); + } + break; + } + case T_Attr: { + Oid type_id; + int type_len; + Attr *att = (Attr *)res->val; + Node *result; + char *attrname; + char *resname; + Resdom *resnode; + List *attrs = att->attrs; + + + /* + * Target item is a single '*', expand all tables + * (eg. SELECT * FROM emp) + */ + if (att->relname!=NULL && !strcmp(att->relname, "*")) { + if(lnext(targetlist)!=NULL) + elog(WARN, "cannot expand target list *, ..."); + p_target = expandAllTables(pstate); + + /* + * skip rest of while loop + */ + targetlist = lnext(targetlist); + continue; + } + + /* + * Target item is relation.*, expand the table + * (eg. SELECT emp.*, dname FROM emp, dept) + */ + attrname = strVal(lfirst(att->attrs)); + if (att->attrs!=NIL && !strcmp(attrname,"*")) { + /* temp is the target list we're building in the while + * loop. 
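
The array-assignment branch earlier in this case flattens the transformed subscripts back into the textual form the array input routine expects: an assignment to a[1:3] with the literal '{1,2,3}' becomes the string "[1:3]={1,2,3}". A standalone illustration of just that string construction (the bounds and literal below are made-up example values):

    #include <stdio.h>
    #include <string.h>

    int
    main(void)
    {
        int lindx[1] = {1}, uindx[1] = {3};     /* hypothetical bounds */
        const char *val = "{1,2,3}";            /* hypothetical literal */
        char buf[64], *str = buf;

        sprintf(str, "[%d:%d]", lindx[0], uindx[0]);
        str += strlen(str);
        sprintf(str, "=%s", val);
        printf("%s\n", buf);                    /* prints "[1:3]={1,2,3}" */
        return 0;
    }
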
Make sure we fix it after appending more nodes. + */ + if (temp == NIL) { + p_target = temp = + expandAll(pstate, att->relname, &pstate->p_last_resno); + } else { + lnext(temp) = + expandAll(pstate, att->relname, &pstate->p_last_resno); + } + while(lnext(temp)!=NIL) + temp = lnext(temp); /* make sure we point to the last + target entry */ + /* + * skip the rest of the while loop + */ + targetlist = lnext(targetlist); + continue; + } + + + /* + * Target item is fully specified: ie. relation.attribute + */ + result = handleNestedDots(pstate, att, &pstate->p_last_resno); + if (att->indirection != NIL) { + List *ilist = att->indirection; + while (ilist!=NIL) { + A_Indices *ind = lfirst(ilist); + ind->lidx = transformExpr(pstate, ind->lidx); + ind->uidx = transformExpr(pstate, ind->uidx); + ilist = lnext(ilist); + } + result = (Node*)make_array_ref(result, att->indirection); + } + type_id = exprType(result); + type_len = tlen(get_id_type(type_id)); + while(lnext(attrs)!=NIL) + attrs=lnext(attrs); + resname = (res->name) ? res->name : strVal(lfirst(attrs)); + resnode = makeResdom((AttrNumber)pstate->p_last_resno++, + (Oid)type_id, + (Size)type_len, + resname, + (Index)0, + (Oid)0, + 0); + tent->resdom = resnode; + tent->expr = result; + break; + } + default: + /* internal error */ + elog(WARN, + "internal error: do not know how to transform targetlist"); + break; + } + + if (p_target==NIL) { + p_target = temp = lcons(tent, NIL); + }else { + lnext(temp) = lcons(tent, NIL); + temp = lnext(temp); + } + targetlist = lnext(targetlist); + } + return p_target; +} + + +/* + * make_targetlist_expr - + * make a TargetEntry + * + * arrayRef is a list of transformed A_Indices + */ +static TargetEntry * +make_targetlist_expr(ParseState *pstate, + char *name, + Node *expr, + List *arrayRef, + bool ResdomNoIsAttrNo) +{ + int type_id, type_len, attrtype, attrlen; + int resdomno; + Relation rd; + bool attrisset; + TargetEntry *tent; + Resdom *resnode; + + if (expr == NULL) + elog(WARN, "make_targetlist_expr: invalid use of NULL expression"); + + type_id = exprType(expr); + type_len = tlen(get_id_type(type_id)); + + /* I have no idea what the following does! 
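
makeTargetList and transformTargetList above both grow their result with the same head-plus-tail-pointer idiom (the tlist/tl and p_target/temp pairs). A compact sketch of that idiom, assuming only the lcons/lnext list primitives already used there:

    /* Hypothetical helper: append an item to a list in O(1) by keeping a
     * pointer to the last cons cell, mirroring the p_target/temp pattern. */
    static List *
    append_keeping_tail(List *head, List **tail, void *item)
    {
        if (head == NIL) {
            head = *tail = lcons(item, NIL);
        } else {
            lnext(*tail) = lcons(item, NIL);
            *tail = lnext(*tail);
        }
        return head;
    }
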
*/ + if (ResdomNoIsAttrNo) { + /* + * append or replace query -- + * append, replace work only on one relation, + * so multiple occurence of same resdomno is bogus + */ + rd = pstate->parser_current_rel; + Assert(rd != NULL); + resdomno = varattno(rd,name); + attrisset = varisset(rd,name); + attrtype = att_typeid(rd,resdomno); + if ((arrayRef != NIL) && (lfirst(arrayRef) == NIL)) + attrtype = GetArrayElementType(attrtype); + if (attrtype==BPCHAROID || attrtype==VARCHAROID) { + attrlen = rd->rd_att->attrs[resdomno-1]->attlen; + } else { + attrlen = tlen(get_id_type(attrtype)); + } +#if 0 + if(Input_is_string && Typecast_ok){ + Datum val; + if (type_id == typeid(type("unknown"))){ + val = (Datum)textout((struct varlena *) + ((Const)lnext(expr))->constvalue); + }else{ + val = ((Const)lnext(expr))->constvalue; + } + if (attrisset) { + lnext(expr) = makeConst(attrtype, + attrlen, + val, + false, + true, + true /* is set */); + } else { + lnext(expr) = + makeConst(attrtype, + attrlen, + (Datum)fmgr(typeid_get_retinfunc(attrtype), + val,get_typelem(attrtype),-1), + false, + true /* Maybe correct-- 80% chance */, + false /* is not a set */); + } + } else if((Typecast_ok) && (attrtype != type_id)){ + lnext(expr) = + parser_typecast2(expr, get_id_type((long)attrtype)); + } else + if (attrtype != type_id) { + if ((attrtype == INT2OID) && (type_id == INT4OID)) + lfirst(expr) = lispInteger (INT2OID); + else if ((attrtype == FLOAT4OID) && (type_id == FLOAT8OID)) + lfirst(expr) = lispInteger (FLOAT4OID); + else + elog(WARN, "unequal type in tlist : %s \n", + name)); + } + + Input_is_string = false; + Input_is_integer = false; + Typecast_ok = true; +#endif + if (attrtype != type_id) { + if (IsA(expr,Const)) { + /* try to cast the constant */ + expr = (Node*)parser_typecast2(expr, + type_id, + get_id_type((long)attrtype), + attrlen); + } else { + /* currently, we can't handle casting of expressions */ + elog(WARN, "parser: attribute '%s' is of type '%.*s' but expression is of type '%.*s'", + name, + NAMEDATALEN, get_id_typname(attrtype), + NAMEDATALEN, get_id_typname(type_id)); + } + } + + if (intMember(resdomno, pstate->p_target_resnos)) { + elog(WARN,"two or more occurrences of same attr"); + } else { + pstate->p_target_resnos = lconsi(resdomno, + pstate->p_target_resnos); + } + if (arrayRef != NIL) { + Expr *target_expr; + Attr *att = makeNode(Attr); + List *ar = arrayRef; + List *upperIndexpr = NIL; + List *lowerIndexpr = NIL; + + att->relname = pstrdup(RelationGetRelationName(rd)->data); + att->attrs = lcons(makeString(name), NIL); + target_expr = (Expr*)handleNestedDots(pstate, att, + &pstate->p_last_resno); + while(ar!=NIL) { + A_Indices *ind = lfirst(ar); + if (lowerIndexpr) { + /* XXX assume all lowerIndexpr is non-null in + * this case + */ + lowerIndexpr = lappend(lowerIndexpr, ind->lidx); + } + upperIndexpr = lappend(upperIndexpr, ind->uidx); + ar = lnext(ar); + } + + expr = (Node*)make_array_set(target_expr, + upperIndexpr, + lowerIndexpr, + (Expr*)expr); + attrtype = att_typeid(rd,resdomno); + attrlen = tlen(get_id_type(attrtype)); + } + } else { + resdomno = pstate->p_last_resno++; + attrtype = type_id; + attrlen = type_len; + } + tent = makeNode(TargetEntry); + + resnode = makeResdom((AttrNumber)resdomno, + (Oid) attrtype, + (Size) attrlen, + name, + (Index)0, + (Oid)0, + 0); + + tent->resdom = resnode; + tent->expr = expr; + + return tent; + } + + +/***************************************************************************** + * + * Where Clause + * + 
*****************************************************************************/ + +/* + * transformWhereClause - + * transforms the qualification and make sure it is of type Boolean + * + */ +static Node * +transformWhereClause(ParseState *pstate, Node *a_expr) +{ + Node *qual; + + if (a_expr == NULL) + return (Node *)NULL; /* no qualifiers */ + + qual = transformExpr(pstate, a_expr); + if (exprType(qual) != BOOLOID) { + elog(WARN, + "where clause must return type bool, not %s", + tname(get_id_type(exprType(qual)))); + } + return qual; +} + +/***************************************************************************** + * + * Sort Clause + * + *****************************************************************************/ + +/* + * find_tl_elt - + * returns the Resdom in the target list matching the specified varname + * + */ +static Resdom * +find_tl_elt(char *varname, List *tlist) +{ + List *i; + + foreach(i, tlist) { + TargetEntry *target = (TargetEntry *)lfirst(i); + Resdom *resnode = target->resdom; + char *resname = resnode->resname; + + if (!strcmp(resname, varname)) + return (resnode); + } + return ((Resdom *)NULL); +} + +static Oid +any_ordering_op(int restype) +{ + Operator order_op; + Oid order_opid; + + order_op = oper("<",restype,restype); + order_opid = (Oid)oprid(order_op); + + return order_opid; +} + +/* + * transformGroupClause - + * transform an Group By clause + * + */ +static List * +transformGroupClause(ParseState *pstate, List *grouplist) +{ + List *glist = NIL, *gl; + + while (grouplist != NIL) { + GroupClause *grpcl = makeNode(GroupClause); + Var *groupAttr = (Var*)transformExpr(pstate, (Node*)lfirst(grouplist)); + + if (nodeTag(groupAttr) != T_Var) { + elog(WARN, "parser: can only specify attribute in group by"); + } + grpcl->grpAttr = groupAttr; + grpcl->grpOpoid = any_ordering_op(groupAttr->vartype); + if (glist == NIL) { + gl = glist = lcons(grpcl, NIL); + } else { + lnext(gl) = lcons(grpcl, NIL); + gl = lnext(gl); + } + grouplist = lnext(grouplist); + } + + return glist; +} + +/* + * transformSortClause - + * transform an Order By clause + * + */ +static List * +transformSortClause(List *orderlist, List *targetlist, + char* uniqueFlag) +{ + List *sortlist = NIL; + List *s, *i; + + while(orderlist != NIL) { + SortBy *sortby = lfirst(orderlist); + SortClause *sortcl = makeNode(SortClause); + Resdom *resdom; + + resdom = find_tl_elt(sortby->name, targetlist); + if (resdom == NULL) + elog(WARN,"The field being sorted by must appear in the target list"); + + sortcl->resdom = resdom; + sortcl->opoid = oprid(oper(sortby->useOp, + resdom->restype, + resdom->restype)); + if (sortlist == NIL) { + s = sortlist = lcons(sortcl, NIL); + }else { + lnext(s) = lcons(sortcl, NIL); + s = lnext(s); + } + orderlist = lnext(orderlist); + } + + if (uniqueFlag) { + if (uniqueFlag[0] == '*') { + /* concatenate all elements from target list + that are not already in the sortby list */ + foreach (i,targetlist) { + TargetEntry *tlelt = (TargetEntry *)lfirst(i); + + s = sortlist; + while(s != NIL) { + SortClause *sortcl = lfirst(s); + if (sortcl->resdom==tlelt->resdom) + break; + s = lnext(s); + } + if (s == NIL) { + /* not a member of the sortclauses yet */ + SortClause *sortcl = makeNode(SortClause); + + sortcl->resdom = tlelt->resdom; + sortcl->opoid = any_ordering_op(tlelt->resdom->restype); + + sortlist = lappend(sortlist, sortcl); + } + } + } + else { + TargetEntry *tlelt; + char* uniqueAttrName = uniqueFlag; + + /* only create sort clause with the specified unique attribute */ + 
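
Every sort or group entry ends up carrying the oid of its type's "<" operator, found through any_ordering_op above. As a hedged sketch, the default SortClause built for a target-list entry in the UNIQUE handling around this point amounts to:

    /* Hypothetical helper: default SortClause for a target-list entry,
     * using the same fields and calls as the UNIQUE branches here. */
    static SortClause *
    default_sortclause(TargetEntry *tlelt)
    {
        SortClause *sortcl = makeNode(SortClause);

        sortcl->resdom = tlelt->resdom;
        sortcl->opoid = any_ordering_op(tlelt->resdom->restype);
        return sortcl;
    }
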
foreach (i, targetlist) { + tlelt = (TargetEntry*)lfirst(i); + if (strcmp(tlelt->resdom->resname, uniqueAttrName) == 0) + break; + } + if (i == NIL) { + elog(WARN, "The field specified in the UNIQUE ON clause is not in the targetlist"); + } + s = sortlist; + foreach (s, sortlist) { + SortClause *sortcl = lfirst(s); + if (sortcl->resdom == tlelt->resdom) + break; + } + if (s == NIL) { + /* not a member of the sortclauses yet */ + SortClause *sortcl = makeNode(SortClause); + + sortcl->resdom = tlelt->resdom; + sortcl->opoid = any_ordering_op(tlelt->resdom->restype); + + sortlist = lappend(sortlist, sortcl); + } + } + + } + + return sortlist; +} + +/* + ** HandleNestedDots -- + ** Given a nested dot expression (i.e. (relation func ... attr), build up + ** a tree with of Iter and Func nodes. + */ +static Node* +handleNestedDots(ParseState *pstate, Attr *attr, int *curr_resno) +{ + List *mutator_iter; + Node *retval = NULL; + + if (attr->paramNo != NULL) { + Param *param = (Param *)transformExpr(pstate, (Node*)attr->paramNo); + + retval = + ParseFunc(pstate, strVal(lfirst(attr->attrs)), + lcons(param, NIL), + curr_resno); + } else { + Ident *ident = makeNode(Ident); + + ident->name = attr->relname; + ident->isRel = TRUE; + retval = + ParseFunc(pstate, strVal(lfirst(attr->attrs)), + lcons(ident, NIL), + curr_resno); + } + + foreach (mutator_iter, lnext(attr->attrs)) { + retval = ParseFunc(pstate,strVal(lfirst(mutator_iter)), + lcons(retval, NIL), + curr_resno); + } + + return(retval); +} + +/* + ** make_arguments -- + ** Given the number and types of arguments to a function, and the + ** actual arguments and argument types, do the necessary typecasting. + */ +static void +make_arguments(int nargs, + List *fargs, + Oid *input_typeids, + Oid *function_typeids) +{ + /* + * there are two ways an input typeid can differ from a function typeid : + * either the input type inherits the function type, so no typecasting is + * necessary, or the input type can be typecast into the function type. + * right now, we only typecast unknowns, and that is all we check for. + */ + + List *current_fargs; + int i; + + for (i=0, current_fargs = fargs; + iresdom = resnode; + tle->expr = (Node*)varnode; + return (lcons(tle, NIL)); +} + +/* + ** setup_base_tlist -- + ** Build a tlist that extracts a base type from the tuple + ** returned by the executor. + */ +static List * +setup_base_tlist(Oid typeid) +{ + TargetEntry *tle; + Resdom *resnode; + Var *varnode; + + resnode = makeResdom(1, + typeid, + tlen(get_id_type(typeid)), + "", + 0, + (Oid)0, + 0); + varnode = makeVar(-1, 1, typeid, -1, 1); + tle = makeNode(TargetEntry); + tle->resdom = resnode; + tle->expr = (Node*)varnode; + + return (lcons(tle, NIL)); +} + +/* + * ParseComplexProjection - + * handles function calls with a single argument that is of complex type. + * This routine returns NULL if it can't handle the projection (eg. sets). 
+ */ +static Node * +ParseComplexProjection(ParseState *pstate, + char *funcname, + Node *first_arg, + bool *attisset) +{ + Oid argtype; + Oid argrelid; + Name relname; + Relation rd; + Oid relid; + int attnum; + + switch (nodeTag(first_arg)) { + case T_Iter: + { + Func *func; + Iter *iter; + + iter = (Iter*)first_arg; + func = (Func *)((Expr*)iter->iterexpr)->oper; + argtype = funcid_get_rettype(func->funcid); + argrelid = typeid_get_relid(argtype); + if (argrelid && + ((attnum = get_attnum(argrelid, funcname)) + != InvalidAttrNumber)) { + + /* the argument is a function returning a tuple, so funcname + may be a projection */ + + /* add a tlist to the func node and return the Iter */ + rd = heap_openr(tname(get_id_type(argtype))); + if (RelationIsValid(rd)) { + relid = RelationGetRelationId(rd); + relname = RelationGetRelationName(rd); + heap_close(rd); + } + if (RelationIsValid(rd)) { + func->func_tlist = + setup_tlist(funcname, argrelid); + iter->itertype = att_typeid(rd,attnum); + return ((Node*)iter); + }else { + elog(WARN, + "Function %s has bad returntype %d", + funcname, argtype); + } + }else { + /* drop through */ + ; + } + break; + } + case T_Var: + { + /* + * The argument is a set, so this is either a projection + * or a function call on this set. + */ + *attisset = true; + break; + } + case T_Expr: + { + Expr *expr = (Expr*)first_arg; + Func *funcnode; + + if (expr->opType != FUNC_EXPR) + break; + + funcnode= (Func *) expr->oper; + argtype = funcid_get_rettype(funcnode->funcid); + argrelid = typeid_get_relid(argtype); + /* + * the argument is a function returning a tuple, so funcname + * may be a projection + */ + if (argrelid && + (attnum = get_attnum(argrelid, funcname)) + != InvalidAttrNumber) { + + /* add a tlist to the func node */ + rd = heap_openr(tname(get_id_type(argtype))); + if (RelationIsValid(rd)) { + relid = RelationGetRelationId(rd); + relname = RelationGetRelationName(rd); + heap_close(rd); + } + if (RelationIsValid(rd)) { + Expr *newexpr; + + funcnode->func_tlist = + setup_tlist(funcname, argrelid); + funcnode->functype = att_typeid(rd,attnum); + + newexpr = makeNode(Expr); + newexpr->typeOid = funcnode->functype; + newexpr->opType = FUNC_EXPR; + newexpr->oper = (Node *)funcnode; + newexpr->args = lcons(first_arg, NIL); + + return ((Node*)newexpr); + } + + } + + elog(WARN, "Function %s has bad returntype %d", + funcname, argtype); + break; + } + case T_Param: + { + Param *param = (Param*)first_arg; + /* + * If the Param is a complex type, this could be a projection + */ + rd = heap_openr(tname(get_id_type(param->paramtype))); + if (RelationIsValid(rd)) { + relid = RelationGetRelationId(rd); + relname = RelationGetRelationName(rd); + heap_close(rd); + } + if (RelationIsValid(rd) && + (attnum = get_attnum(relid, funcname)) + != InvalidAttrNumber) { + + param->paramtype = att_typeid(rd, attnum); + param->param_tlist = setup_tlist(funcname, relid); + return ((Node*)param); + } + break; + } + default: + break; + } + + return NULL; +} + +static Node * +ParseFunc(ParseState *pstate, char *funcname, List *fargs, int *curr_resno) +{ + Oid rettype = (Oid)0; + Oid argrelid; + Oid funcid = (Oid)0; + List *i = NIL; + Node *first_arg= NULL; + char *relname, *oldname; + Relation rd; + Oid relid; + int nargs; + Func *funcnode; + Oid oid_array[8]; + Oid *true_oid_array; + Node *retval; + bool retset; + bool exists; + bool attisset = false; + Oid toid; + Expr *expr; + + if (fargs) { + first_arg = lfirst(fargs); + if (first_arg == NULL) + elog (WARN,"function %s does not allow 
NULL input",funcname); + } + + /* + ** check for projection methods: if function takes one argument, and + ** that argument is a relation, param, or PQ function returning a complex + ** type, then the function could be a projection. + */ + if (length(fargs) == 1) { + if (nodeTag(first_arg)==T_Ident && ((Ident*)first_arg)->isRel) { + Ident *ident = (Ident*)first_arg; + + /* + * first arg is a relation. This could be a projection. + */ + relname = ident->name; + if (RangeTablePosn(pstate->p_rtable, relname)== 0) { + RangeTblEntry *ent; + + ent = + makeRangeTableEntry(relname, + FALSE, NULL, relname); + pstate->p_rtable = lappend(pstate->p_rtable, ent); + } + oldname = relname; + relname = VarnoGetRelname(pstate, + RangeTablePosn(pstate->p_rtable, + oldname)); + rd = heap_openr(relname); + relid = RelationGetRelationId(rd); + heap_close(rd); + /* If the attr isn't a set, just make a var for it. If + * it is a set, treat it like a function and drop through. + */ + if (get_attnum(relid, funcname) != InvalidAttrNumber) { + int dummyTypeId; + + return + ((Node*)make_var(pstate, + oldname, + funcname, + &dummyTypeId)); + } else { + /* drop through - attr is a set */ + ; + } + } else if (ISCOMPLEX(exprType(first_arg))) { + /* + * Attempt to handle projection of a complex argument. If + * ParseComplexProjection can't handle the projection, we + * have to keep going. + */ + retval = ParseComplexProjection(pstate, + funcname, + first_arg, + &attisset); + if (attisset) { + toid = exprType(first_arg); + rd = heap_openr(tname(get_id_type(toid))); + if (RelationIsValid(rd)) { + relname = RelationGetRelationName(rd)->data; + heap_close(rd); + } else + elog(WARN, + "Type %s is not a relation type", + tname(get_id_type(toid))); + argrelid = typeid_get_relid(toid); + /* A projection contains either an attribute name or the + * word "all". + */ + if ((get_attnum(argrelid, funcname) == InvalidAttrNumber) + && strcmp(funcname, "all")) { + elog(WARN, "Functions on sets are not yet supported"); + } + } + + if (retval) + return retval; + } else { + /* + * Parsing aggregates. + */ + Oid basetype; + /* the aggregate count is a special case, + ignore its base type. Treat it as zero */ + if (strcmp(funcname, "count") == 0) + basetype = 0; + else + basetype = exprType(lfirst(fargs)); + if (SearchSysCacheTuple(AGGNAME, + PointerGetDatum(funcname), + ObjectIdGetDatum(basetype), + 0, 0)) { + Aggreg *aggreg = ParseAgg(funcname, basetype, lfirst(fargs)); + + AddAggToParseState(pstate, aggreg); + return (Node*)aggreg; + } + } + } + + + /* + ** If we dropped through to here it's really a function (or a set, which + ** is implemented as a function.) + ** extract arg type info and transform relation name arguments into + ** varnodes of the appropriate form. + */ + memset(&oid_array[0], 0, 8 * sizeof(Oid)); + + nargs=0; + foreach ( i , fargs ) { + int vnum; + Node *pair = lfirst(i); + + if (nodeTag(pair)==T_Ident && ((Ident*)pair)->isRel) { + /* + * a relation + */ + relname = ((Ident*)pair)->name; + + /* get the range table entry for the var node */ + vnum = RangeTablePosn(pstate->p_rtable, relname); + if (vnum == 0) { + pstate->p_rtable = + lappend(pstate->p_rtable , + makeRangeTableEntry(relname, FALSE, + NULL, relname)); + vnum = RangeTablePosn (pstate->p_rtable, relname); + } + + /* + * We have to do this because the relname in the pair + * may have been a range table variable name, rather + * than a real relation name. 
+ */ + relname = VarnoGetRelname(pstate, vnum); + + rd = heap_openr(relname); + relid = RelationGetRelationId(rd); + heap_close(rd); + + /* + * for func(relname), the param to the function + * is the tuple under consideration. we build a special + * VarNode to reflect this -- it has varno set to the + * correct range table entry, but has varattno == 0 to + * signal that the whole tuple is the argument. + */ + toid = typeid(type(relname)); + /* replace it in the arg list */ + lfirst(fargs) = + makeVar(vnum, 0, toid, vnum, 0); + }else if (!attisset) { /* set functions don't have parameters */ + + /* any functiona args which are typed "unknown", but aren't + constants, we don't know what to do with, because we + can't cast them - jolly*/ + if (exprType(pair) == UNKNOWNOID && + !IsA(pair, Const)) + { + elog(WARN, "ParseFunc: no function named %s that takes in an unknown type as argument #%d", funcname, nargs); + } + else + toid = exprType(pair); + } + + oid_array[nargs++] = toid; + } + + /* + * func_get_detail looks up the function in the catalogs, does + * disambiguation for polymorphic functions, handles inheritance, + * and returns the funcid and type and set or singleton status of + * the function's return value. it also returns the true argument + * types to the function. if func_get_detail returns true, + * the function exists. otherwise, there was an error. + */ + if (attisset) { /* we know all of these fields already */ + /* We create a funcnode with a placeholder function SetEval. + * SetEval() never actually gets executed. When the function + * evaluation routines see it, they use the funcid projected + * out from the relation as the actual function to call. + * Example: retrieve (emp.mgr.name) + * The plan for this will scan the emp relation, projecting + * out the mgr attribute, which is a funcid. This function + * is then called (instead of SetEval) and "name" is projected + * from its result. + */ + funcid = SetEvalRegProcedure; + rettype = toid; + retset = true; + true_oid_array = oid_array; + exists = true; + } else { + exists = func_get_detail(funcname, nargs, oid_array, &funcid, + &rettype, &retset, &true_oid_array); + } + + if (!exists) + elog(WARN, "no such attribute or function %s", funcname); + + /* got it */ + funcnode = makeNode(Func); + funcnode->funcid = funcid; + funcnode->functype = rettype; + funcnode->funcisindex = false; + funcnode->funcsize = 0; + funcnode->func_fcache = NULL; + funcnode->func_tlist = NIL; + funcnode->func_planlist = NIL; + + /* perform the necessary typecasting */ + make_arguments(nargs, fargs, oid_array, true_oid_array); + + /* + * for functions returning base types, we want to project out the + * return value. set up a target list to do that. the executor + * will ignore these for c functions, and do the right thing for + * postquel functions. + */ + + if (typeid_get_relid(rettype) == InvalidOid) + funcnode->func_tlist = setup_base_tlist(rettype); + + /* For sets, we want to make a targetlist to project out this + * attribute of the set tuples. 
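
A little further on, a function whose catalog entry says it returns a set gets wrapped in an Iter node so the executor iterates over every returned value. As an illustrative sketch of that retset handling (not a replacement for it):

    /* Hypothetical helper matching the retset case below. */
    static Node *
    wrap_set_result(Node *funcexpr, Oid rettype)
    {
        Iter *iter = makeNode(Iter);

        iter->itertype = rettype;
        iter->iterexpr = funcexpr;
        return (Node *) iter;
    }
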
+ */ + if (attisset) { + if (!strcmp(funcname, "all")) { + funcnode->func_tlist = + expandAll(pstate, (char*)relname, curr_resno); + } else { + funcnode->func_tlist = setup_tlist(funcname,argrelid); + rettype = find_atttype(argrelid, funcname); + } + } + + expr = makeNode(Expr); + expr->typeOid = rettype; + expr->opType = FUNC_EXPR; + expr->oper = (Node *)funcnode; + expr->args = fargs; + retval = (Node*)expr; + + /* + * if the function returns a set of values, then we need to iterate + * over all the returned values in the executor, so we stick an + * iter node here. if it returns a singleton, then we don't need + * the iter node. + */ + + if (retset) { + Iter *iter = makeNode(Iter); + iter->itertype = rettype; + iter->iterexpr = retval; + retval = (Node*)iter; + } + + return(retval); +} + +/* + * returns (relname) if found, NIL if not a column + */ +static char* +ParseColumnName(ParseState *pstate, char *name, bool *isRelName) +{ + List *et; + Relation rd; + List *rtable; + + /* + * see if it is a relation name. If so, leave it as it is + */ + if (RangeTablePosn(pstate->p_rtable, name)!=0) { + *isRelName = TRUE; + return NULL; + } + + if (pstate->p_query_is_rule) { + rtable = lnext(lnext(pstate->p_rtable)); + } else { + rtable = pstate->p_rtable; + } + /* + * search each relation in the FROM list and see if we have a match + */ + foreach(et, rtable) { + RangeTblEntry *rte = lfirst(et); + char *relname= rte->relname; + char *refname= rte->refname; + Oid relid; + + rd= heap_openr(relname); + relid = RelationGetRelationId(rd); + heap_close(rd); + if (get_attnum(relid, name) != InvalidAttrNumber) { + /* found */ + *isRelName = FALSE; + return refname; + } + + } + + /* attribute not found */ + *isRelName = FALSE; + return NULL; +} + + +/***************************************************************************** + * + *****************************************************************************/ + +/* + * AddAggToParseState - + * add the aggregate to the list of unique aggregates in pstate. + * + * SIDE EFFECT: aggno in target list entry will be modified + */ +static void +AddAggToParseState(ParseState *pstate, Aggreg *aggreg) +{ + List *ag; + int i; + + /* + * see if we have the aggregate already (we only need to record + * the aggregate once) + */ + i = 0; + foreach(ag, pstate->p_aggs) { + Aggreg *a = lfirst(ag); + + if (!strcmp(a->aggname, aggreg->aggname) && + equal(a->target, aggreg->target)) { + + /* fill in the aggno and we're done */ + aggreg->aggno = i; + return; + } + i++; + } + + /* not found, new aggregate */ + aggreg->aggno = i; + pstate->p_numAgg++; + pstate->p_aggs = lappend(pstate->p_aggs, aggreg); + return; +} + +/* + * finalizeAggregates - + * fill in qry_aggs from pstate. Also checks to make sure that aggregates + * are used in the proper place. + */ +static void +finalizeAggregates(ParseState *pstate, Query *qry) +{ + List *l; + int i; + + parseCheckAggregates(pstate, qry); + + qry->qry_numAgg = pstate->p_numAgg; + qry->qry_aggs = + (Aggreg **)palloc(sizeof(Aggreg *) * qry->qry_numAgg); + i = 0; + foreach(l, pstate->p_aggs) { + qry->qry_aggs[i++] = (Aggreg*)lfirst(l); + } +} + +/* + * contain_agg_clause-- + * Recursively find aggreg nodes from a clause. + * + * Returns true if any aggregate found. 
+ */ +static bool +contain_agg_clause(Node *clause) +{ + if (clause==NULL) + return FALSE; + else if (IsA(clause,Aggreg)) + return TRUE; + else if (IsA(clause,Iter)) + return contain_agg_clause(((Iter*)clause)->iterexpr); + else if (single_node(clause)) + return FALSE; + else if (or_clause(clause)) { + List *temp; + + foreach (temp, ((Expr*)clause)->args) { + if (contain_agg_clause(lfirst(temp))) + return TRUE; + } + return FALSE; + } else if (is_funcclause (clause)) { + List *temp; + + foreach(temp, ((Expr *)clause)->args) { + if (contain_agg_clause(lfirst(temp))) + return TRUE; + } + return FALSE; + } else if (IsA(clause,ArrayRef)) { + List *temp; + + foreach(temp, ((ArrayRef*)clause)->refupperindexpr) { + if (contain_agg_clause(lfirst(temp))) + return TRUE; + } + foreach(temp, ((ArrayRef*)clause)->reflowerindexpr) { + if (contain_agg_clause(lfirst(temp))) + return TRUE; + } + if (contain_agg_clause(((ArrayRef*)clause)->refexpr)) + return TRUE; + if (contain_agg_clause(((ArrayRef*)clause)->refassgnexpr)) + return TRUE; + return FALSE; + } else if (not_clause(clause)) + return contain_agg_clause((Node*)get_notclausearg((Expr*)clause)); + else if (is_opclause(clause)) + return (contain_agg_clause((Node*)get_leftop((Expr*)clause)) || + contain_agg_clause((Node*)get_rightop((Expr*)clause))); + + return FALSE; +} + +/* + * exprIsAggOrGroupCol - + * returns true if the expression does not contain non-group columns. + */ +static bool +exprIsAggOrGroupCol(Node *expr, List *groupClause) +{ + if (expr==NULL) + return TRUE; + else if (IsA(expr,Const)) + return TRUE; + else if (IsA(expr,Var)) { + List *gl; + Var *var = (Var*)expr; + /* + * only group columns are legal + */ + foreach (gl, groupClause) { + GroupClause *grpcl = lfirst(gl); + if ((grpcl->grpAttr->varno == var->varno) && + (grpcl->grpAttr->varattno == var->varattno)) + return TRUE; + } + return FALSE; + } else if (IsA(expr,Aggreg)) + /* aggregates can take group column or non-group column as argument, + no further check necessary. */ + return TRUE; + else if (IsA(expr,Expr)) { + List *temp; + + foreach (temp, ((Expr*)expr)->args) { + if (!exprIsAggOrGroupCol(lfirst(temp),groupClause)) + return FALSE; + } + return TRUE; + } + + return FALSE; +} + +/* + * parseCheckAggregates - + * this should really be done earlier but the current grammar + * cannot differentiate functions from aggregates. So we have do check + * here when the target list and the qualifications are finalized. + */ +static void +parseCheckAggregates(ParseState *pstate, Query *qry) +{ + List *tl; + Assert(pstate->p_numAgg > 0); + + /* + * aggregates never appear in WHERE clauses. (we have to check where + * clause first because if there is an aggregate, the check for + * non-group column in target list may fail.) + */ + if (contain_agg_clause(qry->qual)) + elog(WARN, "parser: aggregates not allowed in WHERE clause"); + + /* + * the target list can only contain aggregates, group columns and + * functions thereof. + */ + foreach (tl, qry->targetList) { + TargetEntry *tle = lfirst(tl); + if (!exprIsAggOrGroupCol(tle->expr, qry->groupClause)) + elog(WARN, + "parser: illegal use of aggregates or non-group column in target list"); + } + + /* + * the expression specified in the HAVING clause has the same restriction + * as those in the target list. 
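
exprIsAggOrGroupCol above accepts constants, aggregates, matching group columns, and expressions built only from those; the one interesting check is the Var case. Pulled out on its own as an illustration (same fields as the loop above):

    /* Hypothetical extraction of the Var case: a bare column reference is
     * only legal if it matches some GROUP BY entry. */
    static bool
    var_is_group_col(Var *var, List *groupClause)
    {
        List *gl;

        foreach (gl, groupClause) {
            GroupClause *grpcl = lfirst(gl);

            if (grpcl->grpAttr->varno == var->varno &&
                grpcl->grpAttr->varattno == var->varattno)
                return TRUE;
        }
        return FALSE;
    }
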
+ */ + if (!exprIsAggOrGroupCol(qry->havingQual, qry->groupClause)) + elog(WARN, + "parser: illegal use of aggregates or non-group column in HAVING clause"); + + return; +} + + diff --git a/src/backend/parser/catalog_utils.c b/src/backend/parser/catalog_utils.c new file mode 100644 index 00000000000..a4fc775c452 --- /dev/null +++ b/src/backend/parser/catalog_utils.c @@ -0,0 +1,1470 @@ +/*------------------------------------------------------------------------- + * + * catalog_utils.c-- + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/parser/Attic/catalog_utils.c,v 1.1.1.1 1996/07/09 06:21:40 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include "postgres.h" + +#include "lib/dllist.h" +#include "utils/datum.h" + +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "fmgr.h" + +#include "nodes/pg_list.h" +#include "nodes/parsenodes.h" +#include "utils/syscache.h" +#include "catalog/catname.h" + +#include "catalog_utils.h" +#include "catalog/pg_inherits.h" +#include "catalog/pg_type.h" +#include "catalog/indexing.h" +#include "catalog/catname.h" + +#include "access/skey.h" +#include "access/relscan.h" +#include "access/tupdesc.h" +#include "access/htup.h" +#include "access/heapam.h" +#include "access/genam.h" +#include "access/itup.h" +#include "access/tupmacs.h" + +#include "storage/buf.h" +#include "storage/bufmgr.h" +#include "utils/lsyscache.h" +#include "storage/lmgr.h" + +struct { + char *field; + int code; +} special_attr[] = { + { "ctid", SelfItemPointerAttributeNumber }, + { "oid", ObjectIdAttributeNumber }, + { "xmin", MinTransactionIdAttributeNumber }, + { "cmin", MinCommandIdAttributeNumber }, + { "xmax", MaxTransactionIdAttributeNumber }, + { "cmax", MaxCommandIdAttributeNumber }, + { "chain", ChainItemPointerAttributeNumber }, + { "anchor", AnchorItemPointerAttributeNumber }, + { "tmin", MinAbsoluteTimeAttributeNumber }, + { "tmax", MaxAbsoluteTimeAttributeNumber }, + { "vtype", VersionTypeAttributeNumber } +}; + +#define SPECIALS (sizeof(special_attr)/sizeof(*special_attr)) + +static char *attnum_type[SPECIALS] = { + "tid", + "oid", + "xid", + "cid", + "xid", + "cid", + "tid", + "tid", + "abstime", + "abstime", + "char" + }; + +#define MAXFARGS 8 /* max # args to a c or postquel function */ + +static Oid **argtype_inherit(); +static Oid **genxprod(); + +static int findsupers(Oid relid, Oid **supervec); +/* + * This structure is used to explore the inheritance hierarchy above + * nodes in the type tree in order to disambiguate among polymorphic + * functions. + */ + +typedef struct _InhPaths { + int nsupers; /* number of superclasses */ + Oid self; /* this class */ + Oid *supervec; /* vector of superclasses */ +} InhPaths; + +/* + * This structure holds a list of possible functions or operators that + * agree with the known name and argument types of the function/operator. + */ +typedef struct _CandidateList { + Oid *args; + struct _CandidateList *next; +} *CandidateList; + +/* check to see if a type id is valid, + * returns true if it is. 
By using this call before calling + * get_id_type or get_id_typname, more meaningful error messages + * can be produced because the caller typically has more context of + * what's going on - jolly + */ +bool +check_typeid(long id) +{ + return (SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(id), + 0,0,0) != NULL); +} + + +/* return a Type structure, given an typid */ +Type +get_id_type(long id) +{ + HeapTuple tup; + + if (!(tup = SearchSysCacheTuple(TYPOID, ObjectIdGetDatum(id), + 0,0,0))) { + elog ( WARN, "type id lookup of %d failed", id); + return(NULL); + } + return((Type) tup); +} + +/* return a type name, given a typeid */ +char* +get_id_typname(long id) +{ + HeapTuple tup; + TypeTupleForm typetuple; + + if (!(tup = SearchSysCacheTuple(TYPOID, ObjectIdGetDatum(id), + 0,0,0))) { + elog ( WARN, "type id lookup of %d failed", id); + return(NULL); + } + typetuple = (TypeTupleForm)GETSTRUCT(tup); + return (typetuple->typname).data; +} + +/* return a Type structure, given type name */ +Type +type(char *s) +{ + HeapTuple tup; + + if (s == NULL) { + elog ( WARN , "type(): Null type" ); + } + + if (!(tup = SearchSysCacheTuple(TYPNAME, PointerGetDatum(s), 0,0,0))) { + elog (WARN , "type name lookup of %s failed", s); + } + return((Type) tup); +} + +/* given attribute id, return type of that attribute */ +/* XXX Special case for pseudo-attributes is a hack */ +Oid +att_typeid(Relation rd, int attid) +{ + + if (attid < 0) { + return(typeid(type(attnum_type[-attid-1]))); + } + /* -1 because varattno (where attid comes from) returns one + more than index */ + return(rd->rd_att->attrs[attid-1]->atttypid); +} + + +int +att_attnelems(Relation rd, int attid) +{ + return(rd->rd_att->attrs[attid-1]->attnelems); +} + +/* given type, return the type OID */ +Oid +typeid(Type tp) +{ + if (tp == NULL) { + elog ( WARN , "typeid() called with NULL type struct"); + } + return(tp->t_oid); +} + +/* given type (as type struct), return the length of type */ +int16 +tlen(Type t) +{ + TypeTupleForm typ; + + typ = (TypeTupleForm)GETSTRUCT(t); + return(typ->typlen); +} + +/* given type (as type struct), return the value of its 'byval' attribute.*/ +bool +tbyval(Type t) +{ + TypeTupleForm typ; + + typ = (TypeTupleForm)GETSTRUCT(t); + return(typ->typbyval); +} + +/* given type (as type struct), return the name of type */ +char* +tname(Type t) +{ + TypeTupleForm typ; + + typ = (TypeTupleForm)GETSTRUCT(t); + return (typ->typname).data; +} + +/* given type (as type struct), return wether type is passed by value */ +int +tbyvalue(Type t) +{ + TypeTupleForm typ; + + typ = (TypeTupleForm) GETSTRUCT(t); + return(typ->typbyval); +} + +/* given a type, return its typetype ('c' for 'c'atalog types) */ +char +typetypetype(Type t) +{ + TypeTupleForm typ; + + typ = (TypeTupleForm) GETSTRUCT(t); + return(typ->typtype); +} + +/* given operator, return the operator OID */ +Oid +oprid(Operator op) +{ + return(op->t_oid); +} + +/* + * given opname, leftTypeId and rightTypeId, + * find all possible (arg1, arg2) pairs for which an operator named + * opname exists, such that leftTypeId can be coerced to arg1 and + * rightTypeId can be coerced to arg2 + */ +static int +binary_oper_get_candidates(char *opname, + int leftTypeId, + int rightTypeId, + CandidateList *candidates) +{ + CandidateList current_candidate; + Relation pg_operator_desc; + HeapScanDesc pg_operator_scan; + HeapTuple tup; + OperatorTupleForm oper; + Buffer buffer; + int nkeys; + int ncandidates = 0; + ScanKeyData opKey[3]; + + *candidates = NULL; + + 
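
Each match found by the pg_operator scan that follows is pushed onto the front of a singly linked CandidateList; the same pattern reappears in unary_oper_get_candidates and func_get_candidates later in this file. A sketch of that push step (illustrative only, using struct _CandidateList and palloc as declared above):

    /* Hypothetical helper: prepend one (left, right) argument pair to a
     * candidate list, as the scan loop below does inline. */
    static CandidateList
    push_candidate(CandidateList head, Oid leftarg, Oid rightarg)
    {
        CandidateList c =
            (CandidateList) palloc(sizeof(struct _CandidateList));

        c->args = (Oid *) palloc(2 * sizeof(Oid));
        c->args[0] = leftarg;
        c->args[1] = rightarg;
        c->next = head;
        return c;
    }
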
ScanKeyEntryInitialize(&opKey[0], 0, + Anum_pg_operator_oprname, + NameEqualRegProcedure, + NameGetDatum(opname)); + + ScanKeyEntryInitialize(&opKey[1], 0, + Anum_pg_operator_oprkind, + CharacterEqualRegProcedure, + CharGetDatum('b')); + + + if (leftTypeId == UNKNOWNOID) { + if (rightTypeId == UNKNOWNOID) { + nkeys = 2; + } else { + nkeys = 3; + + ScanKeyEntryInitialize(&opKey[2], 0, + Anum_pg_operator_oprright, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(rightTypeId)); + } + } else if (rightTypeId == UNKNOWNOID) { + nkeys = 3; + + ScanKeyEntryInitialize(&opKey[2], 0, + Anum_pg_operator_oprleft, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(leftTypeId)); + } else { + /* currently only "unknown" can be coerced */ + return 0; + } + + pg_operator_desc = heap_openr(OperatorRelationName); + pg_operator_scan = heap_beginscan(pg_operator_desc, + 0, + SelfTimeQual, + nkeys, + opKey); + + do { + tup = heap_getnext(pg_operator_scan, 0, &buffer); + if (HeapTupleIsValid(tup)) { + current_candidate = (CandidateList)palloc(sizeof(struct _CandidateList)); + current_candidate->args = (Oid *)palloc(2 * sizeof(Oid)); + + oper = (OperatorTupleForm)GETSTRUCT(tup); + current_candidate->args[0] = oper->oprleft; + current_candidate->args[1] = oper->oprright; + current_candidate->next = *candidates; + *candidates = current_candidate; + ncandidates++; + ReleaseBuffer(buffer); + } + } while(HeapTupleIsValid(tup)); + + heap_endscan(pg_operator_scan); + heap_close(pg_operator_desc); + + return ncandidates; +} + +/* + * equivalentOpersAfterPromotion - + * checks if a list of candidate operators obtained from + * binary_oper_get_candidates() contain equivalent operators. If + * this routine is called, we have more than 1 candidate and need to + * decided whether to pick one of them. This routine returns true if + * the all the candidates operate on the same data types after + * promotion (int2, int4, float4 -> float8). + */ +static bool +equivalentOpersAfterPromotion(CandidateList candidates) +{ + CandidateList result; + CandidateList promotedCandidates = NULL; + int leftarg, rightarg; + + for (result = candidates; result != NULL; result = result->next) { + CandidateList c; + c = (CandidateList)palloc(sizeof(*c)); + c->args = (Oid *)palloc(2 * sizeof(Oid)); + switch (result->args[0]) { + case FLOAT4OID: + case INT4OID: + case INT2OID: + c->args[0] = FLOAT8OID; + break; + default: + c->args[0] = result->args[0]; + break; + } + switch (result->args[1]) { + case FLOAT4OID: + case INT4OID: + case INT2OID: + c->args[1] = FLOAT8OID; + break; + default: + c->args[1] = result->args[1]; + break; + } + c->next = promotedCandidates; + promotedCandidates = c; + } + + /* if we get called, we have more than 1 candidates so we can do the + following safely */ + leftarg = promotedCandidates->args[0]; + rightarg = promotedCandidates->args[1]; + + for (result=promotedCandidates->next; result!=NULL; result=result->next) { + if (result->args[0]!=leftarg || result->args[1]!=rightarg) + /* + * this list contains operators that operate on different + * data types even after promotion. Hence we can't decide on + * which one to pick. The user must do explicit type casting. + */ + return FALSE; + } + + /* all the candidates are equivalent in the following sense: they operate + on equivalent data types and picking any one of them is as good. 
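
equivalentOpersAfterPromotion above treats int2, int4 and float4 as if they were float8 before comparing candidate argument types; only if every candidate collapses to the same pair after that promotion is the choice considered unambiguous. The promotion rule on its own, as an illustrative sketch of the switch statements above:

    /* Hypothetical helper: the numeric types promoted to float8 before
     * candidate argument types are compared. */
    static Oid
    promote_for_comparison(Oid t)
    {
        switch (t) {
        case INT2OID:
        case INT4OID:
        case FLOAT4OID:
            return FLOAT8OID;
        default:
            return t;
        }
    }
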
*/ + return TRUE; +} + + +/* + * given a choice of argument type pairs for a binary operator, + * try to choose a default pair + */ +static CandidateList +binary_oper_select_candidate(int arg1, + int arg2, + CandidateList candidates) +{ + CandidateList result; + + /* + * if both are "unknown", there is no way to select a candidate + * + * current wisdom holds that the default operator should be one + * in which both operands have the same type (there will only + * be one such operator) + * + * 7.27.93 - I have decided not to do this; it's too hard to + * justify, and it's easy enough to typecast explicitly -avi + * [the rest of this routine were commented out since then -ay] + */ + + if (arg1 == UNKNOWNOID && arg2 == UNKNOWNOID) + return (NULL); + + /* + * 6/23/95 - I don't complete agree with avi. In particular, casting + * floats is a pain for users. Whatever the rationale behind not doing + * this is, I need the following special case to work. + * + * In the WHERE clause of a query, if a float is specified without + * quotes, we treat it as float8. I added the float48* operators so + * that we can operate on float4 and float8. But now we have more + * than one matching operator if the right arg is unknown (eg. float + * specified with quotes). This break some stuff in the regression + * test where there are floats in quotes not properly casted. Below + * is the solution. In addition to requiring the operator operates + * on the same type for both operands [as in the code Avi originally + * commented out], we also require that the operators be equivalent + * in some sense. (see equivalentOpersAfterPromotion for details.) + * - ay 6/95 + */ + if (!equivalentOpersAfterPromotion(candidates)) + return NULL; + + /* if we get here, any one will do but we're more picky and require + both operands be the same. */ + for (result = candidates; result != NULL; result = result->next) { + if (result->args[0] == result->args[1]) + return result; + } + + return (NULL); +} + +/* Given operator, types of arg1, and arg2, return oper struct */ +/* arg1, arg2 --typeids */ +Operator +oper(char *op, int arg1, int arg2) +{ + HeapTuple tup; + CandidateList candidates; + int ncandidates; + + if (!(tup = SearchSysCacheTuple(OPRNAME, + PointerGetDatum(op), + ObjectIdGetDatum(arg1), + ObjectIdGetDatum(arg2), + Int8GetDatum('b')))) { + ncandidates = binary_oper_get_candidates(op, arg1, arg2, &candidates); + if (ncandidates == 0) { + /* + * no operators of the desired types found + */ + op_error(op, arg1, arg2); + return(NULL); + } else if (ncandidates == 1) { + /* + * exactly one operator of the desired types found + */ + tup = SearchSysCacheTuple(OPRNAME, + PointerGetDatum(op), + ObjectIdGetDatum(candidates->args[0]), + ObjectIdGetDatum(candidates->args[1]), + Int8GetDatum('b')); + Assert(HeapTupleIsValid(tup)); + } else { + /* + * multiple operators of the desired types found + */ + candidates = binary_oper_select_candidate(arg1, arg2, candidates); + if (candidates != NULL) { + /* we chose one of them */ + tup = SearchSysCacheTuple(OPRNAME, + PointerGetDatum(op), + ObjectIdGetDatum(candidates->args[0]), + ObjectIdGetDatum(candidates->args[1]), + Int8GetDatum('b')); + Assert(HeapTupleIsValid(tup)); + } else { + Type tp1, tp2; + + /* we chose none of them */ + tp1 = get_id_type(arg1); + tp2 = get_id_type(arg2); + elog(NOTICE, "there is more than one operator %s for types", op); + elog(NOTICE, "%s and %s. 
You will have to retype this query", + tname(tp1), tname(tp2)); + elog(WARN, "using an explicit cast"); + + return(NULL); + } + } + } + return((Operator) tup); +} + +/* + * given opname and typeId, find all possible types for which + * a right/left unary operator named opname exists, + * such that typeId can be coerced to it + */ +static int +unary_oper_get_candidates(char *op, + int typeId, + CandidateList *candidates, + char rightleft) +{ + CandidateList current_candidate; + Relation pg_operator_desc; + HeapScanDesc pg_operator_scan; + HeapTuple tup; + OperatorTupleForm oper; + Buffer buffer; + int ncandidates = 0; + + static ScanKeyData opKey[2] = { + { 0, Anum_pg_operator_oprname, NameEqualRegProcedure }, + { 0, Anum_pg_operator_oprkind, CharacterEqualRegProcedure } }; + + *candidates = NULL; + + fmgr_info(NameEqualRegProcedure, (func_ptr *) &opKey[0].sk_func, + &opKey[0].sk_nargs); + opKey[0].sk_argument = NameGetDatum(op); + fmgr_info(CharacterEqualRegProcedure, (func_ptr *) &opKey[1].sk_func, + &opKey[1].sk_nargs); + opKey[1].sk_argument = CharGetDatum(rightleft); + + /* currently, only "unknown" can be coerced */ + if (typeId != UNKNOWNOID) { + return 0; + } + + pg_operator_desc = heap_openr(OperatorRelationName); + pg_operator_scan = heap_beginscan(pg_operator_desc, + 0, + SelfTimeQual, + 2, + opKey); + + do { + tup = heap_getnext(pg_operator_scan, 0, &buffer); + if (HeapTupleIsValid(tup)) { + current_candidate = (CandidateList)palloc(sizeof(struct _CandidateList)); + current_candidate->args = (Oid *)palloc(sizeof(Oid)); + + oper = (OperatorTupleForm)GETSTRUCT(tup); + if (rightleft == 'r') + current_candidate->args[0] = oper->oprleft; + else + current_candidate->args[0] = oper->oprright; + current_candidate->next = *candidates; + *candidates = current_candidate; + ncandidates++; + ReleaseBuffer(buffer); + } + } while(HeapTupleIsValid(tup)); + + heap_endscan(pg_operator_scan); + heap_close(pg_operator_desc); + + return ncandidates; +} + +/* Given unary right-side operator (operator on right), return oper struct */ +/* arg-- type id */ +Operator +right_oper(char *op, int arg) +{ + HeapTuple tup; + CandidateList candidates; + int ncandidates; + + /* + if (!OpCache) { + init_op_cache(); + } + */ + if (!(tup = SearchSysCacheTuple(OPRNAME, + PointerGetDatum(op), + ObjectIdGetDatum(arg), + ObjectIdGetDatum(InvalidOid), + Int8GetDatum('r')))) { + ncandidates = unary_oper_get_candidates(op, arg, &candidates, 'r'); + if (ncandidates == 0) { + elog ( WARN , + "Can't find right op: %s for type %d", op, arg ); + return(NULL); + } + else if (ncandidates == 1) { + tup = SearchSysCacheTuple(OPRNAME, + PointerGetDatum(op), + ObjectIdGetDatum(candidates->args[0]), + ObjectIdGetDatum(InvalidOid), + Int8GetDatum('r')); + Assert(HeapTupleIsValid(tup)); + } + else { + elog(NOTICE, "there is more than one right operator %s", op); + elog(NOTICE, "you will have to retype this query"); + elog(WARN, "using an explicit cast"); + return(NULL); + } + } + return((Operator) tup); +} + +/* Given unary left-side operator (operator on left), return oper struct */ +/* arg--type id */ +Operator +left_oper(char *op, int arg) +{ + HeapTuple tup; + CandidateList candidates; + int ncandidates; + + /* + if (!OpCache) { + init_op_cache(); + } + */ + if (!(tup = SearchSysCacheTuple(OPRNAME, + PointerGetDatum(op), + ObjectIdGetDatum(InvalidOid), + ObjectIdGetDatum(arg), + Int8GetDatum('l')))) { + ncandidates = unary_oper_get_candidates(op, arg, &candidates, 'l'); + if (ncandidates == 0) { + elog ( WARN , + "Can't find left 
op: %s for type %d", op, arg ); + return(NULL); + } + else if (ncandidates == 1) { + tup = SearchSysCacheTuple(OPRNAME, + PointerGetDatum(op), + ObjectIdGetDatum(InvalidOid), + ObjectIdGetDatum(candidates->args[0]), + Int8GetDatum('l')); + Assert(HeapTupleIsValid(tup)); + } + else { + elog(NOTICE, "there is more than one left operator %s", op); + elog(NOTICE, "you will have to retype this query"); + elog(WARN, "using an explicit cast"); + return(NULL); + } + } + return((Operator) tup); +} + +/* given range variable, return id of variable */ + +int +varattno(Relation rd, char *a) +{ + int i; + + for (i = 0; i < rd->rd_rel->relnatts; i++) { + if (!namestrcmp(&(rd->rd_att->attrs[i]->attname), a)) { + return(i+1); + } + } + for (i = 0; i < SPECIALS; i++) { + if (!strcmp(special_attr[i].field, a)) { + return(special_attr[i].code); + } + } + + elog(WARN,"Relation %s does not have attribute %s\n", + RelationGetRelationName(rd), a ); + return(-1); +} + +/* Given range variable, return whether attribute of this name + * is a set. + * NOTE the ASSUMPTION here that no system attributes are, or ever + * will be, sets. + */ +bool +varisset(Relation rd, char *name) +{ + int i; + + /* First check if this is a system attribute */ + for (i = 0; i < SPECIALS; i++) { + if (! strcmp(special_attr[i].field, name)) { + return(false); /* no sys attr is a set */ + } + } + return (get_attisset(rd->rd_id, name)); +} + +/* given range variable, return id of variable */ +int +nf_varattno(Relation rd, char *a) +{ + int i; + + for (i = 0; i < rd->rd_rel->relnatts; i++) { + if (!namestrcmp(&(rd->rd_att->attrs[i]->attname), a)) { + return(i+1); + } + } + for (i = 0; i < SPECIALS; i++) { + if (!strcmp(special_attr[i].field, a)) { + return(special_attr[i].code); + } + } + return InvalidAttrNumber; +} + +/*------------- + * given an attribute number and a relation, return its relation name + */ +char* +getAttrName(Relation rd, int attrno) +{ + char *name; + int i; + + if (attrno<0) { + for (i = 0; i < SPECIALS; i++) { + if (special_attr[i].code == attrno) { + name = special_attr[i].field; + return(name); + } + } + elog(WARN, "Illegal attr no %d for relation %s\n", + attrno, RelationGetRelationName(rd)); + } else if (attrno >=1 && attrno<= RelationGetNumberOfAttributes(rd)) { + name = (rd->rd_att->attrs[attrno-1]->attname).data; + return(name); + } else { + elog(WARN, "Illegal attr no %d for relation %s\n", + attrno, RelationGetRelationName(rd)); + } + + /* + * Shouldn't get here, but we want lint to be happy... 
+ */
+
+    return(NULL);
+}
+
+/* Given a typename and value, returns the ascii form of the value */
+
+char *
+outstr(char *typename,	/* Name of type of value */
+       char *value)	/* Could be of any type */
+{
+    TypeTupleForm tp;
+    Oid op;
+
+    tp = (TypeTupleForm ) GETSTRUCT(type(typename));
+    op = tp->typoutput;
+    return((char *) fmgr(op, value));
+}
+
+/* Given a Type and a string, return the internal form of that string */
+char *
+instr2(Type tp, char *string, int typlen)
+{
+    return(instr1((TypeTupleForm ) GETSTRUCT(tp), string, typlen));
+}
+
+/* Given a type structure and a string, returns the internal form of
+   that string */
+char *
+instr1(TypeTupleForm tp, char *string, int typlen)
+{
+    Oid op;
+    Oid typelem;
+
+    op = tp->typinput;
+    typelem = tp->typelem;	/* XXX - used for array_in */
+    /* typlen is for bpcharin() and varcharin() */
+    return((char *) fmgr(op, string, typelem, typlen));
+}
+
+/* Given the attribute type of an array return the attribute type of
+   an element of the array */
+
+Oid
+GetArrayElementType(Oid typearray)
+{
+    HeapTuple type_tuple;
+    TypeTupleForm type_struct_array;
+
+    type_tuple = SearchSysCacheTuple(TYPOID,
+                                     ObjectIdGetDatum(typearray),
+                                     0,0,0);
+
+    if (!HeapTupleIsValid(type_tuple))
+        elog(WARN, "GetArrayElementType: Cache lookup failed for type %d\n",
+             typearray);
+
+    /* get the array type struct from the type tuple */
+    type_struct_array = (TypeTupleForm) GETSTRUCT(type_tuple);
+
+    if (type_struct_array->typelem == InvalidOid) {
+        elog(WARN, "GetArrayElementType: type %s is not an array",
+             (Name)&(type_struct_array->typname.data[0]));
+    }
+
+    return(type_struct_array->typelem);
+}
+
+Oid
+funcid_get_rettype(Oid funcid)
+{
+    HeapTuple func_tuple = NULL;
+    Oid funcrettype = (Oid)0;
+
+    func_tuple = SearchSysCacheTuple(PROOID, ObjectIdGetDatum(funcid),
+                                     0,0,0);
+
+    if ( !HeapTupleIsValid ( func_tuple ))
+        elog (WARN, "function %d does not exist", funcid);
+
+    funcrettype = (Oid)
+        ((Form_pg_proc)GETSTRUCT(func_tuple))->prorettype ;
+
+    return (funcrettype);
+}
+
+/*
+ * get a list of all argument type vectors for which a function named
+ * funcname taking nargs arguments exists
+ */
+static CandidateList
+func_get_candidates(char *funcname, int nargs)
+{
+    Relation heapRelation;
+    Relation idesc;
+    ScanKeyData skey;
+    HeapTuple tuple;
+    IndexScanDesc sd;
+    RetrieveIndexResult indexRes;
+    Buffer buffer;
+    Form_pg_proc pgProcP;
+    bool bufferUsed = FALSE;
+    CandidateList candidates = NULL;
+    CandidateList current_candidate;
+    int i;
+
+    heapRelation = heap_openr(ProcedureRelationName);
+    ScanKeyEntryInitialize(&skey,
+                           (bits16)0x0,
+                           (AttrNumber)1,
+                           (RegProcedure)NameEqualRegProcedure,
+                           (Datum)funcname);
+
+    idesc = index_openr(ProcedureNameIndex);
+
+    sd = index_beginscan(idesc, false, 1, &skey);
+
+    do {
+        tuple = (HeapTuple)NULL;
+        if (bufferUsed) {
+            ReleaseBuffer(buffer);
+            bufferUsed = FALSE;
+        }
+
+        indexRes = index_getnext(sd, ForwardScanDirection);
+        if (indexRes) {
+            ItemPointer iptr;
+
+            iptr = &indexRes->heap_iptr;
+            tuple = heap_fetch(heapRelation, NowTimeQual, iptr, &buffer);
+            pfree(indexRes);
+            if (HeapTupleIsValid(tuple)) {
+                pgProcP = (Form_pg_proc)GETSTRUCT(tuple);
+                bufferUsed = TRUE;
+                if (pgProcP->pronargs == nargs) {
+                    current_candidate = (CandidateList)
+                        palloc(sizeof(struct _CandidateList));
+                    current_candidate->args = (Oid *)
+                        palloc(8 * sizeof(Oid));
+                    memset(current_candidate->args, 0, 8 * sizeof(Oid));
+                    for (i=0; i<nargs; i++) {
+                        current_candidate->args[i] =
+                            pgProcP->proargtypes[i];
+                    }
+
+                    current_candidate->next = candidates;
+                    candidates = current_candidate;
+                }
+            }
+        }
+    } while (indexRes);
+
+    index_endscan(sd);
+    index_close(idesc);
+    heap_close(heapRelation);
+
+    return candidates;
+}
+
+/*
+ * can input_typeids be coerced to func_typeids?
+ */
+static bool
+can_coerce(int nargs, Oid *input_typeids, Oid *func_typeids)
+{
+    int i;
+    Type tp;
+
+    /*
+     * right now, we only coerce "unknown", and we cannot coerce it to a
+     * relation type
+     */
+    for (i=0; inext) {
+        current_typeids = current_candidate->args;
+        if (can_coerce(nargs, input_typeids, current_typeids)) {
+            matching_candidate = (CandidateList)
+                palloc(sizeof(struct _CandidateList));
+            matching_candidate->args = current_typeids;
+            matching_candidate->next = *candidates;
+            *candidates = matching_candidate;
+            ncandidates++;
+        }
+    }
+
+    return ncandidates;
+}
+
+/*
+ * given the input argtype array and more than one candidate
+ * for the function argtype array, attempt to resolve the conflict.
+ * returns the selected argtype array if the conflict can be resolved,
+ * otherwise returns NULL
+ */
+static Oid *
+func_select_candidate(int nargs,
+                      Oid *input_typeids,
+                      CandidateList candidates)
+{
+    /* XXX no conflict resolution implemented yet */
+    return (NULL);
+}
+
+bool
+func_get_detail(char *funcname,
+                int nargs,
+                Oid *oid_array,
+                Oid *funcid,		/* return value */
+                Oid *rettype,		/* return value */
+                bool *retset,		/* return value */
+                Oid **true_typeids)	/* return value */
+{
+    Oid **input_typeid_vector;
+    Oid *current_input_typeids;
+    CandidateList function_typeids;
+    CandidateList current_function_typeids;
+    HeapTuple ftup;
+    Form_pg_proc pform;
+
+    /*
+     * attempt to find named function in the system catalogs
+     * with arguments exactly as specified - so that the normal
+     * case is just as quick as before
+     */
+    ftup = SearchSysCacheTuple(PRONAME,
+                               PointerGetDatum(funcname),
+                               Int32GetDatum(nargs),
+                               PointerGetDatum(oid_array),
+                               0);
+    *true_typeids = oid_array;
+
+    /*
+     * If an exact match isn't found :
+     * 1) get a vector of all possible input arg type arrays constructed
+     *    from the superclasses of the original input arg types
+     * 2) get a list of all possible argument type arrays to the
+     *    function with given name and number of arguments
+     * 3) for each input arg type array from vector #1 :
+     *    a) find how many of the function arg type arrays from list #2
+     *       it can be coerced to
+     *    b) - if the answer is one, we have our function
+     *       - if the answer is more than one, attempt to resolve the
+     *         conflict
+     *       - if the answer is zero, try the next array from vector #1
+     */
+    if (!HeapTupleIsValid(ftup)) {
+        function_typeids = func_get_candidates(funcname, nargs);
+
+        if (function_typeids != NULL) {
+            int ncandidates = 0;
+
+            input_typeid_vector = argtype_inherit(nargs, oid_array);
+            current_input_typeids = oid_array;
+
+            do {
+                ncandidates = match_argtypes(nargs, current_input_typeids,
+                                             function_typeids,
+                                             &current_function_typeids);
+                if (ncandidates == 1) {
+                    *true_typeids = current_function_typeids->args;
+                    ftup = SearchSysCacheTuple(PRONAME,
+                                               PointerGetDatum(funcname),
+                                               Int32GetDatum(nargs),
+                                               PointerGetDatum(*true_typeids),
+                                               0);
+                    Assert(HeapTupleIsValid(ftup));
+                }
+                else if (ncandidates > 1) {
+                    *true_typeids =
+                        func_select_candidate(nargs,
+                                              current_input_typeids,
+                                              current_function_typeids);
+                    if (*true_typeids == NULL) {
+                        elog(NOTICE, "there is more than one function named \"%s\"",
+                             funcname);
+                        elog(NOTICE, "that satisfies the given argument types.
you will have to"); + elog(NOTICE, "retype your query using explicit typecasts."); + func_error("func_get_detail", funcname, nargs, (int*)oid_array); + } + else { + ftup = SearchSysCacheTuple(PRONAME, + PointerGetDatum(funcname), + Int32GetDatum(nargs), + PointerGetDatum(*true_typeids), + 0); + Assert(HeapTupleIsValid(ftup)); + } + } + current_input_typeids = *input_typeid_vector++; + } + while (current_input_typeids != + InvalidOid && ncandidates == 0); + } + } + + if (!HeapTupleIsValid(ftup)) { + Type tp; + + if (nargs == 1) { + tp = get_id_type(oid_array[0]); + if (typetypetype(tp) == 'c') + elog(WARN, "no such attribute or function \"%s\"", + funcname); + } + func_error("func_get_detail", funcname, nargs, (int*)oid_array); + } else { + pform = (Form_pg_proc) GETSTRUCT(ftup); + *funcid = ftup->t_oid; + *rettype = (Oid) pform->prorettype; + *retset = (Oid) pform->proretset; + + return (true); + } +/* shouldn't reach here */ + return (false); + +} + +/* + * argtype_inherit() -- Construct an argtype vector reflecting the + * inheritance properties of the supplied argv. + * + * This function is used to disambiguate among functions with the + * same name but different signatures. It takes an array of eight + * type ids. For each type id in the array that's a complex type + * (a class), it walks up the inheritance tree, finding all + * superclasses of that type. A vector of new Oid type arrays + * is returned to the caller, reflecting the structure of the + * inheritance tree above the supplied arguments. + * + * The order of this vector is as follows: all superclasses of the + * rightmost complex class are explored first. The exploration + * continues from right to left. This policy means that we favor + * keeping the leftmost argument type as low in the inheritance tree + * as possible. This is intentional; it is exactly what we need to + * do for method dispatch. The last type array we return is all + * zeroes. This will match any functions for which return types are + * not defined. There are lots of these (mostly builtins) in the + * catalogs. + */ +static Oid ** +argtype_inherit(int nargs, Oid *oid_array) +{ + Oid relid; + int i; + InhPaths arginh[MAXFARGS]; + + for (i = 0; i < MAXFARGS; i++) { + if (i < nargs) { + arginh[i].self = oid_array[i]; + if ((relid = typeid_get_relid(oid_array[i])) != InvalidOid) { + arginh[i].nsupers = findsupers(relid, &(arginh[i].supervec)); + } else { + arginh[i].nsupers = 0; + arginh[i].supervec = (Oid *) NULL; + } + } else { + arginh[i].self = InvalidOid; + arginh[i].nsupers = 0; + arginh[i].supervec = (Oid *) NULL; + } + } + + /* return an ordered cross-product of the classes involved */ + return (genxprod(arginh, nargs)); +} + +typedef struct _SuperQE { + Oid sqe_relid; +} SuperQE; + +static int +findsupers(Oid relid, Oid **supervec) +{ + Oid *relidvec; + Relation inhrel; + HeapScanDesc inhscan; + ScanKeyData skey; + HeapTuple inhtup; + TupleDesc inhtupdesc; + int nvisited; + SuperQE *qentry, *vnode; + Dllist *visited, *queue; + Dlelem *qe, *elt; + + Relation rd; + Buffer buf; + Datum d; + bool newrelid; + char isNull; + + nvisited = 0; + queue = DLNewList(); + visited = DLNewList(); + + + inhrel = heap_openr(InheritsRelationName); + RelationSetLockForRead(inhrel); + inhtupdesc = RelationGetTupleDescriptor(inhrel); + + /* + * Use queue to do a breadth-first traversal of the inheritance + * graph from the relid supplied up to the root. 
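+     *
+     * Illustrative walk-through (class names are hypothetical, not from
+     * the original sources): if class "stud_emp" inherits from "student"
+     * and "student" inherits from "person", then starting from the relid
+     * of "stud_emp" the pg_inherits scan queues "student"; visiting
+     * "student" queues "person"; and the supervec handed back to the
+     * caller holds the type OIDs of "student" and "person" in that
+     * breadth-first order.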
+ */ + do { + ScanKeyEntryInitialize(&skey, 0x0, Anum_pg_inherits_inhrel, + ObjectIdEqualRegProcedure, + ObjectIdGetDatum(relid)); + + inhscan = heap_beginscan(inhrel, 0, NowTimeQual, 1, &skey); + + while (HeapTupleIsValid(inhtup = heap_getnext(inhscan, 0, &buf))) { + qentry = (SuperQE *) palloc(sizeof(SuperQE)); + + d = (Datum) fastgetattr(inhtup, Anum_pg_inherits_inhparent, + inhtupdesc, &isNull); + qentry->sqe_relid = DatumGetObjectId(d); + + /* put this one on the queue */ + DLAddTail(queue, DLNewElem(qentry)); + + ReleaseBuffer(buf); + } + + heap_endscan(inhscan); + + /* pull next unvisited relid off the queue */ + do { + qe = DLRemHead(queue); + qentry = qe ? (SuperQE*)DLE_VAL(qe) : NULL; + + if (qentry == (SuperQE *) NULL) + break; + + relid = qentry->sqe_relid; + newrelid = true; + + for (elt = DLGetHead(visited); elt; elt = DLGetSucc(elt)) { + vnode = (SuperQE*)DLE_VAL(elt); + if (vnode && (qentry->sqe_relid == vnode->sqe_relid)) { + newrelid = false; + break; + } + } + } while (!newrelid); + + if (qentry != (SuperQE *) NULL) { + + /* save the type id, rather than the relation id */ + if ((rd = heap_open(qentry->sqe_relid)) == (Relation) NULL) + elog(WARN, "relid %d does not exist", qentry->sqe_relid); + qentry->sqe_relid = typeid(type(RelationGetRelationName(rd)->data)); + heap_close(rd); + + DLAddTail(visited, qe); + + nvisited++; + } + } while (qentry != (SuperQE *) NULL); + + RelationUnsetLockForRead(inhrel); + heap_close(inhrel); + + if (nvisited > 0) { + relidvec = (Oid *) palloc(nvisited * sizeof(Oid)); + *supervec = relidvec; + + for (elt = DLGetHead(visited); elt; elt = DLGetSucc(elt)) { + vnode = (SuperQE*)DLE_VAL(elt); + *relidvec++ = vnode->sqe_relid; + } + + } else { + *supervec = (Oid *) NULL; + } + + return (nvisited); +} + +static Oid ** +genxprod(InhPaths *arginh, int nargs) +{ + int nanswers; + Oid **result, **iter; + Oid *oneres; + int i, j; + int cur[MAXFARGS]; + + nanswers = 1; + for (i = 0; i < nargs; i++) { + nanswers *= (arginh[i].nsupers + 2); + cur[i] = 0; + } + + iter = result = (Oid **) palloc(sizeof(Oid *) * nanswers); + + /* compute the cross product from right to left */ + for (;;) { + oneres = (Oid *) palloc(MAXFARGS * sizeof(Oid)); + memset(oneres, 0, MAXFARGS * sizeof(Oid)); + + for (i = nargs - 1; i >= 0 && cur[i] > arginh[i].nsupers; i--) + continue; + + /* if we're done, terminate with NULL pointer */ + if (i < 0) { + *iter = NULL; + return (result); + } + + /* no, increment this column and zero the ones after it */ + cur[i] = cur[i] + 1; + for (j = nargs - 1; j > i; j--) + cur[j] = 0; + + for (i = 0; i < nargs; i++) { + if (cur[i] == 0) + oneres[i] = arginh[i].self; + else if (cur[i] > arginh[i].nsupers) + oneres[i] = 0; /* wild card */ + else + oneres[i] = arginh[i].supervec[cur[i] - 1]; + } + + *iter++ = oneres; + } +} + +/* Given a type id, returns the in-conversion function of the type */ +Oid +typeid_get_retinfunc(int type_id) +{ + HeapTuple typeTuple; + TypeTupleForm type; + Oid infunc; + typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type_id), + 0,0,0); + if ( !HeapTupleIsValid ( typeTuple )) + elog(WARN, + "typeid_get_retinfunc: Invalid type - oid = %d", + type_id); + + type = (TypeTupleForm) GETSTRUCT(typeTuple); + infunc = type->typinput; + return(infunc); +} + +Oid +typeid_get_relid(int type_id) +{ + HeapTuple typeTuple; + TypeTupleForm type; + Oid infunc; + typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type_id), + 0,0,0); + if ( !HeapTupleIsValid ( typeTuple )) + elog(WARN, "typeid_get_relid: Invalid type - 
oid = %d ", type_id); + + type = (TypeTupleForm) GETSTRUCT(typeTuple); + infunc = type->typrelid; + return(infunc); +} + +Oid get_typrelid(Type typ) +{ + TypeTupleForm typtup; + + typtup = (TypeTupleForm) GETSTRUCT(typ); + + return (typtup->typrelid); +} + +Oid +get_typelem(Oid type_id) +{ + HeapTuple typeTuple; + TypeTupleForm type; + + if (!(typeTuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type_id), + 0,0,0))) { + elog (WARN , "type id lookup of %d failed", type_id); + } + type = (TypeTupleForm) GETSTRUCT(typeTuple); + + return (type->typelem); +} + +char +FindDelimiter(char *typename) +{ + char delim; + HeapTuple typeTuple; + TypeTupleForm type; + + + if (!(typeTuple = SearchSysCacheTuple(TYPNAME, + PointerGetDatum(typename), + 0,0,0))) { + elog (WARN , "type name lookup of %s failed", typename); + } + type = (TypeTupleForm) GETSTRUCT(typeTuple); + + delim = type->typdelim; + return (delim); +} + +/* + * Give a somewhat useful error message when the operator for two types + * is not found. + */ +void +op_error(char *op, int arg1, int arg2) +{ + Type tp1, tp2; + + if (check_typeid(arg1)) { + tp1 = get_id_type(arg1); + } else { + elog(WARN, "left hand side of operator %s has an unknown type, probably a bad attribute name", op); + } + + if (check_typeid(arg2)) { + tp2 = get_id_type(arg2); + } else { + elog(WARN, "right hand side of operator %s has an unknown type, probably a bad attribute name", op); + } + + elog(NOTICE, "there is no operator %s for types %s and %s", + op, tname(tp1),tname(tp2)); + elog(NOTICE, "You will either have to retype this query using an"); + elog(NOTICE, "explicit cast, or you will have to define the operator"); + elog(WARN, "%s for %s and %s using DEFINE OPERATOR", + op, tname(tp1),tname(tp2)); +} + +/* + * Error message when function lookup fails that gives details of the + * argument types + */ +void +func_error(char *caller, char *funcname, int nargs, int *argtypes) +{ + Type get_id_type(); + char p[(NAMEDATALEN+2)*MAXFMGRARGS], *ptr; + int i; + + ptr = p; + *ptr = '\0'; + for (i=0; i +#include + +#include "postgres.h" +#include "miscadmin.h" /* for DataDir */ +#include "access/heapam.h" +#include "access/htup.h" +#include "access/relscan.h" +#include "utils/rel.h" +#include "utils/elog.h" +#include "catalog/catname.h" +#include "catalog/pg_proc.h" +#include "catalog/pg_user.h" +#include "catalog/pg_database.h" +#include "utils/syscache.h" +#include "parser/dbcommands.h" +#include "tcop/tcopprot.h" +#include "storage/bufmgr.h" +#include "storage/lmgr.h" + + +/* non-export function prototypes */ +static void check_permissions(char *command, char *dbname, + Oid *dbIdP, Oid *userIdP); +static HeapTuple get_pg_dbtup(char *command, char *dbname, Relation dbrel); + +void +createdb(char *dbname) +{ + Oid db_id, user_id; + char buf[512]; + + /* + * If this call returns, the database does not exist and we're allowed + * to create databases. 
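+     *
+     * Illustrative sketch only (the data directory and database name are
+     * hypothetical, and SEP_CHAR is assumed to be '/'): with DataDir set
+     * to /usr/local/pgsql/data, "createdb test" runs roughly
+     *
+     *     mkdir /usr/local/pgsql/data/base/test
+     *
+     * then copies everything under /usr/local/pgsql/data/base/template1
+     * into that directory via COPY_CMD, and finally registers the new
+     * database by evaluating an INSERT INTO pg_database through pg_eval(),
+     * as the code below shows.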
+ */ + check_permissions("createdb", dbname, &db_id, &user_id); + + /* close virtual file descriptors so we can do system() calls */ + closeAllVfds(); + + sprintf(buf, "mkdir %s%cbase%c%s", DataDir, SEP_CHAR, SEP_CHAR, dbname); + system(buf); + sprintf(buf, "%s %s%cbase%ctemplate1%c* %s%cbase%c%s", + COPY_CMD, DataDir, SEP_CHAR, SEP_CHAR, SEP_CHAR, DataDir, + SEP_CHAR, SEP_CHAR, dbname); + system(buf); + +/* sprintf(buf, "insert into pg_database (datname, datdba, datpath) \ + values (\'%s\'::char16, \'%d\'::oid, \'%s\'::text);", + dbname, user_id, dbname); +*/ + sprintf(buf, "insert into pg_database (datname, datdba, datpath) \ + values (\'%s\', \'%d\', \'%s\');", + dbname, user_id, dbname); + + pg_eval(buf, (char **) NULL, (Oid *) NULL, 0); +} + +void +destroydb(char *dbname) +{ + Oid user_id, db_id; + char buf[512]; + + /* + * If this call returns, the database exists and we're allowed to + * remove it. + */ + check_permissions("destroydb", dbname, &db_id, &user_id); + + if (!OidIsValid(db_id)) { + elog(FATAL, "impossible: pg_database instance with invalid OID."); + } + + /* stop the vacuum daemon */ + stop_vacuum(dbname); + + /* remove the pg_database tuple FIRST, + this may fail due to permissions problems*/ + sprintf(buf, "delete from pg_database where pg_database.oid = \'%d\'::oid", + db_id); + pg_eval(buf, (char **) NULL, (Oid *) NULL, 0); + + /* remove the data directory. If the DELETE above failed, this will + not be reached */ + sprintf(buf, "rm -r %s/base/%s", DataDir, dbname); + system(buf); + + /* drop pages for this database that are in the shared buffer cache */ + DropBuffers(db_id); +} + +static HeapTuple +get_pg_dbtup(char *command, char *dbname, Relation dbrel) +{ + HeapTuple dbtup; + HeapTuple tup; + Buffer buf; + HeapScanDesc scan; + ScanKeyData scanKey; + + ScanKeyEntryInitialize(&scanKey, 0, Anum_pg_database_datname, + NameEqualRegProcedure, NameGetDatum(dbname)); + + scan = heap_beginscan(dbrel, 0, NowTimeQual, 1, &scanKey); + if (!HeapScanIsValid(scan)) + elog(WARN, "%s: cannot begin scan of pg_database.", command); + + /* + * since we want to return the tuple out of this proc, and we're + * going to close the relation, copy the tuple and return the copy. + */ + tup = heap_getnext(scan, 0, &buf); + + if (HeapTupleIsValid(tup)) { + dbtup = heap_copytuple(tup); + ReleaseBuffer(buf); + } else + dbtup = tup; + + heap_endscan(scan); + return (dbtup); +} + +/* + * check_permissions() -- verify that the user is permitted to do this. + * + * If the user is not allowed to carry out this operation, this routine + * elog(WARN, ...)s, which will abort the xact. As a side effect, the + * user's pg_user tuple OID is returned in userIdP and the target database's + * OID is returned in dbIdP. 
+ */ + +static void +check_permissions(char *command, + char *dbname, + Oid *dbIdP, + Oid *userIdP) +{ + Relation dbrel; + HeapTuple dbtup, utup; + Oid dbowner; + char use_createdb; + bool dbfound; + bool use_super; + char *userName; + + userName = GetPgUserName(); + utup = SearchSysCacheTuple(USENAME, PointerGetDatum(userName), + 0,0,0); + *userIdP = ((Form_pg_user)GETSTRUCT(utup))->usesysid; + use_super = ((Form_pg_user)GETSTRUCT(utup))->usesuper; + use_createdb = ((Form_pg_user)GETSTRUCT(utup))->usecreatedb; + + /* Check to make sure user has permission to use createdb */ + if (!use_createdb) { + elog(WARN, "user \"%-.*s\" is not allowed to create/destroy databases", + NAMEDATALEN, userName); + } + + /* Make sure we are not mucking with the template database */ + if (!strcmp(dbname, "template1")) { + elog(WARN, "%s cannot be executed on the template database.", command); + } + + /* Check to make sure database is not the currently open database */ + if (!strcmp(dbname, GetDatabaseName())) { + elog(WARN, "%s cannot be executed on an open database", command); + } + + /* Check to make sure database is owned by this user */ + + /* + * need the reldesc to get the database owner out of dbtup + * and to set a write lock on it. + */ + dbrel = heap_openr(DatabaseRelationName); + + if (!RelationIsValid(dbrel)) + elog(FATAL, "%s: cannot open relation \"%-.*s\"", + command, DatabaseRelationName); + + /* + * Acquire a write lock on pg_database from the beginning to avoid + * upgrading a read lock to a write lock. Upgrading causes long delays + * when multiple 'createdb's or 'destroydb's are run simult. -mer 7/3/91 + */ + RelationSetLockForWrite(dbrel); + dbtup = get_pg_dbtup(command, dbname, dbrel); + dbfound = HeapTupleIsValid(dbtup); + + if (dbfound) { + dbowner = (Oid) heap_getattr(dbtup, InvalidBuffer, + Anum_pg_database_datdba, + RelationGetTupleDescriptor(dbrel), + (char *) NULL); + *dbIdP = dbtup->t_oid; + } else { + *dbIdP = InvalidOid; + } + + heap_close(dbrel); + + /* + * Now be sure that the user is allowed to do this. + */ + + if (dbfound && !strcmp(command, "createdb")) { + + elog(WARN, "createdb: database %s already exists.", dbname); + + } else if (!dbfound && !strcmp(command, "destroydb")) { + + elog(WARN, "destroydb: database %s does not exist.", dbname); + + } else if (dbfound && !strcmp(command, "destroydb") + && dbowner != *userIdP && use_super == false) { + + elog(WARN, "%s: database %s is not owned by you.", command, dbname); + + } +} + +/* + * stop_vacuum() -- stop the vacuum daemon on the database, if one is + * running. 
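+ *
+ *	Illustrative example (hypothetical DataDir, with SEP_CHAR assumed
+ *	to be '/'): for database "test" under /usr/local/pgsql/data, the
+ *	daemon's pid is read from
+ *	    /usr/local/pgsql/data/base/test/test.vacuum
+ *	and that process is sent SIGKILLDAEMON1 (SIGINT, per dbcommands.h).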
+ */ +void +stop_vacuum(char *dbname) +{ + char filename[256]; + FILE *fp; + int pid; + + sprintf(filename, "%s%cbase%c%s%c%s.vacuum", DataDir, SEP_CHAR, SEP_CHAR, + dbname, SEP_CHAR, dbname); + if ((fp = fopen(filename, "r")) != (FILE *) NULL) { + fscanf(fp, "%d", &pid); + fclose(fp); + if (kill(pid, SIGKILLDAEMON1) < 0) { + elog(WARN, "can't kill vacuum daemon (pid %d) on %s", + pid, dbname); + } + } +} diff --git a/src/backend/parser/dbcommands.h b/src/backend/parser/dbcommands.h new file mode 100644 index 00000000000..a2811493c75 --- /dev/null +++ b/src/backend/parser/dbcommands.h @@ -0,0 +1,28 @@ +/*------------------------------------------------------------------------- + * + * dbcommands.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: dbcommands.h,v 1.1.1.1 1996/07/09 06:21:40 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef DBCOMMANDS_H +#define DBCOMMANDS_H + +/* + * Originally from tmp/daemon.h. The functions declared in daemon.h does not + * exist; hence removed. -- AY 7/29/94 + */ +#define SIGKILLDAEMON1 SIGINT +#define SIGKILLDAEMON2 SIGTERM + +extern void createdb(char *dbname); +extern void destroydb(char *dbname); +void stop_vacuum(char *dbname); + +#endif /* DBCOMMANDS_H */ + diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y new file mode 100644 index 00000000000..19529d1fa28 --- /dev/null +++ b/src/backend/parser/gram.y @@ -0,0 +1,2113 @@ +%{ /* -*-text-*- */ + +#define YYDEBUG 1 +/*------------------------------------------------------------------------- + * + * gram.y-- + * POSTGRES SQL YACC rules/actions + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/parser/gram.y,v 1.1.1.1 1996/07/09 06:21:40 scrappy Exp $ + * + * HISTORY + * AUTHOR DATE MAJOR EVENT + * Andrew Yu Sept, 1994 POSTQUEL to SQL conversion + * Andrew Yu Oct, 1994 lispy code conversion + * + * NOTES + * CAPITALS are used to represent terminal symbols. + * non-capitals are used to represent non-terminals. + * + * if you use list, make sure the datum is a node so that the printing + * routines work + * + * WARNING + * sometimes we assign constants to makeStrings. Make sure we don't free + * those. + * + *------------------------------------------------------------------------- + */ +#include +#include +#include "postgres.h" +#include "nodes/parsenodes.h" +#include "parser/catalog_utils.h" +#include "parser/parse_query.h" +#include "utils/acl.h" +#include "catalog/catname.h" +#include "utils/elog.h" +#include "access/xact.h" + +static char saved_relname[BUFSIZ]; /* need this for complex attributes */ +static bool QueryIsRule = FALSE; + +extern List *parsetree; + +/* + * If you need access to certain yacc-generated variables and find that + * they're static by default, uncomment the next line. (this is not a + * problem, yet.) 
+ */ +/*#define __YYSCLASS*/ + +extern void yyerror(char message[]); + +static char *xlateSqlType(char *); +static Node *makeA_Expr(int op, char *opname, Node *lexpr, Node *rexpr); + +/* old versions of flex define this as a macro */ +#if defined(yywrap) +#undef yywrap +#endif /* yywrap */ +%} + + +%union { + double dval; + int ival; + char chr; + char *str; + bool boolean; + List *list; + Node *node; + Value *value; + + Attr *attr; + + ColumnDef *coldef; + TypeName *typnam; + DefElem *defelt; + ParamString *param; + SortBy *sortby; + IndexElem *ielem; + RangeVar *range; + RelExpr *relexp; + TimeRange *trange; + A_Indices *aind; + ResTarget *target; + ParamNo *paramno; + + VersionStmt *vstmt; + DefineStmt *dstmt; + PurgeStmt *pstmt; + RuleStmt *rstmt; + AppendStmt *astmt; +} + +%type query, stmt, AddAttrStmt, ClosePortalStmt, + CopyStmt, CreateStmt, DefineStmt, DestroyStmt, + ExtendStmt, FetchStmt, GrantStmt, + IndexStmt, MoveStmt, ListenStmt, OptimizableStmt, + ProcedureStmt, PurgeStmt, + RecipeStmt, RemoveOperStmt, RemoveFuncStmt, RemoveStmt, RenameStmt, + RevokeStmt, RuleStmt, TransactionStmt, ViewStmt, LoadStmt, + CreatedbStmt, DestroydbStmt, VacuumStmt, RetrieveStmt, CursorStmt, + ReplaceStmt, AppendStmt, NotifyStmt, DeleteStmt, ClusterStmt, + ExplainStmt + +%type relation_name, copy_file_name, copy_delimiter, def_name, + database_name, access_method, attr_name, class, index_name, + var_name, name, file_name, recipe_name + +%type opt_id, opt_portal_name, + before_clause, after_clause, all_Op, MathOp, opt_name, opt_unique + result, OptUseOp, opt_class, opt_range_start, opt_range_end, + SpecialRuleRelation + +%type privileges, operation_commalist, grantee +%type operation + +%type queryblock, relation_name_list, OptTableElementList, + tableElementList, OptInherit, definition, + opt_with, def_args, def_name_list, func_argtypes, oper_argtypes, + OptStmtList, OptStmtBlock, opt_column_list, columnList, + exprList, sort_clause, sortby_list, index_params, + name_list, from_clause, from_list, opt_array_bounds, nest_array_bounds, + expr_list, attrs, res_target_list, res_target_list2, def_list, + opt_indirection, group_clause, groupby_list, explain_options + +%type opt_inh_star, opt_binary, opt_instead + +%type copy_dirn, archive_type, OptArchiveType, OptArchiveLocation, + def_type, opt_direction, remove_type, opt_column, event + +%type OptLocation, opt_move_where, fetch_how_many + +%type def_rest +%type purge_quals +%type insert_rest + +%type Typename, typname +%type columnDef +%type def_elem +%type def_arg, columnElem, exprElem, where_clause, + a_expr, AexprConst, having_clause, groupby +%type NumConst +%type event_object, attr +%type sortby +%type index_elem, func_index +%type from_val +%type relation_expr +%type time_range +%type res_target_el, res_target_el2 +%type ParamNo + +%type Iconst +%type Sconst +%type Id, date + + +/* + * If you make any token changes, remember to: + * - use "yacc -d" and update parse.h + * - update the keyword table in parser/keywords.c + */ + +/* Keywords */ +%token ABORT_TRANS, ACL, ADD, AFTER, AGGREGATE, ALL, ALTER, AND, APPEND, + ARCHIVE, ARCH_STORE, AS, ASC, BACKWARD, BEFORE, BEGIN_TRANS, BINARY, + BY, CAST, CHANGE, CLOSE, CLUSTER, COLUMN, COMMIT, COPY, CREATE, CURRENT, + CURSOR, DATABASE, DECLARE, DELETE, DELIMITERS, DESC, DISTINCT, DO, + DROP, END_TRANS, + EXTEND, FETCH, FOR, FORWARD, FROM, FUNCTION, GRANT, GROUP, + HAVING, HEAVY, IN, INDEX, INHERITS, INSERT, INSTEAD, INTO, + ISNULL, LANGUAGE, LIGHT, LISTEN, LOAD, MERGE, MOVE, NEW, + NONE, NOT, NOTHING, 
NOTIFY, NOTNULL, + ON, OPERATOR, OPTION, OR, ORDER, + PNULL, PRIVILEGES, PUBLIC, PURGE, P_TYPE, + RENAME, REPLACE, RETRIEVE, RETURNS, REVOKE, ROLLBACK, RULE, + SELECT, SET, SETOF, STDIN, STDOUT, STORE, + TABLE, TO, TRANSACTION, UPDATE, USING, VACUUM, VALUES + VERSION, VIEW, WHERE, WITH, WORK +%token EXECUTE, RECIPE, EXPLAIN, LIKE + +/* Special keywords, not in the query language - see the "lex" file */ +%token IDENT, SCONST, Op +%token ICONST, PARAM +%token FCONST + +/* these are not real. they are here so that they gets generated as #define's*/ +%token OP + +/* precedence */ +%left OR +%left AND +%right NOT +%right '=' +%nonassoc LIKE +%nonassoc Op +%nonassoc NOTNULL +%nonassoc ISNULL +%left '+' '-' +%left '*' '/' +%left '|' /* this is the relation union op, not logical or */ +%right ';' ':' /* Unary Operators */ +%nonassoc '<' '>' +%right UMINUS +%left '.' +%left '[' ']' +%nonassoc TYPECAST +%nonassoc REDUCE +%% + +queryblock: query queryblock + { parsetree = lcons($1, parsetree); } + | query + { parsetree = lcons($1, NIL); } + ; + +query: stmt + | stmt ';' { $$ = $1; } + ; + +stmt : AddAttrStmt + | ClosePortalStmt + | CopyStmt + | CreateStmt + | ClusterStmt + | DefineStmt + | DestroyStmt + | ExtendStmt + | ExplainStmt + | FetchStmt + | GrantStmt + | IndexStmt + | MoveStmt + | ListenStmt + | ProcedureStmt + | PurgeStmt + | RecipeStmt + | RemoveOperStmt + | RemoveFuncStmt + | RemoveStmt + | RenameStmt + | RevokeStmt + | OptimizableStmt + | RuleStmt + | TransactionStmt + | ViewStmt + | LoadStmt + | CreatedbStmt + | DestroydbStmt + | VacuumStmt + ; + +/***************************************************************************** + * + * QUERY : + * addattr ( attr1 = type1 .. attrn = typen ) to [*] + * + *****************************************************************************/ + +AddAttrStmt: ALTER TABLE relation_name opt_inh_star ADD COLUMN columnDef + { + AddAttrStmt *n = makeNode(AddAttrStmt); + n->relname = $3; + n->inh = $4; + n->colDef = $7; + $$ = (Node *)n; + } + ; + +columnDef: Id Typename + { + $$ = makeNode(ColumnDef); + $$->colname = $1; + $$->typename = $2; + } + ; + + +/***************************************************************************** + * + * QUERY : + * close + * + *****************************************************************************/ + +ClosePortalStmt: CLOSE opt_id + { + ClosePortalStmt *n = makeNode(ClosePortalStmt); + n->portalname = $2; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY : + * COPY [BINARY] FROM/TO + * [USING DELIMITERS ] + * + *****************************************************************************/ + +CopyStmt: COPY opt_binary relation_name copy_dirn copy_file_name copy_delimiter + { + CopyStmt *n = makeNode(CopyStmt); + n->binary = $2; + n->relname = $3; + n->direction = $4; + n->filename = $5; + n->delimiter = $6; + $$ = (Node *)n; + } + ; + +copy_dirn: TO + { $$ = TO; } + | FROM + { $$ = FROM; } + ; + +/* + * copy_file_name NULL indicates stdio is used. Whether stdin or stdout is + * used depends on the direction. (It really doesn't make sense to copy from + * stdout. We silently correct the "typo". 
- AY 9/94 + */ +copy_file_name: Sconst { $$ = $1; } + | STDIN { $$ = NULL; } + | STDOUT { $$ = NULL; } + ; + +opt_binary: BINARY { $$ = TRUE; } + | /*EMPTY*/ { $$ = FALSE; } + ; + +/* + * the default copy delimiter is tab but the user can configure it + */ +copy_delimiter: USING DELIMITERS Sconst { $$ = $3;} + | /* EMPTY */ { $$ = "\t"; } + ; + + +/***************************************************************************** + * + * QUERY : + * CREATE relname + * + *****************************************************************************/ + +CreateStmt: CREATE TABLE relation_name '(' OptTableElementList ')' + OptInherit OptArchiveType OptLocation OptArchiveLocation + { + CreateStmt *n = makeNode(CreateStmt); + n->relname = $3; + n->tableElts = $5; + n->inhRelnames = $7; + n->archiveType = $8; + n->location = $9; + n->archiveLoc = $10; + $$ = (Node *)n; + } + ; + +OptTableElementList: tableElementList { $$ = $1; } + | /* EMPTY */ { $$ = NULL; } + ; + +tableElementList : + tableElementList ',' columnDef + { $$ = lappend($1, $3); } + | columnDef + { $$ = lcons($1, NIL); } + ; + + +OptArchiveType: ARCHIVE '=' archive_type { $$ = $3; } + | /*EMPTY*/ { $$ = ARCH_NONE; } + ; + +archive_type: HEAVY { $$ = ARCH_HEAVY; } + | LIGHT { $$ = ARCH_LIGHT; } + | NONE { $$ = ARCH_NONE; } + ; + +OptLocation: STORE '=' Sconst + { $$ = smgrin($3); } + | /*EMPTY*/ + { $$ = -1; } + ; + +OptArchiveLocation: ARCH_STORE '=' Sconst + { $$ = smgrin($3); } + | /*EMPTY*/ + { $$ = -1; } + ; + +OptInherit: INHERITS '(' relation_name_list ')' { $$ = $3; } + | /*EMPTY*/ { $$ = NIL; } + ; + + +/***************************************************************************** + * + * QUERY : + * define (type,operator,aggregate) + * + *****************************************************************************/ + +DefineStmt: CREATE def_type def_rest + { + $3->defType = $2; + $$ = (Node *)$3; + } + ; + +def_rest: def_name definition + { + $$ = makeNode(DefineStmt); + $$->defname = $1; + $$->definition = $2; + } + ; + +def_type: OPERATOR { $$ = OPERATOR; } + | Type { $$ = P_TYPE; } + | AGGREGATE { $$ = AGGREGATE; } + ; + +def_name: Id | MathOp | Op + ; + + +definition: '(' def_list ')' { $$ = $2; } + ; + + +def_list: def_elem + { $$ = lcons($1, NIL); } + | def_list ',' def_elem + { $$ = lappend($1, $3); } + ; + +def_elem: def_name '=' def_arg + { + $$ = makeNode(DefElem); + $$->defname = $1; + $$->arg = (Node *)$3; + } + | def_name + { + $$ = makeNode(DefElem); + $$->defname = $1; + $$->arg = (Node *)NULL; + } + ; + +def_arg: Id { $$ = (Node *)makeString($1); } + | all_Op { $$ = (Node *)makeString($1); } + | NumConst { $$ = (Node *)$1; /* already a Value */ } + | Sconst { $$ = (Node *)makeString($1); } + | SETOF Id { + TypeName *n = makeNode(TypeName); + n->name = $2; + n->setof = TRUE; + n->arrayBounds = NULL; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * destroy [, .. 
] + * + *****************************************************************************/ + +DestroyStmt: DROP TABLE relation_name_list + { + DestroyStmt *n = makeNode(DestroyStmt); + n->relNames = $3; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * fetch [forward | backward] [number | all ] [ in ] + * + *****************************************************************************/ + +FetchStmt: FETCH opt_direction fetch_how_many opt_portal_name + { + FetchStmt *n = makeNode(FetchStmt); + n->direction = $2; + n->howMany = $3; + n->portalname = $4; + $$ = (Node *)n; + } + ; + +opt_direction: FORWARD { $$ = FORWARD; } + | BACKWARD { $$ = BACKWARD; } + | /*EMPTY*/ { $$ = FORWARD; /* default */ } + ; + +fetch_how_many: Iconst + { $$ = $1; + if ($1 <= 0) elog(WARN,"Please specify nonnegative count for fetch"); } + | ALL { $$ = 0; /* 0 means fetch all tuples*/} + | /*EMPTY*/ { $$ = 0; /*default*/ } + ; + +/***************************************************************************** + * + * QUERY: + * GRANT [privileges] ON [relation_name_list] TO [GROUP] grantee + * + *****************************************************************************/ + +GrantStmt: GRANT privileges ON relation_name_list TO grantee opt_with_grant + { + $$ = (Node*)makeAclStmt($2,$4,$6,'+'); + free($2); + free($6); + } + ; + +privileges: ALL PRIVILEGES + { + $$ = aclmakepriv("rwaR",0); + } + | ALL + { + $$ = aclmakepriv("rwaR",0); + } + | operation_commalist { + $$ = $1; + } + ; + +operation_commalist: operation { + $$ = aclmakepriv("",$1); + } + | operation_commalist ',' operation + { + $$ = aclmakepriv($1,$3); + free($1); + } + ; + +operation: SELECT { + $$ = ACL_MODE_RD_CHR; + } + | INSERT { + $$ = ACL_MODE_AP_CHR; + } + | UPDATE { + $$ = ACL_MODE_WR_CHR; + } + | DELETE { + $$ = ACL_MODE_WR_CHR; + } + | RULE { + $$ = ACL_MODE_RU_CHR; + } + ; + +grantee: PUBLIC { + $$ = aclmakeuser("A",""); + } + | GROUP Id { + $$ = aclmakeuser("G",$2); + } + | Id { + $$ = aclmakeuser("U",$1); + } + ; + +opt_with_grant : /* empty */ + | WITH GRANT OPTION + { + yyerror("WITH GRANT OPTION is not supported. 
Only relation owners can set privileges"); + } + ; +/***************************************************************************** + * + * QUERY: + * REVOKE [privileges] ON [relation_name] FROM [user] + * + *****************************************************************************/ + +RevokeStmt: REVOKE privileges ON relation_name_list FROM grantee + { + $$ = (Node*)makeAclStmt($2,$4,$6,'-'); + free($2); + free($6); + } + ; + +/***************************************************************************** + * + * QUERY: + * move [] [] [] + * + *****************************************************************************/ + +MoveStmt: MOVE opt_direction opt_move_where opt_portal_name + { + MoveStmt *n = makeNode(MoveStmt); + n->direction = $2; + n->to = FALSE; + n->where = $3; + n->portalname = $4; + $$ = (Node *)n; + } + | MOVE opt_direction TO Iconst opt_portal_name + { + MoveStmt *n = makeNode(MoveStmt); + n->direction = $2; + n->to = TRUE; + n->where = $4; + n->portalname = $5; + $$ = (Node *)n; + } + ; + +opt_move_where: Iconst { $$ = $1; } + | /*EMPTY*/ { $$ = 1; /* default */ } + ; + +opt_portal_name: IN name { $$ = $2;} + | /*EMPTY*/ { $$ = NULL; } + ; + + +/***************************************************************************** + * + * QUERY: + * define [archive] index on + * using "(" ( with )+ ")" [with + * ] + * + * [where ] is not supported anymore + *****************************************************************************/ + +IndexStmt: CREATE INDEX index_name ON relation_name + USING access_method '(' index_params ')' + { + /* should check that access_method is valid, + etc ... but doesn't */ + IndexStmt *n = makeNode(IndexStmt); + n->idxname = $3; + n->relname = $5; + n->accessMethod = $7; + n->indexParams = $9; + n->withClause = NIL; + n->whereClause = NULL; + $$ = (Node *)n; + } + ; + +/***************************************************************************** + * + * QUERY: + * extend index [where ] + * + *****************************************************************************/ + +ExtendStmt: EXTEND INDEX index_name where_clause + { + ExtendStmt *n = makeNode(ExtendStmt); + n->idxname = $3; + n->whereClause = $4; + $$ = (Node *)n; + } + ; + +/***************************************************************************** + * + * QUERY: + * execute recipe + * + *****************************************************************************/ + +RecipeStmt: EXECUTE RECIPE recipe_name + { + RecipeStmt *n; + if (!IsTransactionBlock()) + elog(WARN, "EXECUTE RECIPE may only be used in begin/end transaction blocks."); + + n = makeNode(RecipeStmt); + n->recipeName = $3; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * define function + * (language = , returntype = + * [, arch_pct = ] + * [, disk_pct = ] + * [, byte_pct = ] + * [, perbyte_cpu = ] + * [, percall_cpu = ] + * [, iscachable]) + * [arg is ( { , })] + * as + * + *****************************************************************************/ + +ProcedureStmt: CREATE FUNCTION def_name def_args + RETURNS def_arg opt_with AS Sconst LANGUAGE Sconst + { + ProcedureStmt *n = makeNode(ProcedureStmt); + n->funcname = $3; + n->defArgs = $4; + n->returnType = (Node *)$6; + n->withClause = $7; + n->as = $9; + n->language = $11; + $$ = (Node *)n; + }; + +opt_with: WITH definition { $$ = $2; } + | /* EMPTY */ { $$ = NIL; } + ; + +def_args: '(' def_name_list ')' { $$ = $2; } + | '(' ')' { $$ = NIL; } + ; + +def_name_list: name_list; + + 
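+/*
+ * Illustrative example only (not part of the original grammar file): a
+ * statement accepted by the ProcedureStmt production above looks roughly
+ * like
+ *
+ *	create function widget_in (opaque) returns widget
+ *	    as '/usr/local/pgsql/lib/widget.so' language 'c'
+ *
+ * where the function name, argument type, return type, object-file path
+ * and language string are all made-up placeholders.
+ */
+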
+/***************************************************************************** + * + * QUERY: + * purge [before ] [after ] + * or + * purge [after][before ] + * + *****************************************************************************/ + +PurgeStmt: PURGE relation_name purge_quals + { + $3->relname = $2; + $$ = (Node *)$3; + } + ; + +purge_quals: before_clause + { + $$ = makeNode(PurgeStmt); + $$->beforeDate = $1; + $$->afterDate = NULL; + } + | after_clause + { + $$ = makeNode(PurgeStmt); + $$->beforeDate = NULL; + $$->afterDate = $1; + } + | before_clause after_clause + { + $$ = makeNode(PurgeStmt); + $$->beforeDate = $1; + $$->afterDate = $2; + } + | after_clause before_clause + { + $$ = makeNode(PurgeStmt); + $$->beforeDate = $2; + $$->afterDate = $1; + } + | /*EMPTY*/ + { + $$ = makeNode(PurgeStmt); + $$->beforeDate = NULL; + $$->afterDate = NULL; + } + ; + +before_clause: BEFORE date { $$ = $2; } +after_clause: AFTER date { $$ = $2; } + + +/***************************************************************************** + * + * QUERY: + * + * remove function + * (REMOVE FUNCTION "funcname" (arg1, arg2, ...)) + * remove operator + * (REMOVE OPERATOR "opname" (leftoperand_typ rightoperand_typ)) + * remove type + * (REMOVE TYPE "typename") + * remove rule + * (REMOVE RULE "rulename") + * + *****************************************************************************/ + +RemoveStmt: DROP remove_type name + { + RemoveStmt *n = makeNode(RemoveStmt); + n->removeType = $2; + n->name = $3; + $$ = (Node *)n; + } + ; + +remove_type: AGGREGATE { $$ = AGGREGATE; } + | Type { $$ = P_TYPE; } + | INDEX { $$ = INDEX; } + | RULE { $$ = RULE; } + | VIEW { $$ = VIEW; } + ; + +RemoveFuncStmt: DROP FUNCTION name '(' func_argtypes ')' + { + RemoveFuncStmt *n = makeNode(RemoveFuncStmt); + n->funcname = $3; + n->args = $5; + $$ = (Node *)n; + } + ; + +func_argtypes: name_list { $$ = $1; } + | /*EMPTY*/ { $$ = NIL; } + ; + +RemoveOperStmt: DROP OPERATOR all_Op '(' oper_argtypes ')' + { + RemoveOperStmt *n = makeNode(RemoveOperStmt); + n->opname = $3; + n->args = $5; + $$ = (Node *)n; + } + ; + +all_Op: Op | MathOp; + +MathOp: '+' { $$ = "+"; } + | '-' { $$ = "-"; } + | '*' { $$ = "*"; } + | '/' { $$ = "/"; } + | '<' { $$ = "<"; } + | '>' { $$ = ">"; } + | '=' { $$ = "="; } + ; + +oper_argtypes: name + { + elog(WARN, "parser: argument type missing (use NONE for unary operators)"); + } + | name ',' name + { $$ = makeList(makeString($1), makeString($3), -1); } + | NONE ',' name /* left unary */ + { $$ = makeList(NULL, makeString($3), -1); } + | name ',' NONE /* right unary */ + { $$ = makeList(makeString($1), NULL, -1); } + ; + +/***************************************************************************** + * + * QUERY: + * rename in [*] to + * rename to + * + *****************************************************************************/ + +RenameStmt: ALTER TABLE relation_name opt_inh_star + RENAME opt_column opt_name TO name + { + RenameStmt *n = makeNode(RenameStmt); + n->relname = $3; + n->inh = $4; + n->column = $7; + n->newname = $9; + $$ = (Node *)n; + } + ; + +opt_name: name { $$ = $1; } + | /*EMPTY*/ { $$ = NULL; } + ; + +opt_column: COLUMN { $$ = COLUMN; } + | /*EMPTY*/ { $$ = 0; } + ; + + +/***************************************************************************** + * + * QUERY: Define Rewrite Rule , Define Tuple Rule + * Define Rule + * + * only rewrite rule is supported -- ay 9/94 + * + *****************************************************************************/ + +RuleStmt: CREATE RULE 
name AS + { QueryIsRule=TRUE; } + ON event TO event_object where_clause + DO opt_instead OptStmtList + { + RuleStmt *n = makeNode(RuleStmt); + n->rulename = $3; + n->event = $7; + n->object = $9; + n->whereClause = $10; + n->instead = $12; + n->actions = $13; + $$ = (Node *)n; + } + ; + +OptStmtList: NOTHING { $$ = NIL; } + | OptimizableStmt { $$ = lcons($1, NIL); } + | '[' OptStmtBlock ']' { $$ = $2; } + ; + +OptStmtBlock: OptimizableStmt + { $$ = lcons($1, NIL); } + | OptimizableStmt ';' + { $$ = lcons($1, NIL); } + | OptStmtBlock OptimizableStmt + { $$ = lappend($1, $2); } + ; + +event_object: relation_name '.' attr_name + { + $$ = makeNode(Attr); + $$->relname = $1; + $$->paramNo = NULL; + $$->attrs = lcons(makeString($3), NIL); + $$->indirection = NIL; + } + | relation_name + { + $$ = makeNode(Attr); + $$->relname = $1; + $$->paramNo = NULL; + $$->attrs = NIL; + $$->indirection = NIL; + } + ; + +/* change me to select, update, etc. some day */ +event: SELECT { $$ = CMD_SELECT; } + | UPDATE { $$ = CMD_UPDATE; } + | DELETE { $$ = CMD_DELETE; } + | INSERT { $$ = CMD_INSERT; } + ; + +opt_instead: INSTEAD { $$ = TRUE; } + | /* EMPTY */ { $$ = FALSE; } + ; + + +/***************************************************************************** + * + * QUERY: + * NOTIFY can appear both in rule bodies and + * as a query-level command + * + *****************************************************************************/ + +NotifyStmt: NOTIFY relation_name + { + NotifyStmt *n = makeNode(NotifyStmt); + n->relname = $2; + $$ = (Node *)n; + } + ; + +ListenStmt: LISTEN relation_name + { + ListenStmt *n = makeNode(ListenStmt); + n->relname = $2; + $$ = (Node *)n; + } +; + + +/***************************************************************************** + * + * Transactions: + * + * abort transaction + * (ABORT) + * begin transaction + * (BEGIN) + * end transaction + * (END) + * + *****************************************************************************/ + +TransactionStmt: ABORT_TRANS TRANSACTION + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = ABORT_TRANS; + $$ = (Node *)n; + } + | BEGIN_TRANS TRANSACTION + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = BEGIN_TRANS; + $$ = (Node *)n; + } + | BEGIN_TRANS WORK + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = BEGIN_TRANS; + $$ = (Node *)n; + } + | COMMIT WORK + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = END_TRANS; + $$ = (Node *)n; + } + | END_TRANS TRANSACTION + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = END_TRANS; + $$ = (Node *)n; + } + | ROLLBACK WORK + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = ABORT_TRANS; + $$ = (Node *)n; + } + + | ABORT_TRANS + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = ABORT_TRANS; + $$ = (Node *)n; + } + | BEGIN_TRANS + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = BEGIN_TRANS; + $$ = (Node *)n; + } + | COMMIT + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = END_TRANS; + $$ = (Node *)n; + } + + | END_TRANS + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = END_TRANS; + $$ = (Node *)n; + } + | ROLLBACK + { + TransactionStmt *n = makeNode(TransactionStmt); + n->command = ABORT_TRANS; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * define view '('target-list ')' [where ] + * + 
*****************************************************************************/ + +ViewStmt: CREATE VIEW name AS RetrieveStmt + { + ViewStmt *n = makeNode(ViewStmt); + n->viewname = $3; + n->query = (Query *)$5; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * load "filename" + * + *****************************************************************************/ + +LoadStmt: LOAD file_name + { + LoadStmt *n = makeNode(LoadStmt); + n->filename = $2; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * createdb dbname + * + *****************************************************************************/ + +CreatedbStmt: CREATE DATABASE database_name + { + CreatedbStmt *n = makeNode(CreatedbStmt); + n->dbname = $3; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * destroydb dbname + * + *****************************************************************************/ + +DestroydbStmt: DROP DATABASE database_name + { + DestroydbStmt *n = makeNode(DestroydbStmt); + n->dbname = $3; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * cluster on + * + *****************************************************************************/ + +ClusterStmt: CLUSTER index_name ON relation_name + { + ClusterStmt *n = makeNode(ClusterStmt); + n->relname = $4; + n->indexname = $2; + $$ = (Node*)n; + } + ; + +/***************************************************************************** + * + * QUERY: + * vacuum + * + *****************************************************************************/ + +VacuumStmt: VACUUM + { + $$ = (Node *)makeNode(VacuumStmt); + } + | VACUUM relation_name + { + VacuumStmt *n = makeNode(VacuumStmt); + n->vacrel = $2; + $$ = (Node *)n; + } + ; + +/***************************************************************************** + * + * QUERY: + * EXPLAIN query + * + *****************************************************************************/ + +ExplainStmt: EXPLAIN explain_options OptimizableStmt + { + ExplainStmt *n = makeNode(ExplainStmt); + n->query = (Query*)$3; + n->options = $2; + $$ = (Node *)n; + } + ; + +explain_options: WITH name_list + { $$ = $2; } + | /*EMPTY*/ + { $$ = NIL; } + ; + +/***************************************************************************** + * * + * Optimizable Stmts: * + * * + * one of the five queries processed by the planner * + * * + * [ultimately] produces query-trees as specified * + * in the query-spec document in ~postgres/ref * + * * + *****************************************************************************/ + +OptimizableStmt: RetrieveStmt + | CursorStmt + | ReplaceStmt + | AppendStmt + | NotifyStmt + | DeleteStmt /* by default all are $$=$1 */ + ; + + +/***************************************************************************** + * + * QUERY: + * INSERT STATEMENTS + * + *****************************************************************************/ + +AppendStmt: INSERT INTO relation_name opt_column_list insert_rest + { + $5->relname = $3; + $5->cols = $4; + $$ = (Node *)$5; + } + ; + +insert_rest: VALUES '(' exprList ')' + { + $$ = makeNode(AppendStmt); + $$->exprs = $3; + $$->fromClause = NIL; + $$->whereClause = NULL; + } + | SELECT exprList from_clause where_clause + { + $$ = makeNode(AppendStmt); + $$->exprs = $2; + $$->fromClause = $3; 
+ $$->whereClause = $4; + } + ; + +opt_column_list: '(' columnList ')' { $$ = $2; } + | /*EMPTY*/ { $$ = NIL; } + ; + +columnList: + columnList ',' columnElem + { $$ = lappend($1, $3); } + | columnElem + { $$ = lcons($1, NIL); } + ; + +columnElem: Id opt_indirection + { + Ident *id = makeNode(Ident); + id->name = $1; + id->indirection = $2; + $$ = (Node *)id; + } + ; + +exprList: exprList ',' exprElem + { $$ = lappend($1, $3); } + | exprElem + { $$ = lcons($1, NIL); } + + ; + +exprElem: a_expr + { $$ = (Node *)$1; } + | relation_name '.' '*' + { + Attr *n = makeNode(Attr); + n->relname = $1; + n->paramNo = NULL; + n->attrs = lcons(makeString("*"), NIL); + n->indirection = NIL; + $$ = (Node *)n; + } + | '*' + { + Attr *n = makeNode(Attr); + n->relname = "*"; + n->paramNo = NULL; + n->attrs = NIL; + n->indirection = NIL; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * DELETE STATEMENTS + * + *****************************************************************************/ + +DeleteStmt: DELETE FROM relation_name + where_clause + { + DeleteStmt *n = makeNode(DeleteStmt); + n->relname = $3; + n->whereClause = $4; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * ReplaceStmt (UPDATE) + * + *****************************************************************************/ + +ReplaceStmt: UPDATE relation_name + SET res_target_list + from_clause + where_clause + { + ReplaceStmt *n = makeNode(ReplaceStmt); + n->relname = $2; + n->targetList = $4; + n->fromClause = $5; + n->whereClause = $6; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * CURSOR STATEMENTS + * + *****************************************************************************/ + +CursorStmt: DECLARE name opt_binary CURSOR FOR + SELECT opt_unique res_target_list2 + from_clause where_clause sort_clause + { + CursorStmt *n = makeNode(CursorStmt); + + /* from PORTAL name */ + /* + * 15 august 1991 -- since 3.0 postgres does locking + * right, we discovered that portals were violating + * locking protocol. portal locks cannot span xacts. + * as a short-term fix, we installed the check here. 
+ * -- mao + */ + if (!IsTransactionBlock()) + elog(WARN, "Named portals may only be used in begin/end transaction blocks."); + + n->portalname = $2; + n->binary = $3; + n->unique = $7; + n->targetList = $8; + n->fromClause = $9; + n->whereClause = $10; + n->orderClause = $11; + $$ = (Node *)n; + } + ; + + +/***************************************************************************** + * + * QUERY: + * SELECT STATEMENTS + * + *****************************************************************************/ + +RetrieveStmt: SELECT opt_unique res_target_list2 + result from_clause where_clause + group_clause having_clause + sort_clause + { + RetrieveStmt *n = makeNode(RetrieveStmt); + n->unique = $2; + n->targetList = $3; + n->into = $4; + n->fromClause = $5; + n->whereClause = $6; + n->groupClause = $7; + n->havingClause = $8; + n->orderClause = $9; + $$ = (Node *)n; + } + ; + +result: INTO TABLE relation_name + { $$= $3; /* should check for archive level */ } + | /*EMPTY*/ + { $$ = NULL; } + ; + +opt_unique: DISTINCT { $$ = "*"; } + | DISTINCT ON Id { $$ = $3; } + | /*EMPTY*/ { $$ = NULL;} + ; + +sort_clause: ORDER BY sortby_list { $$ = $3; } + | /*EMPTY*/ { $$ = NIL; } + ; + +sortby_list: sortby + { $$ = lcons($1, NIL); } + | sortby_list ',' sortby + { $$ = lappend($1, $3); } + ; + +sortby: Id OptUseOp + { + $$ = makeNode(SortBy); + $$->name = $1; + $$->useOp = $2; + } + | attr OptUseOp + { + yyerror("parse error: use 'sort by attribute_name'"); + } + ; + +OptUseOp: USING Op { $$ = $2; } + | USING '<' { $$ = "<"; } + | USING '>' { $$ = ">"; } + | ASC { $$ = "<"; } + | DESC { $$ = ">"; } + | /*EMPTY*/ { $$ = "<"; /*default*/ } + ; + + +index_params: index_elem { $$ = lcons($1,NIL); } + | func_index { $$ = lcons($1,NIL); } + ; + +/*index_list: + index_list ',' index_elem + { $$ = lappend($1, $3); } + | index_elem + { $$ = lcons($1, NIL); } + ;*/ + +func_index: name '(' name_list ')' opt_class + { + $$ = makeNode(IndexElem); + $$->name = $1; + $$->args = $3; + $$->class = $5; + } + ; + +index_elem: attr_name opt_class + { + $$ = makeNode(IndexElem); + $$->name = $1; + $$->args = NIL; + $$->class = $2; + } + ; + +opt_class: class + | WITH class { $$ = $2; } + | /*EMPTY*/ { $$ = NULL; } + ; + +/* + * jimmy bell-style recursive queries aren't supported in the + * current system. + * + * ...however, recursive addattr and rename supported. make special + * cases for these. + * + * XXX i believe '*' should be the default behavior, but... 
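+ *
+ * (hypothetical example: "alter table emp* add column bonus int4" applies
+ * the addattr to emp and, recursively, to its subclasses, while plain
+ * "emp" affects only emp itself; the names are made up)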
+ */ +opt_inh_star: '*' { $$ = TRUE; } + | /*EMPTY*/ { $$ = FALSE; } + ; + +relation_name_list: name_list ; + +name_list: name + { $$=lcons(makeString($1),NIL); } + | name_list ',' name + { $$=lappend($1,makeString($3)); } + ; + +group_clause: GROUP BY groupby_list { $$ = $3; } + | /*EMPTY*/ { $$ = NIL; } + ; + +groupby_list: groupby { $$ = lcons($1, NIL); } + | groupby_list ',' groupby { $$ = lappend($1, $3); } + ; + +groupby: Id + { + Ident *n = makeNode(Ident); + n->name = $1; + n->indirection = NULL; + $$ = (Node*)n; + } + | attr + { + $$ = (Node*)$1; + } + ; + +having_clause: HAVING a_expr { $$ = $2; } + | /*EMPTY*/ { $$ = NULL; } + ; + +/***************************************************************************** + * + * clauses common to all Optimizable Stmts: + * from_clause - + * where_clause - + * + *****************************************************************************/ + +from_clause: FROM from_list { $$ = $2; } + | /*EMPTY*/ { $$ = NIL; } + ; + +from_list: from_list ',' from_val + { $$ = lappend($1, $3); } + | from_val + { $$ = lcons($1, NIL); } + ; + +from_val: relation_expr AS var_name + { + $$ = makeNode(RangeVar); + $$->relExpr = $1; + $$->name = $3; + } + | relation_expr var_name + { + $$ = makeNode(RangeVar); + $$->relExpr = $1; + $$->name = $2; + } + | relation_expr + { + $$ = makeNode(RangeVar); + $$->relExpr = $1; + $$->name = NULL; + } + ; + +where_clause: WHERE a_expr { $$ = $2; } + | /*EMPTY*/ { $$ = NULL; /* no qualifiers */ } + ; + +relation_expr: relation_name + { + /* normal relations */ + $$ = makeNode(RelExpr); + $$->relname = $1; + $$->inh = FALSE; + $$->timeRange = NULL; + } + | relation_name '*' %prec '=' + { + /* inheiritance query */ + $$ = makeNode(RelExpr); + $$->relname = $1; + $$->inh = TRUE; + $$->timeRange = NULL; + } + | relation_name time_range + { + /* time-qualified query */ + $$ = makeNode(RelExpr); + $$->relname = $1; + $$->inh = FALSE; + $$->timeRange = $2; + } + ; + + +time_range: '[' opt_range_start ',' opt_range_end ']' + { + $$ = makeNode(TimeRange); + $$->startDate = $2; + $$->endDate = $4; + } + | '[' date ']' + { + $$ = makeNode(TimeRange); + $$->startDate = $2; + $$->endDate = NULL; + } + ; + +opt_range_start: date + | /*EMPTY*/ { $$ = "epoch"; } + ; + +opt_range_end: date + | /*EMPTY*/ { $$ = "now"; } + ; + +opt_array_bounds: '[' ']' nest_array_bounds + { $$ = lcons(makeInteger(-1), $3); } + | '[' Iconst ']' nest_array_bounds + { $$ = lcons(makeInteger($2), $4); } + | /* EMPTY */ + { $$ = NIL; } + ; + +nest_array_bounds: '[' ']' nest_array_bounds + { $$ = lcons(makeInteger(-1), $3); } + | '[' Iconst ']' nest_array_bounds + { $$ = lcons(makeInteger($2), $4); } + | /*EMPTY*/ + { $$ = NIL; } + ; + +typname: name + { + char *tname = xlateSqlType($1); + $$ = makeNode(TypeName); + $$->name = tname; + + /* Is this the name of a complex type? If so, implement + * it as a set. + */ + if (!strcmp(saved_relname, tname)) { + /* This attr is the same type as the relation + * being defined. The classic example: create + * emp(name=text,mgr=emp) + */ + $$->setof = TRUE; + }else if (get_typrelid((Type)type(tname)) + != InvalidOid) { + /* (Eventually add in here that the set can only + * contain one element.) + */ + $$->setof = TRUE; + } else { + $$->setof = FALSE; + } + } + | SETOF name + { + $$ = makeNode(TypeName); + $$->name = $2; + $$->setof = TRUE; + } + ; + +Typename: typname opt_array_bounds + { + $$ = $1; + $$->arrayBounds = $2; + } + | name '(' Iconst ')' + { + /* + * The following implements char() and varchar(). 
+ * We do it here instead of the 'typname:' production + * because we don't want to allow arrays of varchar(). + * I haven't thought about whether that will work or not. + * - ay 6/95 + */ + $$ = makeNode(TypeName); + if (!strcasecmp($1, "char")) { + $$->name = "bpchar"; /* strdup("bpchar"); */ + } else if (!strcasecmp($1, "varchar")) { + $$->name = "varchar"; /* strdup("varchar"); */ + } else { + yyerror("parse error"); + } + if ($3 < 1) { + elog(WARN, "length for '%s' type must be at least 1", + $1); + } else if ($3 > 4096) { + /* we can store a char() of length up to the size + of a page (8KB) - page headers and friends but + just to be safe here... - ay 6/95 */ + elog(WARN, "length for '%s' type cannot exceed 4096", + $1); + } + /* we actually implement this sort of like a varlen, so + the first 4 bytes is the length. (the difference + between this and "text" is that we blank-pad and + truncate where necessary */ + $$->typlen = 4 + $3; + } + ; + + +/***************************************************************************** + * + * expression grammar, still needs some cleanup + * + *****************************************************************************/ + +a_expr: attr opt_indirection + { + $1->indirection = $2; + $$ = (Node *)$1; + } + | AexprConst + { $$ = $1; } + | '-' a_expr %prec UMINUS + { $$ = makeA_Expr(OP, "-", NULL, $2); } + | a_expr '+' a_expr + { $$ = makeA_Expr(OP, "+", $1, $3); } + | a_expr '-' a_expr + { $$ = makeA_Expr(OP, "-", $1, $3); } + | a_expr '/' a_expr + { $$ = makeA_Expr(OP, "/", $1, $3); } + | a_expr '*' a_expr + { $$ = makeA_Expr(OP, "*", $1, $3); } + | a_expr '<' a_expr + { $$ = makeA_Expr(OP, "<", $1, $3); } + | a_expr '>' a_expr + { $$ = makeA_Expr(OP, ">", $1, $3); } + | a_expr '=' a_expr + { $$ = makeA_Expr(OP, "=", $1, $3); } + | ':' a_expr + { $$ = makeA_Expr(OP, ":", NULL, $2); } + | ';' a_expr + { $$ = makeA_Expr(OP, ";", NULL, $2); } + | '|' a_expr + { $$ = makeA_Expr(OP, "|", NULL, $2); } + | AexprConst TYPECAST Typename + { + /* AexprConst can be either A_Const or ParamNo */ + if (nodeTag($1) == T_A_Const) { + ((A_Const *)$1)->typename = $3; + }else { + ((ParamNo *)$1)->typename = $3; + } + $$ = (Node *)$1; + } + | CAST AexprConst AS Typename + { + /* AexprConst can be either A_Const or ParamNo */ + if (nodeTag($2) == T_A_Const) { + ((A_Const *)$2)->typename = $4; + }else { + ((ParamNo *)$2)->typename = $4; + } + $$ = (Node *)$2; + } + | '(' a_expr ')' + { $$ = $2; } + | a_expr Op a_expr + { $$ = makeA_Expr(OP, $2, $1, $3); } + | a_expr LIKE a_expr + { $$ = makeA_Expr(OP, "~~", $1, $3); } + | a_expr NOT LIKE a_expr + { $$ = makeA_Expr(OP, "!~~", $1, $4); } + | Op a_expr + { $$ = makeA_Expr(OP, $1, NULL, $2); } + | a_expr Op + { $$ = makeA_Expr(OP, $2, $1, NULL); } + | Id + { /* could be a column name or a relation_name */ + Ident *n = makeNode(Ident); + n->name = $1; + n->indirection = NULL; + $$ = (Node *)n; + } + | name '(' '*' ')' + { + FuncCall *n = makeNode(FuncCall); + Ident *star = makeNode(Ident); + + /* cheap hack for aggregate (eg. 
count) */ + star->name = "oid"; + n->funcname = $1; + n->args = lcons(star, NIL); + $$ = (Node *)n; + } + | name '(' ')' + { + FuncCall *n = makeNode(FuncCall); + n->funcname = $1; + n->args = NIL; + $$ = (Node *)n; + } + | name '(' expr_list ')' + { + FuncCall *n = makeNode(FuncCall); + n->funcname = $1; + n->args = $3; + $$ = (Node *)n; + } + | a_expr ISNULL + { $$ = makeA_Expr(ISNULL, NULL, $1, NULL); } + | a_expr NOTNULL + { $$ = makeA_Expr(NOTNULL, NULL, $1, NULL); } + | a_expr AND a_expr + { $$ = makeA_Expr(AND, NULL, $1, $3); } + | a_expr OR a_expr + { $$ = makeA_Expr(OR, NULL, $1, $3); } + | NOT a_expr + { $$ = makeA_Expr(NOT, NULL, NULL, $2); } + ; + +opt_indirection: '[' a_expr ']' opt_indirection + { + A_Indices *ai = makeNode(A_Indices); + ai->lidx = NULL; + ai->uidx = $2; + $$ = lcons(ai, $4); + } + | '[' a_expr ':' a_expr ']' opt_indirection + { + A_Indices *ai = makeNode(A_Indices); + ai->lidx = $2; + ai->uidx = $4; + $$ = lcons(ai, $6); + } + | /* EMPTY */ + { $$ = NIL; } + ; + +expr_list: a_expr + { $$ = lcons($1, NIL); } + | expr_list ',' a_expr + { $$ = lappend($1, $3); } + ; + +attr: relation_name '.' attrs + { + $$ = makeNode(Attr); + $$->relname = $1; + $$->paramNo = NULL; + $$->attrs = $3; + $$->indirection = NULL; + } + | ParamNo '.' attrs + { + $$ = makeNode(Attr); + $$->relname = NULL; + $$->paramNo = $1; + $$->attrs = $3; + $$->indirection = NULL; + } + ; + +attrs: attr_name + { $$ = lcons(makeString($1), NIL); } + | attrs '.' attr_name + { $$ = lappend($1, makeString($3)); } + | attrs '.' '*' + { $$ = lappend($1, makeString("*")); } + ; + + +/***************************************************************************** + * + * target lists + * + *****************************************************************************/ + +res_target_list: res_target_list ',' res_target_el + { $$ = lappend($1,$3); } + | res_target_el + { $$ = lcons($1, NIL); } + | '*' + { + ResTarget *rt = makeNode(ResTarget); + Attr *att = makeNode(Attr); + att->relname = "*"; + att->paramNo = NULL; + att->attrs = NULL; + att->indirection = NIL; + rt->name = NULL; + rt->indirection = NULL; + rt->val = (Node *)att; + $$ = lcons(rt, NIL); + } + ; + +res_target_el: Id opt_indirection '=' a_expr + { + $$ = makeNode(ResTarget); + $$->name = $1; + $$->indirection = $2; + $$->val = (Node *)$4; + } + | attr opt_indirection + { + $$ = makeNode(ResTarget); + $$->name = NULL; + $$->indirection = $2; + $$->val = (Node *)$1; + } + | relation_name '.' '*' + { + Attr *att = makeNode(Attr); + att->relname = $1; + att->paramNo = NULL; + att->attrs = lcons(makeString("*"), NIL); + att->indirection = NIL; + $$ = makeNode(ResTarget); + $$->name = NULL; + $$->indirection = NULL; + $$->val = (Node *)att; + } + ; + +/* +** target list for select. 
+** should get rid of the other but is still needed by the defunct retrieve into +** and update (uses a subset) +*/ +res_target_list2: + res_target_list2 ',' res_target_el2 + { $$ = lappend($1, $3); } + | res_target_el2 + { $$ = lcons($1, NIL); } + | '*' + { + ResTarget *rt = makeNode(ResTarget); + Attr *att = makeNode(Attr); + att->relname = "*"; + att->paramNo = NULL; + att->attrs = NULL; + att->indirection = NIL; + rt->name = NULL; + rt->indirection = NULL; + rt->val = (Node *)att; + $$ = lcons(rt, NIL); + } + ; + +/* AS is not optional because shift/red conflict with unary ops */ +res_target_el2: a_expr AS Id + { + $$ = makeNode(ResTarget); + $$->name = $3; + $$->indirection = NULL; + $$->val = (Node *)$1; + } + | a_expr + { + $$ = makeNode(ResTarget); + $$->name = NULL; + $$->indirection = NULL; + $$->val = (Node *)$1; + } + | relation_name '.' '*' + { + Attr *att = makeNode(Attr); + att->relname = $1; + att->paramNo = NULL; + att->attrs = lcons(makeString("*"), NIL); + att->indirection = NIL; + $$ = makeNode(ResTarget); + $$->name = NULL; + $$->indirection = NULL; + $$->val = (Node *)att; + } + ; + +opt_id: Id { $$ = $1; } + | /* EMPTY */ { $$ = NULL; } + ; + +relation_name: SpecialRuleRelation + { + $$ = $1; + strcpy(saved_relname, $1); + } + | Id + { + /* disallow refs to magic system tables */ + if (strcmp(LogRelationName, $1) == 0 + || strcmp(VariableRelationName, $1) == 0 + || strcmp(TimeRelationName, $1) == 0 + || strcmp(MagicRelationName, $1) == 0) { + elog(WARN, "%s cannot be accessed by users", $1); + } else { + $$ = $1; + } + strcpy(saved_relname, $1); + } + ; + +database_name: Id { $$ = $1; }; +access_method: Id { $$ = $1; }; +attr_name: Id { $$ = $1; }; +class: Id { $$ = $1; }; +index_name: Id { $$ = $1; }; +var_name: Id { $$ = $1; }; +name: Id { $$ = $1; }; + +date: Sconst { $$ = $1; }; +file_name: Sconst { $$ = $1; }; +recipe_name: Id { $$ = $1; }; + +AexprConst: Iconst + { + A_Const *n = makeNode(A_Const); + n->val.type = T_Integer; + n->val.val.ival = $1; + $$ = (Node *)n; + } + | FCONST + { + A_Const *n = makeNode(A_Const); + n->val.type = T_Float; + n->val.val.dval = $1; + $$ = (Node *)n; + } + | Sconst + { + A_Const *n = makeNode(A_Const); + n->val.type = T_String; + n->val.val.str = $1; + $$ = (Node *)n; + } + | ParamNo + { $$ = (Node *)$1; } + | Pnull + { + A_Const *n = makeNode(A_Const); + n->val.type = T_Null; + $$ = (Node *)n; + } + ; + +ParamNo: PARAM + { + $$ = makeNode(ParamNo); + $$->number = $1; + } + ; + +NumConst: Iconst { $$ = makeInteger($1); } + | FCONST { $$ = makeFloat($1); } + ; + +Iconst: ICONST { $$ = $1; }; +Sconst: SCONST { $$ = $1; }; + +Id: IDENT { $$ = $1; }; + +SpecialRuleRelation: CURRENT + { + if (QueryIsRule) + $$ = "*CURRENT*"; + else + elog(WARN,"CURRENT used in non-rule query"); + } + | NEW + { + if (QueryIsRule) + $$ = "*NEW*"; + else + elog(WARN,"NEW used in non-rule query"); + } + ; + +Type: P_TYPE; +Pnull: PNULL; + + +%% + +static Node *makeA_Expr(int op, char *opname, Node *lexpr, Node *rexpr) +{ + A_Expr *a = makeNode(A_Expr); + a->oper = op; + a->opname = opname; + a->lexpr = lexpr; + a->rexpr = rexpr; + return (Node *)a; +} + +static char * +xlateSqlType(char *name) +{ + if (!strcasecmp(name,"int") || + !strcasecmp(name,"integer")) + return "int4"; /* strdup("int4") -- strdup leaks memory here */ + else if (!strcasecmp(name, "smallint")) + return "int2"; + else if (!strcasecmp(name, "float") || + !strcasecmp(name, "real")) + return "float4"; + else + return name; +} + +void parser_init(Oid *typev, int nargs) +{ + QueryIsRule 
= false; + saved_relname[0]= '\0'; + + param_type_init(typev, nargs); +} + diff --git a/src/backend/parser/keywords.c b/src/backend/parser/keywords.c new file mode 100644 index 00000000000..b6cd549bf9b --- /dev/null +++ b/src/backend/parser/keywords.c @@ -0,0 +1,179 @@ +/*------------------------------------------------------------------------- + * + * keywords.c-- + * lexical token lookup for reserved words in postgres SQL + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/parser/keywords.c,v 1.1.1.1 1996/07/09 06:21:40 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include +#include "postgres.h" + +#include "nodes/pg_list.h" +#include "nodes/parsenodes.h" +#include "parse.h" +#include "utils/elog.h" +#include "parser/keywords.h" +#include "parser/dbcommands.h" /* createdb, destroydb stop_vacuum */ + + +/* + * List of (keyword-name, keyword-token-value) pairs. + * + * !!WARNING!!: This list must be sorted, because binary + * search is used to locate entries. + */ +static ScanKeyword ScanKeywords[] = { + /* name value */ + { "abort", ABORT_TRANS }, + { "acl", ACL }, + { "add", ADD }, + { "after", AFTER }, + { "aggregate", AGGREGATE }, + { "all", ALL }, + { "alter", ALTER }, + { "and", AND }, + { "append", APPEND }, + { "archIve", ARCHIVE }, /* XXX crooked: I < _ */ + { "arch_store", ARCH_STORE }, + { "archive", ARCHIVE }, /* XXX crooked: i > _ */ + { "as", AS }, + { "asc", ASC }, + { "backward", BACKWARD }, + { "before", BEFORE }, + { "begin", BEGIN_TRANS }, + { "binary", BINARY }, + { "by", BY }, + { "cast", CAST }, + { "change", CHANGE }, + { "close", CLOSE }, + { "cluster", CLUSTER }, + { "column", COLUMN }, + { "commit", COMMIT }, + { "copy", COPY }, + { "create", CREATE }, + { "current", CURRENT }, + { "cursor", CURSOR }, + { "database", DATABASE }, + { "declare", DECLARE }, + { "delete", DELETE }, + { "delimiters", DELIMITERS }, + { "desc", DESC }, + { "distinct", DISTINCT }, + { "do", DO }, + { "drop", DROP }, + { "end", END_TRANS }, + { "execute", EXECUTE }, + { "explain", EXPLAIN }, + { "extend", EXTEND }, + { "fetch", FETCH }, + { "for", FOR }, + { "forward", FORWARD }, + { "from", FROM }, + { "function", FUNCTION }, + { "grant", GRANT }, + { "group", GROUP }, + { "having", HAVING }, + { "heavy", HEAVY }, + { "in", IN }, + { "index", INDEX }, + { "inherits", INHERITS }, + { "insert", INSERT }, + { "instead", INSTEAD }, + { "into", INTO }, + { "isnull", ISNULL }, + { "language", LANGUAGE }, + { "light", LIGHT }, + { "like", LIKE }, + { "listen", LISTEN }, + { "load", LOAD }, + { "merge", MERGE }, + { "move", MOVE }, + { "new", NEW }, + { "none", NONE }, + { "not", NOT }, + { "nothing", NOTHING }, + { "notify", NOTIFY }, + { "notnull", NOTNULL }, + { "null", PNULL }, + { "on", ON }, + { "operator", OPERATOR }, + { "option", OPTION }, + { "or", OR }, + { "order", ORDER }, + { "privileges", PRIVILEGES }, + { "public", PUBLIC }, + { "purge", PURGE }, + { "recipe", RECIPE }, + { "rename", RENAME }, + { "replace", REPLACE }, + { "retrieve", RETRIEVE }, + { "returns", RETURNS }, + { "revoke", REVOKE }, + { "rollback", ROLLBACK }, + { "rule", RULE }, + { "select", SELECT }, + { "set", SET }, + { "setof", SETOF }, + { "stdin", STDIN }, + { "stdout", STDOUT }, + { "store", STORE }, + { "table", TABLE }, + { "to", TO }, + { "transaction", TRANSACTION }, + { "type", P_TYPE }, + { "update", UPDATE }, + { "using", USING }, + { "vacuum", VACUUM }, + 
{ "values", VALUES }, + { "version", VERSION }, + { "view", VIEW }, + { "where", WHERE }, + { "with", WITH }, + { "work", WORK }, +}; + +ScanKeyword * +ScanKeywordLookup(char *text) +{ + ScanKeyword *low = &ScanKeywords[0]; + ScanKeyword *high = endof(ScanKeywords) - 1; + ScanKeyword *middle; + int difference; + + while (low <= high) { + middle = low + (high - low) / 2; + /* keywords case-insensitive (for SQL) -- ay 8/94 */ + difference = strcasecmp(middle->name, text); + if (difference == 0) + return (middle); + else if (difference < 0) + low = middle + 1; + else + high = middle - 1; + } + + return (NULL); +} + +char* +AtomValueGetString(int atomval) +{ + ScanKeyword *low = &ScanKeywords[0]; + ScanKeyword *high = endof(ScanKeywords) - 1; + int keyword_list_length = (high-low); + int i; + + for (i=0; i < keyword_list_length ; i++ ) + if (ScanKeywords[i].value == atomval ) + return(ScanKeywords[i].name); + + elog(WARN,"AtomGetString called with bogus atom # : %d", atomval ); + return(NULL); +} diff --git a/src/backend/parser/keywords.h b/src/backend/parser/keywords.h new file mode 100644 index 00000000000..d26d76fbaeb --- /dev/null +++ b/src/backend/parser/keywords.h @@ -0,0 +1,25 @@ +/*------------------------------------------------------------------------- + * + * keywords.h-- + * string,atom lookup thingy, reduces strcmp traffic greatly + * in the bowels of the system. Look for actual defs in lib/C/atoms.c + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: keywords.h,v 1.1.1.1 1996/07/09 06:21:40 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef KEYWORDS_H +#define KEYWORDS_H + +typedef struct ScanKeyword { + char *name; + int value; +} ScanKeyword; + +extern ScanKeyword *ScanKeywordLookup(char *text); +extern char* AtomValueGetString(int atomval); + +#endif /* KEYWORDS_H */ diff --git a/src/backend/parser/parse_query.c b/src/backend/parser/parse_query.c new file mode 100644 index 00000000000..37c955017ef --- /dev/null +++ b/src/backend/parser/parse_query.c @@ -0,0 +1,653 @@ +/*------------------------------------------------------------------------- + * + * parse_query.c-- + * take an "optimizable" stmt and make the query tree that + * the planner requires. 
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/parser/Attic/parse_query.c,v 1.1.1.1 1996/07/09 06:21:40 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include +#include "postgres.h" + +#include "access/heapam.h" +#include "utils/tqual.h" +#include "access/tupmacs.h" +#include "utils/builtins.h" +#include "utils/elog.h" +#include "utils/palloc.h" +#include "utils/acl.h" /* for ACL_NO_PRIV_WARNING */ +#include "utils/rel.h" /* Relation stuff */ + +#include "utils/syscache.h" +#include "catalog/pg_type.h" +#include "catalog_utils.h" +#include "parser/parse_query.h" +/* #include "parser/io.h" */ +#include "utils/lsyscache.h" + +#include "nodes/pg_list.h" +#include "nodes/primnodes.h" +#include "nodes/parsenodes.h" +#include "nodes/makefuncs.h" + +Oid *param_type_info; +int pfunc_num_args; + +extern int Quiet; + + +/* given range variable, return id of variable; position starts with 1 */ +int +RangeTablePosn(List *rtable, char *rangevar) +{ + int index; + List *temp; + + index = 1; +/* temp = pstate->p_rtable; */ + temp = rtable; + while (temp != NIL) { + RangeTblEntry *rt_entry = lfirst(temp); + + if (!strcmp(rt_entry->refname, rangevar)) + return index; + + temp = lnext(temp); + index++; + } + return(0); +} + +char* +VarnoGetRelname(ParseState *pstate, int vnum) +{ + int i; + List *temp = pstate->p_rtable; + for( i = 1; i < vnum ; i++) + temp = lnext(temp); + return(((RangeTblEntry*)lfirst(temp))->relname); +} + + +RangeTblEntry * +makeRangeTableEntry(char *relname, + bool inh, + TimeRange *timeRange, + char *refname) +{ + Relation relation; + RangeTblEntry *ent = makeNode(RangeTblEntry); + + ent->relname = pstrdup(relname); + ent->refname = refname; + + relation = heap_openr(ent->relname); + if (relation == NULL) { + elog(WARN,"%s: %s", + relname, ACL_NO_PRIV_WARNING); + } + + /* + * Flags - zero or more from archive,inheritance,union,version + * or recursive (transitive closure) + * [we don't support them all -- ay 9/94 ] + */ + ent->inh = inh; + + ent->timeRange = timeRange; + + /* RelOID */ + ent->relid = RelationGetRelationId(relation); + + /* + * close the relation we're done with it for now. 
+ */ + heap_close(relation); + return ent; +} + +/* + * expandAll - + * makes a list of attributes + * assumes reldesc caching works + */ +List * +expandAll(ParseState* pstate, char *relname, int *this_resno) +{ + Relation rdesc; + List *tall = NIL; + Var *varnode; + int i, maxattrs, first_resno; + int type_id, type_len, vnum; + char *physical_relname; + + first_resno = *this_resno; + + /* printf("\nExpanding %.*s.all\n", NAMEDATALEN, relname); */ + vnum = RangeTablePosn(pstate->p_rtable, relname); + if ( vnum == 0 ) { + pstate->p_rtable = lappend(pstate->p_rtable, + makeRangeTableEntry(relname, FALSE, NULL, + relname)); + vnum = RangeTablePosn(pstate->p_rtable, relname); + } + + physical_relname = VarnoGetRelname(pstate, vnum); + + rdesc = heap_openr(physical_relname); + + if (rdesc == NULL ) { + elog(WARN,"Unable to expand all -- heap_openr failed on %s", + physical_relname); + return NIL; + } + maxattrs = RelationGetNumberOfAttributes(rdesc); + + for ( i = maxattrs-1 ; i > -1 ; --i ) { + char *attrname; + TargetEntry *rte = makeNode(TargetEntry); + + attrname = pstrdup ((rdesc->rd_att->attrs[i]->attname).data); + varnode = (Var*)make_var(pstate, relname, attrname, &type_id); + type_len = (int)tlen(get_id_type(type_id)); + + /* Even if the elements making up a set are complex, the + * set itself is not. */ + + rte->resdom = makeResdom((AttrNumber) i + first_resno, + (Oid)type_id, + (Size)type_len, + attrname, + (Index)0, + (Oid)0, + 0); + rte->expr = (Node *)varnode; + tall = lcons(rte, tall); + } + + /* + * Close the reldesc - we're done with it now + */ + heap_close(rdesc); + *this_resno = first_resno + maxattrs; + return(tall); +} + +TimeQual +makeTimeRange(char *datestring1, + char *datestring2, + int timecode) /* 0 = snapshot , 1 = timerange */ +{ + TimeQual qual; + AbsoluteTime t1,t2; + + switch (timecode) { + case 0: + if (datestring1 == NULL) { + elog(WARN, "MakeTimeRange: bad snapshot arg"); + } + t1 = nabstimein(datestring1); + if (!AbsoluteTimeIsValid(t1)) { + elog(WARN, "bad snapshot time: \"%s\"", + datestring1); + } + qual = TimeFormSnapshotTimeQual(t1); + break; + case 1: + if (datestring1 == NULL) { + t1 = NOSTART_ABSTIME; + } else { + t1 = nabstimein(datestring1); + if (!AbsoluteTimeIsValid(t1)) { + elog(WARN, + "bad range start time: \"%s\"", + datestring1); + } + } + if (datestring2 == NULL) { + t2 = NOEND_ABSTIME; + } else { + t2 = nabstimein(datestring2); + if (!AbsoluteTimeIsValid(t2)) { + elog(WARN, + "bad range end time: \"%s\"", + datestring2); + } + } + qual = TimeFormRangedTimeQual(t1,t2); + break; + default: + elog(WARN, "MakeTimeRange: internal parser error"); + } + return qual; +} + +static void +disallow_setop(char *op, Type optype, Node *operand) +{ + if (operand==NULL) + return; + + if (nodeTag(operand) == T_Iter) { + elog(NOTICE, "An operand to the '%s' operator returns a set of %s,", + op, tname(optype)); + elog(WARN, "but '%s' takes single values, not sets.", + op); + } +} + +static Node * +make_operand(char *opname, + Node *tree, + int orig_typeId, + int true_typeId) +{ + Node *result; + Type true_type; + Datum val; + Oid infunc; + + if (tree != NULL) { + result = tree; + true_type = get_id_type(true_typeId); + disallow_setop(opname, true_type, result); + if (true_typeId != orig_typeId) { /* must coerce */ + Const *con= (Const *)result; + + Assert(nodeTag(result)==T_Const); + val = (Datum)textout((struct varlena *) + con->constvalue); + infunc = typeid_get_retinfunc(true_typeId); + con = makeNode(Const); + con->consttype = true_typeId; + con->constlen 
= tlen(true_type); + con->constvalue = (Datum)fmgr(infunc, + val, + get_typelem(true_typeId), + -1 /* for varchar() type */); + con->constisnull = false; + con->constbyval = true; + con->constisset = false; + result = (Node *)con; + } + }else { + Const *con= makeNode(Const); + + con->consttype = true_typeId; + con->constlen = 0; + con->constvalue = (Datum)(struct varlena *)NULL; + con->constisnull = true; + con->constbyval = true; + con->constisset = false; + result = (Node *)con; + } + + return result; +} + + +Expr * +make_op(char *opname, Node *ltree, Node *rtree) +{ + int ltypeId, rtypeId; + Operator temp; + OperatorTupleForm opform; + Oper *newop; + Node *left, *right; + Expr *result; + + if (rtree == NULL) { + + /* right operator */ + ltypeId = (ltree==NULL) ? UNKNOWNOID : exprType(ltree); + temp = right_oper(opname, ltypeId); + opform = (OperatorTupleForm) GETSTRUCT(temp); + left = make_operand(opname, ltree, ltypeId, opform->oprleft); + right = NULL; + + }else if (ltree == NULL) { + + /* left operator */ + rtypeId = (rtree==NULL) ? UNKNOWNOID : exprType(rtree); + temp = left_oper(opname, rtypeId); + opform = (OperatorTupleForm) GETSTRUCT(temp); + right = make_operand(opname, rtree, rtypeId, opform->oprright); + left = NULL; + + }else { + + /* binary operator */ + ltypeId = (ltree==NULL) ? UNKNOWNOID : exprType(ltree); + rtypeId = (rtree==NULL) ? UNKNOWNOID : exprType(rtree); + temp = oper(opname, ltypeId, rtypeId); + opform = (OperatorTupleForm) GETSTRUCT(temp); + left = make_operand(opname, ltree, ltypeId, opform->oprleft); + right = make_operand(opname, rtree, rtypeId, opform->oprright); + } + + newop = makeOper(oprid(temp), /* opno */ + InvalidOid, /* opid */ + opform->oprresult, /* operator result type */ + 0, + NULL); + + result = makeNode(Expr); + result->typeOid = opform->oprresult; + result->opType = OP_EXPR; + result->oper = (Node *)newop; + + if (!left) { + result->args = lcons(right, NIL); + } else if (!right) { + result->args = lcons(left, NIL); + } else { + result->args = lcons(left, lcons(right, NIL)); + } + + return result; +} + +int +find_atttype(Oid relid, char *attrname) +{ + int attid, vartype; + Relation rd; + + rd = heap_open(relid); + if (!RelationIsValid(rd)) { + rd = heap_openr(tname(get_id_type(relid))); + if (!RelationIsValid(rd)) + elog(WARN, "cannot compute type of att %s for relid %d", + attrname, relid); + } + + attid = nf_varattno(rd, attrname); + + if (attid == InvalidAttrNumber) + elog(WARN, "Invalid attribute %s\n", attrname); + + vartype = att_typeid(rd , attid); + + /* + * close relation we're done with it now + */ + heap_close(rd); + + return (vartype); +} + + +Var * +make_var(ParseState *pstate, char *relname, char *attrname, int *type_id) +{ + Var *varnode; + int vnum, attid, vartypeid; + Relation rd; + + vnum = RangeTablePosn(pstate->p_rtable, relname); + + if (vnum == 0) { + pstate->p_rtable = + lappend(pstate->p_rtable, + makeRangeTableEntry(relname, FALSE, + NULL, relname)); + vnum = RangeTablePosn (pstate->p_rtable, relname); + relname = VarnoGetRelname(pstate, vnum); + } else { + relname = VarnoGetRelname(pstate, vnum); + } + + rd = heap_openr(relname); +/* relid = RelationGetRelationId(rd); */ + attid = nf_varattno(rd, (char *) attrname); + if (attid == InvalidAttrNumber) + elog(WARN, "Invalid attribute %s\n", attrname); + vartypeid = att_typeid(rd, attid); + + varnode = makeVar(vnum, attid, vartypeid, vnum, attid); + + /* + * close relation we're done with it now + */ + heap_close(rd); + + *type_id = vartypeid; + return varnode; +} + 
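
The range table bookkeeping above hinges on one small convention: RangeTablePosn() hands back 1-based positions and returns 0 for "not in the table yet", which is what triggers the append-then-relookup step in make_var() and expandAll(). The following is a standalone sketch of just that convention, assuming invented DemoRte/demo_* names in place of the backend's List and RangeTblEntry machinery, so it compiles and runs on its own; the real code additionally opens the relation with heap_openr() and records its OID in the entry.

/*
 * Standalone sketch -- NOT backend code.  The DemoRte type and demo_*
 * helpers are invented stand-ins for the backend's List of RangeTblEntry
 * nodes; the real makeRangeTableEntry() also opens the relation with
 * heap_openr() and stores its OID.  What this keeps is the convention
 * used above: positions are 1-based and 0 means "no entry yet".
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct DemoRte {
	char refname[64];		/* range variable as written in the query */
	struct DemoRte *next;
} DemoRte;

/* 1-based position of refname in the list, or 0 if absent */
static int
demo_rt_posn(DemoRte *rtable, const char *refname)
{
	int index = 1;

	for (; rtable != NULL; rtable = rtable->next, index++)
		if (strcmp(rtable->refname, refname) == 0)
			return index;
	return 0;
}

/* append a new entry and return the (possibly new) list head */
static DemoRte *
demo_rt_append(DemoRte *rtable, const char *refname)
{
	DemoRte *ent = calloc(1, sizeof(DemoRte));
	DemoRte **tail = &rtable;

	if (ent == NULL) {
		fprintf(stderr, "out of memory\n");
		exit(1);
	}
	strncpy(ent->refname, refname, sizeof(ent->refname) - 1);
	while (*tail != NULL)
		tail = &(*tail)->next;
	*tail = ent;
	return rtable;
}

int
main(void)
{
	DemoRte *rtable = NULL;
	const char *rel = "pay";
	int vnum;

	rtable = demo_rt_append(rtable, "emp");
	rtable = demo_rt_append(rtable, "dept");

	/* same pattern as make_var(): look up, append on a miss, look up again */
	vnum = demo_rt_posn(rtable, rel);
	if (vnum == 0) {
		rtable = demo_rt_append(rtable, rel);
		vnum = demo_rt_posn(rtable, rel);
	}
	printf("emp=%d dept=%d pay=%d\n",
	       demo_rt_posn(rtable, "emp"),
	       demo_rt_posn(rtable, "dept"),
	       vnum);
	return 0;			/* prints: emp=1 dept=2 pay=3 */
}

In the real parser the position that comes back is the varno that make_var() passes to makeVar() above.
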
+/* + * make_array_ref() -- Make an array reference node. + * + * Array references can hang off of arbitrary nested dot (or + * function invocation) expressions. This routine takes a + * tree generated by ParseFunc() and an array index and + * generates a new array reference tree. We do some simple + * typechecking to be sure the dereference is valid in the + * type system, but we don't do any bounds checking here. + * + * indirection is a list of A_Indices + */ +ArrayRef * +make_array_ref(Node *expr, + List *indirection) +{ + Oid typearray; + HeapTuple type_tuple; + TypeTupleForm type_struct_array, type_struct_element; + ArrayRef *aref; + int reftype; + List *upperIndexpr=NIL; + List *lowerIndexpr=NIL; + + typearray = (Oid) exprType(expr); + + type_tuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(typearray), + 0,0,0); + + if (!HeapTupleIsValid(type_tuple)) + elog(WARN, "make_array_ref: Cache lookup failed for type %d\n", + typearray); + + /* get the array type struct from the type tuple */ + type_struct_array = (TypeTupleForm) GETSTRUCT(type_tuple); + + if (type_struct_array->typelem == InvalidOid) { + elog(WARN, "make_array_ref: type %s is not an array", + (Name)&(type_struct_array->typname.data[0])); + } + + /* get the type tuple for the element type */ + type_tuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type_struct_array->typelem), + 0,0,0); + if (!HeapTupleIsValid(type_tuple)) + elog(WARN, "make_array_ref: Cache lookup failed for type %d\n", + typearray); + + type_struct_element = (TypeTupleForm) GETSTRUCT(type_tuple); + + while(indirection!=NIL) { + A_Indices *ind = lfirst(indirection); + if (ind->lidx) { + /* XXX assumes all lower indices non null in this case + */ + lowerIndexpr = lappend(lowerIndexpr, ind->lidx); + } + upperIndexpr = lappend(upperIndexpr, ind->uidx); + indirection = lnext(indirection); + } + aref = makeNode(ArrayRef); + aref->refattrlength = type_struct_array->typlen; + aref->refelemlength = type_struct_element->typlen; + aref->refelemtype = type_struct_array->typelem; + aref->refelembyval = type_struct_element->typbyval; + aref->refupperindexpr = upperIndexpr; + aref->reflowerindexpr = lowerIndexpr; + aref->refexpr = expr; + aref->refassgnexpr = NULL; + + if (lowerIndexpr == NIL) /* accessing a single array element */ + reftype = aref->refelemtype; + else /* request to clip a part of the array, the result is another array */ + reftype = typearray; + + /* we change it to reflect the true type; since the original refelemtype + * doesn't seem to get used anywhere. 
- ay 10/94 + */ + aref->refelemtype = reftype; + + return aref; +} + +ArrayRef * +make_array_set(Expr *target_expr, + List *upperIndexpr, + List *lowerIndexpr, + Expr *expr) +{ + Oid typearray; + HeapTuple type_tuple; + TypeTupleForm type_struct_array; + TypeTupleForm type_struct_element; + ArrayRef *aref; + int reftype; + + typearray = exprType((Node*)target_expr); + + type_tuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(typearray), + 0,0,0); + + if (!HeapTupleIsValid(type_tuple)) + elog(WARN, "make_array_ref: Cache lookup failed for type %d\n", + typearray); + + /* get the array type struct from the type tuple */ + type_struct_array = (TypeTupleForm) GETSTRUCT(type_tuple); + + if (type_struct_array->typelem == InvalidOid) { + elog(WARN, "make_array_ref: type %s is not an array", + (Name)&(type_struct_array->typname.data[0])); + } + /* get the type tuple for the element type */ + type_tuple = SearchSysCacheTuple(TYPOID, + ObjectIdGetDatum(type_struct_array->typelem), + 0,0,0); + + if (!HeapTupleIsValid(type_tuple)) + elog(WARN, "make_array_ref: Cache lookup failed for type %d\n", + typearray); + + type_struct_element = (TypeTupleForm) GETSTRUCT(type_tuple); + + aref = makeNode(ArrayRef); + aref->refattrlength = type_struct_array->typlen; + aref->refelemlength = type_struct_element->typlen; + aref->refelemtype = type_struct_array->typelem; + aref->refelembyval = type_struct_element->typbyval; + aref->refupperindexpr = upperIndexpr; + aref->reflowerindexpr = lowerIndexpr; + aref->refexpr = (Node*)target_expr; + aref->refassgnexpr = (Node*)expr; + + if (lowerIndexpr == NIL) /* accessing a single array element */ + reftype = aref->refelemtype; + else /* request to set a part of the array, by another array */ + reftype = typearray; + + aref->refelemtype = reftype; + + return aref; +} + +/* + * + * make_const - + * + * - takes a lispvalue, (as returned to the yacc routine by the lexer) + * extracts the type, and makes the appropriate type constant + * by invoking the (c-callable) lisp routine c-make-const + * via the lisp_call() mechanism + * + * eventually, produces a "const" lisp-struct as per nodedefs.cl + */ +Const * +make_const(Value *value) +{ + Type tp; + Datum val; + Const *con; + + switch(nodeTag(value)) { + case T_Integer: + tp = type("int4"); + val = Int32GetDatum(intVal(value)); + break; + + case T_Float: + { + float32 dummy; + tp = type("float4"); + + dummy = (float32)palloc(sizeof(float32data)); + *dummy = floatVal(value); + + val = Float32GetDatum(dummy); + } + break; + + case T_String: + tp = type("unknown"); /* unknown for now, will be type coerced */ + val = PointerGetDatum(textin(strVal(value))); + break; + + case T_Null: + default: + { + if (nodeTag(value)!=T_Null) + elog(NOTICE,"unknown type : %d\n", nodeTag(value)); + + /* null const */ + con = makeConst(0, 0, (Datum)NULL, TRUE, 0, FALSE); + return NULL /*con*/; + } + } + + con = makeConst(typeid(tp), + tlen(tp), + val, + FALSE, + tbyval(tp), + FALSE); /* not a set */ + + return (con); +} + +/* + * param_type_init() + * + * keep enough information around fill out the type of param nodes + * used in postquel functions + */ +void +param_type_init(Oid* typev, int nargs) +{ + pfunc_num_args = nargs; + param_type_info = typev; +} + +Oid +param_type(int t) +{ + if ((t >pfunc_num_args) ||(t ==0)) return InvalidOid; + return param_type_info[t-1]; +} + diff --git a/src/backend/parser/parse_query.h b/src/backend/parser/parse_query.h new file mode 100644 index 00000000000..d9541c56cd4 --- /dev/null +++ 
b/src/backend/parser/parse_query.h @@ -0,0 +1,72 @@ +/*------------------------------------------------------------------------- + * + * parse_query.h-- + * prototypes for parse_query.c. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: parse_query.h,v 1.1.1.1 1996/07/09 06:21:40 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PARSE_QUERY_H +#define PARSE_QUERY_H + +#include "nodes/pg_list.h" +#include "nodes/parsenodes.h" +#include "parser/catalog_utils.h" +#include "parser/parse_state.h" + +typedef struct QueryTreeList { + int len; /* number of queries */ + Query** qtrees; +} QueryTreeList; + +extern int RangeTablePosn(List *rtable, char *rangevar); +extern char *VarnoGetRelname(ParseState *pstate, int vnum); +extern RangeTblEntry *makeRangeTableEntry(char *relname, bool inh, + TimeRange *timeRange, char *refname); +extern List *expandAll(ParseState *pstate, char *relname, int *this_resno); +extern TimeQual makeTimeRange(char *datestring1, char *datestring2, + int timecode); +extern Expr *make_op(char *opname, Node *ltree, Node *rtree); + +extern int find_atttype(Oid relid, char *attrname); +extern Var *make_var(ParseState *pstate, + char *relname, char *attrname, int *type_id); +extern ArrayRef *make_array_ref(Node *array, List *indirection); +extern ArrayRef *make_array_set(Expr *target_expr, List *upperIndexpr, + List *lowerIndexpr, Expr *expr); +extern Const *make_const(Value *value); + +extern void param_type_init(Oid* typev, int nargs); +extern Oid param_type(int t); + +/* parser.c (was ylib.c) */ +extern QueryTreeList *parser(char *str, Oid *typev, int nargs); +extern Node *parser_typecast(Value *expr, TypeName *typename, int typlen); +extern Node *parser_typecast2(Node *expr, int exprType, Type tp, int typlen); +extern Aggreg *ParseAgg(char *aggname, Oid basetype, Node *target); + +/* + * analyze.c + */ + +#if 0 +extern List *p_rtable; +extern int NumLevels; +#endif + +Oid exprType(Node *expr); +ParseState* makeParseState(); +QueryTreeList *parse_analyze(List *querytree_list); + +/* define in parse_query.c, used in gram.y */ +extern Oid *param_type_info; +extern int pfunc_num_args; + +/* useful macros */ +#define ISCOMPLEX(type) (typeid_get_relid((Oid)type) ? 
true : false) + +#endif /* PARSE_QUERY_H */ diff --git a/src/backend/parser/parse_state.h b/src/backend/parser/parse_state.h new file mode 100644 index 00000000000..6ea7219e6bf --- /dev/null +++ b/src/backend/parser/parse_state.h @@ -0,0 +1,27 @@ +/*------------------------------------------------------------------------- + * + * parse_state.h-- + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: parse_state.h,v 1.1.1.1 1996/07/09 06:21:40 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +#ifndef PARSE_STATE_H +#define PARSE_STATE_H + +/* state information used during parse analysis */ +typedef struct ParseState { + int p_last_resno; + List *p_target_resnos; + Relation parser_current_rel; + List *p_rtable; + int p_query_is_rule; + int p_numAgg; + List *p_aggs; +} ParseState; + + +#endif /*PARSE_QUERY_H*/ diff --git a/src/backend/parser/parser.c b/src/backend/parser/parser.c new file mode 100644 index 00000000000..e0dae907588 --- /dev/null +++ b/src/backend/parser/parser.c @@ -0,0 +1,449 @@ +/*------------------------------------------------------------------------- + * + * ylib.c-- + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/parser/parser.c,v 1.1.1.1 1996/07/09 06:21:40 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include +#ifndef WIN32 +#include +#endif /*WIN32 */ +#include /* for MAXPATHLEN */ + +#include "utils/elog.h" +#include "parser/catalog_utils.h" +#include "nodes/pg_list.h" +#include "utils/exc.h" +#include "utils/excid.h" +#include "utils/palloc.h" +#include "catalog/pg_aggregate.h" +#include "catalog/pg_type.h" +#include "nodes/primnodes.h" +#include "nodes/plannodes.h" +#include "nodes/execnodes.h" +#include "nodes/relation.h" +#include "parser/parse_query.h" +#include "utils/builtins.h" +#include "utils/lsyscache.h" +#include "access/heapam.h" +#include "nodes/makefuncs.h" +#include "optimizer/clauses.h" + +char *parseString; /* the char* which holds the string to be parsed */ +char *parseCh; /* a pointer used during parsing to walk down ParseString*/ + +List *parsetree = NIL; + +static void fixupsets(); +static void define_sets(); +/* + * parser-- returns a list of parse trees + * + * CALLER is responsible for free'ing the list returned + */ +QueryTreeList * +parser(char *str, Oid *typev, int nargs) +{ + QueryTreeList* queryList; + int yyresult; + +#if defined(FLEX_SCANNER) + extern void DeleteBuffer(void); +#endif /* FLEX_SCANNER */ + + init_io(); + + /* Set things up to read from the string, if there is one */ + if (strlen(str) != 0) { + parseString = (char *) palloc(strlen(str) + 1); + memmove(parseString,str,strlen(str)+1); + } + + parser_init(typev, nargs); + yyresult = yyparse(); + +#if defined(FLEX_SCANNER) + DeleteBuffer(); +#endif /* FLEX_SCANNER */ + + clearerr(stdin); + + if (yyresult) { /* error */ + return((QueryTreeList*)NULL); + } + + queryList = parse_analyze(parsetree); + +#ifdef SETS_FIXED + /* Fixing up sets calls the parser, so it reassigns the global + * variable parsetree. So save the real parsetree. 
+ */ + savetree = parsetree; + foreach (parse, savetree) { /* savetree is really a list of parses */ + + /* find set definitions embedded in query */ + fixupsets((Query *)lfirst(parse)); + + } + return savetree; +#endif + + return queryList; +} + +static void +fixupsets(Query *parse) +{ + if (parse == NULL) + return; + if (parse->commandType==CMD_UTILITY) /* utility */ + return; + if (parse->commandType!=CMD_INSERT) + return; + define_sets(parse); +} + +/* Recursively find all of the Consts in the parsetree. Some of + * these may represent a set. The value of the Const will be the + * query (a string) which defines the set. Call SetDefine to define + * the set, and store the OID of the new set in the Const instead. + */ +static void +define_sets(Node *clause) +{ +#ifdef SETS_FIXED + Oid setoid; + Type t = type("oid"); + Oid typeoid = typeid(t); + Size oidsize = tlen(t); + bool oidbyval = tbyval(t); + + if (clause==NULL) { + return; + } else if (IsA(clause,LispList)) { + define_sets(lfirst(clause)); + define_sets(lnext(clause)); + } else if (IsA(clause,Const)) { + if (get_constisnull((Const)clause) || + !get_constisset((Const)clause)) { + return; + } + setoid = SetDefine(((Const*)clause)->constvalue, + get_id_typname(((Const*)clause)->consttype)); + set_constvalue((Const)clause, setoid); + set_consttype((Const)clause,typeoid); + set_constlen((Const)clause,oidsize); + set_constbyval((Const)clause,oidbyval); + } else if ( IsA(clause,Iter) ) { + define_sets(((Iter*)clause)->iterexpr); + } else if (single_node (clause)) { + return; + } else if (or_clause(clause)) { + List *temp; + /* mapcan */ + foreach (temp, ((Expr*)clause)->args) { + define_sets(lfirst(temp)); + } + } else if (is_funcclause (clause)) { + List *temp; + /* mapcan */ + foreach(temp, ((Expr*)clause)->args) { + define_sets(lfirst(temp)); + } + } else if (IsA(clause,ArrayRef)) { + define_sets(((ArrayRef*)clause)->refassgnexpr); + } else if (not_clause (clause)) { + define_sets (get_notclausearg (clause)); + } else if (is_opclause (clause)) { + define_sets(get_leftop (clause)); + define_sets(get_rightop (clause)); + } +#endif +} + +#define PSIZE(PTR) (*((int32 *)(PTR) - 1)) +Node * +parser_typecast(Value *expr, TypeName *typename, int typlen) +{ + /* check for passing non-ints */ + Const *adt; + Datum lcp; + Type tp; + char type_string[16]; + int32 len; + char *cp = NULL; + char *const_string; + bool string_palloced = false; + + switch(nodeTag(expr)) { + case T_String: + const_string = DatumGetPointer(expr->val.str); + break; + case T_Integer: + const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string, "%ld", expr->val.ival); + break; + default: + elog(WARN, + "parser_typecast: cannot cast this expression to type \"%s\"", + typename->name); + } + + if (typename->arrayBounds != NIL) { + sprintf(type_string,"_%s", typename->name); + tp = (Type) type(type_string); + } else { + tp = (Type) type(typename->name); + } + + len = tlen(tp); + +#if 0 /* fix me */ + switch ( CInteger(lfirst(expr)) ) { + case 23: /* int4 */ + const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string,"%d", ((Const*)lnext(expr))->constvalue); + break; + + case 19: /* char16 */ + const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string,"%s", ((Const*)lnext(expr))->constvalue); + break; + + case 18: /* char */ + const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string,"%c", ((Const)lnext(expr))->constvalue); + break; + + case 701:/* float8 */ + 
const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string,"%f", ((Const)lnext(expr))->constvalue); + break; + + case 25: /* text */ + const_string = DatumGetPointer(((Const)lnext(expr))->constvalue); + const_string = (char *) textout((struct varlena *)const_string); + break; + + case 705: /* unknown */ + const_string = DatumGetPointer(((Const)lnext(expr))->constvalue); + const_string = (char *) textout((struct varlena *)const_string); + break; + + default: + elog(WARN,"unknown type %d", CInteger(lfirst(expr))); + } +#endif + + cp = instr2 (tp, const_string, typlen); + + if (!tbyvalue(tp)) { + if (len >= 0 && len != PSIZE(cp)) { + char *pp; + pp = (char *) palloc(len); + memmove(pp, cp, len); + cp = pp; + } + lcp = PointerGetDatum(cp); + } else { + switch(len) { + case 1: + lcp = Int8GetDatum(cp); + break; + case 2: + lcp = Int16GetDatum(cp); + break; + case 4: + lcp = Int32GetDatum(cp); + break; + default: + lcp = PointerGetDatum(cp); + break; + } + } + + adt = makeConst(typeid(tp), + len, + (Datum)lcp , + 0, + tbyvalue(tp), + 0 /* not a set */); + + if (string_palloced) + pfree(const_string); + + return (Node*)adt; +} + +Node * +parser_typecast2(Node *expr, int exprType, Type tp, int typlen) +{ + /* check for passing non-ints */ + Const *adt; + Datum lcp; + int32 len = tlen(tp); + char *cp = NULL; + + char *const_string; + bool string_palloced = false; + + Assert(IsA(expr,Const)); + + switch (exprType) { + case 23: /* int4 */ + const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string,"%d", + (int) ((Const*)expr)->constvalue); + break; + case 19: /* char16 */ + const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string,"%s", + (char*) ((Const*)expr)->constvalue); + break; + case 18: /* char */ + const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string,"%c", + (char) ((Const*)expr)->constvalue); + break; + case 700: /* float4 */ + { + float32 floatVal = + DatumGetFloat32(((Const*)expr)->constvalue); + const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string,"%f", *floatVal); + break; + } + case 701:/* float8 */ + { + float64 floatVal = + DatumGetFloat64(((Const*)expr)->constvalue); + const_string = (char *) palloc(256); + string_palloced = true; + sprintf(const_string,"%f", *floatVal); + break; + } + case 25: /* text */ + const_string = + DatumGetPointer(((Const*)expr)->constvalue ); + const_string = (char *) textout((struct varlena *)const_string); + break; + case 705: /* unknown */ + const_string = + DatumGetPointer(((Const*)expr)->constvalue ); + const_string = (char *) textout((struct varlena *)const_string); + break; + default: + elog(WARN,"unknown type%d ",exprType); + } + + cp = instr2 (tp, const_string, typlen); + + + if (!tbyvalue(tp)) { + if (len >= 0 && len != PSIZE(cp)) { + char *pp; + pp = (char *) palloc(len); + memmove(pp, cp, len); + cp = pp; + } + lcp = PointerGetDatum(cp); + } else { + switch(len) { + case 1: + lcp = Int8GetDatum(cp); + break; + case 2: + lcp = Int16GetDatum(cp); + break; + case 4: + lcp = Int32GetDatum(cp); + break; + default: + lcp = PointerGetDatum(cp); + break; + } + } + + adt = makeConst((Oid)typeid(tp), + (Size)len, + (Datum)lcp, + 0, + 0 /*was omitted*/, + 0 /* not a set */); + /* + printf("adt %s : %d %d %d\n",CString(expr),typeid(tp) , + len,cp); + */ + if (string_palloced) pfree(const_string); + + return ((Node*) adt); +} + +Aggreg * +ParseAgg(char *aggname, Oid basetype, Node *target) +{ + Oid fintype; + 
Oid vartype; + Oid xfn1; + Form_pg_aggregate aggform; + Aggreg *aggreg; + HeapTuple theAggTuple; + + theAggTuple = SearchSysCacheTuple(AGGNAME, PointerGetDatum(aggname), + ObjectIdGetDatum(basetype), + 0, 0); + if (!HeapTupleIsValid(theAggTuple)) { + elog(WARN, "aggregate %s does not exist", aggname); + } + + aggform = (Form_pg_aggregate) GETSTRUCT(theAggTuple); + fintype = aggform->aggfinaltype; + xfn1 = aggform->aggtransfn1; + + if (nodeTag(target) != T_Var) + elog(WARN, "parser: aggregate can only be applied on an attribute"); + + /* only aggregates with transfn1 need a base type */ + if (OidIsValid(xfn1)) { + basetype = aggform->aggbasetype; + vartype = ((Var*)target)->vartype; + + if (basetype != vartype) { + Type tp1, tp2, get_id_type(); + + tp1 = get_id_type(basetype); + tp2 = get_id_type(vartype); + elog(NOTICE, "Aggregate type mismatch:"); + elog(WARN, "%s works on %s, not %s", aggname, + tname(tp1), tname(tp2)); + } + } + + aggreg = makeNode(Aggreg); + aggreg->aggname = pstrdup(aggname); + aggreg->basetype = aggform->aggbasetype; + aggreg->aggtype = fintype; + + aggreg->target = target; + + return aggreg; +} + + + diff --git a/src/backend/parser/parsetree.h b/src/backend/parser/parsetree.h new file mode 100644 index 00000000000..37a9f4a1765 --- /dev/null +++ b/src/backend/parser/parsetree.h @@ -0,0 +1,80 @@ +/*------------------------------------------------------------------------- + * + * parsetree.h-- + * Routines to access various components and subcomponents of + * parse trees. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: parsetree.h,v 1.1.1.1 1996/07/09 06:21:41 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PARSETREE_H +#define PARSETREE_H /* include once only */ + +/* ---------------- + * need pg_list.h for definitions of CAR(), etc. macros + * ---------------- + */ +#include "nodes/pg_list.h" +#include "nodes/parsenodes.h" + +/* ---------------- + * range table macros + * + * parse tree: + * (root targetlist qual) + * ^^^^ + * parse root: + * (numlevels cmdtype resrel rangetable priority ruleinfo nestdotinfo) + * ^^^^^^^^^^ + * range table: + * (rtentry ...) + * + * rtentry: + * note: this might be wrong, I don't understand how + * rt_time / rt_archive_time work together. anyways it + * looks something like: + * + * (relname ? relid timestuff flags rulelocks) + * or (new/cur relname relid timestuff flags rulelocks) + * + * someone who knows more should correct this -cim 6/9/91 + * ---------------- + */ + +#define rt_relname(rt_entry) \ + ((!strcmp(((rt_entry)->refname),"*CURRENT*") ||\ + !strcmp(((rt_entry)->refname),"*NEW*")) ? ((rt_entry)->refname) : \ + ((char *)(rt_entry)->relname)) + +/* + * rt_fetch + * rt_store + * + * Access and (destructively) replace rangetable entries. + * + */ +#define rt_fetch(rangetable_index, rangetable) \ + ((RangeTblEntry*)nth((rangetable_index)-1, rangetable)) + +#define rt_store(rangetable_index, rangetable, rt) \ + set_nth(rangetable, (rangetable_index)-1, rt) + +/* + * getrelid + * getrelname + * + * Given the range index of a relation, return the corresponding + * relation id or relation name. 
+ */ +#define getrelid(rangeindex,rangetable) \ + ((RangeTblEntry*)nth((rangeindex)-1, rangetable))->relid + +#define getrelname(rangeindex, rangetable) \ + rt_relname((RangeTblEntry*)nth((rangeindex)-1, rangetable)) + +#endif /* PARSETREE_H */ + diff --git a/src/backend/parser/scan.l b/src/backend/parser/scan.l new file mode 100644 index 00000000000..d3b3b9a3f26 --- /dev/null +++ b/src/backend/parser/scan.l @@ -0,0 +1,255 @@ +%{ +/*------------------------------------------------------------------------- + * + * scan.l-- + * lexical scanner for POSTGRES + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/parser/scan.l,v 1.1.1.1 1996/07/09 06:21:41 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#ifndef WIN32 +#include +#endif /* WIN32 */ +#ifndef __linux__ +#include +#else +#include +#endif /* __linux__ */ +#include + +#include "postgres.h" +#include "miscadmin.h" +#include "nodes/pg_list.h" +#include "nodes/parsenodes.h" +#include "parser/keywords.h" +#include "parser/scansup.h" +#include "parse.h" +#include "utils/elog.h" +#include "utils/palloc.h" + +extern char *parseString; +extern char *parseCh; + +/* some versions of lex define this as a macro */ +#if defined(yywrap) +#undef yywrap +#endif /* yywrap */ + +#if defined(FLEX_SCANNER) +/* MAX_PARSE_BUFFER is defined in miscadmin.h */ +#define YYLMAX MAX_PARSE_BUFFER +extern int myinput(char* buf, int max); +#undef YY_INPUT +#define YY_INPUT(buf,result,max) {result = myinput(buf,max);} +#else +#undef input +int input(); +#undef unput +void unput(char); +#endif /* FLEX_SCANNER */ + +extern YYSTYPE yylval; +%} + +digit [0-9] +letter [_A-Za-z] +letter_or_digit [_A-Za-z0-9] + +identifier {letter}{letter_or_digit}* + +self [,()\[\].;$\:\+\-\*\/\<\>\=\|] +op_and_self [\~\!\@\#\%\^\&\|\`\?\$\:\+\-\*\/\<\>\=] +op_only [\~\!\@\#\%\^\&\`\?] + +operator ({op_and_self}{op_and_self}+)|{op_only}+ + /* we used to allow double-quoted strings, but SQL doesn't */ + /* so we won't either*/ +quote ' + +integer -?{digit}+ +real -?{digit}+\.{digit}+([Ee][-+]?{digit}+)? + +param \${integer} + +comment "--".*\n + +space [ \t\n\f] +other . 
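
A few of the pattern definitions above are easy to misread. {real} requires digits on both sides of the decimal point (so `1.` or `.5` will not be scanned as a single FCONST), {param} is a `$` immediately followed by an integer (the rule below turns it into PARAM via atoi(&yytext[1])), and {operator} matches either two or more op_and_self characters or a run of op_only characters, so a lone `+` or `=` still comes back through the {self} rule as a single-character token. The fragment below is a standalone plain-C restatement of the numeric and parameter patterns, assuming invented demo_* names; it is an illustration only, not part of the scanner.

/*
 * Standalone sketch -- not part of the scanner.  A plain-C spelling of the
 * {integer}, {real} and {param} definitions above, with invented demo_*
 * names; the real tokenizing is done by lex/flex from the rules that follow.
 */
#include <ctype.h>
#include <stdio.h>

/* -?{digit}+ */
static int
demo_is_integer(const char *s)
{
	if (*s == '-')
		s++;
	if (!isdigit((unsigned char) *s))
		return 0;
	while (isdigit((unsigned char) *s))
		s++;
	return *s == '\0';
}

/* -?{digit}+\.{digit}+([Ee][-+]?{digit}+)?  -- digits required on BOTH sides */
static int
demo_is_real(const char *s)
{
	if (*s == '-')
		s++;
	if (!isdigit((unsigned char) *s))
		return 0;
	while (isdigit((unsigned char) *s))
		s++;
	if (*s++ != '.')
		return 0;
	if (!isdigit((unsigned char) *s))
		return 0;
	while (isdigit((unsigned char) *s))
		s++;
	if (*s == 'e' || *s == 'E') {
		s++;
		if (*s == '+' || *s == '-')
			s++;
		if (!isdigit((unsigned char) *s))
			return 0;
		while (isdigit((unsigned char) *s))
			s++;
	}
	return *s == '\0';
}

/* \${integer} -- the rule body uses atoi(&yytext[1]) to recover the number */
static int
demo_is_param(const char *s)
{
	return *s == '$' && demo_is_integer(s + 1);
}

int
main(void)
{
	const char *samples[] = { "42", "-7", "3.14", "1.", ".5", "6.02E23", "$2", "$x" };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-8s integer=%d real=%d param=%d\n", samples[i],
		       demo_is_integer(samples[i]),
		       demo_is_real(samples[i]),
		       demo_is_param(samples[i]));
	return 0;
}
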
+ +%% +{comment} { /* ignore */ } + +"::" { return TYPECAST; } + +{self} { return (yytext[0]); } + +{operator} { + yylval.str = pstrdup((char*)yytext); + return (Op); + } +{param} { yylval.ival = atoi((char*)&yytext[1]); + return (PARAM); + } +{integer} { + yylval.ival = atoi((char*)yytext); + return (ICONST); + } +{real} { + yylval.dval = atof((char*)yytext); + return (FCONST); + } +{quote} { + char literal[MAX_PARSE_BUFFER]; + int i = 0; + int c = 0; + /* quote_seen can be either \ or ' because + we handle both cases of \' and '' for + quoting quotes*/ + int quote_seen = 0; + + while (i < MAX_PARSE_BUFFER - 1) { + c = input(); + if (quote_seen != 0) { + if (quote_seen == '\'' && + c != '\'') { + /* a non-quote follows a single quote */ + /* so we've hit the end of the literal */ + if (c != '\0' && c != EOF) + unput(c); /* put back the extra char we read*/ + i = i - 1; + break; /* break out of the while loop */ + } + /* if we reach here, we're still in */ + /* the string literal */ + literal[i++] = c; + quote_seen = 0; + continue; + } + if (c == '\0' || c == EOF) { + elog(WARN,"unterminated quoted string literal"); + /* not reached */ + } + literal[i++] = c; + if (c == '\'' || c == '\\') + quote_seen = c; + } + if ( i == MAX_PARSE_BUFFER - 1) { + elog (WARN, "unterminated quote string. parse buffer of %d chars exceeded", MAX_PARSE_BUFFER); + /* not reached */ + } + literal[i] = '\0'; + yylval.str = pstrdup(scanstr(literal)); + return (SCONST); + } +{identifier} { + ScanKeyword *keyword; + + keyword = ScanKeywordLookup((char*)yytext); + if (keyword != NULL) { + return (keyword->value); + } else { + yylval.str = pstrdup((char*)yytext); + return (IDENT); + } + } +{space} { /* ignore */ } + +{other} { return (yytext[0]); } + +%% + +void yyerror(char message[]) +{ + elog(WARN, "parser: %s at or near \"%s\"\n", message, yytext); +} + +int yywrap() +{ + return(1); +} + +/* + init_io: + called by postgres before any actual parsing is done +*/ +void +init_io() +{ + /* it's important to set this to NULL + because input()/myinput() checks the non-nullness of parseCh + to know when to pass the string to lex/flex */ + parseCh = NULL; +#if defined(FLEX_SCANNER) + if (YY_CURRENT_BUFFER) + yy_flush_buffer(YY_CURRENT_BUFFER); +#endif /* FLEX_SCANNER */ + BEGIN INITIAL; +} + + +#if !defined(FLEX_SCANNER) +/* get lex input from a string instead of from stdin */ +int +input() +{ + if (parseCh == NULL) { + parseCh = parseString; + return(*parseCh++); + } else if (*parseCh == '\0') { + return(0); + } else { + return(*parseCh++); + } +} + +/* undo lex input from a string instead of from stdin */ +void +unput(char c) +{ + if (parseCh == NULL) { + elog(FATAL, "Unput() failed.\n"); + } else if (c != 0) { + *--parseCh = c; + } +} +#endif /* !defined(FLEX_SCANNER) */ + +#ifdef FLEX_SCANNER +/* input routine for flex to read input from a string instead of a file */ +int +myinput(char* buf, int max) +{ + int len, copylen; + + if (parseCh == NULL) { + len = strlen(parseString); + if (len >= max) + copylen = max - 1; + else + copylen = len; + if (copylen > 0) + memcpy(buf, parseString, copylen); + buf[copylen] = '\0'; + parseCh = parseString; + return copylen; + } else { + return 0; /* end of string */ + } +} + +char* +CurScan(void) +{ +/* + return (InputFrag ? 
InputFrag : parseCh) + + (yy_c_buf_p - &yy_current_buffer->yy_ch_buf[yy_n_chars]); +*/ +} +#endif /* FLEX_SCANNER */ + diff --git a/src/backend/parser/scansup.c b/src/backend/parser/scansup.c new file mode 100644 index 00000000000..bd7ef26004e --- /dev/null +++ b/src/backend/parser/scansup.c @@ -0,0 +1,148 @@ +/*------------------------------------------------------------------------- + * + * scansup.c-- + * support routines for the lex/flex scanner, used by both the normal + * backend as well as the bootstrap backend + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/parser/scansup.c,v 1.1.1.1 1996/07/09 06:21:41 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include +#include "c.h" +#include "postgres.h" +#include "miscadmin.h" +#include "utils/elog.h" +#include "parser/scansup.h" + +/* + * Scanner error handler. + */ +static void +serror(char *str) +{ + elog(WARN, "*** scanner error: %s\n", str); +} + +/* ---------------- + * scanstr + * + * if the string passed in has escaped codes, map the escape codes to actual + * chars + * + * also, remove leading and ending quotes '"' if any + * + * the string passed in must be non-null + * + * the string returned is a pointer to static storage and should NOT + * be freed by the CALLER. + * ---------------- + */ + +char* +scanstr(char *s) +{ + static char newStr[MAX_PARSE_BUFFER]; + int len, i, start, j; + char delimiter; + + if (s == NULL || s[0] == '\0') + return s; + + len = strlen(s); + start = 0; + + /* remove leading and trailing quotes, if any */ + /* the normal backend lexer only accepts single quotes, but the + bootstrap lexer accepts double quotes */ + delimiter = 0; + if (s[0] == '"' || s[0] == '\''){ + delimiter = s[0]; + start = 1; + } + if (delimiter != 0) { + if (s[len-1] == delimiter) + len = len - 1; + else + serror("mismatched quote delimiters"); + } + + for (i = start, j = 0; i < len ; i++) { + if (s[i] == '\'') { + i = i + 1; + if (s[i] == '\'') + newStr[j] = '\''; + } + else { + if (s[i] == '\\') { + i = i + 1; + switch (s[i]) { + case '\\': + newStr[j] = '\\'; + break; + case 'b': + newStr[j] = '\b'; + break; + case 'f': + newStr[j] = '\f'; + break; + case 'n': + newStr[j] = '\n'; + break; + case 'r': + newStr[j] = '\r'; + break; + case 't': + newStr[j] = '\t'; + break; + case '"': + newStr[j] = '"'; + break; + case '\'': + newStr[j] = '\''; + break; + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + { + char octal[4]; + int k; + long octVal; + + for (k=0; + s[i+k] >= '0' && s[i+k] <= '7' && k < 3; + k++) + octal[k] = s[i+k]; + i += k-1; + octal[3] = '\0'; + + octVal = strtol(octal,0,8); +/* elog (NOTICE, "octal = %s octVal = %d, %od", octal, octVal, octVal);*/ + if (octVal <= 0377) { + newStr[j] = ((char)octVal); + break; + } + } + default: + elog (WARN, "Bad escape sequence, s[i] = %d", s[i]); + } /* switch */ + } /* s[i] == '\\' */ + else + newStr[j] = s[i]; + } + j++; + } + newStr[j] = '\0'; + return newStr; +} + diff --git a/src/backend/parser/scansup.h b/src/backend/parser/scansup.h new file mode 100644 index 00000000000..95e625aabcf --- /dev/null +++ b/src/backend/parser/scansup.h @@ -0,0 +1,17 @@ +/*------------------------------------------------------------------------- + * + * scansup.h-- + * scanner support routines. 
used by both the bootstrap lexer + * as well as the normal lexer + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: scansup.h,v 1.1.1.1 1996/07/09 06:21:41 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +extern char* scanstr(char *s); + + + diff --git a/src/backend/port/BSD44_derived/Makefile.inc b/src/backend/port/BSD44_derived/Makefile.inc new file mode 100644 index 00000000000..9ef7ffc463d --- /dev/null +++ b/src/backend/port/BSD44_derived/Makefile.inc @@ -0,0 +1,28 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/BSD44_derived (for OSs derived from 4.4-lite BSD) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/port/BSD44_derived/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:41 scrappy Exp $ +# +#------------------------------------------------------------------------- + +CFLAGS+= -DUSE_POSIX_TIME + +# +# 4.4-lite BSD-derived OSs require that the lex library be included, +# in case yywrap is defined +# +LDADD+= -ll + +# +# 4.4-lite BSD-derived OSs have a little trouble with partially-implemented +# dynamic loading soutines. See the comments in port-protos.h. +# +SUBSRCS= dl.c + +HEADERS+= float.h machine.h port-protos.h diff --git a/src/backend/port/BSD44_derived/README b/src/backend/port/BSD44_derived/README new file mode 100644 index 00000000000..acfd1e66af1 --- /dev/null +++ b/src/backend/port/BSD44_derived/README @@ -0,0 +1,4 @@ +The NetBSD port was done by Alistair G. Crooks (agc@uts.amdahl.com). + +It was extended to cover Operating Systems derived from the 4.4-lite +BSD release by Alistair G. Crooks. diff --git a/src/backend/port/BSD44_derived/dl.c b/src/backend/port/BSD44_derived/dl.c new file mode 100644 index 00000000000..394fb8a2e0a --- /dev/null +++ b/src/backend/port/BSD44_derived/dl.c @@ -0,0 +1,88 @@ +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +static char sccsid[] = "@(#)dl.c 5.4 (Berkeley) 2/23/91"; +#endif /* LIBC_SCCS and not lint */ + +#include +#include +#include + +#include +#include + +static char error_message[BUFSIZ]; + +char * +BSD44_derived_dlerror(void) +{ + static char ret[BUFSIZ]; + + (void) strcpy(ret, error_message); + error_message[0] = 0; + return((ret[0] == 0) ? (char *) NULL : ret); +} + +void * +BSD44_derived_dlopen(char *file, int num) +{ + void *vp; + + if ((vp = dlopen(file, num)) == (void *) NULL) { + (void) sprintf(error_message, "dlopen (%s) failed", file); + } + return(vp); +} + +void * +BSD44_derived_dlsym(void *handle, char *name) +{ + void *vp; + char buf[BUFSIZ]; + + if (*name != '_') { + (void) sprintf(buf, "_%s", name); + name = buf; + } + if ((vp = dlsym(handle, name)) == (void *) NULL) { + (void) sprintf(error_message, "dlsym (%s) failed", name); + } + return(vp); +} + +void +BSD44_derived_dlclose(void *handle) +{ + dlclose(handle); +} diff --git a/src/backend/port/BSD44_derived/float.h b/src/backend/port/BSD44_derived/float.h new file mode 100644 index 00000000000..f1348bf9eaf --- /dev/null +++ b/src/backend/port/BSD44_derived/float.h @@ -0,0 +1,30 @@ +/*------------------------------------------------------------------------- + * + * float.h-- + * definitions for ANSI floating point + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: float.h,v 1.1.1.1 1996/07/09 06:21:42 scrappy Exp $ + * + * NOTES + * These come straight out of ANSI X3.159-1989 (p.18) and + * would be unnecessary if SunOS 4 were ANSI-compliant. + * + * This is only a partial listing because I'm lazy to type + * the whole thing in. 
+ * + *------------------------------------------------------------------------- + */ +#ifndef FLOAT_H +#define FLOAT_H + +#define FLT_DIG 6 +#define FLT_MIN ((float) 1.17549435e-38) +#define FLT_MAX ((float) 3.40282347e+38) +#define DBL_DIG 15 +#define DBL_MIN 2.2250738585072014e-308 +#define DBL_MAX 1.7976931348623157e+308 + +#endif /* FLOAT_H */ diff --git a/src/backend/port/BSD44_derived/machine.h b/src/backend/port/BSD44_derived/machine.h new file mode 100644 index 00000000000..3d4e7656609 --- /dev/null +++ b/src/backend/port/BSD44_derived/machine.h @@ -0,0 +1,19 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: machine.h,v 1.1.1.1 1996/07/09 06:21:42 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif + diff --git a/src/backend/port/BSD44_derived/port-protos.h b/src/backend/port/BSD44_derived/port-protos.h new file mode 100644 index 00000000000..d397b4d3ed8 --- /dev/null +++ b/src/backend/port/BSD44_derived/port-protos.h @@ -0,0 +1,41 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * port-specific prototypes for NetBSD 1.0 + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: port-protos.h,v 1.1.1.1 1996/07/09 06:21:42 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include +#include +#include + +#include "fmgr.h" /* for func_ptr */ +#include "utils/dynamic_loader.h" + +/* dynloader.c */ +/* + * Dynamic Loader on NetBSD 1.0. + * + * this dynamic loader uses the system dynamic loading interface for shared + * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared + * library as the file to be dynamically loaded. + * + * agc - I know this is all a bit crufty, but it does work, is fairly + * portable, and works (the stipulation that the d.l. function must + * begin with an underscore is fairly tricky, and some versions of + * NetBSD (like 1.0, and 1.0A pre June 1995) have no dlerror.) 
+ */ +#define pg_dlopen(f) BSD44_derived_dlopen(f, 1) +#define pg_dlsym BSD44_derived_dlsym +#define pg_dlclose BSD44_derived_dlclose +#define pg_dlerror BSD44_derived_dlerror + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/Makefile.inc b/src/backend/port/Makefile.inc new file mode 100644 index 00000000000..a21fd46968f --- /dev/null +++ b/src/backend/port/Makefile.inc @@ -0,0 +1,21 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for the port module (for code specific to various UNIX +# platforms) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/port/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:41 scrappy Exp $ +# +#------------------------------------------------------------------------- + +portdir= $(CURDIR)/port/$(PORTNAME) +VPATH:= $(VPATH):$(portdir) + +SUBSRCS= +include $(portdir)/Makefile.inc +SRCS_PORT:= $(SUBSRCS) + diff --git a/src/backend/port/aix/Makefile.inc b/src/backend/port/aix/Makefile.inc new file mode 100644 index 00000000000..6954174c7d2 --- /dev/null +++ b/src/backend/port/aix/Makefile.inc @@ -0,0 +1,40 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/aix (AIX specific stuff) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/port/aix/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:41 scrappy Exp $ +# +#------------------------------------------------------------------------- + +# +# aix has fast linkers and don't need the BIGOBJ stuff. +# +BIGOBJS=false + +CFLAGS+= -DCLASS_CONFLICT -DDISABLE_XOPEN_NLS -DNEED_ISINF + +LDFLAGS+= -bE:$(objdir)/$(PROG).exp + +LDADD+= -ll -lld + +HEADERS+= dlfcn.h machine.h port-protos.h + +SUBSRCS+= dlfcn.c + +${PROG}.exp: ${PROG}.noexp + mv -f $(objdir)/${PROG}.noexp $(objdir)/${PROG} + $(CURDIR)/port/aix/mkldexport.sh $(objdir)/${PROG} ${BINDIR} > $(objdir)/${PROG}.exp + mv -f $(objdir)/${PROG} $(objdir)/${PROG}.noexp + +${PROG}.noexp: ${OBJS} + touch -f $(objdir)/${PROG}.exp + ${CC} ${LDFLAGS} -o $(objdir)/${PROG}.noexp ${OBJS} ${LDADD} + +EXPORTS= ${PROG}.exp + +CLEANFILES+= ${PROG}.noexp ${PROG}.exp diff --git a/src/backend/port/aix/README.dlfcn b/src/backend/port/aix/README.dlfcn new file mode 100644 index 00000000000..f64446d49b5 --- /dev/null +++ b/src/backend/port/aix/README.dlfcn @@ -0,0 +1,167 @@ +Copyright (c) 1992,1993,1995, Jens-Uwe Mager, Helios Software GmbH +Not derived from licensed software. + +Permission is granted to freely use, copy, modify, and redistribute +this software, provided that no attempt is made to gain profit from it, +the author is not construed to be liable for any results of using the +software, alterations are clearly marked as such, and this notice is +not modified. + +libdl.a +------- + +This is an emulation library to emulate the SunOS/System V.4 functions +to access the runtime linker. The functions are emulated by using the +AIX load() function and by reading the .loader section of the loaded +module to find the exports. The to be loaded module should be linked as +follows (if using AIX 3): + + cc -o module.so -bM:SRE -bE:module.exp -e _nostart $(OBJS) + +For AIX 4: + + cc -o module.so -bM:SRE -bE:module.exp -bnoentry $(OBJS) + +The module export file contains the symbols to be exported. Because +this library uses the loader section, the final module.so file can be +stripped. 
C++ users should build their shared objects using the script +makeC++SharedLib (part of the IBM C++ compiler), this will make sure +that constructors and destructors for static and global objects will be +called upon loading and unloading the module. + +Usage +----- + +void *dlopen(const char *path, int mode); + +This routine loads the module pointed to by path and reads its export +table. If the path does not contain a '/' character, dlopen will search +for the module using the LIBPATH environment variable. It returns an +opaque handle to the module or NULL on error. The mode parameter can be +either RTLD_LAZY (for lazy function binding) or RTLD_NOW for immediate +function binding. The AIX implementation currently does treat RTLD_NOW +the same as RTLD_LAZY. The flag RTLD_GLOBAL might be or'ed into the +mode parameter to allow loaded modules to bind to global variables or +functions in other loaded modules loaded by dlopen(). If RTLD_GLOBAL is +not specified, only globals from the main part of the executable or +shared libraries are used to look for undefined symbols in loaded +modules. + + +void *dlsym(void *handle, const char *symbol); + +This routine searches for the symbol in the module referred to by +handle and returns its address. If the symbol could not be found, the +function returns NULL. The return value must be casted to a proper +function pointer before it can be used. SunOS/System V.4 allow handle +to be a NULL pointer to refer to the module the call is made from, this +is not implemented. + +int dlclose(void *handle); + +This routine unloads the module referred to by the handle and disposes +of any local storage. this function returns -1 on failure. + +char *dlerror(void); + +This routine can be used to retrieve a text message describing the most +recent error that occured on on of the above routines. This function +returns NULL if there is not error information. + +Initialization and termination handlers +--------------------------------------- + +The emulation provides for an initialization and a termination +handler. The dlfcn.h file contains a structure declaration named +dl_info with following members: + + void (*init)(void); + void (*fini)(void); + +The init function is called upon first referencing the library. The +fini function is called at dlclose() time or when the process exits. +The module should declare a variable named dl_info that contains this +structure which must be exported. These functions correspond to the +documented _init() and _fini() functions of SunOS 4.x, but these are +appearently not implemented in SunOS. When using SunOS 5.0, these +correspond to #pragma init and #pragma fini respectively. At the same +time any static or global C++ object's constructors or destructors will +be called. + +Jens-Uwe Mager + +HELIOS Software GmbH +Lavesstr. 
80 +30159 Hannover +Germany + +Phone: +49 511 36482-0 +FAX: +49 511 36482-69 +AppleLink: helios.de Attn: Jens-Uwe Mager +Internet: jum@helios.de + +Revison History +--------------- + +SCCS/s.dlfcn.h: + +D 1.4 95/04/25 09:36:52 jum 4 3 00018/00004/00028 +MRs: +COMMENTS: +added RTLD_GLOBAL, include and C++ guards + +D 1.3 92/12/27 20:58:32 jum 3 2 00001/00001/00031 +MRs: +COMMENTS: +we always have prototypes on RS/6000 + +D 1.2 92/08/16 17:45:11 jum 2 1 00009/00000/00023 +MRs: +COMMENTS: +added dl_info structure to implement initialize and terminate functions + +D 1.1 92/08/02 18:08:45 jum 1 0 00023/00000/00000 +MRs: +COMMENTS: +Erstellungsdatum und -uhrzeit 92/08/02 18:08:45 von jum + +SCCS/s.dlfcn.c: + +D 1.7 95/08/14 19:08:38 jum 8 6 00026/00004/00502 +MRs: +COMMENTS: +Integrated the fixes from Kirk Benell (kirk@rsinc.com) to allow loading of +shared objects generated under AIX 4. Fixed bug that symbols with exactly +8 characters would use garbage characters from the following symbol value. + +D 1.6 95/04/25 09:38:03 jum 6 5 00046/00006/00460 +MRs: +COMMENTS: +added handling of C++ static constructors and destructors, added RTLD_GLOBAL to bind against other loaded modules + +D 1.5 93/02/14 20:14:17 jum 5 4 00002/00000/00464 +MRs: +COMMENTS: +added path to dlopen error message to make clear where there error occured. + +D 1.4 93/01/03 19:13:56 jum 4 3 00061/00005/00403 +MRs: +COMMENTS: +to allow calling symbols in the main module call load with L_NOAUTODEFER and +do a loadbind later with the main module. + +D 1.3 92/12/27 20:59:55 jum 3 2 00066/00008/00342 +MRs: +COMMENTS: +added search by L_GETINFO if module got loaded by LIBPATH + +D 1.2 92/08/16 17:45:43 jum 2 1 00074/00006/00276 +MRs: +COMMENTS: +implemented initialize and terminate functions, added reference counting to avoid multiple loads of the same library + +D 1.1 92/08/02 18:08:45 jum 1 0 00282/00000/00000 +MRs: +COMMENTS: +Erstellungsdatum und -uhrzeit 92/08/02 18:08:45 von jum + diff --git a/src/backend/port/aix/dlfcn.c b/src/backend/port/aix/dlfcn.c new file mode 100644 index 00000000000..9ae113ce06e --- /dev/null +++ b/src/backend/port/aix/dlfcn.c @@ -0,0 +1,528 @@ +/* + * @(#)dlfcn.c 1.7 revision of 95/08/14 19:08:38 + * This is an unpublished work copyright (c) 1992 HELIOS Software GmbH + * 30159 Hannover, Germany + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "dlfcn.h" + +/* + * We simulate dlopen() et al. through a call to load. Because AIX has + * no call to find an exported symbol we read the loader section of the + * loaded module and build a list of exported symbols and their virtual + * address. + */ + +typedef struct { + char *name; /* the symbols's name */ + void *addr; /* its relocated virtual address */ +} Export, *ExportPtr; + +/* + * xlC uses the following structure to list its constructors and + * destructors. This is gleaned from the output of munch. + */ +typedef struct { + void (*init)(void); /* call static constructors */ + void (*term)(void); /* call static destructors */ +} Cdtor, *CdtorPtr; + +/* + * The void * handle returned from dlopen is actually a ModulePtr. 
+ */ +typedef struct Module { + struct Module *next; + char *name; /* module name for refcounting */ + int refCnt; /* the number of references */ + void *entry; /* entry point from load */ + struct dl_info *info; /* optional init/terminate functions */ + CdtorPtr cdtors; /* optional C++ constructors */ + int nExports; /* the number of exports found */ + ExportPtr exports; /* the array of exports */ +} Module, *ModulePtr; + +/* + * We keep a list of all loaded modules to be able to call the fini + * handlers and destructors at atexit() time. + */ +static ModulePtr modList; + +/* + * The last error from one of the dl* routines is kept in static + * variables here. Each error is returned only once to the caller. + */ +static char errbuf[BUFSIZ]; +static int errvalid; + +extern char *strdup(const char *); +static void caterr(char *); +static int readExports(ModulePtr); +static void terminate(void); +static void *findMain(void); + +void *dlopen(const char *path, int mode) +{ + register ModulePtr mp; + static void *mainModule; + + /* + * Upon the first call register a terminate handler that will + * close all libraries. Also get a reference to the main module + * for use with loadbind. + */ + if (!mainModule) { + if ((mainModule = findMain()) == NULL) + return NULL; + atexit(terminate); + } + /* + * Scan the list of modules if we have the module already loaded. + */ + for (mp = modList; mp; mp = mp->next) + if (strcmp(mp->name, path) == 0) { + mp->refCnt++; + return mp; + } + if ((mp = (ModulePtr)calloc(1, sizeof(*mp))) == NULL) { + errvalid++; + strcpy(errbuf, "calloc: "); + strcat(errbuf, strerror(errno)); + return NULL; + } + if ((mp->name = strdup(path)) == NULL) { + errvalid++; + strcpy(errbuf, "strdup: "); + strcat(errbuf, strerror(errno)); + free(mp); + return NULL; + } + /* + * load should be declared load(const char *...). Thus we + * cast the path to a normal char *. Ugly. + */ + if ((mp->entry = (void *)load((char *)path, L_NOAUTODEFER, NULL)) == NULL) { + free(mp->name); + free(mp); + errvalid++; + strcpy(errbuf, "dlopen: "); + strcat(errbuf, path); + strcat(errbuf, ": "); + /* + * If AIX says the file is not executable, the error + * can be further described by querying the loader about + * the last error. + */ + if (errno == ENOEXEC) { + char *tmp[BUFSIZ/sizeof(char *)]; + if (loadquery(L_GETMESSAGES, tmp, sizeof(tmp)) == -1) + strcpy(errbuf, strerror(errno)); + else { + char **p; + for (p = tmp; *p; p++) + caterr(*p); + } + } else + strcat(errbuf, strerror(errno)); + return NULL; + } + mp->refCnt = 1; + mp->next = modList; + modList = mp; + if (loadbind(0, mainModule, mp->entry) == -1) { + dlclose(mp); + errvalid++; + strcpy(errbuf, "loadbind: "); + strcat(errbuf, strerror(errno)); + return NULL; + } + /* + * If the user wants global binding, loadbind against all other + * loaded modules. + */ + if (mode & RTLD_GLOBAL) { + register ModulePtr mp1; + for (mp1 = mp->next; mp1; mp1 = mp1->next) + if (loadbind(0, mp1->entry, mp->entry) == -1) { + dlclose(mp); + errvalid++; + strcpy(errbuf, "loadbind: "); + strcat(errbuf, strerror(errno)); + return NULL; + } + } + if (readExports(mp) == -1) { + dlclose(mp); + return NULL; + } + /* + * If there is a dl_info structure, call the init function. + */ + if (mp->info = (struct dl_info *)dlsym(mp, "dl_info")) { + if (mp->info->init) + (*mp->info->init)(); + } else + errvalid = 0; + /* + * If the shared object was compiled using xlC we will need + * to call static constructors (and later on dlclose destructors). 
+ */ + if (mp->cdtors = (CdtorPtr)dlsym(mp, "__cdtors")) { + while (mp->cdtors->init) { + (*mp->cdtors->init)(); + mp->cdtors++; + } + } else + errvalid = 0; + return mp; +} + +/* + * Attempt to decipher an AIX loader error message and append it + * to our static error message buffer. + */ +static void caterr(char *s) +{ + register char *p = s; + + while (*p >= '0' && *p <= '9') + p++; + switch(atoi(s)) { + case L_ERROR_TOOMANY: + strcat(errbuf, "to many errors"); + break; + case L_ERROR_NOLIB: + strcat(errbuf, "can't load library"); + strcat(errbuf, p); + break; + case L_ERROR_UNDEF: + strcat(errbuf, "can't find symbol"); + strcat(errbuf, p); + break; + case L_ERROR_RLDBAD: + strcat(errbuf, "bad RLD"); + strcat(errbuf, p); + break; + case L_ERROR_FORMAT: + strcat(errbuf, "bad exec format in"); + strcat(errbuf, p); + break; + case L_ERROR_ERRNO: + strcat(errbuf, strerror(atoi(++p))); + break; + default: + strcat(errbuf, s); + break; + } +} + +void *dlsym(void *handle, const char *symbol) +{ + register ModulePtr mp = (ModulePtr)handle; + register ExportPtr ep; + register int i; + + /* + * Could speed up the search, but I assume that one assigns + * the result to function pointers anyways. + */ + for (ep = mp->exports, i = mp->nExports; i; i--, ep++) + if (strcmp(ep->name, symbol) == 0) + return ep->addr; + errvalid++; + strcpy(errbuf, "dlsym: undefined symbol "); + strcat(errbuf, symbol); + return NULL; +} + +char *dlerror(void) +{ + if (errvalid) { + errvalid = 0; + return errbuf; + } + return NULL; +} + +int dlclose(void *handle) +{ + register ModulePtr mp = (ModulePtr)handle; + int result; + register ModulePtr mp1; + + if (--mp->refCnt > 0) + return 0; + if (mp->info && mp->info->fini) + (*mp->info->fini)(); + if (mp->cdtors) + while (mp->cdtors->term) { + (*mp->cdtors->term)(); + mp->cdtors++; + } + result = unload(mp->entry); + if (result == -1) { + errvalid++; + strcpy(errbuf, strerror(errno)); + } + if (mp->exports) { + register ExportPtr ep; + register int i; + for (ep = mp->exports, i = mp->nExports; i; i--, ep++) + if (ep->name) + free(ep->name); + free(mp->exports); + } + if (mp == modList) + modList = mp->next; + else { + for (mp1 = modList; mp1; mp1 = mp1->next) + if (mp1->next == mp) { + mp1->next = mp->next; + break; + } + } + free(mp->name); + free(mp); + return result; +} + +static void terminate(void) +{ + while (modList) + dlclose(modList); +} + +/* + * Build the export table from the XCOFF .loader section. + */ +static int readExports(ModulePtr mp) +{ + LDFILE *ldp = NULL; + SCNHDR sh, shdata; + LDHDR *lhp; + char *ldbuf; + LDSYM *ls; + int i; + ExportPtr ep; + + if ((ldp = ldopen(mp->name, ldp)) == NULL) { + struct ld_info *lp; + char *buf; + int size = 4*1024; + if (errno != ENOENT) { + errvalid++; + strcpy(errbuf, "readExports: "); + strcat(errbuf, strerror(errno)); + return -1; + } + /* + * The module might be loaded due to the LIBPATH + * environment variable. Search for the loaded + * module using L_GETINFO. 
+ */ + if ((buf = malloc(size)) == NULL) { + errvalid++; + strcpy(errbuf, "readExports: "); + strcat(errbuf, strerror(errno)); + return -1; + } + while ((i = loadquery(L_GETINFO, buf, size)) == -1 && errno == ENOMEM) { + free(buf); + size += 4*1024; + if ((buf = malloc(size)) == NULL) { + errvalid++; + strcpy(errbuf, "readExports: "); + strcat(errbuf, strerror(errno)); + return -1; + } + } + if (i == -1) { + errvalid++; + strcpy(errbuf, "readExports: "); + strcat(errbuf, strerror(errno)); + free(buf); + return -1; + } + /* + * Traverse the list of loaded modules. The entry point + * returned by load() does actually point to the data + * segment origin. + */ + lp = (struct ld_info *)buf; + while (lp) { + if (lp->ldinfo_dataorg == mp->entry) { + ldp = ldopen(lp->ldinfo_filename, ldp); + break; + } + if (lp->ldinfo_next == 0) + lp = NULL; + else + lp = (struct ld_info *)((char *)lp + lp->ldinfo_next); + } + free(buf); + if (!ldp) { + errvalid++; + strcpy(errbuf, "readExports: "); + strcat(errbuf, strerror(errno)); + return -1; + } + } + if (TYPE(ldp) != U802TOCMAGIC) { + errvalid++; + strcpy(errbuf, "readExports: bad magic"); + while(ldclose(ldp) == FAILURE) + ; + return -1; + } + /* + * Get the padding for the data section. This is needed for + * AIX 4.1 compilers. This is used when building the final + * function pointer to the exported symbol. + */ + if (ldnshread(ldp, _DATA, &shdata) != SUCCESS) { + errvalid++; + strcpy(errbuf, "readExports: cannot read data section header"); + while(ldclose(ldp) == FAILURE) + ; + return -1; + } + if (ldnshread(ldp, _LOADER, &sh) != SUCCESS) { + errvalid++; + strcpy(errbuf, "readExports: cannot read loader section header"); + while(ldclose(ldp) == FAILURE) + ; + return -1; + } + /* + * We read the complete loader section in one chunk, this makes + * finding long symbol names residing in the string table easier. + */ + if ((ldbuf = (char *)malloc(sh.s_size)) == NULL) { + errvalid++; + strcpy(errbuf, "readExports: "); + strcat(errbuf, strerror(errno)); + while(ldclose(ldp) == FAILURE) + ; + return -1; + } + if (FSEEK(ldp, sh.s_scnptr, BEGINNING) != OKFSEEK) { + errvalid++; + strcpy(errbuf, "readExports: cannot seek to loader section"); + free(ldbuf); + while(ldclose(ldp) == FAILURE) + ; + return -1; + } + if (FREAD(ldbuf, sh.s_size, 1, ldp) != 1) { + errvalid++; + strcpy(errbuf, "readExports: cannot read loader section"); + free(ldbuf); + while(ldclose(ldp) == FAILURE) + ; + return -1; + } + lhp = (LDHDR *)ldbuf; + ls = (LDSYM *)(ldbuf+LDHDRSZ); + /* + * Count the number of exports to include in our export table. + */ + for (i = lhp->l_nsyms; i; i--, ls++) { + if (!LDR_EXPORT(*ls)) + continue; + mp->nExports++; + } + if ((mp->exports = (ExportPtr)calloc(mp->nExports, sizeof(*mp->exports))) == NULL) { + errvalid++; + strcpy(errbuf, "readExports: "); + strcat(errbuf, strerror(errno)); + free(ldbuf); + while(ldclose(ldp) == FAILURE) + ; + return -1; + } + /* + * Fill in the export table. All entries are relative to + * the entry point we got from load. + */ + ep = mp->exports; + ls = (LDSYM *)(ldbuf+LDHDRSZ); + for (i = lhp->l_nsyms; i; i--, ls++) { + char *symname; + char tmpsym[SYMNMLEN+1]; + if (!LDR_EXPORT(*ls)) + continue; + if (ls->l_zeroes == 0) + symname = ls->l_offset+lhp->l_stoff+ldbuf; + else { + /* + * The l_name member is not zero terminated, we + * must copy the first SYMNMLEN chars and make + * sure we have a zero byte at the end. 
+ */ + strncpy(tmpsym, ls->l_name, SYMNMLEN); + tmpsym[SYMNMLEN] = '\0'; + symname = tmpsym; + } + ep->name = strdup(symname); + ep->addr = (void *)((unsigned long)mp->entry + + ls->l_value - shdata.s_vaddr); + ep++; + } + free(ldbuf); + while(ldclose(ldp) == FAILURE) + ; + return 0; +} + +/* + * Find the main modules entry point. This is used as export pointer + * for loadbind() to be able to resolve references to the main part. + */ +static void * findMain(void) +{ + struct ld_info *lp; + char *buf; + int size = 4*1024; + int i; + void *ret; + + if ((buf = malloc(size)) == NULL) { + errvalid++; + strcpy(errbuf, "findMain: "); + strcat(errbuf, strerror(errno)); + return NULL; + } + while ((i = loadquery(L_GETINFO, buf, size)) == -1 && errno == ENOMEM) { + free(buf); + size += 4*1024; + if ((buf = malloc(size)) == NULL) { + errvalid++; + strcpy(errbuf, "findMain: "); + strcat(errbuf, strerror(errno)); + return NULL; + } + } + if (i == -1) { + errvalid++; + strcpy(errbuf, "findMain: "); + strcat(errbuf, strerror(errno)); + free(buf); + return NULL; + } + /* + * The first entry is the main module. The entry point + * returned by load() does actually point to the data + * segment origin. + */ + lp = (struct ld_info *)buf; + ret = lp->ldinfo_dataorg; + free(buf); + return ret; +} diff --git a/src/backend/port/aix/dlfcn.h b/src/backend/port/aix/dlfcn.h new file mode 100644 index 00000000000..5671e9caa3a --- /dev/null +++ b/src/backend/port/aix/dlfcn.h @@ -0,0 +1,46 @@ +/* + * @(#)dlfcn.h 1.4 revision of 95/04/25 09:36:52 + * This is an unpublished work copyright (c) 1992 HELIOS Software GmbH + * 30159 Hannover, Germany + */ + +#ifndef __dlfcn_h__ +#define __dlfcn_h__ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * Mode flags for the dlopen routine. + */ +#define RTLD_LAZY 1 /* lazy function call binding */ +#define RTLD_NOW 2 /* immediate function call binding */ +#define RTLD_GLOBAL 0x100 /* allow symbols to be global */ + +/* + * To be able to intialize, a library may provide a dl_info structure + * that contains functions to be called to initialize and terminate. + */ +struct dl_info { + void (*init)(void); + void (*fini)(void); +}; + +#if __STDC__ || defined(_IBMR2) +void *dlopen(const char *path, int mode); +void *dlsym(void *handle, const char *symbol); +char *dlerror(void); +int dlclose(void *handle); +#else +void *dlopen(); +void *dlsym(); +char *dlerror(); +int dlclose(); +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* __dlfcn_h__ */ diff --git a/src/backend/port/aix/machine.h b/src/backend/port/aix/machine.h new file mode 100644 index 00000000000..e05a1346495 --- /dev/null +++ b/src/backend/port/aix/machine.h @@ -0,0 +1,19 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: machine.h,v 1.1.1.1 1996/07/09 06:21:41 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif + diff --git a/src/backend/port/aix/mkldexport.sh b/src/backend/port/aix/mkldexport.sh new file mode 100755 index 00000000000..378baf92626 --- /dev/null +++ b/src/backend/port/aix/mkldexport.sh @@ -0,0 +1,42 @@ +#!/bin/sh +# +# mkldexport +# create an AIX exports file from an object file +# +# Usage: +# mkldexport objectfile [location] +# where +# objectfile is the current location of the object file. 
+# location is the eventual (installed) location of the +# object file (if different from the current +# working directory). +# +# [This file comes from the Postgres 4.2 distribution. - ay 7/95] +# +# Header: /usr/local/devel/postgres/src/tools/mkldexport/RCS/mkldexport.sh,v 1.2 1994/03/13 04:59:12 aoki Exp +# + +# setting this to nm -B might be better +NM = /usr/ucb/nm + +CMDNAME=`basename $0` +if [ -z "$1" ]; then + echo "Usage: $CMDNAME object [location]" + exit 1 +fi +OBJNAME=`basename $1` +if [ "`basename $OBJNAME`" != "`basename $OBJNAME .o`" ]; then + OBJNAME=`basename $OBJNAME .o`.so +fi +if [ -z "$2" ]; then + echo '#!' +else + echo '#!' $2/$OBJNAME +fi +$NM -g $1 | \ + egrep ' [TD] ' | \ + sed -e 's/.* //' | \ + egrep -v '\$' | \ + sed -e 's/^[.]//' | \ + sort | \ + uniq diff --git a/src/backend/port/aix/port-protos.h b/src/backend/port/aix/port-protos.h new file mode 100644 index 00000000000..986e5bd7d48 --- /dev/null +++ b/src/backend/port/aix/port-protos.h @@ -0,0 +1,25 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * port-specific prototypes for AIX + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: port-protos.h,v 1.1.1.1 1996/07/09 06:21:41 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include "dlfcn.h" /* this is from jum's libdl package */ + +/* dynloader.c */ + +#define pg_dlopen(f) dlopen(filename, RTLD_LAZY) +#define pg_dlsym(h,f) dlsym(h, f) +#define pg_dlclose(h) dlclose(h) +#define pg_dlerror() dlerror() + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/alpha/Makefile.inc b/src/backend/port/alpha/Makefile.inc new file mode 100644 index 00000000000..52ebfe486f7 --- /dev/null +++ b/src/backend/port/alpha/Makefile.inc @@ -0,0 +1,27 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/alpha (Alpha OSF/1 specific stuff) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/port/alpha/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:42 scrappy Exp $ +# +#------------------------------------------------------------------------- + +CFLAGS+= -DUSE_POSIX_TIME -DDISABLE_XOPEN_NLS -DNEED_ISINF -DHAS_LONG_LONG + +LDADD+= -lln + +# +# The YACC grammar is too big.. 
+# +#.if !defined(CDEBUG) +##CFLAGS+= -Olimit 2000 +#.endif + +HEADERS+= machine.h port-protos.h + +SUBSRCS= port.c diff --git a/src/backend/port/alpha/machine.h b/src/backend/port/alpha/machine.h new file mode 100644 index 00000000000..3d4e7656609 --- /dev/null +++ b/src/backend/port/alpha/machine.h @@ -0,0 +1,19 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: machine.h,v 1.1.1.1 1996/07/09 06:21:42 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif + diff --git a/src/backend/port/alpha/port-protos.h b/src/backend/port/alpha/port-protos.h new file mode 100644 index 00000000000..3bd8d454a5e --- /dev/null +++ b/src/backend/port/alpha/port-protos.h @@ -0,0 +1,39 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * prototypes for OSF/1-specific routines + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: port-protos.h,v 1.1.1.1 1996/07/09 06:21:42 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ + +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include +#include "utils/dynamic_loader.h" + +/* dynloader.c */ + +/* + * Dynamic Loader on Alpha OSF/1.x + * + * this dynamic loader uses the system dynamic loading interface for shared + * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared + * library as the file to be dynamically loaded. + * + */ +#define pg_dlopen(f) dlopen(f, RTLD_LAZY) +#define pg_dlsym(h, f) ((func_ptr)dlsym(h, f)) +#define pg_dlclose(h) dlclose(h) +#define pg_dlerror() dlerror() + +/* port.c */ + +extern void init_address_fixup(void); + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/alpha/port.c b/src/backend/port/alpha/port.c new file mode 100644 index 00000000000..d7c17b0a5ba --- /dev/null +++ b/src/backend/port/alpha/port.c @@ -0,0 +1,34 @@ +/*------------------------------------------------------------------------- + * + * port.c-- + * OSF/1-specific routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/port/alpha/Attic/port.c,v 1.1.1.1 1996/07/09 06:21:42 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include +#include +#include "c.h" +#include "utils/elog.h" + +void +init_address_fixup() +{ +#ifdef NOFIXADE + int buffer[] = { SSIN_UACPROC, UAC_SIGBUS }; +#endif /* NOFIXADE */ +#ifdef NOPRINTADE + int buffer[] = { SSIN_UACPROC, UAC_NOPRINT }; +#endif /* NOPRINTADE */ + + if (setsysinfo(SSI_NVPAIRS, buffer, 1, (caddr_t) NULL, + (unsigned long) NULL) < 0) { + elog(NOTICE, "setsysinfo failed: %d\n", errno); + } +} diff --git a/src/backend/port/bsdi/Makefile.inc b/src/backend/port/bsdi/Makefile.inc new file mode 100644 index 00000000000..96eb2c5fc43 --- /dev/null +++ b/src/backend/port/bsdi/Makefile.inc @@ -0,0 +1,15 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/bsdi +# +# NOTES +# The BSD/OS port is included here by courtesy of Kurt Lidl. 
+# +# (5) 1994, Kurt Lidl, lidl@pix.com +# +#------------------------------------------------------------------------- + +CFLAGS+=-DUSE_POSIX_TIME -DNEED_CBRT +LDADD+= -ldld -lipc +SUBSRCS= dynloader.c diff --git a/src/backend/port/bsdi/dynloader.c b/src/backend/port/bsdi/dynloader.c new file mode 100644 index 00000000000..c167a01897e --- /dev/null +++ b/src/backend/port/bsdi/dynloader.c @@ -0,0 +1,93 @@ +/*------------------------------------------------------------------------- + * + * dynloader.c-- + * Dynamic Loader for Postgres for Linux, generated from those for + * Ultrix. + * + * You need to install the dld library on your Linux system! + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * /usr/local/devel/pglite/cvs/src/backend/port/linux/dynloader.c,v 1.1.1.1 1994/11/07 05:19:37 andrew Exp + * + *------------------------------------------------------------------------- + */ +#include +#include +#include "postgres.h" +#include "port-protos.h" +#include "utils/elog.h" +#include "fmgr.h" + +extern char pg_pathname[]; + +void * +pg_dlopen(char *filename) +{ + static int dl_initialized= 0; + + /* + * initializes the dynamic loader with the executable's pathname. + * (only needs to do this the first time pg_dlopen is called.) + */ + if (!dl_initialized) { + if (dld_init (dld_find_executable (pg_pathname))) { + return NULL; + } + /* + * if there are undefined symbols, we want dl to search from the + * following libraries also. + */ + dl_initialized= 1; + } + + /* + * link the file, then check for undefined symbols! + */ + if (dld_link(filename)) { + return NULL; + } + + /* + * If undefined symbols: try to link with the C and math libraries! + * This could be smarter, if the dynamic linker was able to handle + * shared libs! 
+ */ + if(dld_undefined_sym_count > 0) { + if (dld_link("/usr/lib/libc.a")) { + elog(NOTICE, "dld: Cannot link C library!"); + return NULL; + } + if(dld_undefined_sym_count > 0) { + if (dld_link("/usr/lib/libm.a")) { + elog(NOTICE, "dld: Cannot link math library!"); + return NULL; + } + if(dld_undefined_sym_count > 0) { + int count = dld_undefined_sym_count; + char **list= dld_list_undefined_sym(); + + /* list the undefined symbols, if any */ + elog(NOTICE, "dld: Undefined:"); + do { + elog(NOTICE, " %s", *list); + list++; + count--; + } while(count > 0); + + dld_unlink_by_file(filename, 1); + return NULL; + } + } + } + + return (void *) strdup(filename); +} + +char * +pg_dlerror() +{ + return dld_strerror(dld_errno); +} diff --git a/src/backend/port/bsdi/machine.h b/src/backend/port/bsdi/machine.h new file mode 100644 index 00000000000..d53defbfd4c --- /dev/null +++ b/src/backend/port/bsdi/machine.h @@ -0,0 +1,18 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * machine.h,v 1.1.1.1 1994/11/07 05:19:37 andrew Exp + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif diff --git a/src/backend/port/bsdi/port-protos.h b/src/backend/port/bsdi/port-protos.h new file mode 100644 index 00000000000..6583571d356 --- /dev/null +++ b/src/backend/port/bsdi/port-protos.h @@ -0,0 +1,33 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * port-specific prototypes for SunOS 4 + * + * + * Copyright (c) 1994, Regents of the University of California + * + * port-protos.h,v 1.2 1995/05/25 22:51:03 andrew Exp + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include "fmgr.h" /* for func_ptr */ +#include "utils/dynamic_loader.h" + +/* dynloader.c */ + +#ifndef LINUX_ELF +#define pg_dlsym(handle, funcname) ((func_ptr) dld_get_func((funcname))) +#define pg_dlclose(handle) ({ dld_unlink_by_file(handle, 1); free(handle); }) +#else +#define pg_dlopen(f) dlopen(f, 1) +#define pg_dlsym dlsym +#define pg_dlclose dlclose +#define pg_dlerror dlerror +#endif + +/* port.c */ + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/bsdi/port.c b/src/backend/port/bsdi/port.c new file mode 100644 index 00000000000..8819b1a6481 --- /dev/null +++ b/src/backend/port/bsdi/port.c @@ -0,0 +1,13 @@ +/*------------------------------------------------------------------------- + * + * port.c-- + * Linux-specific routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * /usr/local/devel/pglite/cvs/src/backend/port/linux/port.c,v 1.1.1.1 1994/11/07 05:19:38 andrew Exp + * + *------------------------------------------------------------------------- + */ diff --git a/src/backend/port/hpux/Makefile.inc b/src/backend/port/hpux/Makefile.inc new file mode 100644 index 00000000000..4ff60d20195 --- /dev/null +++ b/src/backend/port/hpux/Makefile.inc @@ -0,0 +1,68 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/hpux (HP-UX specific stuff) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/port/hpux/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:43 scrappy Exp $ +# 
+#------------------------------------------------------------------------- + +# +# HP-UX needs: +# -W l,-E export symbols for linking with the shared libraries +# dynamic loader +# -W p,-H400000 expand cpp #define table size so the Nodes files don't +# break it +# +# -W p,-H400000 +ifeq ($(CC), cc) +CFLAGS+= -W l,-E +LDFLAGS+= -W l,-E +LDADD+= -ll -ldld +else +ifeq ($(CC), gcc) +LDADD+= -ll /usr/lib/libdld.sl +endif +endif + +CFLAGS+= -DUSE_POSIX_TIME + +# +# cbrt(3m) and rint(3m) are missing from 8.07. +# cbrt(3m) and rint(3m) are broken in 9.01. +# cbrt(3m) seems to be missing on 9.00 even though it is documented. +# +CFLAGS+= -DNEED_RINT -DNEED_CBRT + +# +# The #pragma trick required on 8.07 no longer works -- the #pragma +# is thoroughly broken. However, the +u flag has been extended to +# handle alignment requirement arguments (defaulting to 2) for things +# other than struct references, so the #pragma is no longer needed. +# + +# +# (1) The YACC grammar is too big.. +# (HP-UX 9.0x, x<2, added basic block limits for +O2; 9.0x, x>=2, changed +# the syntax to something else.) +# +# (2) The 9.00 optimizer chokes on some of our source. +# +#.if (${HPUX_MAJOR} == "09") +#. if !defined(CDEBUG) +#. if (${HPUX_MINOR} == "00" || ${HPUX_MINOR} == "01") +#CFLAGS+= +Obb600 +#CFLAGS+= -DWEAK_C_OPTIMIZER +#. else +#CFLAGS+= +Onolimit +#. endif +#. endif +#.endif + +HEADERS+= fixade.h machine.h port-protos.h + +SUBSRCS+= dynloader.c port.c tas.s diff --git a/src/backend/port/hpux/dynloader.c b/src/backend/port/hpux/dynloader.c new file mode 100644 index 00000000000..deea2e1dc29 --- /dev/null +++ b/src/backend/port/hpux/dynloader.c @@ -0,0 +1,57 @@ +/*------------------------------------------------------------------------- + * + * dynloader.c-- + * dynamic loader for HP-UX using the shared library mechanism + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/port/hpux/Attic/dynloader.c,v 1.1.1.1 1996/07/09 06:21:43 scrappy Exp $ + * + * NOTES + * all functions are defined here -- it's impossible to trace the + * shl_* routines from the bundled HP-UX debugger. + * + *------------------------------------------------------------------------- + */ +/* System includes */ +#include +#include +#include +#include "c.h" +#include "fmgr.h" +#include "utils/dynamic_loader.h" +#include "port-protos.h" + +void * +pg_dlopen(char *filename) +{ + shl_t handle = shl_load(filename, BIND_DEFERRED, 0); + + return((void *) handle); +} + +func_ptr +pg_dlsym(void *handle, char *funcname) +{ + func_ptr f; + + if (shl_findsym((shl_t *) &handle, funcname, TYPE_PROCEDURE, &f) == -1) { + f = (func_ptr) NULL; + } + return(f); +} + +void +pg_dlclose(void *handle) +{ + shl_unload((shl_t) handle); +} + +char * +pg_dlerror() +{ + static char errmsg[]= "shl_load failed"; + return errmsg; +} diff --git a/src/backend/port/hpux/fixade.h b/src/backend/port/hpux/fixade.h new file mode 100644 index 00000000000..62324eb05d7 --- /dev/null +++ b/src/backend/port/hpux/fixade.h @@ -0,0 +1,63 @@ +/*------------------------------------------------------------------------- + * + * fixade.h-- + * compiler tricks to make things work while POSTGRES does non-native + * dereferences on PA-RISC. + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: fixade.h,v 1.1.1.1 1996/07/09 06:21:43 scrappy Exp $ + * + * NOTES + * This must be included in EVERY source file. 
+ * + *------------------------------------------------------------------------- + */ +#ifndef FIXADE_H +#define FIXADE_H + +#if !defined(NOFIXADE) + +#if defined(HP_S500_ALIGN) +/* ---------------- + * This cheesy hack turns ON unaligned-access fixup on H-P PA-RISC; + * the resulting object files contain code that explicitly handles + * realignment on reference, so it slows memory access down by a + * considerable factor. It must be used in conjunction with the +u + * flag to cc. The #pragma is included in c.h to be safe since EVERY + * source file that performs unaligned access must contain the #pragma. + * ---------------- + */ +#pragma HP_ALIGN HPUX_NATURAL_S500 + +#if defined(BROKEN_STRUCT_INIT) +/* ---------------- + * This is so bogus. The HP-UX 9.01 compiler has totally broken + * struct initialization code. It actually length-checks ALL + * array initializations within structs against the FIRST one that + * it sees (when #pragma HP_ALIGN HPUX_NATURAL_S500 is defined).. + * we have to throw in this unused structure before struct varlena + * is defined. + * + * XXX guess you don't need the #pragma anymore after all :-) + * since no one looks at this except me i think i'll just leave + * this here for now.. + * ---------------- + */ +struct HP_WAY_BOGUS { + char hpwb_bogus[8192]; +}; +struct HP_TOO_BOGUS { + int hptb_bogus[8192]; +}; +#endif /* BROKEN_STRUCT_INIT */ +#endif /* HP_S500_ALIGN */ + +#if defined(WEAK_C_OPTIMIZER) +#pragma OPT_LEVEL 1 +#endif /* WEAK_C_OPTIMIZER */ + +#endif /* !NOFIXADE */ + +#endif /* FIXADE_H */ diff --git a/src/backend/port/hpux/machine.h b/src/backend/port/hpux/machine.h new file mode 100644 index 00000000000..b178a621726 --- /dev/null +++ b/src/backend/port/hpux/machine.h @@ -0,0 +1,18 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: machine.h,v 1.1.1.1 1996/07/09 06:21:43 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif diff --git a/src/backend/port/hpux/port-protos.h b/src/backend/port/hpux/port-protos.h new file mode 100644 index 00000000000..16206bb2509 --- /dev/null +++ b/src/backend/port/hpux/port-protos.h @@ -0,0 +1,34 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * port-specific prototypes for HP-UX + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: port-protos.h,v 1.1.1.1 1996/07/09 06:21:43 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include /* for struct rusage */ +#include /* for shl_t */ + +#include "utils/dynamic_loader.h" + +/* dynloader.c */ + +/* pg_dl{open,close,sym} prototypes are in utils/dynamic_loader.h */ + +/* port.c */ + +extern int init_address_fixup(void); +extern double rint(double x); +extern double cbrt(double x); +extern long random(void); +extern void srandom(int seed); +extern int getrusage(int who, struct rusage *ru); + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/hpux/port.c b/src/backend/port/hpux/port.c new file mode 100644 index 00000000000..eccf3dc5c12 --- /dev/null +++ b/src/backend/port/hpux/port.c @@ -0,0 +1,47 @@ +/*------------------------------------------------------------------------- + * + * port.c-- + * port-specific routines for HP-UX 
+ * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/port/hpux/Attic/port.c,v 1.1.1.1 1996/07/09 06:21:43 scrappy Exp $ + * + * NOTES + * For the most part, this file gets around some non-POSIX calls + * in POSTGRES. + * + *------------------------------------------------------------------------- + */ +#include /* for rand()/srand() prototypes */ +#include /* for pow() prototype */ +#include /* for syscall #defines */ + +#include "c.h" + +void +init_address_fixup() +{ + /* + * On PA-RISC, unaligned access fixup is handled by the compiler, + * not by the kernel. + */ +} + +long +random() +{ + return(lrand48()); +} + +void srandom(int seed) +{ + srand48((long int) seed); +} + +getrusage(int who, struct rusage *ru) +{ + return(syscall(SYS_GETRUSAGE, who, ru)); +} diff --git a/src/backend/port/hpux/tas.c.template b/src/backend/port/hpux/tas.c.template new file mode 100644 index 00000000000..3ab37eb966e --- /dev/null +++ b/src/backend/port/hpux/tas.c.template @@ -0,0 +1,36 @@ +/* + * To generate tas.s using this template: + * 1. cc +O2 -S -c tas.c + * 2. edit tas.s: + * - replace the LDW with LDCWX + * For details about the LDCWX instruction, see the "Precision + * Architecture and Instruction Reference Manual" (09740-90014 of June + * 1987), p. 5-38. + */ + +int +tas(lock) + int *lock; /* LDCWX is a word instruction */ +{ + /* + * LDCWX requires that we align the "semaphore" to a 16-byte + * boundary. The actual datum is a single word (4 bytes). + */ + lock = ((long) lock + 15) & ~15; + + /* + * The LDCWX instruction atomically clears the target word and + * returns the previous value. Hence, if the instruction returns + * 0, someone else has already acquired the lock before we tested + * it (i.e., we have failed). + * + * Notice that this means that we actually clear the word to set + * the lock and set the word to clear the lock. This is the + * opposite behavior from the SPARC LDSTUB instruction. For some + * reason everything that H-P does is rather baroque... 
+ */ + if (*lock) { /* this generates the LDW */ + return(0); /* success */ + } + return(1); /* failure */ +} diff --git a/src/backend/port/hpux/tas.s b/src/backend/port/hpux/tas.s new file mode 100644 index 00000000000..d978a7cb030 --- /dev/null +++ b/src/backend/port/hpux/tas.s @@ -0,0 +1,28 @@ + + .SPACE $TEXT$,SORT=8 + .SUBSPA $CODE$,QUAD=0,ALIGN=4,ACCESS=44,CODE_ONLY,SORT=24 +tas + .PROC + .CALLINFO CALLER,FRAME=0,ENTRY_SR=3 + .ENTRY + LDO 15(%r26),%r31 ;offset 0x0 + DEPI 0,31,4,%r31 ;offset 0x4 + LDCWX 0(0,%r31),%r23 ;offset 0x8 + COMICLR,= 0,%r23,%r0 ;offset 0xc + DEP,TR %r0,31,32,%r28 ;offset 0x10 +$00000001 + LDI 1,%r28 ;offset 0x14 +$L0 + .EXIT + BV,N %r0(%r2) ;offset 0x18 + .PROCEND ;in=26;out=28; + + + .SPACE $TEXT$ + .SUBSPA $CODE$ + .SPACE $PRIVATE$,SORT=16 + .SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31,SORT=16 + .SPACE $TEXT$ + .SUBSPA $CODE$ + .EXPORT tas,ENTRY,PRIV_LEV=3,ARGW0=GR,RTNVAL=GR + .END diff --git a/src/backend/port/irix5/Makefile.inc b/src/backend/port/irix5/Makefile.inc new file mode 100644 index 00000000000..40527b3b1fc --- /dev/null +++ b/src/backend/port/irix5/Makefile.inc @@ -0,0 +1,20 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/irix5 (IRIX 5 specific stuff) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# /usr/local/devel/pglite/cvs/src/backend/port/sparc_solaris/Makefile.inc,v 1.3 1995/03/21 06:51:21 andrew Exp +# +#------------------------------------------------------------------------- + +CFLAGS+= -DUSE_POSIX_TIME -DNEED_ISINF -DNO_EMPTY_STMTS + +LDADD+= -ll + +SUBSRCS+= port.c + +HEADERS+= machine.h port-protos.h diff --git a/src/backend/port/irix5/README b/src/backend/port/irix5/README new file mode 100644 index 00000000000..2c66463018c --- /dev/null +++ b/src/backend/port/irix5/README @@ -0,0 +1,2 @@ +The IRIX 5 port was contributed by + Paul 'Shag' Walmsley diff --git a/src/backend/port/irix5/machine.h b/src/backend/port/irix5/machine.h new file mode 100644 index 00000000000..fd1f22c8a93 --- /dev/null +++ b/src/backend/port/irix5/machine.h @@ -0,0 +1,19 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * machine.h,v 1.1.1.1 1994/11/07 05:19:38 andrew Exp + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif + diff --git a/src/backend/port/irix5/port-protos.h b/src/backend/port/irix5/port-protos.h new file mode 100644 index 00000000000..7313e49c138 --- /dev/null +++ b/src/backend/port/irix5/port-protos.h @@ -0,0 +1,36 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * port-specific prototypes for Irix 5 + * + * + * Copyright (c) 1994, Regents of the University of California + * + * port-protos.h,v 1.2 1995/03/17 06:40:18 andrew Exp + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include "fmgr.h" /* for func_ptr */ +#include "utils/dynamic_loader.h" + +/* dynloader.c */ +/* + * Dynamic Loader on SunOS 4. + * + * this dynamic loader uses the system dynamic loading interface for shared + * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared + * library as the file to be dynamically loaded. 
+ * + */ +#define pg_dlopen(f) dlopen(f,1) +#define pg_dlsym dlsym +#define pg_dlclose dlclose +#define pg_dlerror dlerror + +/* port.c */ +extern long random(void); + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/irix5/port.c b/src/backend/port/irix5/port.c new file mode 100644 index 00000000000..82303ed7fcb --- /dev/null +++ b/src/backend/port/irix5/port.c @@ -0,0 +1,16 @@ +/*------------------------------------------------------------------------- + * + * port.c-- + * Irix5-specific routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * /usr/local/devel/pglite/cvs/src/backend/port/sparc_solaris/port.c,v 1.2 1995/03/17 06:40:19 andrew Exp + * + *------------------------------------------------------------------------- + */ +#include /* for pow() prototype */ + +#include diff --git a/src/backend/port/linux/Makefile.inc b/src/backend/port/linux/Makefile.inc new file mode 100644 index 00000000000..cc35929a260 --- /dev/null +++ b/src/backend/port/linux/Makefile.inc @@ -0,0 +1,36 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/linux (Linux specific stuff) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/port/linux/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:44 scrappy Exp $ +# +# NOTES +# The Linux port is included here by courtesy of Kai Petzke. +# +# (C) 1994, Kai Petzke, wpp@marie.physik.tu-berlin.de +# +#------------------------------------------------------------------------- + +# +# linux has fast linkers and don't need the BIGOBJ stuff. +# +BIGOBJS= false + + +ifdef LINUX_ELF +CC=gcc +LDADD+= -ldl +CFLAGS+= -DLINUX_ELF +else +LDADD+= -ldld +SUBSRCS+= dynloader.c +endif + +HEADERS+= machine.h port-protos.h +CFLAGS+= -DNEED_CBRT + diff --git a/src/backend/port/linux/dynloader.c b/src/backend/port/linux/dynloader.c new file mode 100644 index 00000000000..ebf0625dd73 --- /dev/null +++ b/src/backend/port/linux/dynloader.c @@ -0,0 +1,93 @@ +/*------------------------------------------------------------------------- + * + * dynloader.c-- + * Dynamic Loader for Postgres for Linux, generated from those for + * Ultrix. + * + * You need to install the dld library on your Linux system! + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/port/linux/Attic/dynloader.c,v 1.1.1.1 1996/07/09 06:21:44 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include +#include "postgres.h" +#include "port-protos.h" +#include "utils/elog.h" +#include "fmgr.h" + +extern char pg_pathname[]; + +void * +pg_dlopen(char *filename) +{ + static int dl_initialized= 0; + + /* + * initializes the dynamic loader with the executable's pathname. + * (only needs to do this the first time pg_dlopen is called.) + */ + if (!dl_initialized) { + if (dld_init (dld_find_executable (pg_pathname))) { + return NULL; + } + /* + * if there are undefined symbols, we want dl to search from the + * following libraries also. + */ + dl_initialized= 1; + } + + /* + * link the file, then check for undefined symbols! + */ + if (dld_link(filename)) { + return NULL; + } + + /* + * If undefined symbols: try to link with the C and math libraries! + * This could be smarter, if the dynamic linker was able to handle + * shared libs! 
+ */ + if(dld_undefined_sym_count > 0) { + if (dld_link("/usr/lib/libc.a")) { + elog(NOTICE, "dld: Cannot link C library!"); + return NULL; + } + if(dld_undefined_sym_count > 0) { + if (dld_link("/usr/lib/libm.a")) { + elog(NOTICE, "dld: Cannot link math library!"); + return NULL; + } + if(dld_undefined_sym_count > 0) { + int count = dld_undefined_sym_count; + char **list= dld_list_undefined_sym(); + + /* list the undefined symbols, if any */ + elog(NOTICE, "dld: Undefined:"); + do { + elog(NOTICE, " %s", *list); + list++; + count--; + } while(count > 0); + + dld_unlink_by_file(filename, 1); + return NULL; + } + } + } + + return (void *) strdup(filename); +} + +char * +pg_dlerror() +{ + return dld_strerror(dld_errno); +} diff --git a/src/backend/port/linux/machine.h b/src/backend/port/linux/machine.h new file mode 100644 index 00000000000..90458810e98 --- /dev/null +++ b/src/backend/port/linux/machine.h @@ -0,0 +1,18 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: machine.h,v 1.1.1.1 1996/07/09 06:21:44 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif diff --git a/src/backend/port/linux/port-protos.h b/src/backend/port/linux/port-protos.h new file mode 100644 index 00000000000..f80cd62d424 --- /dev/null +++ b/src/backend/port/linux/port-protos.h @@ -0,0 +1,37 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * port-specific prototypes for SunOS 4 + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: port-protos.h,v 1.1.1.1 1996/07/09 06:21:44 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include "fmgr.h" /* for func_ptr */ +#include "utils/dynamic_loader.h" +#ifdef LINUX_ELF +#include "dlfcn.h" +#endif + +/* dynloader.c */ + +#ifndef LINUX_ELF +#define pg_dlsym(handle, funcname) ((func_ptr) dld_get_func((funcname))) +#define pg_dlclose(handle) ({ dld_unlink_by_file(handle, 1); free(handle); }) +#else +/* #define pg_dlopen(f) dlopen(f, 1) */ +#define pg_dlopen(f) dlopen(f, 2) +#define pg_dlsym dlsym +#define pg_dlclose dlclose +#define pg_dlerror dlerror +#endif + +/* port.c */ + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/linux/port.c b/src/backend/port/linux/port.c new file mode 100644 index 00000000000..e4c5edfb9e5 --- /dev/null +++ b/src/backend/port/linux/port.c @@ -0,0 +1,13 @@ +/*------------------------------------------------------------------------- + * + * port.c-- + * Linux-specific routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/port/linux/Attic/port.c,v 1.1.1.1 1996/07/09 06:21:44 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ diff --git a/src/backend/port/sparc/Makefile.inc b/src/backend/port/sparc/Makefile.inc new file mode 100644 index 00000000000..3b102fa3249 --- /dev/null +++ b/src/backend/port/sparc/Makefile.inc @@ -0,0 +1,23 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/sparc (SPARC/SunOS 4 specific stuff) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# 
IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/port/sparc/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:44 scrappy Exp $ +# +#------------------------------------------------------------------------- + +CFLAGS+= -DUSE_POSIX_TIME + +LDADD+= -lln -ldl + +# +# SunOS 4 strtol is broken -- doesn't report overflow using errno. +# +SUBSRCS= strtol.c + +HEADERS+= float.h machine.h port-protos.h diff --git a/src/backend/port/sparc/float.h b/src/backend/port/sparc/float.h new file mode 100644 index 00000000000..13e284571fe --- /dev/null +++ b/src/backend/port/sparc/float.h @@ -0,0 +1,30 @@ +/*------------------------------------------------------------------------- + * + * float.h-- + * definitions for ANSI floating point + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: float.h,v 1.1.1.1 1996/07/09 06:21:44 scrappy Exp $ + * + * NOTES + * These come straight out of ANSI X3.159-1989 (p.18) and + * would be unnecessary if SunOS 4 were ANSI-compliant. + * + * This is only a partial listing because I'm lazy to type + * the whole thing in. + * + *------------------------------------------------------------------------- + */ +#ifndef FLOAT_H +#define FLOAT_H + +#define FLT_DIG 6 +#define FLT_MIN ((float) 1.17549435e-38) +#define FLT_MAX ((float) 3.40282347e+38) +#define DBL_DIG 15 +#define DBL_MIN 2.2250738585072014e-308 +#define DBL_MAX 1.7976931348623157e+308 + +#endif /* FLOAT_H */ diff --git a/src/backend/port/sparc/machine.h b/src/backend/port/sparc/machine.h new file mode 100644 index 00000000000..d34b65ff333 --- /dev/null +++ b/src/backend/port/sparc/machine.h @@ -0,0 +1,19 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: machine.h,v 1.1.1.1 1996/07/09 06:21:44 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif + diff --git a/src/backend/port/sparc/port-protos.h b/src/backend/port/sparc/port-protos.h new file mode 100644 index 00000000000..2765c9307a9 --- /dev/null +++ b/src/backend/port/sparc/port-protos.h @@ -0,0 +1,34 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * port-specific prototypes for SunOS 4 + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: port-protos.h,v 1.1.1.1 1996/07/09 06:21:44 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include +#include "fmgr.h" /* for func_ptr */ +#include "utils/dynamic_loader.h" + +/* dynloader.c */ +/* + * Dynamic Loader on SunOS 4. + * + * this dynamic loader uses the system dynamic loading interface for shared + * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared + * library as the file to be dynamically loaded. + * + */ +#define pg_dlopen(f) dlopen(f, 1) +#define pg_dlsym dlsym +#define pg_dlclose dlclose +#define pg_dlerror dlerror + +#endif /* PORT_PROTOS_H */ diff --git a/src/backend/port/sparc/strtol.c b/src/backend/port/sparc/strtol.c new file mode 100644 index 00000000000..5850848e66a --- /dev/null +++ b/src/backend/port/sparc/strtol.c @@ -0,0 +1,130 @@ +/*- + * Copyright (c) 1990 The Regents of the University of California. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +static char sccsid[] = "@(#)strtol.c 5.4 (Berkeley) 2/23/91"; +#endif /* LIBC_SCCS and not lint */ + +#include +#include +#include +#include + +#define const + +/* + * Convert a string to a long integer. + * + * Ignores `locale' stuff. Assumes that the upper and lower case + * alphabets and digits are each contiguous. + */ +long +strtol(nptr, endptr, base) + const char *nptr; + char **endptr; + register int base; +{ + register const char *s = nptr; + register unsigned long acc; + register int c; + register unsigned long cutoff; + register int neg = 0, any, cutlim; + + /* + * Skip white space and pick up leading +/- sign if any. + * If base is 0, allow 0x for hex and 0 for octal, else + * assume decimal; if base is already 16, allow 0x. + */ + do { + c = *s++; + } while (isspace(c)); + if (c == '-') { + neg = 1; + c = *s++; + } else if (c == '+') + c = *s++; + if ((base == 0 || base == 16) && + c == '0' && (*s == 'x' || *s == 'X')) { + c = s[1]; + s += 2; + base = 16; + } + if (base == 0) + base = c == '0' ? 8 : 10; + + /* + * Compute the cutoff value between legal numbers and illegal + * numbers. That is the largest legal value, divided by the + * base. An input number that is greater than this value, if + * followed by a legal input character, is too big. One that + * is equal to this value may be valid or not; the limit + * between valid and invalid numbers is then based on the last + * digit. For instance, if the range for longs is + * [-2147483648..2147483647] and the input base is 10, + * cutoff will be set to 214748364 and cutlim to either + * 7 (neg==0) or 8 (neg==1), meaning that if we have accumulated + * a value > 214748364, or equal but the next digit is > 7 (or 8), + * the number is too big, and we will return a range error. 
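As a quick check of the cutoff/cutlim arithmetic described above, the following standalone snippet (an illustration only, not part of the port; it assumes a 32-bit long) reproduces the numbers quoted in the comment: cutoff 214748364 with cutlim 7 for a positive result and cutlim 8 for a negative one.

#include <stdio.h>
#include <limits.h>

/* Prints the overflow cutoffs discussed above (32-bit long, base 10):
 *   positive: cutoff=214748364 cutlim=7
 *   negative: cutoff=214748364 cutlim=8
 */
int
main(void)
{
	unsigned long pos = LONG_MAX;			/* 2147483647 */
	unsigned long neg = -(unsigned long) LONG_MIN;	/* 2147483648 */

	printf("positive: cutoff=%lu cutlim=%lu\n", pos / 10, pos % 10);
	printf("negative: cutoff=%lu cutlim=%lu\n", neg / 10, neg % 10);
	return 0;
}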
+ * + * Set any if any `digits' consumed; make it negative to indicate + * overflow. + */ + cutoff = neg ? -(unsigned long)LONG_MIN : LONG_MAX; + cutlim = cutoff % (unsigned long)base; + cutoff /= (unsigned long)base; + for (acc = 0, any = 0;; c = *s++) { + if (isdigit(c)) + c -= '0'; + else if (isalpha(c)) + c -= isupper(c) ? 'A' - 10 : 'a' - 10; + else + break; + if (c >= base) + break; + if (any < 0 || acc > cutoff || acc == cutoff && c > cutlim) + any = -1; + else { + any = 1; + acc *= base; + acc += c; + } + } + if (any < 0) { + acc = neg ? LONG_MIN : LONG_MAX; + errno = ERANGE; + } else if (neg) + acc = -acc; + if (endptr != 0) + *endptr = any ? s - 1 : (char *)nptr; + return (acc); +} diff --git a/src/backend/port/sparc_solaris/Makefile.inc b/src/backend/port/sparc_solaris/Makefile.inc new file mode 100644 index 00000000000..ee4a8f82f02 --- /dev/null +++ b/src/backend/port/sparc_solaris/Makefile.inc @@ -0,0 +1,20 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/sparc_solaris (SPARC/Solaris 2.x specific stuff) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/port/sparc_solaris/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ +# +#------------------------------------------------------------------------- + +CFLAGS+= -DUSE_POSIX_TIME -DNEED_ISINF -DNEED_RUSAGE -DNO_EMPTY_STMTS + +LDADD+= -ll -ldl + +SUBSRCS+= port.c tas.s + +HEADERS+= machine.h port-protos.h rusagestub.h diff --git a/src/backend/port/sparc_solaris/machine.h b/src/backend/port/sparc_solaris/machine.h new file mode 100644 index 00000000000..35fe7afe61e --- /dev/null +++ b/src/backend/port/sparc_solaris/machine.h @@ -0,0 +1,19 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: machine.h,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif + diff --git a/src/backend/port/sparc_solaris/port-protos.h b/src/backend/port/sparc_solaris/port-protos.h new file mode 100644 index 00000000000..777e66310f4 --- /dev/null +++ b/src/backend/port/sparc_solaris/port-protos.h @@ -0,0 +1,38 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * port-specific prototypes for SunOS 4 + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: port-protos.h,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PROTOS_H +#define PORT_PROTOS_H + +#include +#include "fmgr.h" /* for func_ptr */ +#include "utils/dynamic_loader.h" + +/* dynloader.c */ +/* + * Dynamic Loader on SunOS 4. + * + * this dynamic loader uses the system dynamic loading interface for shared + * libraries (ie. dlopen/dlsym/dlclose). The user must specify a shared + * library as the file to be dynamically loaded. 
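The macros defined just below (like their irix5 counterparts earlier in this patch) reduce the whole port-specific loader interface to pg_dlopen/pg_dlsym/pg_dlclose/pg_dlerror. The consumer of that interface, the backend's dynamic function manager, is not included in this part of the patch, so here is a hedged sketch of the calling pattern the macros imply; the function name "my_udf", the local func_ptr typedef and the error handling are illustrative assumptions, not code from this distribution.

/*
 * Hedged sketch: load an object file and look up one symbol through the
 * pg_dl* macros.  "my_udf" is a made-up user function name; func_ptr is
 * redeclared here only to keep the example self-contained (the real
 * declaration lives in fmgr.h).
 */
typedef char *(*func_ptr) ();

static func_ptr
load_user_function(char *objfile)
{
	void	   *handle = pg_dlopen(objfile);	/* dlopen()/dld_link() underneath */
	func_ptr	fn;

	if (handle == NULL) {
		elog(NOTICE, "could not load %s: %s", objfile, pg_dlerror());
		return (func_ptr) NULL;
	}
	fn = (func_ptr) pg_dlsym(handle, "my_udf");
	if (fn == NULL)
		pg_dlclose(handle);	/* dlclose()/dld_unlink_by_file() underneath */
	return fn;
}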
+ *
+ */
+#define pg_dlopen(f)	dlopen(f,1)
+#define pg_dlsym	dlsym
+#define pg_dlclose	dlclose
+#define pg_dlerror	dlerror
+
+/* port.c */
+extern long random(void);
+extern void srandom(int seed);
+
+#endif /* PORT_PROTOS_H */
diff --git a/src/backend/port/sparc_solaris/port.c b/src/backend/port/sparc_solaris/port.c
new file mode 100644
index 00000000000..f3c6b8a7943
--- /dev/null
+++ b/src/backend/port/sparc_solaris/port.c
@@ -0,0 +1,66 @@
+/*-------------------------------------------------------------------------
+ *
+ * port.c--
+ *    SunOS5-specific routines
+ *
+ * Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *    $Header: /cvsroot/pgsql/src/backend/port/sparc_solaris/Attic/port.c,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <math.h>		/* for pow() prototype */
+
+#include <errno.h>
+#include "rusagestub.h"
+
+long
+random()
+{
+    return(lrand48());
+}
+
+void
+srandom(int seed)
+{
+    srand48((long int) seed);
+}
+
+int
+getrusage(int who, struct rusage *rusage)
+{
+    struct tms tms;
+    register int tick_rate = CLK_TCK;	/* ticks per second */
+    clock_t u, s;
+
+    if (rusage == (struct rusage *) NULL) {
+	errno = EFAULT;
+	return(-1);
+    }
+    if (times(&tms) < 0) {
+	/* errno set by times */
+	return(-1);
+    }
+    switch (who) {
+    case RUSAGE_SELF:
+	u = tms.tms_utime;
+	s = tms.tms_stime;
+	break;
+    case RUSAGE_CHILDREN:
+	u = tms.tms_cutime;
+	s = tms.tms_cstime;
+	break;
+    default:
+	errno = EINVAL;
+	return(-1);
+    }
+#define TICK_TO_SEC(T, RATE)	((T)/(RATE))
+#define TICK_TO_USEC(T,RATE)	(((T)%(RATE)*1000000)/RATE)
+    rusage->ru_utime.tv_sec = TICK_TO_SEC(u, tick_rate);
+    rusage->ru_utime.tv_usec = TICK_TO_USEC(u, tick_rate);
+    rusage->ru_stime.tv_sec = TICK_TO_SEC(s, tick_rate);
+    rusage->ru_stime.tv_usec = TICK_TO_USEC(s, tick_rate);
+    return(0);
+}
diff --git a/src/backend/port/sparc_solaris/rusagestub.h b/src/backend/port/sparc_solaris/rusagestub.h
new file mode 100644
index 00000000000..5e413bd0d9a
--- /dev/null
+++ b/src/backend/port/sparc_solaris/rusagestub.h
@@ -0,0 +1,30 @@
+/*-------------------------------------------------------------------------
+ *
+ * rusagestub.h--
+ *    Stubs for getrusage(3).
+ *
+ *
+ * Copyright (c) 1994, Regents of the University of California
+ *
+ * $Id: rusagestub.h,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef RUSAGESTUB_H
+#define RUSAGESTUB_H
+
+#include <sys/time.h>		/* for struct timeval */
+#include <sys/times.h>		/* for struct tms */
+#include <limits.h>		/* for CLK_TCK */
+
+#define RUSAGE_SELF	0
+#define RUSAGE_CHILDREN	-1
+
+struct rusage {
+    struct timeval ru_utime;		/* user time used */
+    struct timeval ru_stime;		/* system time used */
+};
+
+extern int getrusage(int who, struct rusage *rusage);
+
+#endif /* RUSAGESTUB_H */
diff --git a/src/backend/port/sparc_solaris/tas.s b/src/backend/port/sparc_solaris/tas.s
new file mode 100644
index 00000000000..e09c2d4ad3c
--- /dev/null
+++ b/src/backend/port/sparc_solaris/tas.s
@@ -0,0 +1,50 @@
+	!!
+	!! $Header: /cvsroot/pgsql/src/backend/port/sparc_solaris/Attic/tas.s,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $
+	!!
+	!! this would be a piece of inlined assembler but it appears
+	!! to be easier to just write the assembler than to try to
+	!! figure out how to make sure that in/out registers are kept
+	!! straight in the asm's.
+	!!
+	.file	"tas.c"
+.section	".text"
+	.align 4
+	.global tas
+	.type	tas,#function
+	.proc	04
+tas:
+	!!
+ !! this is a leaf procedure - no need to save windows and + !! diddle the CWP. + !! + !#PROLOGUE# 0 + !#PROLOGUE# 1 + + !! + !! write 0xFF into the lock address, saving the old value in %o0. + !! this is an atomic action, even on multiprocessors. + !! + ldstub [%o0],%o0 + + !! + !! if it was already set when we set it, somebody else already + !! owned the lock -- return 1. + !! + cmp %o0,0 + bne .LL2 + mov 1,%o0 + + !! + !! otherwise, it was clear and we now own the lock -- return 0. + !! + mov 0,%o0 +.LL2: + !! + !! this is a leaf procedure - no need to restore windows and + !! diddle the CWP. + !! + retl + nop +.LLfe1: + .size tas,.LLfe1-tas + .ident "GCC: (GNU) 2.5.8" diff --git a/src/backend/port/ultrix4/Makefile.inc b/src/backend/port/ultrix4/Makefile.inc new file mode 100644 index 00000000000..fca568f5d5d --- /dev/null +++ b/src/backend/port/ultrix4/Makefile.inc @@ -0,0 +1,27 @@ +#------------------------------------------------------------------------- +# +# Makefile.inc-- +# Makefile for port/ultrix (Ultrix4.x specific stuff) +# +# Copyright (c) 1994, Regents of the University of California +# +# +# IDENTIFICATION +# $Header: /cvsroot/pgsql/src/backend/port/ultrix4/Attic/Makefile.inc,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ +# +#------------------------------------------------------------------------- + +CFLAGS+= -DNEED_ISINF -DUSE_POSIX_TIME + +LDADD+= -ldl -lln + +# +# The YACC grammar is too big.. +# +#.if !defined(CDEBUG) +#CFLAGS+= -Olimit 2000 +#.endif + +HEADERS+= dl.h machine.h port-protos.h + +SUBSRCS+= dynloader.c port.c strdup.c diff --git a/src/backend/port/ultrix4/dl.h b/src/backend/port/ultrix4/dl.h new file mode 100644 index 00000000000..bca3602a21c --- /dev/null +++ b/src/backend/port/ultrix4/dl.h @@ -0,0 +1,117 @@ +/*------------------------------------------------------------------------- + * + * dl.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: dl.h,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +/* + * Ultrix 4.x Dynamic Loader Library Version 1.0 + * + * dl.h-- + * header file for the Dynamic Loader Library + * + * + * Copyright (c) 1993 Andrew K. Yu, University of California at Berkeley + * All rights reserved. + * + * Permission to use, copy, modify, and distribute this software and its + * documentation for educational, research, and non-profit purposes and + * without fee is hereby granted, provided that the above copyright + * notice appear in all copies and that both that copyright notice and + * this permission notice appear in supporting documentation. Permission + * to incorporate this software into commercial products can be obtained + * from the author. The University of California and the author make + * no representations about the suitability of this software for any + * purpose. It is provided "as is" without express or implied warranty. 
+ * + */ +#ifndef _DL_HEADER_ +#define _DL_HEADER_ + +#include +#include +#include +#include +#include + + +typedef long CoreAddr; + + +typedef struct ScnInfo { + CoreAddr addr; /* starting address of the section */ + SCNHDR hdr; /* section header */ + RELOC *relocEntries; /* relocation entries */ +} ScnInfo; + +typedef enum { + DL_NEEDRELOC, /* still need relocation */ + DL_RELOCATED, /* no relocation necessary */ + DL_INPROG /* relocation in progress */ +} dlRStatus; + +typedef struct JmpTbl { + char *block; /* the jump table memory block */ + struct JmpTbl *next; /* next block */ +} JmpTbl; + +typedef struct dlFile { + char *filename; /* file name of the object file */ + + int textSize; /* used by mprotect */ + CoreAddr textAddress; /* start addr of text section */ + long textVaddr; /* vaddr of text section in obj file */ + CoreAddr rdataAddress; /* start addr of rdata section */ + long rdataVaddr; /* vaddr of text section in obj file */ + CoreAddr dataAddress; /* start addr of data section */ + long dataVaddr; /* vaddr of text section in obj file */ + CoreAddr bssAddress; /* start addr of bss section */ + long bssVaddr; /* vaddr of text section in obj file */ + + int nsect; /* number of sections */ + ScnInfo *sect; /* details of each section (array) */ + + int issExtMax; /* size of string space */ + char *extss; /* extern sym string space (in core) */ + int iextMax; /* maximum number of Symbols */ + pEXTR extsyms; /* extern syms */ + + dlRStatus relocStatus; /* what relocation needed? */ + int needReloc; + + JmpTbl *jmptable; /* the jump table for R_JMPADDR */ + + struct dlFile *next; /* next member of the archive */ +} dlFile; + +typedef struct dlSymbol { + char *name; /* name of the symbol */ + long addr; /* address of the symbol */ + dlFile *objFile; /* from which file */ +} dlSymbol; + +/* + * prototypes for the dl* interface + */ +extern void *dl_open(/* char *filename, int mode */); +extern void *dl_sym(/* void *handle, char *name */); +extern void dl_close(/* void *handle */); +extern char *dl_error(/* void */); + +#define DL_LAZY 0 /* lazy resolution */ +#define DL_NOW 1 /* immediate resolution */ + +/* + * Miscellaneous utility routines: + */ +extern char **dl_undefinedSymbols(/* int *count */); +extern void dl_printAllSymbols(/* void *handle */); +extern void dl_setLibraries(/* char *libs */); + +#endif _DL_HEADER_ diff --git a/src/backend/port/ultrix4/dynloader.c b/src/backend/port/ultrix4/dynloader.c new file mode 100644 index 00000000000..887dff44abd --- /dev/null +++ b/src/backend/port/ultrix4/dynloader.c @@ -0,0 +1,68 @@ +/*------------------------------------------------------------------------- + * + * dynloader.c-- + * This dynamic loader uses Andrew Yu's libdl-1.0 package for Ultrix 4.x. + * (Note that pg_dlsym and pg_dlclose are actually macros defined in + * "port-protos.h".) + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/port/ultrix4/Attic/dynloader.c,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include "dl.h" +#include "c.h" +#include "fmgr.h" +#include "port-protos.h" +#include "utils/elog.h" + +extern char pg_pathname[]; + +void * +pg_dlopen(char *filename) +{ + static int dl_initialized= 0; + void *handle; + + /* + * initializes the dynamic loader with the executable's pathname. + * (only needs to do this the first time pg_dlopen is called.) 
+ */ + if (!dl_initialized) { + if (!dl_init(pg_pathname)) { + return NULL; + } + /* + * if there are undefined symbols, we want dl to search from the + * following libraries also. + */ + dl_setLibraries("/usr/lib/libm_G0.a:/usr/lib/libc_G0.a"); + dl_initialized= 1; + } + + /* + * open the file. We do the symbol resolution right away so that we + * will know if there are undefined symbols. (This is in fact the + * same semantics as "ld -A". ie. you cannot have undefined symbols. + */ + if ((handle=dl_open(filename, DL_NOW))==NULL) { + int count; + char **list= dl_undefinedSymbols(&count); + + /* list the undefined symbols, if any */ + if(count) { + elog(NOTICE, "dl: Undefined:"); + while(*list) { + elog(NOTICE, " %s", *list); + list++; + } + } + } + + return (void *)handle; +} + diff --git a/src/backend/port/ultrix4/machine.h b/src/backend/port/ultrix4/machine.h new file mode 100644 index 00000000000..35fe7afe61e --- /dev/null +++ b/src/backend/port/ultrix4/machine.h @@ -0,0 +1,19 @@ +/*------------------------------------------------------------------------- + * + * machine.h-- + * + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: machine.h,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef MACHINE_H +#define MACHINE_H + +#define BLCKSZ 8192 + +#endif + diff --git a/src/backend/port/ultrix4/port-protos.h b/src/backend/port/ultrix4/port-protos.h new file mode 100644 index 00000000000..ed055dd19c0 --- /dev/null +++ b/src/backend/port/ultrix4/port-protos.h @@ -0,0 +1,36 @@ +/*------------------------------------------------------------------------- + * + * port-protos.h-- + * prototypes for Ultrix-specific routines + * + * + * Copyright (c) 1994, Regents of the University of California + * + * $Id: port-protos.h,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#ifndef PORT_PORTOS_H +#define PORT_PORTOS_H + +#include "utils/dynamic_loader.h" +#include "dl.h" + +/* dynloader.c */ +/* + * New dynamic loader. + * + * This dynamic loader uses Andrew Yu's libdl-1.0 package for Ultrix 4.x. + * (Note that pg_dlsym and pg_dlclose are actually macros defined in + * "port-protos.h".) 
+ */ + +#define pg_dlsym(h, f) ((func_ptr)dl_sym(h, f)) +#define pg_dlclose(h) dl_close(h) +#define pg_dlerror() dl_error() + +/* port.c */ + +extern void init_address_fixup(void); + +#endif /* PORT_PORTOS_H */ diff --git a/src/backend/port/ultrix4/port.c b/src/backend/port/ultrix4/port.c new file mode 100644 index 00000000000..6ece3210ec1 --- /dev/null +++ b/src/backend/port/ultrix4/port.c @@ -0,0 +1,25 @@ +/*------------------------------------------------------------------------- + * + * port.c-- + * Ultrix-specific routines + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/port/ultrix4/Attic/port.c,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include +#include + +#include "c.h" + +void +init_address_fixup() +{ +#ifdef NOFIXADE + syscall(SYS_sysmips, MIPS_FIXADE, 0, NULL, NULL, NULL); +#endif /* NOFIXADE */ +} diff --git a/src/backend/port/ultrix4/strdup.c b/src/backend/port/ultrix4/strdup.c new file mode 100644 index 00000000000..f140711226e --- /dev/null +++ b/src/backend/port/ultrix4/strdup.c @@ -0,0 +1,23 @@ +/*------------------------------------------------------------------------- + * + * strdup.c-- + * copies a null-terminated string. + * + * Copyright (c) 1994, Regents of the University of California + * + * + * IDENTIFICATION + * $Header: /cvsroot/pgsql/src/backend/port/ultrix4/Attic/strdup.c,v 1.1.1.1 1996/07/09 06:21:45 scrappy Exp $ + * + *------------------------------------------------------------------------- + */ +#include + +char * +strdup(char *string) +{ + char *nstr; + + nstr = strcpy((char *)palloc(strlen(string)+1), string); + return nstr; +} diff --git a/src/backend/port/win32/machine.h b/src/backend/port/win32/machine.h new file mode 100644 index 00000000000..43e40574966 --- /dev/null +++ b/src/backend/port/win32/machine.h @@ -0,0 +1,2 @@ +#define BLCKSZ 8192 +#define NOFILE 100 diff --git a/src/backend/port/win32/nt.c b/src/backend/port/win32/nt.c new file mode 100644 index 00000000000..b5940051045 --- /dev/null +++ b/src/backend/port/win32/nt.c @@ -0,0 +1,625 @@ +#include +#include +#include "postgres.h" +#include "storage/ipc.h" + +/* The name of the Postgres 95 ipc file mapping object */ +#define IPC_NAME "PG95_IPC" + +/* The name of the Postgres 95 ipc file mapping object semaphore */ +#define IPC_SEM_NAME "PG95_IPC_SEM" + +/* The maximum length of a shared memory object name */ +#define IPC_MAX_SHMEM_NAME 32 + +/* The maximum number of emulated Unix shared memory segments */ +#define IPC_NMAXSHM 10 + +/* The Maximum number of elements in a semaphore set. Note that this +** is just a guess. +*/ +#define IPC_NMAXSEMGRP 7 + +/* The various states of a semaphore */ +#define SIGNALED 1 +#define UNSIGNALED 0 +#define UNUSED -1 + +/* The security attribute structure necessary for handles to be inhereted */ +SECURITY_ATTRIBUTES sec_attrib = { sizeof (LPSECURITY_ATTRIBUTES), + NULL, TRUE}; + +/* +Postgres95 uses semaphores and shared memory. Both are provided by +Unix and NT, although NT uses a different method for referencing +them. Rather than changing the function calls used by Postgres95 +to use NT system services, we've written code to emulate the Unix +system calls. We deliberately don't do a complete emulation of the +Unix calls, partly because it doesn't appear possible, but also +because only a few options of the Unix calls are actually used by +Postgres95. 
+ +The most noticable difference between the way Unix and NT use semaphores +is that the central entity on Unix is a semaphore set consisting of +potientially many actual semaphores whereas on NT a semaphore handle +represents just one actual semaphore. Furthermore, a Unix semaphore set +is identified by one semaphore id no matter how many elements there +are in the set. Given a Unix semaphore id, the Unix API provides a way +to index into the set to reference a specific semaphore. + +You might think that since both a semaphore id and a semaphore handle +is just an integer there won't be any changes necessary to the Postgres95 +code to deal with NT semaphores. If it weren't for the existence of +multi-semaphore semaphore sets this would be true. + +To handle semaphore sets a fixed-size table, whose size is partially +based on the sum of the maximum number of semaphores times the maximum +number of semaphores per semaphore set, is created and kept in shared +memory that is visable to every backend started by the Postmaster. + +Each semaphore set entry consists of an arbitrary key value, which serves +to identify the semaphore set, and IPC_NMAXSEMGRP array elements to +store the NT semaphore handles representing the NT semaphore used for +the semaphore set. Semaphore IDs are just indices into this table. +In order to distinguish occupied entries in this table -1 is always +considered an invalid semaphore ID. + +This table is also used to store information about shared memory +segments. Fortunately, there is a one-to-one mapping between Unix +shared memory IDs and NT shared memory handles so the code to emulate +Unix shared memory is simple. +*/ + +/* We need one of these for each emulated semaphore set */ +struct Pg_sem +{ + key_t Pg_sem_key; + HANDLE Pg_sem_handle[IPC_NMAXSEMGRP]; + int Pg_sem_nsems; +}; + +/* We need one of these for each emulated shared memory segment */ +struct Pg_shm +{ + key_t Pg_shm_key; + HANDLE Pg_shm_handle; +}; + +/* This structure is what's stored in shared memory. Note that +** since both the shared memory and semaphore data is in the same +** table, and the table is protected by a single NT semaphore, there's +** a chance that semaphore manipulation could be slowed down by +** shared memory manipulation, and vice versa. But, since both are +** allocated primarily when the Postmaster starts up, which isn't time +** critical, I don't think this will prove to be a problem. +*/ + +static struct Pg_shared +{ + int Pg_next_sem; + int Pg_next_shm; + struct Pg_sem Pg_sem[IPC_NMAXSEM]; + struct Pg_shm Pg_shm[IPC_NMAXSHM]; +} *Pg_shared_ptr; + +/* The semaphore that protects the shared memory table */ +HANDLE Pg_shared_hnd; + +/* +** Perform a semaphore operation. We're passed a semaphore set id, +** a pointer to an array of sembuf structures, and the number +** of elements in the array. Each element in the sembuf structure +** describes a specific semaphore within the semaphore set and the +** operation to perform on it. +*/ + +int +semop(int semid, struct sembuf *sops, u_int nsops) +{ + u_int i; + int result; + HANDLE hndl; + + /* Go through all the sops structures */ + for (i = 0; i < nsops; i++) + { + struct sembuf *sptr; + int semval; + int av_sem_op; + + sptr = &sops[i]; + /* + printf("performing %d in sem # %d\n", sptr->sem_op, sptr->sem_num); + */ + + /* + ** Postgres95 uses -255 to represent a lock request + ** and 255 to show a lock release. Changing these values + ** to -1 and 1 make it easier to keep track of the state + ** of the semaphore. 
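Given the convention spelled out above (a single operation of -255 asks for the lock, 255 releases it, and the emulation normalizes those to -1/+1 internally), the caller's side of semget/semop reduces to something like the hedged sketch below. The key value and the one-element "set" are invented example parameters, not values taken from this patch.

/*
 * Hedged illustration of the calling sequence the emulation expects.
 * The key 5432001 is made up for the example.
 */
static void
example_lock_unlock(void)
{
	key_t		key = (key_t) 5432001;		/* hypothetical IPC key */
	int		semid = semget(key, 1, IPC_CREAT);	/* one-semaphore "set" */
	struct sembuf	op;

	op.sem_num = 0;
	op.sem_flg = 0;

	op.sem_op = -255;		/* lock request, per the comment above */
	semop(semid, &op, 1);

	/* ... critical section ... */

	op.sem_op = 255;		/* release */
	semop(semid, &op, 1);
}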
+ */ + if (sptr->sem_op == -255) + sptr->sem_op = -1; + else if (sptr->sem_op == 255) + sptr->sem_op = 1; + else + printf("invalid sem_op %d\n", sptr->sem_op); + + _get_ipc_sem(); + hndl = Pg_shared_ptr->Pg_sem[semid].Pg_sem_handle[sptr->sem_num]; + _rel_ipc_sem(); + semval = _get_sem_val(hndl); + + if (sptr->sem_op == 0) + { + if (semval == UNSIGNALED) + return(semval); + else + { + if (sptr->sem_flg & IPC_NOWAIT) + return(SIGNALED); + else + result = WaitForSingleObject(hndl, 5000); + } + } + + av_sem_op = abs(sptr->sem_op); + + /* If a lock is being attempted */ + if (sptr->sem_op < 0) + { + if (semval >= av_sem_op) + { + semval -= av_sem_op; + if (semval <= UNSIGNALED) + result = WaitForSingleObject(hndl, 5000); + } + else + { + if (sptr->sem_flg & IPC_NOWAIT) + return(SIGNALED); + else + result = WaitForSingleObject(hndl, 5000); + } + } + + /* If a lock is being released */ + if (sptr->sem_op > 0) + { + semval += av_sem_op; + if (semval > 0) + ReleaseSemaphore(hndl, 1, NULL); + } + } +} + +int +semget(key_t key, int nsems, int semflg) +{ + int id, new_sem, ret_val; + + /* If nmsems is 0 then assume that we're just checking whether + ** the semaphore identified by key exists. Assume that + ** if key is IPC_PRIVATE that this should always fail. + */ + if (nsems == 0) + { + if (key == IPC_PRIVATE) + ret_val = -1; + else + { + _get_ipc_sem(); + id = _get_sem_id(key); + _rel_ipc_sem(); + ret_val = id; + } + return(ret_val); + } + + /* See if there's already a semaphore with the key. + ** If not, record the key, allocate enough space for the + ** handles of the semaphores, and then create the semaphores. + */ + _get_ipc_sem(); + id = _get_sem_id(key); + if (id == UNUSED) + { + register int i; + struct Pg_sem *pg_ptr; + + new_sem = Pg_shared_ptr->Pg_next_sem++; + + pg_ptr = &(Pg_shared_ptr->Pg_sem[new_sem]); + pg_ptr->Pg_sem_key = key; + pg_ptr->Pg_sem_nsems = nsems; + + for (i = 0; i < nsems; i++) + pg_ptr->Pg_sem_handle[i] = CreateSemaphore(&sec_attrib, 1, 255, NULL); + ret_val = new_sem; + } + else + ret_val = id; + _rel_ipc_sem(); + return(ret_val); +} + +/* These next two functions could be written as one function, although +** doing so would require some additional logic. +*/ + +/* Given a semaphore key, return the corresponding id. +** This function assumes that the shared memory table is being +** protected by the shared memory table semaphore. +*/ +_get_sem_id(key_t key) +{ + register int i; + + /* Go through the shared memory table looking for a semaphore + ** whose key matches what we're looking for + */ + for (i = 0; i < Pg_shared_ptr->Pg_next_sem; i++) + if (Pg_shared_ptr->Pg_sem[i].Pg_sem_key == key) + return(i); + + /* Return UNUSED if we didn't find a match */ + return(UNUSED); +} + +/* Given a shared memory key, return the corresponding id +** This function assumes that the shared memory table is being +** protected by the shared memory table semaphore. +*/ +_get_shm_id(key_t key) +{ + register int i; + + /* Go through the shared memory table looking for a semaphore + ** whose key matches what we're looking for + */ + for (i = 0; i < Pg_shared_ptr->Pg_next_shm; i++) + if (Pg_shared_ptr->Pg_shm[i].Pg_shm_key == key) + return(i); + + /* Return UNUSED if we didn't find a match */ + return(UNUSED); +} + +int +semctl(int semid, int semnum, int cmd, void *y) +{ + int old_val; + HANDLE hndl; + + switch (cmd) + { + case SETALL: + case SETVAL: + /* We can't change the value of a semaphore under + ** NT except by releasing it or waiting for it. 
+ */ + return(0); + + case GETVAL: + _get_ipc_sem(); + hndl = Pg_shared_ptr->Pg_sem[semid].Pg_sem_handle[semnum]; + _rel_ipc_sem(); + old_val = _get_sem_val(hndl); + return(old_val); + } +} + +/* Get the current value of the semaphore whose handle is passed in hnd +** This function does NOT assume that the shared memory table is being +** protected by the shared memory table semaphore. +*/ + +int +_get_sem_val(HANDLE hnd) +{ + DWORD waitresult; + + /* Try to get the semaphore */ + waitresult = WaitForSingleObject(hnd, 0L); + + /* Check what the value of the semaphore was */ + switch(waitresult) + { + /* The semaphore was signaled so we just got it. + ** Since we don't really want to keep it, since we just + ** wanted to test its value, go ahead and release it. + */ + case WAIT_OBJECT_0: + ReleaseSemaphore(hnd, 1, NULL); + return(SIGNALED); + + /* The semaphore was non-signaled meaning someone else had it. */ + case WAIT_TIMEOUT: + return(UNSIGNALED); + } +} + +int +shmget(key_t key, uint32 size, int flags) +{ + HANDLE hnd; + char name[IPC_MAX_SHMEM_NAME]; + int id; + + /* Get the handle for the key, if any. */ + _get_ipc_sem(); + id = _get_shm_id(key); + _rel_ipc_sem(); + + /* If we're really going to create a new mapping */ + if (flags != 0) + { + /* if the key is already being used return an error */ + if (id != UNUSED) + return(-1); + + /* convert the key to a character string */ + sprintf(name, "%d", key); + + hnd = CreateFileMapping((HANDLE)0xffffffff, + &sec_attrib, PAGE_READWRITE, + 0, size, name); + + if (hnd == NULL) + return(-1); + else + { + int new_ipc; + struct Pg_shm *pg_ptr; + + _get_ipc_sem(); + new_ipc = Pg_shared_ptr->Pg_next_shm++; + + pg_ptr = &(Pg_shared_ptr->Pg_shm[new_ipc]); + pg_ptr->Pg_shm_key = key; + pg_ptr->Pg_shm_handle = hnd; + _rel_ipc_sem(); + return(new_ipc); + } + } + + /* flags is 0 so we just want the id for the existing mapping */ + else + return(id); +} + +shmdt(char *shmaddr) +{ + UnmapViewOfFile(shmaddr); +} + +int +shmctl(IpcMemoryId shmid, int cmd, struct shmid_ds *buf) +{ + int x = 0; + + if (cmd == IPC_RMID) + { + _get_ipc_sem(); + CloseHandle(Pg_shared_ptr->Pg_shm[shmid].Pg_shm_handle); + _rel_ipc_sem(); + return(0); + } + x = x / x; +} + +/* Attach to the already created shared memory segment */ +LPVOID * +shmat(int shmid, void *shmaddr, int shmflg) +{ + LPVOID *ret_addr; + + _get_ipc_sem(); + ret_addr = MapViewOfFile(Pg_shared_ptr->Pg_shm[shmid].Pg_shm_handle, + FILE_MAP_ALL_ACCESS, 0, 0, 0); + _rel_ipc_sem(); + if (ret_addr == NULL) + { + int jon; + + jon = GetLastError(); + } + return(ret_addr); +} + +/* This is the function that is called when the postmaster starts up. +** It is here that the shared memory table is created. Also, create +** the semaphore that will be used to protect the shared memory table. +** TODO - do something with the return value. +*/ +_nt_init() +{ + HANDLE hnd; + int size = sizeof (struct Pg_shared); + + /* Create the file mapping for the shared memory to be + ** used to store the ipc table. + */ + hnd = CreateFileMapping((HANDLE)0xffffffff, + &sec_attrib, PAGE_READWRITE, + 0, size, IPC_NAME); + + if (hnd == NULL) + { + size = GetLastError(); + return(-1); + } + + Pg_shared_hnd = CreateSemaphore(&sec_attrib, 1, 255, IPC_SEM_NAME); + if (Pg_shared_hnd == NULL) + { + size = GetLastError(); + return(-1); + } +} + +/* This function gets called by every backend at startup time. Its +** main duty is to put the address of the shared memory table pointed +** to by Pg_shared_ptr. 
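For the shared memory half of the emulation, the Unix-style calling sequence that shmget/shmat above support is roughly the following; the key and segment size are invented example values and the error handling is deliberately minimal.

/*
 * Hedged sketch of creating and attaching one segment through the
 * emulation above.  Key and size are made-up example values.
 */
static void *
example_create_and_attach(void)
{
	key_t	key = (key_t) 5432010;				/* hypothetical IPC key */
	int	shmid = shmget(key, 1024 * 1024, IPC_CREAT);	/* CreateFileMapping underneath */

	if (shmid < 0)
		return NULL;
	return (void *) shmat(shmid, NULL, 0);			/* MapViewOfFile underneath */
}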
There's no need to get the IPC_SEM_NAME semaphore +** because this function is called before we start manipulating the +** shared memory table. +*/ +void +_nt_attach() +{ + HANDLE hnd; + + /* Get a handle to the shared memory table */ + hnd = OpenFileMapping(FILE_MAP_ALL_ACCESS, + FALSE, IPC_NAME); + + /* Map the ipc shared memory table into the address space + ** of this process at an address returned by MapViewOfFile + */ + Pg_shared_ptr = (struct Pg_shared *) MapViewOfFile(hnd, + FILE_MAP_ALL_ACCESS, 0, 0, 0); + + if (Pg_shared_ptr == NULL) + { + hnd = GetLastError(); + return(-1); + } +} + +_get_ipc_sem() +{ + WaitForSingleObject(Pg_shared_hnd, 5000); +} + +_rel_ipc_sem() +{ + ReleaseSemaphore(Pg_shared_hnd, 1, NULL); +} + +pg_dlerror(void) +{ + int x = 0; + x = x / x; +} + +pg_dlclose(void *handle) +{ + FreeLibrary(handle); +} + +void * +pg_dlopen(char *filename) +{ + HINSTANCE hinstlib; + + hinstlib = LoadLibrary(filename); + return (hinstlib); +} + +void * +pg_dlsym(void *handle, char *funcname) +{ + void *proc; + + proc = GetProcAddress(handle, funcname); + return (proc); +} + +void +ftruncate(int fd, int offset) +{ + HANDLE hnd; + + _lseek(fd, offset, SEEK_SET); + hnd = _get_osfhandle(fd); + SetEndOfFile(hnd); +} + +/* The rest are just stubs that are intended to serve as place holders +** in case we want to set breakpoints to see what's happening when +** these routines are called. They'll eventually have to be filled +** in but they're not necessary to get Postgres95 going. +*/ +setuid(int i) +{ + int x = 1; + x = x / x; +} + +setsid() +{ + int x = 1; + x = x / x; +} + +vfork(void) +{ + int x = 0; + x = x / x; +} + +ttyname(int y) +{ + int x = 0; + x = x / x; +} + +step(char *string, char *expbuf) +{ + int x = 0; + x = x / x; +} + +siglongjmp(int env, int value) +{ + int x = 0; + x = x / x; +} + +pause(void) +{ + int x = 0; + x = x / x; +} + +kill(int process, int signal) +{ + int x = 1; + x = x / x; +} + +getuid(void) +{ + int x = 1; + x = x / x; +} + +geteuid( void ) +{ + int x = 1; + x = x / x; +} + +int +fsync(int filedes) +{ +} + +fork(void) +{ + int x = 0; + x = x / x; +} + +char * +compile(char *instring,char *expbuf,char *endbuf,int eof) +{ + int x = 0; + x = x / x; +} + +beginRecipe(char *s) +{ + int x = 0; + x = x / x; +} diff --git a/src/backend/port/win32/nt.h b/src/backend/port/win32/nt.h new file mode 100644 index 00000000000..abe3519ab5c --- /dev/null +++ b/src/backend/port/win32/nt.h @@ -0,0 +1,54 @@ +typedef char * caddr_t; +typedef unsigned long u_long; +typedef unsigned int u_int; +typedef unsigned short u_short; +typedef unsigned char u_char; +typedef unsigned int mode_t; + +typedef u_int uid_t; +typedef u_int gid_t; +typedef int key_t; +#define IPC_PRIVATE ((key_t)0) + +/* Common IPC operation flag definitions. We'll use +** the Unix values unless we find a reason not to. +*/ +#define IPC_CREAT 0001000 /* create entry if key doesn't exist */ +#define IPC_EXCL 0002000 /* fail if key exists */ +#define IPC_NOWAIT 0004000 /* error if request must wait */ + + +struct sembuf +{ + u_short sem_num; + short sem_op; + short sem_flg; +}; + +#define USE_POSIX_TIME +#define NEED_RINT + +#define MAXHOSTNAMELEN 12 /* where is the official definition of this? */ +#define MAXPATHLEN _MAX_PATH /* in winsock.h */ +#define POSTPORT "5432" + +/* NT has stricmp not strcasecmp. Which is ANSI? 
*/ +#define strcasecmp(a,b) _stricmp(a,b) + +#define isascii(a) __isascii(a) + +#define random() rand() + +/* These are bogus values used so that we can compile ipc.c */ +#define SETALL 2 +#define SETVAL 3 +#define IPC_RMID 4 +#define GETNCNT 5 +#define GETVAL 6 + +/* for float.c */ +#define NEED_CBRT +#define NEED_ISINF + +#define POSTGRESDIR "d:\\pglite" +#define PGDATADIR "d:\\pglite\\data" diff --git a/src/backend/port/win32/pglite.mak b/src/backend/port/win32/pglite.mak new file mode 100644 index 00000000000..c922191c5df --- /dev/null +++ b/src/backend/port/win32/pglite.mak @@ -0,0 +1,3323 @@ +# Microsoft Visual C++ Generated NMAKE File, Format Version 2.00 +# ** DO NOT EDIT ** + +# TARGTYPE "Win32 (x86) Console Application" 0x0103 + +!IF "$(CFG)" == "" +CFG=Win32 Debug +!MESSAGE No configuration specified. Defaulting to Win32 Debug. +!ENDIF + +!IF "$(CFG)" != "Win32 Release" && "$(CFG)" != "Win32 Debug" +!MESSAGE Invalid configuration "$(CFG)" specified. +!MESSAGE You can specify a configuration when running NMAKE on this makefile +!MESSAGE by defining the macro CFG on the command line. For example: +!MESSAGE +!MESSAGE NMAKE /f "pglite.mak" CFG="Win32 Debug" +!MESSAGE +!MESSAGE Possible choices for configuration are: +!MESSAGE +!MESSAGE "Win32 Release" (based on "Win32 (x86) Console Application") +!MESSAGE "Win32 Debug" (based on "Win32 (x86) Console Application") +!MESSAGE +!ERROR An invalid configuration is specified. +!ENDIF + +################################################################################ +# Begin Project +# PROP Target_Last_Scanned "Win32 Debug" +CPP=cl.exe +RSC=rc.exe + +!IF "$(CFG)" == "Win32 Release" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 0 +# PROP BASE Output_Dir "WinRel" +# PROP BASE Intermediate_Dir "WinRel" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 0 +# PROP Output_Dir "g:\users\forrest\pglite" +# PROP Intermediate_Dir "g:\users\forrest\pglite" +OUTDIR=g:\users\forrest\pglite +INTDIR=g:\users\forrest\pglite + +ALL : $(OUTDIR)/pglite.exe $(OUTDIR)/pglite.bsc + +$(OUTDIR) : + if not exist $(OUTDIR)/nul mkdir $(OUTDIR) + +# ADD BASE CPP /nologo /W3 /GX /YX /O2 /D "WIN32" /D "NDEBUG" /D "_CONSOLE" /FR /c +# ADD CPP /nologo /W3 /GX /YX /Od /I "g:\pglite\src\backend\include" /I "g:\pglite\src\backend" /I "g:\pglite\src\backend\port\win32" /I "g:\pglite\src\backend\obj" /D "NDEBUG" /D "WIN32" /D "_CONSOLE" /D "__STDC__" /D "_POSIX_" /c +# SUBTRACT CPP /Fr +CPP_PROJ=/nologo /W3 /GX /YX /Od /I "g:\pglite\src\backend\include" /I\ + "g:\pglite\src\backend" /I "g:\pglite\src\backend\port\win32" /I\ + "g:\pglite\src\backend\obj" /D "NDEBUG" /D "WIN32" /D "_CONSOLE" /D "__STDC__"\ + /Fp$(OUTDIR)/"pglite.pch" /Fo$(INTDIR)/ /D "_POSIX_" /c +CPP_OBJS=g:\users\forrest\pglite/ +# ADD BASE RSC /l 0x409 /d "NDEBUG" +# ADD RSC /l 0x409 /d "NDEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +BSC32_FLAGS=/nologo /o$(OUTDIR)/"pglite.bsc" +BSC32_SBRS= \ + + +$(OUTDIR)/pglite.bsc : $(OUTDIR) $(BSC32_SBRS) +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /NOLOGO /SUBSYSTEM:console /MACHINE:I386 +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /NOLOGO /SUBSYSTEM:console /MACHINE:I386 +LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\ + advapi32.lib shell32.lib ole32.lib oleaut32.lib 
uuid.lib odbc32.lib\ + odbccp32.lib /NOLOGO /SUBSYSTEM:console /INCREMENTAL:no\ + /PDB:$(OUTDIR)/"pglite.pdb" /MACHINE:I386 /OUT:$(OUTDIR)/"pglite.exe" +DEF_FILE= +LINK32_OBJS= \ + $(INTDIR)/scankey.obj \ + $(INTDIR)/printtup.obj \ + $(INTDIR)/indexvalid.obj \ + $(INTDIR)/heaptuple.obj \ + $(INTDIR)/tupdesc.obj \ + $(INTDIR)/indextuple.obj \ + $(INTDIR)/heapvalid.obj \ + $(INTDIR)/hashinsert.obj \ + $(INTDIR)/hashstrat.obj \ + $(INTDIR)/hashutil.obj \ + $(INTDIR)/hashpage.obj \ + $(INTDIR)/hashsearch.obj \ + $(INTDIR)/hashscan.obj \ + $(INTDIR)/hashfunc.obj \ + $(INTDIR)/hash.obj \ + $(INTDIR)/hashovfl.obj \ + $(INTDIR)/bootstrap.obj \ + $(INTDIR)/genam.obj \ + $(INTDIR)/creatinh.obj \ + $(INTDIR)/nodeSeqscan.obj \ + $(INTDIR)/nodeUnique.obj \ + $(INTDIR)/rename.obj \ + $(INTDIR)/transsup.obj \ + $(INTDIR)/transam.obj \ + $(INTDIR)/define.obj \ + $(INTDIR)/execMain.obj \ + $(INTDIR)/xid.obj \ + $(INTDIR)/nodeAgg.obj \ + $(INTDIR)/nbtpage.obj \ + $(INTDIR)/execScan.obj \ + $(INTDIR)/nbtree.obj \ + $(INTDIR)/rtscan.obj \ + $(INTDIR)/indexam.obj \ + $(INTDIR)/execQual.obj \ + $(INTDIR)/nodeHash.obj \ + $(INTDIR)/nbtscan.obj \ + $(INTDIR)/hio.obj \ + $(INTDIR)/pg_proc.obj \ + $(INTDIR)/stats.obj \ + $(INTDIR)/nodeMaterial.obj \ + $(INTDIR)/varsup.obj \ + $(INTDIR)/copy.obj \ + $(INTDIR)/rtproc.obj \ + $(INTDIR)/functions.obj \ + $(INTDIR)/nodeHashjoin.obj \ + $(INTDIR)/catalog.obj \ + $(INTDIR)/nbtinsert.obj \ + $(INTDIR)/rtree.obj \ + $(INTDIR)/version.obj \ + $(INTDIR)/async.obj \ + $(INTDIR)/nbtutils.obj \ + $(INTDIR)/vacuum.obj \ + $(INTDIR)/rtstrat.obj \ + $(INTDIR)/execFlatten.obj \ + $(INTDIR)/nodeTee.obj \ + $(INTDIR)/nodeIndexscan.obj \ + $(INTDIR)/remove.obj \ + $(INTDIR)/indexing.obj \ + $(INTDIR)/command.obj \ + $(INTDIR)/nbtsearch.obj \ + $(INTDIR)/heapam.obj \ + $(INTDIR)/nodeSort.obj \ + $(INTDIR)/execProcnode.obj \ + $(INTDIR)/nodeResult.obj \ + $(INTDIR)/index.obj \ + $(INTDIR)/xact.obj \ + $(INTDIR)/nodeMergejoin.obj \ + $(INTDIR)/pg_operator.obj \ + $(INTDIR)/execJunk.obj \ + $(INTDIR)/pg_aggregate.obj \ + $(INTDIR)/istrat.obj \ + $(INTDIR)/execUtils.obj \ + $(INTDIR)/purge.obj \ + $(INTDIR)/heap.obj \ + $(INTDIR)/nbtstrat.obj \ + $(INTDIR)/execAmi.obj \ + $(INTDIR)/execTuples.obj \ + $(INTDIR)/pg_type.obj \ + $(INTDIR)/view.obj \ + $(INTDIR)/nodeAppend.obj \ + $(INTDIR)/defind.obj \ + $(INTDIR)/nodeNestloop.obj \ + $(INTDIR)/nbtcompare.obj \ + $(INTDIR)/rtget.obj \ + $(INTDIR)/catalog_utils.obj \ + $(INTDIR)/setrefs.obj \ + $(INTDIR)/mergeutils.obj \ + $(INTDIR)/oset.obj \ + $(INTDIR)/arrayutils.obj \ + $(INTDIR)/nodeFuncs.obj \ + $(INTDIR)/rewriteSupport.obj \ + $(INTDIR)/bufpage.obj \ + $(INTDIR)/fd.obj \ + $(INTDIR)/clauseinfo.obj \ + $(INTDIR)/nabstime.obj \ + $(INTDIR)/mcxt.obj \ + $(INTDIR)/ipci.obj \ + $(INTDIR)/qsort.obj \ + $(INTDIR)/outfuncs.obj \ + $(INTDIR)/tqual.obj \ + $(INTDIR)/keys.obj \ + $(INTDIR)/clauses.obj \ + $(INTDIR)/print.obj \ + $(INTDIR)/postinit.obj \ + $(INTDIR)/oidchar16.obj \ + $(INTDIR)/name.obj \ + $(INTDIR)/tid.obj \ + $(INTDIR)/"be-fsstubs.obj" \ + $(INTDIR)/elog.obj \ + $(INTDIR)/bufmgr.obj \ + $(INTDIR)/portalbuf.obj \ + $(INTDIR)/psort.obj \ + $(INTDIR)/syscache.obj \ + $(INTDIR)/exc.obj \ + $(INTDIR)/selfuncs.obj \ + $(INTDIR)/var.obj \ + $(INTDIR)/oid.obj \ + $(INTDIR)/"be-pqexec.obj" \ + $(INTDIR)/ordering.obj \ + $(INTDIR)/inv_api.obj \ + $(INTDIR)/buf_table.obj \ + $(INTDIR)/acl.obj \ + $(INTDIR)/costsize.obj \ + $(INTDIR)/catcache.obj \ + $(INTDIR)/rewriteRemove.obj \ + $(INTDIR)/parse_query.obj \ + $(INTDIR)/excabort.obj \ + 
$(INTDIR)/lmgr.obj \ + $(INTDIR)/excid.obj \ + $(INTDIR)/int.obj \ + $(INTDIR)/auth.obj \ + $(INTDIR)/regexp.obj \ + $(INTDIR)/proc.obj \ + $(INTDIR)/dbcommands.obj \ + $(INTDIR)/dynahash.obj \ + $(INTDIR)/shmem.obj \ + $(INTDIR)/relnode.obj \ + $(INTDIR)/fstack.obj \ + $(INTDIR)/smgr.obj \ + $(INTDIR)/magic.obj \ + $(INTDIR)/relcache.obj \ + $(INTDIR)/varlena.obj \ + $(INTDIR)/allpaths.obj \ + $(INTDIR)/portalmem.obj \ + $(INTDIR)/bit.obj \ + $(INTDIR)/readfuncs.obj \ + $(INTDIR)/nodes.obj \ + $(INTDIR)/chunk.obj \ + $(INTDIR)/datum.obj \ + $(INTDIR)/analyze.obj \ + $(INTDIR)/oidint4.obj \ + $(INTDIR)/hasht.obj \ + $(INTDIR)/numutils.obj \ + $(INTDIR)/pqcomm.obj \ + $(INTDIR)/indxpath.obj \ + $(INTDIR)/lispsort.obj \ + $(INTDIR)/arrayfuncs.obj \ + $(INTDIR)/copyfuncs.obj \ + $(INTDIR)/planmain.obj \ + $(INTDIR)/makefuncs.obj \ + $(INTDIR)/lsyscache.obj \ + $(INTDIR)/multi.obj \ + $(INTDIR)/freelist.obj \ + $(INTDIR)/aclchk.obj \ + $(INTDIR)/initsplan.obj \ + $(INTDIR)/prune.obj \ + $(INTDIR)/sinvaladt.obj \ + $(INTDIR)/orindxpath.obj \ + $(INTDIR)/joinrels.obj \ + $(INTDIR)/rewriteManip.obj \ + $(INTDIR)/itemptr.obj \ + $(INTDIR)/s_lock.obj \ + $(INTDIR)/miscinit.obj \ + $(INTDIR)/postgres.obj \ + $(INTDIR)/parser.obj \ + $(INTDIR)/tlist.obj \ + $(INTDIR)/dt.obj \ + $(INTDIR)/sinval.obj \ + $(INTDIR)/pqpacket.obj \ + $(INTDIR)/assert.obj \ + $(INTDIR)/utility.obj \ + $(INTDIR)/bool.obj \ + $(INTDIR)/md.obj \ + $(INTDIR)/pqsignal.obj \ + $(INTDIR)/globals.obj \ + $(INTDIR)/postmaster.obj \ + $(INTDIR)/joinpath.obj \ + $(INTDIR)/fastpath.obj \ + $(INTDIR)/archive.obj \ + $(INTDIR)/fcache.obj \ + $(INTDIR)/mm.obj \ + $(INTDIR)/createplan.obj \ + $(INTDIR)/read.obj \ + $(INTDIR)/stringinfo.obj \ + $(INTDIR)/hashfn.obj \ + $(INTDIR)/regproc.obj \ + $(INTDIR)/main.obj \ + $(INTDIR)/enbl.obj \ + $(INTDIR)/prepunion.obj \ + $(INTDIR)/prepqual.obj \ + $(INTDIR)/planner.obj \ + $(INTDIR)/clausesel.obj \ + $(INTDIR)/portal.obj \ + $(INTDIR)/spin.obj \ + $(INTDIR)/lock.obj \ + $(INTDIR)/single.obj \ + $(INTDIR)/io.obj \ + $(INTDIR)/"geo-ops.obj" \ + $(INTDIR)/dest.obj \ + $(INTDIR)/rewriteDefine.obj \ + $(INTDIR)/keywords.obj \ + $(INTDIR)/hashutils.obj \ + $(INTDIR)/format.obj \ + $(INTDIR)/scanner.obj \ + $(INTDIR)/aset.obj \ + $(INTDIR)/"geo-selfuncs.obj" \ + $(INTDIR)/float.obj \ + $(INTDIR)/pquery.obj \ + $(INTDIR)/"be-dumpdata.obj" \ + $(INTDIR)/filename.obj \ + $(INTDIR)/misc.obj \ + $(INTDIR)/pathnode.obj \ + $(INTDIR)/inval.obj \ + $(INTDIR)/smgrtype.obj \ + $(INTDIR)/joininfo.obj \ + $(INTDIR)/lselect.obj \ + $(INTDIR)/rel.obj \ + $(INTDIR)/internal.obj \ + $(INTDIR)/preptlist.obj \ + $(INTDIR)/joinutils.obj \ + $(INTDIR)/shmqueue.obj \ + $(INTDIR)/date.obj \ + $(INTDIR)/locks.obj \ + $(INTDIR)/not_in.obj \ + $(INTDIR)/char.obj \ + $(INTDIR)/rewriteHandler.obj \ + $(INTDIR)/sets.obj \ + $(INTDIR)/palloc.obj \ + $(INTDIR)/indexnode.obj \ + $(INTDIR)/equalfuncs.obj \ + $(INTDIR)/oidint2.obj \ + $(INTDIR)/list.obj \ + $(INTDIR)/plancat.obj \ + $(INTDIR)/fmgr.obj \ + $(INTDIR)/fmgrtab.obj \ + $(INTDIR)/dllist.obj \ + $(INTDIR)/nodeGroup.obj \ + $(INTDIR)/localbuf.obj \ + $(INTDIR)/cluster.obj \ + $(INTDIR)/ipc.obj \ + $(INTDIR)/nt.obj \ + $(INTDIR)/getopt.obj \ + $(INTDIR)/bootscanner.obj \ + $(INTDIR)/scan.obj \ + $(INTDIR)/bootparse.obj \ + $(INTDIR)/gram.obj \ + $(INTDIR)/findbe.obj \ + $(INTDIR)/regerror.obj \ + $(INTDIR)/regfree.obj \ + $(INTDIR)/regcomp.obj \ + $(INTDIR)/regexec.obj \ + $(INTDIR)/nbtsort.obj \ + $(INTDIR)/buf_init.obj \ + $(INTDIR)/dfmgr.obj + +$(OUTDIR)/pglite.exe : 
$(OUTDIR) $(DEF_FILE) $(LINK32_OBJS) + $(LINK32) @<< + $(LINK32_FLAGS) $(LINK32_OBJS) +<< + +!ELSEIF "$(CFG)" == "Win32 Debug" + +# PROP BASE Use_MFC 0 +# PROP BASE Use_Debug_Libraries 1 +# PROP BASE Output_Dir "WinDebug" +# PROP BASE Intermediate_Dir "WinDebug" +# PROP Use_MFC 0 +# PROP Use_Debug_Libraries 1 +# PROP Output_Dir "d:\local\forrest\pglite" +# PROP Intermediate_Dir "d:\local\forrest\pglite" +OUTDIR=d:\local\forrest\pglite +INTDIR=d:\local\forrest\pglite + +ALL : $(OUTDIR)/pglite.exe $(OUTDIR)/pglite.bsc + +$(OUTDIR) : + if not exist $(OUTDIR)/nul mkdir $(OUTDIR) + +# ADD BASE CPP /nologo /W3 /GX /Zi /YX /Od /D "WIN32" /D "_DEBUG" /D "_CONSOLE" /FR /c +# ADD CPP /nologo /G5 /W3 /GX /Zi /YX /Od /I "g:\pglite\src\backend" /I "g:\pglite\src\backend\port\win32" /I "g:\pglite\src\backend\obj" /I "g:\pglite\src\backend\include" /I "g:\pglite\src\backend\port/win32/regex" /D "_DEBUG" /D "WIN32" /D "_CONSOLE" /D "__STDC__" /D "_POSIX_" /D "_NTSDK" /D "NO_SECURITY" /D "NEED_RUSAGE" /FR /c +CPP_PROJ=/nologo /G5 /W3 /GX /Zi /YX /Od /I "g:\pglite\src\backend" /I\ + "g:\pglite\src\backend\port\win32" /I "g:\pglite\src\backend\obj" /I\ + "g:\pglite\src\backend\include" /I "g:\pglite\src\backend\port/win32/regex" /D\ + "_DEBUG" /D "WIN32" /D "_CONSOLE" /D "__STDC__" /D "_POSIX_" /D "_NTSDK" /D\ + "NO_SECURITY" /D "NEED_RUSAGE" /FR$(INTDIR)/ /Fp$(OUTDIR)/"pglite.pch"\ + /Fo$(INTDIR)/ /Fd$(OUTDIR)/"pglite.pdb" /c +CPP_OBJS=d:\local\forrest\pglite/ +# ADD BASE RSC /l 0x409 /d "_DEBUG" +# ADD RSC /l 0x409 /d "_DEBUG" +BSC32=bscmake.exe +# ADD BASE BSC32 /nologo +# ADD BSC32 /nologo +BSC32_FLAGS=/nologo /o$(OUTDIR)/"pglite.bsc" +BSC32_SBRS= \ + $(INTDIR)/scankey.sbr \ + $(INTDIR)/printtup.sbr \ + $(INTDIR)/indexvalid.sbr \ + $(INTDIR)/heaptuple.sbr \ + $(INTDIR)/tupdesc.sbr \ + $(INTDIR)/indextuple.sbr \ + $(INTDIR)/heapvalid.sbr \ + $(INTDIR)/hashinsert.sbr \ + $(INTDIR)/hashstrat.sbr \ + $(INTDIR)/hashutil.sbr \ + $(INTDIR)/hashpage.sbr \ + $(INTDIR)/hashsearch.sbr \ + $(INTDIR)/hashscan.sbr \ + $(INTDIR)/hashfunc.sbr \ + $(INTDIR)/hash.sbr \ + $(INTDIR)/hashovfl.sbr \ + $(INTDIR)/bootstrap.sbr \ + $(INTDIR)/genam.sbr \ + $(INTDIR)/creatinh.sbr \ + $(INTDIR)/nodeSeqscan.sbr \ + $(INTDIR)/nodeUnique.sbr \ + $(INTDIR)/rename.sbr \ + $(INTDIR)/transsup.sbr \ + $(INTDIR)/transam.sbr \ + $(INTDIR)/define.sbr \ + $(INTDIR)/execMain.sbr \ + $(INTDIR)/xid.sbr \ + $(INTDIR)/nodeAgg.sbr \ + $(INTDIR)/nbtpage.sbr \ + $(INTDIR)/execScan.sbr \ + $(INTDIR)/nbtree.sbr \ + $(INTDIR)/rtscan.sbr \ + $(INTDIR)/indexam.sbr \ + $(INTDIR)/execQual.sbr \ + $(INTDIR)/nodeHash.sbr \ + $(INTDIR)/nbtscan.sbr \ + $(INTDIR)/hio.sbr \ + $(INTDIR)/pg_proc.sbr \ + $(INTDIR)/stats.sbr \ + $(INTDIR)/nodeMaterial.sbr \ + $(INTDIR)/varsup.sbr \ + $(INTDIR)/copy.sbr \ + $(INTDIR)/rtproc.sbr \ + $(INTDIR)/functions.sbr \ + $(INTDIR)/nodeHashjoin.sbr \ + $(INTDIR)/catalog.sbr \ + $(INTDIR)/nbtinsert.sbr \ + $(INTDIR)/rtree.sbr \ + $(INTDIR)/version.sbr \ + $(INTDIR)/async.sbr \ + $(INTDIR)/nbtutils.sbr \ + $(INTDIR)/vacuum.sbr \ + $(INTDIR)/rtstrat.sbr \ + $(INTDIR)/execFlatten.sbr \ + $(INTDIR)/nodeTee.sbr \ + $(INTDIR)/nodeIndexscan.sbr \ + $(INTDIR)/remove.sbr \ + $(INTDIR)/indexing.sbr \ + $(INTDIR)/command.sbr \ + $(INTDIR)/nbtsearch.sbr \ + $(INTDIR)/heapam.sbr \ + $(INTDIR)/nodeSort.sbr \ + $(INTDIR)/execProcnode.sbr \ + $(INTDIR)/nodeResult.sbr \ + $(INTDIR)/index.sbr \ + $(INTDIR)/xact.sbr \ + $(INTDIR)/nodeMergejoin.sbr \ + $(INTDIR)/pg_operator.sbr \ + $(INTDIR)/execJunk.sbr \ + $(INTDIR)/pg_aggregate.sbr \ + 
$(INTDIR)/istrat.sbr \ + $(INTDIR)/execUtils.sbr \ + $(INTDIR)/purge.sbr \ + $(INTDIR)/heap.sbr \ + $(INTDIR)/nbtstrat.sbr \ + $(INTDIR)/execAmi.sbr \ + $(INTDIR)/execTuples.sbr \ + $(INTDIR)/pg_type.sbr \ + $(INTDIR)/view.sbr \ + $(INTDIR)/nodeAppend.sbr \ + $(INTDIR)/defind.sbr \ + $(INTDIR)/nodeNestloop.sbr \ + $(INTDIR)/nbtcompare.sbr \ + $(INTDIR)/rtget.sbr \ + $(INTDIR)/catalog_utils.sbr \ + $(INTDIR)/setrefs.sbr \ + $(INTDIR)/mergeutils.sbr \ + $(INTDIR)/oset.sbr \ + $(INTDIR)/arrayutils.sbr \ + $(INTDIR)/nodeFuncs.sbr \ + $(INTDIR)/rewriteSupport.sbr \ + $(INTDIR)/bufpage.sbr \ + $(INTDIR)/fd.sbr \ + $(INTDIR)/clauseinfo.sbr \ + $(INTDIR)/nabstime.sbr \ + $(INTDIR)/mcxt.sbr \ + $(INTDIR)/ipci.sbr \ + $(INTDIR)/qsort.sbr \ + $(INTDIR)/outfuncs.sbr \ + $(INTDIR)/tqual.sbr \ + $(INTDIR)/keys.sbr \ + $(INTDIR)/clauses.sbr \ + $(INTDIR)/print.sbr \ + $(INTDIR)/postinit.sbr \ + $(INTDIR)/oidchar16.sbr \ + $(INTDIR)/name.sbr \ + $(INTDIR)/tid.sbr \ + $(INTDIR)/"be-fsstubs.sbr" \ + $(INTDIR)/elog.sbr \ + $(INTDIR)/bufmgr.sbr \ + $(INTDIR)/portalbuf.sbr \ + $(INTDIR)/psort.sbr \ + $(INTDIR)/syscache.sbr \ + $(INTDIR)/exc.sbr \ + $(INTDIR)/selfuncs.sbr \ + $(INTDIR)/var.sbr \ + $(INTDIR)/oid.sbr \ + $(INTDIR)/"be-pqexec.sbr" \ + $(INTDIR)/ordering.sbr \ + $(INTDIR)/inv_api.sbr \ + $(INTDIR)/buf_table.sbr \ + $(INTDIR)/acl.sbr \ + $(INTDIR)/costsize.sbr \ + $(INTDIR)/catcache.sbr \ + $(INTDIR)/rewriteRemove.sbr \ + $(INTDIR)/parse_query.sbr \ + $(INTDIR)/excabort.sbr \ + $(INTDIR)/lmgr.sbr \ + $(INTDIR)/excid.sbr \ + $(INTDIR)/int.sbr \ + $(INTDIR)/auth.sbr \ + $(INTDIR)/regexp.sbr \ + $(INTDIR)/proc.sbr \ + $(INTDIR)/dbcommands.sbr \ + $(INTDIR)/dynahash.sbr \ + $(INTDIR)/shmem.sbr \ + $(INTDIR)/relnode.sbr \ + $(INTDIR)/fstack.sbr \ + $(INTDIR)/smgr.sbr \ + $(INTDIR)/magic.sbr \ + $(INTDIR)/relcache.sbr \ + $(INTDIR)/varlena.sbr \ + $(INTDIR)/allpaths.sbr \ + $(INTDIR)/portalmem.sbr \ + $(INTDIR)/bit.sbr \ + $(INTDIR)/readfuncs.sbr \ + $(INTDIR)/nodes.sbr \ + $(INTDIR)/chunk.sbr \ + $(INTDIR)/datum.sbr \ + $(INTDIR)/analyze.sbr \ + $(INTDIR)/oidint4.sbr \ + $(INTDIR)/hasht.sbr \ + $(INTDIR)/numutils.sbr \ + $(INTDIR)/pqcomm.sbr \ + $(INTDIR)/indxpath.sbr \ + $(INTDIR)/lispsort.sbr \ + $(INTDIR)/arrayfuncs.sbr \ + $(INTDIR)/copyfuncs.sbr \ + $(INTDIR)/planmain.sbr \ + $(INTDIR)/makefuncs.sbr \ + $(INTDIR)/lsyscache.sbr \ + $(INTDIR)/multi.sbr \ + $(INTDIR)/freelist.sbr \ + $(INTDIR)/aclchk.sbr \ + $(INTDIR)/initsplan.sbr \ + $(INTDIR)/prune.sbr \ + $(INTDIR)/sinvaladt.sbr \ + $(INTDIR)/orindxpath.sbr \ + $(INTDIR)/joinrels.sbr \ + $(INTDIR)/rewriteManip.sbr \ + $(INTDIR)/itemptr.sbr \ + $(INTDIR)/s_lock.sbr \ + $(INTDIR)/miscinit.sbr \ + $(INTDIR)/postgres.sbr \ + $(INTDIR)/parser.sbr \ + $(INTDIR)/tlist.sbr \ + $(INTDIR)/dt.sbr \ + $(INTDIR)/sinval.sbr \ + $(INTDIR)/pqpacket.sbr \ + $(INTDIR)/assert.sbr \ + $(INTDIR)/utility.sbr \ + $(INTDIR)/bool.sbr \ + $(INTDIR)/md.sbr \ + $(INTDIR)/pqsignal.sbr \ + $(INTDIR)/globals.sbr \ + $(INTDIR)/postmaster.sbr \ + $(INTDIR)/joinpath.sbr \ + $(INTDIR)/fastpath.sbr \ + $(INTDIR)/archive.sbr \ + $(INTDIR)/fcache.sbr \ + $(INTDIR)/mm.sbr \ + $(INTDIR)/createplan.sbr \ + $(INTDIR)/read.sbr \ + $(INTDIR)/stringinfo.sbr \ + $(INTDIR)/hashfn.sbr \ + $(INTDIR)/regproc.sbr \ + $(INTDIR)/main.sbr \ + $(INTDIR)/enbl.sbr \ + $(INTDIR)/prepunion.sbr \ + $(INTDIR)/prepqual.sbr \ + $(INTDIR)/planner.sbr \ + $(INTDIR)/clausesel.sbr \ + $(INTDIR)/portal.sbr \ + $(INTDIR)/spin.sbr \ + $(INTDIR)/lock.sbr \ + $(INTDIR)/single.sbr \ + $(INTDIR)/io.sbr \ + 
$(INTDIR)/"geo-ops.sbr" \ + $(INTDIR)/dest.sbr \ + $(INTDIR)/rewriteDefine.sbr \ + $(INTDIR)/keywords.sbr \ + $(INTDIR)/hashutils.sbr \ + $(INTDIR)/format.sbr \ + $(INTDIR)/scanner.sbr \ + $(INTDIR)/aset.sbr \ + $(INTDIR)/"geo-selfuncs.sbr" \ + $(INTDIR)/float.sbr \ + $(INTDIR)/pquery.sbr \ + $(INTDIR)/"be-dumpdata.sbr" \ + $(INTDIR)/filename.sbr \ + $(INTDIR)/misc.sbr \ + $(INTDIR)/pathnode.sbr \ + $(INTDIR)/inval.sbr \ + $(INTDIR)/smgrtype.sbr \ + $(INTDIR)/joininfo.sbr \ + $(INTDIR)/lselect.sbr \ + $(INTDIR)/rel.sbr \ + $(INTDIR)/internal.sbr \ + $(INTDIR)/preptlist.sbr \ + $(INTDIR)/joinutils.sbr \ + $(INTDIR)/shmqueue.sbr \ + $(INTDIR)/date.sbr \ + $(INTDIR)/locks.sbr \ + $(INTDIR)/not_in.sbr \ + $(INTDIR)/char.sbr \ + $(INTDIR)/rewriteHandler.sbr \ + $(INTDIR)/sets.sbr \ + $(INTDIR)/palloc.sbr \ + $(INTDIR)/indexnode.sbr \ + $(INTDIR)/equalfuncs.sbr \ + $(INTDIR)/oidint2.sbr \ + $(INTDIR)/list.sbr \ + $(INTDIR)/plancat.sbr \ + $(INTDIR)/fmgr.sbr \ + $(INTDIR)/fmgrtab.sbr \ + $(INTDIR)/dllist.sbr \ + $(INTDIR)/nodeGroup.sbr \ + $(INTDIR)/localbuf.sbr \ + $(INTDIR)/cluster.sbr \ + $(INTDIR)/ipc.sbr \ + $(INTDIR)/nt.sbr \ + $(INTDIR)/getopt.sbr \ + $(INTDIR)/bootscanner.sbr \ + $(INTDIR)/scan.sbr \ + $(INTDIR)/bootparse.sbr \ + $(INTDIR)/gram.sbr \ + $(INTDIR)/findbe.sbr \ + $(INTDIR)/regerror.sbr \ + $(INTDIR)/regfree.sbr \ + $(INTDIR)/regcomp.sbr \ + $(INTDIR)/regexec.sbr \ + $(INTDIR)/nbtsort.sbr \ + $(INTDIR)/buf_init.sbr \ + $(INTDIR)/dfmgr.sbr + +$(OUTDIR)/pglite.bsc : $(OUTDIR) $(BSC32_SBRS) + $(BSC32) @<< + $(BSC32_FLAGS) $(BSC32_SBRS) +<< + +LINK32=link.exe +# ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /NOLOGO /SUBSYSTEM:console /DEBUG /MACHINE:I386 +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib wsock32.lib /NOLOGO /SUBSYSTEM:console /DEBUG /MACHINE:I386 +# SUBTRACT LINK32 /PDB:none +LINK32_FLAGS=kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib\ + advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib\ + odbccp32.lib wsock32.lib /NOLOGO /SUBSYSTEM:console /INCREMENTAL:yes\ + /PDB:$(OUTDIR)/"pglite.pdb" /DEBUG /MACHINE:I386 /OUT:$(OUTDIR)/"pglite.exe" +DEF_FILE= +LINK32_OBJS= \ + $(INTDIR)/scankey.obj \ + $(INTDIR)/printtup.obj \ + $(INTDIR)/indexvalid.obj \ + $(INTDIR)/heaptuple.obj \ + $(INTDIR)/tupdesc.obj \ + $(INTDIR)/indextuple.obj \ + $(INTDIR)/heapvalid.obj \ + $(INTDIR)/hashinsert.obj \ + $(INTDIR)/hashstrat.obj \ + $(INTDIR)/hashutil.obj \ + $(INTDIR)/hashpage.obj \ + $(INTDIR)/hashsearch.obj \ + $(INTDIR)/hashscan.obj \ + $(INTDIR)/hashfunc.obj \ + $(INTDIR)/hash.obj \ + $(INTDIR)/hashovfl.obj \ + $(INTDIR)/bootstrap.obj \ + $(INTDIR)/genam.obj \ + $(INTDIR)/creatinh.obj \ + $(INTDIR)/nodeSeqscan.obj \ + $(INTDIR)/nodeUnique.obj \ + $(INTDIR)/rename.obj \ + $(INTDIR)/transsup.obj \ + $(INTDIR)/transam.obj \ + $(INTDIR)/define.obj \ + $(INTDIR)/execMain.obj \ + $(INTDIR)/xid.obj \ + $(INTDIR)/nodeAgg.obj \ + $(INTDIR)/nbtpage.obj \ + $(INTDIR)/execScan.obj \ + $(INTDIR)/nbtree.obj \ + $(INTDIR)/rtscan.obj \ + $(INTDIR)/indexam.obj \ + $(INTDIR)/execQual.obj \ + $(INTDIR)/nodeHash.obj \ + $(INTDIR)/nbtscan.obj \ + $(INTDIR)/hio.obj \ + $(INTDIR)/pg_proc.obj \ + $(INTDIR)/stats.obj \ + $(INTDIR)/nodeMaterial.obj \ + $(INTDIR)/varsup.obj \ + $(INTDIR)/copy.obj \ + $(INTDIR)/rtproc.obj \ + $(INTDIR)/functions.obj \ + 
$(INTDIR)/nodeHashjoin.obj \ + $(INTDIR)/catalog.obj \ + $(INTDIR)/nbtinsert.obj \ + $(INTDIR)/rtree.obj \ + $(INTDIR)/version.obj \ + $(INTDIR)/async.obj \ + $(INTDIR)/nbtutils.obj \ + $(INTDIR)/vacuum.obj \ + $(INTDIR)/rtstrat.obj \ + $(INTDIR)/execFlatten.obj \ + $(INTDIR)/nodeTee.obj \ + $(INTDIR)/nodeIndexscan.obj \ + $(INTDIR)/remove.obj \ + $(INTDIR)/indexing.obj \ + $(INTDIR)/command.obj \ + $(INTDIR)/nbtsearch.obj \ + $(INTDIR)/heapam.obj \ + $(INTDIR)/nodeSort.obj \ + $(INTDIR)/execProcnode.obj \ + $(INTDIR)/nodeResult.obj \ + $(INTDIR)/index.obj \ + $(INTDIR)/xact.obj \ + $(INTDIR)/nodeMergejoin.obj \ + $(INTDIR)/pg_operator.obj \ + $(INTDIR)/execJunk.obj \ + $(INTDIR)/pg_aggregate.obj \ + $(INTDIR)/istrat.obj \ + $(INTDIR)/execUtils.obj \ + $(INTDIR)/purge.obj \ + $(INTDIR)/heap.obj \ + $(INTDIR)/nbtstrat.obj \ + $(INTDIR)/execAmi.obj \ + $(INTDIR)/execTuples.obj \ + $(INTDIR)/pg_type.obj \ + $(INTDIR)/view.obj \ + $(INTDIR)/nodeAppend.obj \ + $(INTDIR)/defind.obj \ + $(INTDIR)/nodeNestloop.obj \ + $(INTDIR)/nbtcompare.obj \ + $(INTDIR)/rtget.obj \ + $(INTDIR)/catalog_utils.obj \ + $(INTDIR)/setrefs.obj \ + $(INTDIR)/mergeutils.obj \ + $(INTDIR)/oset.obj \ + $(INTDIR)/arrayutils.obj \ + $(INTDIR)/nodeFuncs.obj \ + $(INTDIR)/rewriteSupport.obj \ + $(INTDIR)/bufpage.obj \ + $(INTDIR)/fd.obj \ + $(INTDIR)/clauseinfo.obj \ + $(INTDIR)/nabstime.obj \ + $(INTDIR)/mcxt.obj \ + $(INTDIR)/ipci.obj \ + $(INTDIR)/qsort.obj \ + $(INTDIR)/outfuncs.obj \ + $(INTDIR)/tqual.obj \ + $(INTDIR)/keys.obj \ + $(INTDIR)/clauses.obj \ + $(INTDIR)/print.obj \ + $(INTDIR)/postinit.obj \ + $(INTDIR)/oidchar16.obj \ + $(INTDIR)/name.obj \ + $(INTDIR)/tid.obj \ + $(INTDIR)/"be-fsstubs.obj" \ + $(INTDIR)/elog.obj \ + $(INTDIR)/bufmgr.obj \ + $(INTDIR)/portalbuf.obj \ + $(INTDIR)/psort.obj \ + $(INTDIR)/syscache.obj \ + $(INTDIR)/exc.obj \ + $(INTDIR)/selfuncs.obj \ + $(INTDIR)/var.obj \ + $(INTDIR)/oid.obj \ + $(INTDIR)/"be-pqexec.obj" \ + $(INTDIR)/ordering.obj \ + $(INTDIR)/inv_api.obj \ + $(INTDIR)/buf_table.obj \ + $(INTDIR)/acl.obj \ + $(INTDIR)/costsize.obj \ + $(INTDIR)/catcache.obj \ + $(INTDIR)/rewriteRemove.obj \ + $(INTDIR)/parse_query.obj \ + $(INTDIR)/excabort.obj \ + $(INTDIR)/lmgr.obj \ + $(INTDIR)/excid.obj \ + $(INTDIR)/int.obj \ + $(INTDIR)/auth.obj \ + $(INTDIR)/regexp.obj \ + $(INTDIR)/proc.obj \ + $(INTDIR)/dbcommands.obj \ + $(INTDIR)/dynahash.obj \ + $(INTDIR)/shmem.obj \ + $(INTDIR)/relnode.obj \ + $(INTDIR)/fstack.obj \ + $(INTDIR)/smgr.obj \ + $(INTDIR)/magic.obj \ + $(INTDIR)/relcache.obj \ + $(INTDIR)/varlena.obj \ + $(INTDIR)/allpaths.obj \ + $(INTDIR)/portalmem.obj \ + $(INTDIR)/bit.obj \ + $(INTDIR)/readfuncs.obj \ + $(INTDIR)/nodes.obj \ + $(INTDIR)/chunk.obj \ + $(INTDIR)/datum.obj \ + $(INTDIR)/analyze.obj \ + $(INTDIR)/oidint4.obj \ + $(INTDIR)/hasht.obj \ + $(INTDIR)/numutils.obj \ + $(INTDIR)/pqcomm.obj \ + $(INTDIR)/indxpath.obj \ + $(INTDIR)/lispsort.obj \ + $(INTDIR)/arrayfuncs.obj \ + $(INTDIR)/copyfuncs.obj \ + $(INTDIR)/planmain.obj \ + $(INTDIR)/makefuncs.obj \ + $(INTDIR)/lsyscache.obj \ + $(INTDIR)/multi.obj \ + $(INTDIR)/freelist.obj \ + $(INTDIR)/aclchk.obj \ + $(INTDIR)/initsplan.obj \ + $(INTDIR)/prune.obj \ + $(INTDIR)/sinvaladt.obj \ + $(INTDIR)/orindxpath.obj \ + $(INTDIR)/joinrels.obj \ + $(INTDIR)/rewriteManip.obj \ + $(INTDIR)/itemptr.obj \ + $(INTDIR)/s_lock.obj \ + $(INTDIR)/miscinit.obj \ + $(INTDIR)/postgres.obj \ + $(INTDIR)/parser.obj \ + $(INTDIR)/tlist.obj \ + $(INTDIR)/dt.obj \ + $(INTDIR)/sinval.obj \ + $(INTDIR)/pqpacket.obj \ + 
$(INTDIR)/assert.obj \ + $(INTDIR)/utility.obj \ + $(INTDIR)/bool.obj \ + $(INTDIR)/md.obj \ + $(INTDIR)/pqsignal.obj \ + $(INTDIR)/globals.obj \ + $(INTDIR)/postmaster.obj \ + $(INTDIR)/joinpath.obj \ + $(INTDIR)/fastpath.obj \ + $(INTDIR)/archive.obj \ + $(INTDIR)/fcache.obj \ + $(INTDIR)/mm.obj \ + $(INTDIR)/createplan.obj \ + $(INTDIR)/read.obj \ + $(INTDIR)/stringinfo.obj \ + $(INTDIR)/hashfn.obj \ + $(INTDIR)/regproc.obj \ + $(INTDIR)/main.obj \ + $(INTDIR)/enbl.obj \ + $(INTDIR)/prepunion.obj \ + $(INTDIR)/prepqual.obj \ + $(INTDIR)/planner.obj \ + $(INTDIR)/clausesel.obj \ + $(INTDIR)/portal.obj \ + $(INTDIR)/spin.obj \ + $(INTDIR)/lock.obj \ + $(INTDIR)/single.obj \ + $(INTDIR)/io.obj \ + $(INTDIR)/"geo-ops.obj" \ + $(INTDIR)/dest.obj \ + $(INTDIR)/rewriteDefine.obj \ + $(INTDIR)/keywords.obj \ + $(INTDIR)/hashutils.obj \ + $(INTDIR)/format.obj \ + $(INTDIR)/scanner.obj \ + $(INTDIR)/aset.obj \ + $(INTDIR)/"geo-selfuncs.obj" \ + $(INTDIR)/float.obj \ + $(INTDIR)/pquery.obj \ + $(INTDIR)/"be-dumpdata.obj" \ + $(INTDIR)/filename.obj \ + $(INTDIR)/misc.obj \ + $(INTDIR)/pathnode.obj \ + $(INTDIR)/inval.obj \ + $(INTDIR)/smgrtype.obj \ + $(INTDIR)/joininfo.obj \ + $(INTDIR)/lselect.obj \ + $(INTDIR)/rel.obj \ + $(INTDIR)/internal.obj \ + $(INTDIR)/preptlist.obj \ + $(INTDIR)/joinutils.obj \ + $(INTDIR)/shmqueue.obj \ + $(INTDIR)/date.obj \ + $(INTDIR)/locks.obj \ + $(INTDIR)/not_in.obj \ + $(INTDIR)/char.obj \ + $(INTDIR)/rewriteHandler.obj \ + $(INTDIR)/sets.obj \ + $(INTDIR)/palloc.obj \ + $(INTDIR)/indexnode.obj \ + $(INTDIR)/equalfuncs.obj \ + $(INTDIR)/oidint2.obj \ + $(INTDIR)/list.obj \ + $(INTDIR)/plancat.obj \ + $(INTDIR)/fmgr.obj \ + $(INTDIR)/fmgrtab.obj \ + $(INTDIR)/dllist.obj \ + $(INTDIR)/nodeGroup.obj \ + $(INTDIR)/localbuf.obj \ + $(INTDIR)/cluster.obj \ + $(INTDIR)/ipc.obj \ + $(INTDIR)/nt.obj \ + $(INTDIR)/getopt.obj \ + $(INTDIR)/bootscanner.obj \ + $(INTDIR)/scan.obj \ + $(INTDIR)/bootparse.obj \ + $(INTDIR)/gram.obj \ + $(INTDIR)/findbe.obj \ + $(INTDIR)/regerror.obj \ + $(INTDIR)/regfree.obj \ + $(INTDIR)/regcomp.obj \ + $(INTDIR)/regexec.obj \ + $(INTDIR)/nbtsort.obj \ + $(INTDIR)/buf_init.obj \ + $(INTDIR)/dfmgr.obj + +$(OUTDIR)/pglite.exe : $(OUTDIR) $(DEF_FILE) $(LINK32_OBJS) + $(LINK32) @<< + $(LINK32_FLAGS) $(LINK32_OBJS) +<< + +!ENDIF + +.c{$(CPP_OBJS)}.obj: + $(CPP) $(CPP_PROJ) $< + +.cpp{$(CPP_OBJS)}.obj: + $(CPP) $(CPP_PROJ) $< + +.cxx{$(CPP_OBJS)}.obj: + $(CPP) $(CPP_PROJ) $< + +################################################################################ +# Begin Group "Source Files" + +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\common\scankey.c + +$(INTDIR)/scankey.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\common\printtup.c + +$(INTDIR)/printtup.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\common\indexvalid.c + +$(INTDIR)/indexvalid.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\common\heaptuple.c + +$(INTDIR)/heaptuple.obj : 
$(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\common\tupdesc.c + +$(INTDIR)/tupdesc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\common\indextuple.c + +$(INTDIR)/indextuple.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\common\heapvalid.c + +$(INTDIR)/heapvalid.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\hash\hashinsert.c + +$(INTDIR)/hashinsert.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\hash\hashstrat.c + +$(INTDIR)/hashstrat.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\hash\hashutil.c + +$(INTDIR)/hashutil.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\hash\hashpage.c + +$(INTDIR)/hashpage.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\hash\hashsearch.c + +$(INTDIR)/hashsearch.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\hash\hashscan.c + +$(INTDIR)/hashscan.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\hash\hashfunc.c + +$(INTDIR)/hashfunc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\hash\hash.c + +$(INTDIR)/hash.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\hash\hashovfl.c + +$(INTDIR)/hashovfl.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\bootstrap\bootstrap.c + +$(INTDIR)/bootstrap.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + 
+SOURCE=G:\pglite\src\backend\access\index\genam.c + +$(INTDIR)/genam.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\creatinh.c + +$(INTDIR)/creatinh.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeSeqscan.c + +$(INTDIR)/nodeSeqscan.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeUnique.c + +$(INTDIR)/nodeUnique.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\rename.c + +$(INTDIR)/rename.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\transam\transsup.c + +$(INTDIR)/transsup.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\transam\transam.c + +$(INTDIR)/transam.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\define.c + +$(INTDIR)/define.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\execMain.c + +$(INTDIR)/execMain.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\transam\xid.c + +$(INTDIR)/xid.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeAgg.c + +$(INTDIR)/nodeAgg.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\nbtree\nbtpage.c + +$(INTDIR)/nbtpage.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\execScan.c + +$(INTDIR)/execScan.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\nbtree\nbtree.c + +$(INTDIR)/nbtree.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ 
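+#
+# Each "Begin Source File" block in this group follows the same pattern that
+# Developer Studio exports: SOURCE names one backend .c file by absolute path,
+# and an explicit rule compiles it into $(INTDIR) with the configuration's
+# shared $(CPP_PROJ) flags.  Sketch of the pattern (example.c and some\dir are
+# illustrative names only, not files of this project):
+#
+#     SOURCE=G:\pglite\src\backend\some\dir\example.c
+#
+#     $(INTDIR)/example.obj : $(SOURCE) $(INTDIR)
+#        $(CPP) $(CPP_PROJ) $(SOURCE)
+#
+# Because these explicit rules carry their own commands, the generic
+# .c{$(CPP_OBJS)}.obj inference rule defined just after !ENDIF above acts only
+# as a fallback.  The link step likewise hands the long $(LINK32_OBJS) list to
+# link.exe through an NMAKE inline response file (the "@<< ... <<" construct).
+#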
+# Begin Source File + +SOURCE=G:\pglite\src\backend\access\rtree\rtscan.c + +$(INTDIR)/rtscan.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\index\indexam.c + +$(INTDIR)/indexam.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\execQual.c + +$(INTDIR)/execQual.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeHash.c + +$(INTDIR)/nodeHash.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\nbtree\nbtscan.c + +$(INTDIR)/nbtscan.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\heap\hio.c + +$(INTDIR)/hio.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\catalog\pg_proc.c + +$(INTDIR)/pg_proc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\heap\stats.c + +$(INTDIR)/stats.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeMaterial.c + +$(INTDIR)/nodeMaterial.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\transam\varsup.c + +$(INTDIR)/varsup.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\copy.c + +$(INTDIR)/copy.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\rtree\rtproc.c + +$(INTDIR)/rtproc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\functions.c + +$(INTDIR)/functions.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeHashjoin.c + +$(INTDIR)/nodeHashjoin.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File 
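+#
+# The /FR$(INTDIR)/ switch in $(CPP_PROJ) makes each of these compiles emit a
+# .sbr browse file alongside its .obj, which is why BSC32_SBRS mirrors
+# LINK32_OBJS entry for entry; bscmake.exe then merges those .sbr files into
+# $(OUTDIR)/pglite.bsc, again through an inline response file.
+#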
+################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\catalog\catalog.c + +$(INTDIR)/catalog.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\nbtree\nbtinsert.c + +$(INTDIR)/nbtinsert.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\rtree\rtree.c + +$(INTDIR)/rtree.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\version.c + +$(INTDIR)/version.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\async.c + +$(INTDIR)/async.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\nbtree\nbtutils.c + +$(INTDIR)/nbtutils.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\vacuum.c + +$(INTDIR)/vacuum.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\rtree\rtstrat.c + +$(INTDIR)/rtstrat.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\execFlatten.c + +$(INTDIR)/execFlatten.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeTee.c + +$(INTDIR)/nodeTee.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeIndexscan.c + +$(INTDIR)/nodeIndexscan.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\remove.c + +$(INTDIR)/remove.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\catalog\indexing.c + +$(INTDIR)/indexing.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\command.c + +$(INTDIR)/command.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + 
+# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\nbtree\nbtsearch.c + +$(INTDIR)/nbtsearch.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\heap\heapam.c + +$(INTDIR)/heapam.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeSort.c + +$(INTDIR)/nodeSort.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\execProcnode.c + +$(INTDIR)/execProcnode.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeResult.c + +$(INTDIR)/nodeResult.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\catalog\index.c + +$(INTDIR)/index.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\transam\xact.c + +$(INTDIR)/xact.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeMergejoin.c + +$(INTDIR)/nodeMergejoin.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\catalog\pg_operator.c + +$(INTDIR)/pg_operator.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\execJunk.c + +$(INTDIR)/execJunk.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\catalog\pg_aggregate.c + +$(INTDIR)/pg_aggregate.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\index\istrat.c + +$(INTDIR)/istrat.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\execUtils.c + +$(INTDIR)/execUtils.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\purge.c + +$(INTDIR)/purge.obj : $(SOURCE) 
$(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\catalog\heap.c + +$(INTDIR)/heap.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\nbtree\nbtstrat.c + +$(INTDIR)/nbtstrat.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\execAmi.c + +$(INTDIR)/execAmi.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\execTuples.c + +$(INTDIR)/execTuples.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\catalog\pg_type.c + +$(INTDIR)/pg_type.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\view.c + +$(INTDIR)/view.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeAppend.c + +$(INTDIR)/nodeAppend.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\defind.c + +$(INTDIR)/defind.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeNestloop.c + +$(INTDIR)/nodeNestloop.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\nbtree\nbtcompare.c + +$(INTDIR)/nbtcompare.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\rtree\rtget.c + +$(INTDIR)/rtget.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\parser\catalog_utils.c + +$(INTDIR)/catalog_utils.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\plan\setrefs.c + +$(INTDIR)/setrefs.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\mergeutils.c 
+ +$(INTDIR)/mergeutils.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\mmgr\oset.c + +$(INTDIR)/oset.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\arrayutils.c + +$(INTDIR)/arrayutils.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\nodeFuncs.c + +$(INTDIR)/nodeFuncs.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\rewrite\rewriteSupport.c + +$(INTDIR)/rewriteSupport.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\page\bufpage.c + +$(INTDIR)/bufpage.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\file\fd.c + +$(INTDIR)/fd.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\clauseinfo.c + +$(INTDIR)/clauseinfo.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\nabstime.c + +$(INTDIR)/nabstime.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\mmgr\mcxt.c + +$(INTDIR)/mcxt.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\ipc\ipci.c + +$(INTDIR)/ipci.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\lib\qsort.c + +$(INTDIR)/qsort.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\outfuncs.c + +$(INTDIR)/outfuncs.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\time\tqual.c + +$(INTDIR)/tqual.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + 
+SOURCE=G:\pglite\src\backend\optimizer\util\keys.c + +$(INTDIR)/keys.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\clauses.c + +$(INTDIR)/clauses.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\print.c + +$(INTDIR)/print.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\init\postinit.c + +$(INTDIR)/postinit.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\oidchar16.c + +$(INTDIR)/oidchar16.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\name.c + +$(INTDIR)/name.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\tid.c + +$(INTDIR)/tid.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE="G:\pglite\src\backend\libpq\be-fsstubs.c" + +$(INTDIR)/"be-fsstubs.obj" : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\error\elog.c + +$(INTDIR)/elog.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\buffer\bufmgr.c + +$(INTDIR)/bufmgr.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\libpq\portalbuf.c + +$(INTDIR)/portalbuf.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\sort\psort.c + +$(INTDIR)/psort.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\cache\syscache.c + +$(INTDIR)/syscache.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\error\exc.c + +$(INTDIR)/exc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + 
+SOURCE=G:\pglite\src\backend\utils\adt\selfuncs.c + +$(INTDIR)/selfuncs.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\var.c + +$(INTDIR)/var.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\oid.c + +$(INTDIR)/oid.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE="G:\pglite\src\backend\libpq\be-pqexec.c" + +$(INTDIR)/"be-pqexec.obj" : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\ordering.c + +$(INTDIR)/ordering.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\large_object\inv_api.c + +$(INTDIR)/inv_api.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\buffer\buf_table.c + +$(INTDIR)/buf_table.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\acl.c + +$(INTDIR)/acl.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\costsize.c + +$(INTDIR)/costsize.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\cache\catcache.c + +$(INTDIR)/catcache.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\rewrite\rewriteRemove.c + +$(INTDIR)/rewriteRemove.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\parser\parse_query.c + +$(INTDIR)/parse_query.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\error\excabort.c + +$(INTDIR)/excabort.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\lmgr\lmgr.c + +$(INTDIR)/lmgr.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File 
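+#
+# Source files whose names contain a hyphen (be-fsstubs.c, be-pqexec.c,
+# geo-ops.c) are written with quotes around both the SOURCE= path and the
+# object target, matching the quoted "be-*.obj"/"geo-*.obj" and
+# "be-*.sbr"/"geo-*.sbr" entries in the LINK32_OBJS and BSC32_SBRS lists above.
+#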
+################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\error\excid.c + +$(INTDIR)/excid.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\int.c + +$(INTDIR)/int.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\libpq\auth.c + +$(INTDIR)/auth.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\regexp.c + +$(INTDIR)/regexp.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\lmgr\proc.c + +$(INTDIR)/proc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\parser\dbcommands.c + +$(INTDIR)/dbcommands.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\hash\dynahash.c + +$(INTDIR)/dynahash.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\ipc\shmem.c + +$(INTDIR)/shmem.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\relnode.c + +$(INTDIR)/relnode.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\lib\fstack.c + +$(INTDIR)/fstack.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\smgr\smgr.c + +$(INTDIR)/smgr.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\init\magic.c + +$(INTDIR)/magic.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\cache\relcache.c + +$(INTDIR)/relcache.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\varlena.c + +$(INTDIR)/varlena.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File 
+################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\allpaths.c + +$(INTDIR)/allpaths.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\mmgr\portalmem.c + +$(INTDIR)/portalmem.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\lib\bit.c + +$(INTDIR)/bit.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\readfuncs.c + +$(INTDIR)/readfuncs.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\nodes.c + +$(INTDIR)/nodes.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\chunk.c + +$(INTDIR)/chunk.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\datum.c + +$(INTDIR)/datum.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\parser\analyze.c + +$(INTDIR)/analyze.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\oidint4.c + +$(INTDIR)/oidint4.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\lib\hasht.c + +$(INTDIR)/hasht.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\numutils.c + +$(INTDIR)/numutils.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\libpq\pqcomm.c + +$(INTDIR)/pqcomm.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\indxpath.c + +$(INTDIR)/indxpath.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\lib\lispsort.c + +$(INTDIR)/lispsort.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File 
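+#
+# Every object, whichever backend subdirectory its source lives in (access,
+# catalog, commands, executor, lib, libpq, nodes, optimizer, parser, rewrite,
+# storage, tcop, utils, ...), is written to the single flat $(INTDIR)
+# directory, so the build relies on object base names being unique across the
+# whole source tree.
+#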
+################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\arrayfuncs.c + +$(INTDIR)/arrayfuncs.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\copyfuncs.c + +$(INTDIR)/copyfuncs.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\plan\planmain.c + +$(INTDIR)/planmain.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\makefuncs.c + +$(INTDIR)/makefuncs.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\cache\lsyscache.c + +$(INTDIR)/lsyscache.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\lmgr\multi.c + +$(INTDIR)/multi.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\buffer\freelist.c + +$(INTDIR)/freelist.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\tcop\aclchk.c + +$(INTDIR)/aclchk.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\plan\initsplan.c + +$(INTDIR)/initsplan.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\prune.c + +$(INTDIR)/prune.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\ipc\sinvaladt.c + +$(INTDIR)/sinvaladt.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\orindxpath.c + +$(INTDIR)/orindxpath.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\joinrels.c + +$(INTDIR)/joinrels.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\rewrite\rewriteManip.c + +$(INTDIR)/rewriteManip.obj : $(SOURCE) 
$(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\page\itemptr.c + +$(INTDIR)/itemptr.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\ipc\s_lock.c + +$(INTDIR)/s_lock.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\init\miscinit.c + +$(INTDIR)/miscinit.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\tcop\postgres.c + +$(INTDIR)/postgres.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\parser\parser.c + +$(INTDIR)/parser.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\tlist.c + +$(INTDIR)/tlist.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\dt.c + +$(INTDIR)/dt.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\ipc\sinval.c + +$(INTDIR)/sinval.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\libpq\pqpacket.c + +$(INTDIR)/pqpacket.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\error\assert.c + +$(INTDIR)/assert.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\tcop\utility.c + +$(INTDIR)/utility.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\bool.c + +$(INTDIR)/bool.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\smgr\md.c + +$(INTDIR)/md.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\libpq\pqsignal.c + +$(INTDIR)/pqsignal.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) 
$(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\init\globals.c + +$(INTDIR)/globals.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\postmaster\postmaster.c + +$(INTDIR)/postmaster.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\joinpath.c + +$(INTDIR)/joinpath.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\tcop\fastpath.c + +$(INTDIR)/fastpath.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\prep\archive.c + +$(INTDIR)/archive.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\cache\fcache.c + +$(INTDIR)/fcache.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\smgr\mm.c + +$(INTDIR)/mm.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\plan\createplan.c + +$(INTDIR)/createplan.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\read.c + +$(INTDIR)/read.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\lib\stringinfo.c + +$(INTDIR)/stringinfo.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\hash\hashfn.c + +$(INTDIR)/hashfn.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\regproc.c + +$(INTDIR)/regproc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\main\main.c + +$(INTDIR)/main.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\init\enbl.c + +$(INTDIR)/enbl.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) 
$(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\prep\prepunion.c + +$(INTDIR)/prepunion.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\prep\prepqual.c + +$(INTDIR)/prepqual.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\plan\planner.c + +$(INTDIR)/planner.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\clausesel.c + +$(INTDIR)/clausesel.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\libpq\portal.c + +$(INTDIR)/portal.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\ipc\spin.c + +$(INTDIR)/spin.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\lmgr\lock.c + +$(INTDIR)/lock.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\lmgr\single.c + +$(INTDIR)/single.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\parser\io.c + +$(INTDIR)/io.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE="G:\pglite\src\backend\utils\adt\geo-ops.c" + +$(INTDIR)/"geo-ops.obj" : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\tcop\dest.c + +$(INTDIR)/dest.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\rewrite\rewriteDefine.c + +$(INTDIR)/rewriteDefine.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\parser\keywords.c + +$(INTDIR)/keywords.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\hashutils.c + +$(INTDIR)/hashutils.obj : $(SOURCE) $(INTDIR) + 
$(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\error\format.c + +$(INTDIR)/format.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\parser\scanner.c + +$(INTDIR)/scanner.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\mmgr\aset.c + +$(INTDIR)/aset.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE="G:\pglite\src\backend\utils\adt\geo-selfuncs.c" + +$(INTDIR)/"geo-selfuncs.obj" : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\float.c + +$(INTDIR)/float.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\tcop\pquery.c + +$(INTDIR)/pquery.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE="G:\pglite\src\backend\libpq\be-dumpdata.c" + +$(INTDIR)/"be-dumpdata.obj" : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\filename.c + +$(INTDIR)/filename.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\misc.c + +$(INTDIR)/misc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\pathnode.c + +$(INTDIR)/pathnode.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\cache\inval.c + +$(INTDIR)/inval.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\smgr\smgrtype.c + +$(INTDIR)/smgrtype.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\joininfo.c + +$(INTDIR)/joininfo.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\sort\lselect.c + +$(INTDIR)/lselect.obj : 
$(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\cache\rel.c + +$(INTDIR)/rel.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\internal.c + +$(INTDIR)/internal.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\prep\preptlist.c + +$(INTDIR)/preptlist.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\path\joinutils.c + +$(INTDIR)/joinutils.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\ipc\shmqueue.c + +$(INTDIR)/shmqueue.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\date.c + +$(INTDIR)/date.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\rewrite\locks.c + +$(INTDIR)/locks.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\not_in.c + +$(INTDIR)/not_in.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\char.c + +$(INTDIR)/char.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\rewrite\rewriteHandler.c + +$(INTDIR)/rewriteHandler.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\sets.c + +$(INTDIR)/sets.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\mmgr\palloc.c + +$(INTDIR)/palloc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\indexnode.c + +$(INTDIR)/indexnode.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\equalfuncs.c + 
+$(INTDIR)/equalfuncs.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\adt\oidint2.c + +$(INTDIR)/oidint2.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\nodes\list.c + +$(INTDIR)/list.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\optimizer\util\plancat.c + +$(INTDIR)/plancat.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\fmgr\fmgr.c + +$(INTDIR)/fmgr.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\obj\fmgrtab.c + +$(INTDIR)/fmgrtab.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\lib\dllist.c + +$(INTDIR)/dllist.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\executor\nodeGroup.c + +$(INTDIR)/nodeGroup.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\buffer\localbuf.c + +$(INTDIR)/localbuf.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\commands\cluster.c + +$(INTDIR)/cluster.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\ipc\ipc.c + +$(INTDIR)/ipc.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\port\win32\nt.c + +$(INTDIR)/nt.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\port\win32\getopt.c + +$(INTDIR)/getopt.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\obj\bootscanner.c + +$(INTDIR)/bootscanner.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\obj\scan.c + +$(INTDIR)/scan.obj : $(SOURCE) 
$(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\obj\bootparse.c + +$(INTDIR)/bootparse.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\obj\gram.c + +$(INTDIR)/gram.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\init\findbe.c + +$(INTDIR)/findbe.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\port\win32\regex\regerror.c + +$(INTDIR)/regerror.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\port\win32\regex\regfree.c + +$(INTDIR)/regfree.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\port\win32\regex\regcomp.c + +$(INTDIR)/regcomp.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\port\win32\regex\regexec.c + +$(INTDIR)/regexec.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\access\nbtree\nbtsort.c + +$(INTDIR)/nbtsort.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\storage\buffer\buf_init.c + +$(INTDIR)/buf_init.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +################################################################################ +# Begin Source File + +SOURCE=G:\pglite\src\backend\utils\fmgr\dfmgr.c + +$(INTDIR)/dfmgr.obj : $(SOURCE) $(INTDIR) + $(CPP) $(CPP_PROJ) $(SOURCE) + +# End Source File +# End Group +# End Project +################################################################################ diff --git a/src/backend/port/win32/port-protos.h b/src/backend/port/win32/port-protos.h new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/src/backend/port/win32/port-protos.h @@ -0,0 +1 @@ + diff --git a/src/backend/port/win32/pwd.h b/src/backend/port/win32/pwd.h new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/src/backend/port/win32/pwd.h @@ -0,0 +1 @@ + diff --git a/src/backend/port/win32/regex/COPYRIGHT b/src/backend/port/win32/regex/COPYRIGHT new file mode 100644 index 00000000000..574f6bcec6c --- /dev/null +++ b/src/backend/port/win32/regex/COPYRIGHT @@ -0,0 +1,56 @@ +Copyright 1992, 1993, 1994 Henry Spencer. All rights reserved. 
+This software is not subject to any license of the American Telephone +and Telegraph Company or of the Regents of the University of California. + +Permission is granted to anyone to use this software for any purpose on +any computer system, and to alter it and redistribute it, subject +to the following restrictions: + +1. The author is not responsible for the consequences of use of this + software, no matter how awful, even if they arise from flaws in it. + +2. The origin of this software must not be misrepresented, either by + explicit claim or by omission. Since few users ever read sources, + credits must appear in the documentation. + +3. Altered versions must be plainly marked as such, and must not be + misrepresented as being the original software. Since few users + ever read sources, credits must appear in the documentation. + +4. This notice may not be removed or altered. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= +/*- + * Copyright (c) 1994 + * The Regents of the University of California. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * @(#)COPYRIGHT 8.1 (Berkeley) 3/16/94 + */ diff --git a/src/backend/port/win32/regex/Makefile.inc b/src/backend/port/win32/regex/Makefile.inc new file mode 100644 index 00000000000..f9853f31ce2 --- /dev/null +++ b/src/backend/port/win32/regex/Makefile.inc @@ -0,0 +1,14 @@ +# @(#)Makefile.inc 8.1 (Berkeley) 6/4/93 + +# regex sources +.PATH: ${.CURDIR}/regex + +CFLAGS+=-DPOSIX_MISTAKE + +SRCS+= regcomp.c regerror.c regexec.c regfree.c + +MAN3+= regex.0 +MAN7+= re_format.0 + +MLINKS+=regex.3 regcomp.3 regex.3 regexec.3 regex.3 regerror.3 +MLINKS+=regexec.3 regfree.3 diff --git a/src/backend/port/win32/regex/WHATSNEW b/src/backend/port/win32/regex/WHATSNEW new file mode 100644 index 00000000000..f4301d300dd --- /dev/null +++ b/src/backend/port/win32/regex/WHATSNEW @@ -0,0 +1,94 @@ +# @(#)WHATSNEW 8.3 (Berkeley) 3/18/94 + +New in alpha3.4: The complex bug alluded to below has been fixed (in a +slightly kludgey temporary way that may hurt efficiency a bit; this is +another "get it out the door for 4.4" release). The tests at the end of +the tests file have accordingly been uncommented. The primary sign of +the bug was that something like a?b matching ab matched b rather than ab. +(The bug was essentially specific to this exact situation, else it would +have shown up earlier.) + +New in alpha3.3: The definition of word boundaries has been altered +slightly, to more closely match the usual programming notion that "_" +is an alphabetic. Stuff used for pre-ANSI systems is now in a subdir, +and the makefile no longer alludes to it in mysterious ways. The +makefile has generally been cleaned up some. Fixes have been made +(again!) so that the regression test will run without -DREDEBUG, at +the cost of weaker checking. A workaround for a bug in some folks' +<assert.h> has been added. And some more things have been added to +tests, including a couple right at the end which are commented out +because the code currently flunks them (complex bug; fix coming). +Plus the usual minor cleanup. + +New in alpha3.2: Assorted bits of cleanup and portability improvement +(the development base is now a BSDI system using GCC instead of an ancient +Sun system, and the newer compiler exposed some glitches). Fix for a +serious bug that affected REs using many [] (including REG_ICASE REs +because of the way they are implemented), *sometimes*, depending on +memory-allocation patterns. The header-file prototypes no longer name +the parameters, avoiding possible name conflicts. The possibility that +some clot has defined CHAR_MIN as (say) `-128' instead of `(-128)' is +now handled gracefully. "uchar" is no longer used as an internal type +name (too many people have the same idea). Still the same old lousy +performance, alas. + +New in alpha3.1: Basically nothing, this release is just a bookkeeping +convenience. Stay tuned. + +New in alpha3.0: Performance is no better, alas, but some fixes have been +made and some functionality has been added. (This is basically the "get +it out the door in time for 4.4" release.) One bug fix: regfree() didn't +free the main internal structure (how embarrassing). It is now possible +to put NULs in either the RE or the target string, using (resp.) a new +REG_PEND flag and the old REG_STARTEND flag. The REG_NOSPEC flag to +regcomp() makes all characters ordinary, so you can match a literal +string easily (this will become more useful when performance improves!). +There are now primitives to match beginnings and ends of words, although +the syntax is disgusting and so is the implementation.
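(A minimal sketch of how these two extensions can be combined, assuming the REG_NOSPEC and REG_STARTEND semantics just described; the helper name find_literal and its error handling are illustrative only, not part of this distribution:

	#include <stddef.h>
	#include <regex.h>

	/* Return the offset of the literal string lit within buf[0..len), or -1. */
	long
	find_literal(const char *buf, size_t len, const char *lit)
	{
		regex_t re;
		regmatch_t m;
		int r;

		if (regcomp(&re, lit, REG_NOSPEC) != 0)	/* pattern is taken literally */
			return -1;
		m.rm_so = 0;				/* REG_STARTEND: search buf[0..len) */
		m.rm_eo = (regoff_t)len;
		r = regexec(&re, buf, 1, &m, REG_STARTEND);
		regfree(&re);
		return r == 0 ? (long)m.rm_so : -1;
	}

On success the offsets in m are relative to buf itself, and, as noted above, the searched range may contain NULs.)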
The REG_ATOI +debugging interface has changed a bit. And there has been considerable +internal cleanup of various kinds. + +New in alpha2.3: Split change list out of README, and moved flags notes +into Makefile. Macro-ized the name of regex(7) in regex(3), since it has +to change for 4.4BSD. Cleanup work in engine.c, and some new regression +tests to catch tricky cases thereof. + +New in alpha2.2: Out-of-date manpages updated. Regerror() acquires two +small extensions -- REG_ITOA and REG_ATOI -- which avoid debugging kludges +in my own test program and might be useful to others for similar purposes. +The regression test will now compile (and run) without REDEBUG. The +BRE \$ bug is fixed. Most uses of "uchar" are gone; it's all chars now. +Char/uchar parameters are now written int/unsigned, to avoid possible +portability problems with unpromoted parameters. Some unsigned casts have +been introduced to minimize portability problems with shifting into sign +bits. + +New in alpha2.1: Lots of little stuff, cleanup and fixes. The one big +thing is that regex.h is now generated, using mkh, rather than being +supplied in the distribution; due to circularities in dependencies, +you have to build regex.h explicitly by "make h". The two known bugs +have been fixed (and the regression test now checks for them), as has a +problem with assertions not being suppressed in the absence of REDEBUG. +No performance work yet. + +New in alpha2: Backslash-anything is an ordinary character, not an +error (except, of course, for the handful of backslashed metacharacters +in BREs), which should reduce script breakage. The regression test +checks *where* null strings are supposed to match, and has generally +been tightened up somewhat. Small bug fixes in parameter passing (not +harmful, but technically errors) and some other areas. Debugging +invoked by defining REDEBUG rather than not defining NDEBUG. + +New in alpha+3: full prototyping for internal routines, using a little +helper program, mkh, which extracts prototypes given in stylized comments. +More minor cleanup. Buglet fix: it's CHAR_BIT, not CHAR_BITS. Simple +pre-screening of input when a literal string is known to be part of the +RE; this does wonders for performance. + +New in alpha+2: minor bits of cleanup. Notably, the number "32" for the +word width isn't hardwired into regexec.c any more, the public header +file prototypes the functions if __STDC__ is defined, and some small typos +in the manpages have been fixed. + +New in alpha+1: improvements to the manual pages, and an important +extension, the REG_STARTEND option to regexec(). diff --git a/src/backend/port/win32/regex/cclass.h b/src/backend/port/win32/regex/cclass.h new file mode 100644 index 00000000000..a29a92ee9c4 --- /dev/null +++ b/src/backend/port/win32/regex/cclass.h @@ -0,0 +1,70 @@ +/*- + * Copyright (c) 1992, 1993, 1994 Henry Spencer. + * Copyright (c) 1992, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Henry Spencer. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cclass.h 8.3 (Berkeley) 3/20/94 + */ + +/* character-class table */ +static struct cclass { + char *name; + char *chars; + char *multis; +} cclasses[] = { + "alnum", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\ +0123456789", "", + "alpha", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", + "", + "blank", " \t", "", + "cntrl", "\007\b\t\n\v\f\r\1\2\3\4\5\6\16\17\20\21\22\23\24\ +\25\26\27\30\31\32\33\34\35\36\37\177", "", + "digit", "0123456789", "", + "graph", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\ +0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + "", + "lower", "abcdefghijklmnopqrstuvwxyz", + "", + "print", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz\ +0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ ", + "", + "punct", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~", + "", + "space", "\t\n\v\f\r ", "", + "upper", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", + "", + "xdigit", "0123456789ABCDEFabcdef", + "", + NULL, 0, "" +}; diff --git a/src/backend/port/win32/regex/cname.h b/src/backend/port/win32/regex/cname.h new file mode 100644 index 00000000000..c1632ebb1f5 --- /dev/null +++ b/src/backend/port/win32/regex/cname.h @@ -0,0 +1,141 @@ +/*- + * Copyright (c) 1992, 1993, 1994 Henry Spencer. + * Copyright (c) 1992, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Henry Spencer. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)cname.h 8.3 (Berkeley) 3/20/94 + */ + +/* character-name table */ +static struct cname { + char *name; + char code; +} cnames[] = { + "NUL", '\0', + "SOH", '\001', + "STX", '\002', + "ETX", '\003', + "EOT", '\004', + "ENQ", '\005', + "ACK", '\006', + "BEL", '\007', + "alert", '\007', + "BS", '\010', + "backspace", '\b', + "HT", '\011', + "tab", '\t', + "LF", '\012', + "newline", '\n', + "VT", '\013', + "vertical-tab", '\v', + "FF", '\014', + "form-feed", '\f', + "CR", '\015', + "carriage-return", '\r', + "SO", '\016', + "SI", '\017', + "DLE", '\020', + "DC1", '\021', + "DC2", '\022', + "DC3", '\023', + "DC4", '\024', + "NAK", '\025', + "SYN", '\026', + "ETB", '\027', + "CAN", '\030', + "EM", '\031', + "SUB", '\032', + "ESC", '\033', + "IS4", '\034', + "FS", '\034', + "IS3", '\035', + "GS", '\035', + "IS2", '\036', + "RS", '\036', + "IS1", '\037', + "US", '\037', + "space", ' ', + "exclamation-mark", '!', + "quotation-mark", '"', + "number-sign", '#', + "dollar-sign", '$', + "percent-sign", '%', + "ampersand", '&', + "apostrophe", '\'', + "left-parenthesis", '(', + "right-parenthesis", ')', + "asterisk", '*', + "plus-sign", '+', + "comma", ',', + "hyphen", '-', + "hyphen-minus", '-', + "period", '.', + "full-stop", '.', + "slash", '/', + "solidus", '/', + "zero", '0', + "one", '1', + "two", '2', + "three", '3', + "four", '4', + "five", '5', + "six", '6', + "seven", '7', + "eight", '8', + "nine", '9', + "colon", ':', + "semicolon", ';', + "less-than-sign", '<', + "equals-sign", '=', + "greater-than-sign", '>', + "question-mark", '?', + "commercial-at", '@', + "left-square-bracket", '[', + "backslash", '\\', + "reverse-solidus", '\\', + "right-square-bracket", ']', + "circumflex", '^', + "circumflex-accent", '^', + "underscore", '_', + "low-line", '_', + "grave-accent", '`', + "left-brace", '{', + "left-curly-bracket", '{', + "vertical-line", '|', + "right-brace", '}', + "right-curly-bracket", '}', + "tilde", '~', + "DEL", '\177', + NULL, 0, +}; diff --git a/src/backend/port/win32/regex/engine.c b/src/backend/port/win32/regex/engine.c new file mode 100644 index 00000000000..02c841afa46 --- /dev/null +++ b/src/backend/port/win32/regex/engine.c @@ -0,0 +1,1091 @@ +/*- + * Copyright (c) 1992, 1993, 1994 Henry Spencer. 
+ * Copyright (c) 1992, 1993, 1994 + * The Regents of the University of California. All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * Henry Spencer. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * @(#)engine.c 8.5 (Berkeley) 3/20/94 + */ + +/* + * The matching engine and friends. This file is #included by regexec.c + * after suitable #defines of a variety of macros used herein, so that + * different state representations can be used without duplicating masses + * of code. 
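+ *
+ * As a rough sketch only -- assuming regexec.c sets things up along these
+ * lines, and noting that the exact set of #defines it uses may differ --
+ * the double-inclusion pattern implied by the SNAMES/LNAMES blocks below is:
+ *
+ *	#define SNAMES
+ *	#include "engine.c"	(compiles smatcher(), sstep(), ... for small states)
+ *	#undef SNAMES
+ *	#define LNAMES
+ *	#include "engine.c"	(compiles lmatcher(), lstep(), ... for large states)
+ *	#undef LNAMES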
+ */ + +#ifdef SNAMES +#define matcher smatcher +#define fast sfast +#define slow sslow +#define dissect sdissect +#define backref sbackref +#define step sstep +#define print sprint +#define at sat +#define match smat +#endif +#ifdef LNAMES +#define matcher lmatcher +#define fast lfast +#define slow lslow +#define dissect ldissect +#define backref lbackref +#define step lstep +#define print lprint +#define at lat +#define match lmat +#endif + +/* another structure passed up and down to avoid zillions of parameters */ +struct match { + struct re_guts *g; + int eflags; + regmatch_t *pmatch; /* [nsub+1] (0 element unused) */ + char *offp; /* offsets work from here */ + char *beginp; /* start of string -- virtual NUL precedes */ + char *endp; /* end of string -- virtual NUL here */ + char *coldp; /* can be no match starting before here */ + char **lastpos; /* [nplus+1] */ + STATEVARS; + states st; /* current states */ + states fresh; /* states for a fresh start */ + states tmp; /* temporary */ + states empty; /* empty set of states */ +}; + +/* ========= begin header generated by ./mkh ========= */ +#ifdef __cplusplus +extern "C" { +#endif + +/* === engine.c === */ +static int matcher __P((struct re_guts *g, char *string, size_t nmatch, regmatch_t pmatch[], int eflags)); +static char *dissect __P((struct match *m, char *start, char *stop, sopno startst, sopno stopst)); +static char *backref __P((struct match *m, char *start, char *stop, sopno startst, sopno stopst, sopno lev)); +static char *fast __P((struct match *m, char *start, char *stop, sopno startst, sopno stopst)); +static char *slow __P((struct match *m, char *start, char *stop, sopno startst, sopno stopst)); +static states step __P((struct re_guts *g, sopno start, sopno stop, states bef, int ch, states aft)); +#define BOL (OUT+1) +#define EOL (BOL+1) +#define BOLEOL (BOL+2) +#define NOTHING (BOL+3) +#define BOW (BOL+4) +#define EOW (BOL+5) +#define CODEMAX (BOL+5) /* highest code used */ +#define NONCHAR(c) ((c) > CHAR_MAX) +#define NNONCHAR (CODEMAX-CHAR_MAX) +#ifdef REDEBUG +static void print __P((struct match *m, char *caption, states st, int ch, FILE *d)); +#endif +#ifdef REDEBUG +static void at __P((struct match *m, char *title, char *start, char *stop, sopno startst, sopno stopst)); +#endif +#ifdef REDEBUG +static char *pchar __P((int ch)); +#endif + +#ifdef __cplusplus +} +#endif +/* ========= end header generated by ./mkh ========= */ + +#ifdef REDEBUG +#define SP(t, s, c) print(m, t, s, c, stdout) +#define AT(t, p1, p2, s1, s2) at(m, t, p1, p2, s1, s2) +#define NOTE(str) { if (m->eflags&REG_TRACE) printf("=%s\n", (str)); } +#else +#define SP(t, s, c) /* nothing */ +#define AT(t, p1, p2, s1, s2) /* nothing */ +#define NOTE(s) /* nothing */ +#endif + +/* + - matcher - the actual matching engine + == static int matcher(register struct re_guts *g, char *string, \ + == size_t nmatch, regmatch_t pmatch[], int eflags); + */ +static int /* 0 success, REG_NOMATCH failure */ +matcher(g, string, nmatch, pmatch, eflags) +register struct re_guts *g; +char *string; +size_t nmatch; +regmatch_t pmatch[]; +int eflags; +{ + register char *endp; + register int i; + struct match mv; + register struct match *m = &mv; + register char *dp; + const register sopno gf = g->firststate+1; /* +1 for OEND */ + const register sopno gl = g->laststate; + char *start; + char *stop; + + /* simplify the situation where possible */ + if (g->cflags&REG_NOSUB) + nmatch = 0; + if (eflags&REG_STARTEND) { + start = string + pmatch[0].rm_so; + stop = string + pmatch[0].rm_eo;
+ } else { + start = string; + stop = start + strlen(start); + } + if (stop < start) + return(REG_INVARG); + + /* prescreening; this does wonders for this rather slow code */ + if (g->must != NULL) { + for (dp = start; dp < stop; dp++) + if (*dp == g->must[0] && stop - dp >= g->mlen && + memcmp(dp, g->must, (size_t)g->mlen) == 0) + break; + if (dp == stop) /* we didn't find g->must */ + return(REG_NOMATCH); + } + + /* match struct setup */ + m->g = g; + m->eflags = eflags; + m->pmatch = NULL; + m->lastpos = NULL; + m->offp = string; + m->beginp = start; + m->endp = stop; + STATESETUP(m, 4); + SETUP(m->st); + SETUP(m->fresh); + SETUP(m->tmp); + SETUP(m->empty); + CLEAR(m->empty); + + /* this loop does only one repetition except for backrefs */ + for (;;) { + endp = fast(m, start, stop, gf, gl); + if (endp == NULL) { /* a miss */ + STATETEARDOWN(m); + return(REG_NOMATCH); + } + if (nmatch == 0 && !g->backrefs) + break; /* no further info needed */ + + /* where? */ + assert(m->coldp != NULL); + for (;;) { + NOTE("finding start"); + endp = slow(m, m->coldp, stop, gf, gl); + if (endp != NULL) + break; + assert(m->coldp < m->endp); + m->coldp++; + } + if (nmatch == 1 && !g->backrefs) + break; /* no further info needed */ + + /* oh my, he wants the subexpressions... */ + if (m->pmatch == NULL) + m->pmatch = (regmatch_t *)malloc((m->g->nsub + 1) * + sizeof(regmatch_t)); + if (m->pmatch == NULL) { + STATETEARDOWN(m); + return(REG_ESPACE); + } + for (i = 1; i <= m->g->nsub; i++) + m->pmatch[i].rm_so = m->pmatch[i].rm_eo = -1; + if (!g->backrefs && !(m->eflags&REG_BACKR)) { + NOTE("dissecting"); + dp = dissect(m, m->coldp, endp, gf, gl); + } else { + if (g->n