/*
 * fe_memutils.h
 *    memory management support for frontend code
 *
 * Copyright (c) 2003-2017, PostgreSQL Global Development Group
 *
 * src/include/common/fe_memutils.h
 */
#ifndef FE_MEMUTILS_H
#define FE_MEMUTILS_H

#include <stddef.h>             /* size_t */
#include <stdint.h>             /* uintptr_t, used by MemSet() */
#include <string.h>             /* memset(), used by MemSet() */

/* Frontend stand-in for the backend's Size type */
typedef size_t Size;

/*
 * Flags for pg_malloc_extended and palloc_extended, deliberately named
 * the same as the backend flags.
 */
#define MCXT_ALLOC_HUGE     0x01    /* allow huge allocation (> 1 GB) not
                                     * actually used for frontends */
#define MCXT_ALLOC_NO_OOM   0x02    /* no failure if out-of-memory */
#define MCXT_ALLOC_ZERO     0x04    /* zero allocated memory */

/*
 * "Safe" memory allocation functions --- these exit(1) on failure
 * (except pg_malloc_extended with MCXT_ALLOC_NO_OOM)
 */
extern char *pg_strdup(const char *in);
extern void *pg_malloc(size_t size);
extern void *pg_malloc0(size_t size);
extern void *pg_malloc_extended(size_t size, int flags);
extern void *pg_realloc(void *pointer, size_t size);
extern void pg_free(void *pointer);
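
/*
 * Illustrative sketch, not part of the original header: typical frontend use
 * of the pg_* allocators and the MCXT_ALLOC_* flags above.  The guard macro
 * FE_MEMUTILS_EXAMPLES and the sizes/strings are made up for this example.
 */
#ifdef FE_MEMUTILS_EXAMPLES
static void
fe_memutils_example_pg_alloc(void)
{
    /* These never return NULL; on allocation failure they exit(1). */
    char   *name = pg_strdup("example");
    int    *counts = pg_malloc0(16 * sizeof(int));  /* zero-filled */

    /* With MCXT_ALLOC_NO_OOM, failure returns NULL instead of exiting. */
    char   *big = pg_malloc_extended(8192, MCXT_ALLOC_NO_OOM | MCXT_ALLOC_ZERO);

    if (big != NULL)
        pg_free(big);
    pg_free(counts);
    pg_free(name);
}
#endif                          /* FE_MEMUTILS_EXAMPLES */
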
/* Equivalent functions, deliberately named the same as backend functions */
extern char *pstrdup(const char *in);
extern void *palloc(Size size);
extern void *palloc0(Size size);
extern void *palloc_extended(Size size, int flags);
extern void *repalloc(void *pointer, Size size);
extern void pfree(void *pointer);
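
/*
 * Illustrative sketch, not part of the original header: the palloc-named
 * wrappers let backend-style code compile unchanged in frontend programs.
 * The guard macro FE_MEMUTILS_EXAMPLES is hypothetical.
 */
#ifdef FE_MEMUTILS_EXAMPLES
static void
fe_memutils_example_palloc(void)
{
    Size    nbytes = 64;
    char   *buf = palloc0(nbytes);      /* zero-filled; exits on failure */

    buf = repalloc(buf, nbytes * 2);    /* grow the allocation */
    pfree(buf);
}
#endif                          /* FE_MEMUTILS_EXAMPLES */
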
#ifdef NOT_USED
/* sprintf into a palloc'd buffer --- these are in psprintf.c */
extern char *psprintf(const char *fmt,...) pg_attribute_printf(1, 2);
extern size_t pvsnprintf(char *buf, size_t len, const char *fmt, va_list args) pg_attribute_printf(3, 0);
#endif
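
/*
 * Illustrative sketch, not part of the original header: when the NOT_USED
 * declarations above are enabled and src/common/psprintf.c is linked in,
 * psprintf() returns a formatted string in a freshly allocated buffer that
 * the caller eventually frees, e.g.:
 *
 *      char *msg = psprintf("processed %d rows", 42);
 *      ...
 *      pg_free(msg);
 */
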
#define MEMSET_LOOP_LIMIT 1024  /* maximum length (in bytes) eligible for
                                 * MemSet's inline zeroing loop */

/*
 * MemSet
 *    Exactly the same as standard library function memset(), but considerably
 *    faster for zeroing small word-aligned structures (such as parsetree nodes).
 *    This has to be a macro because the main point is to avoid function-call
 *    overhead.  However, we have also found that the loop is faster than
 *    native libc memset() on some platforms, even those with assembler
 *    memset() functions.  More research needs to be done, perhaps with
 *    MEMSET_LOOP_LIMIT tests in configure.
 */
#define MemSet(start, val, len) \
    do \
    { \
        /* must be void* because we don't know if it is integer aligned yet */ \
        void   *_vstart = (void *) (start); \
        int     _val = (val); \
        Size    _len = (len); \
\
        if ((((uintptr_t) _vstart) & LONG_ALIGN_MASK) == 0 && \
            (_len & LONG_ALIGN_MASK) == 0 && \
            _val == 0 && \
            _len <= MEMSET_LOOP_LIMIT && \
            /* \
             * If MEMSET_LOOP_LIMIT == 0, optimizer should find \
             * the whole "if" false at compile time. \
             */ \
            MEMSET_LOOP_LIMIT != 0) \
        { \
            long   *_start = (long *) _vstart; \
            long   *_stop = (long *) ((char *) _start + _len); \
            while (_start < _stop) \
                *_start++ = 0; \
        } \
        else \
            memset(_vstart, _val, _len); \
    } while (0)

/* originally the gettext _() translation macro; stubbed out as a no-op here */
#define _(x) x

/* Get a bit mask of the bits set in non-long aligned addresses */
#define LONG_ALIGN_MASK (sizeof(long) - 1)
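
/*
 * Illustrative sketch, not part of the original header: zeroing a small,
 * word-aligned struct is the case MemSet's inline loop is meant for; a
 * non-zero fill value falls back to plain memset().  The struct and the
 * guard macro FE_MEMUTILS_EXAMPLES are made up for this example.
 */
#ifdef FE_MEMUTILS_EXAMPLES
typedef struct fe_memutils_example_node
{
    long        kind;
    void       *left;
    void       *right;
} fe_memutils_example_node;

static void
fe_memutils_example_memset(void)
{
    fe_memutils_example_node node;

    /* long-aligned, length a multiple of sizeof(long), fill value 0 */
    MemSet(&node, 0, sizeof(node));

    /* non-zero fill value: expands to the memset() branch instead */
    MemSet(&node, 0x7f, sizeof(node));
}
#endif                          /* FE_MEMUTILS_EXAMPLES */
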
#endif /* FE_MEMUTILS_H */