Diffstat (limited to 'src')
-rw-r--r--  src/auth/auth-scram.c | 10
-rw-r--r--  src/auth/pool_auth.c | 163
-rw-r--r--  src/auth/pool_hba.c | 75
-rw-r--r--  src/auth/pool_passwd.c | 42
-rw-r--r--  src/config/pool_config_variables.c | 177
-rw-r--r--  src/context/pool_process_context.c | 8
-rw-r--r--  src/context/pool_query_context.c | 285
-rw-r--r--  src/context/pool_session_context.c | 76
-rw-r--r--  src/include/auth/pool_auth.h | 10
-rw-r--r--  src/include/auth/pool_hba.h | 5
-rw-r--r--  src/include/auth/pool_passwd.h | 15
-rw-r--r--  src/include/auth/scram-common.h | 4
-rw-r--r--  src/include/auth/scram.h | 10
-rw-r--r--  src/include/context/pool_process_context.h | 20
-rw-r--r--  src/include/context/pool_query_context.h | 52
-rw-r--r--  src/include/context/pool_session_context.h | 123
-rw-r--r--  src/include/main/health_check.h | 46
-rw-r--r--  src/include/main/pgpool_logger.h | 2
-rw-r--r--  src/include/main/pool_internal_comms.h | 2
-rw-r--r--  src/include/parser/explain.h | 46
-rw-r--r--  src/include/parser/extensible.h | 8
-rw-r--r--  src/include/parser/gramparse.h | 6
-rw-r--r--  src/include/parser/nodes.h | 14
-rw-r--r--  src/include/parser/parsenodes.h | 12
-rw-r--r--  src/include/parser/parser.h | 2
-rw-r--r--  src/include/parser/pg_class.h | 4
-rw-r--r--  src/include/parser/pg_list.h | 8
-rw-r--r--  src/include/parser/pg_trigger.h | 2
-rw-r--r--  src/include/parser/pg_wchar.h | 16
-rw-r--r--  src/include/parser/pool_parser.h | 2
-rw-r--r--  src/include/parser/scanner.h | 4
-rw-r--r--  src/include/pcp/libpcp_ext.h | 110
-rw-r--r--  src/include/pcp/pcp.h | 3
-rw-r--r--  src/include/pcp/pcp_stream.h | 12
-rw-r--r--  src/include/pcp/pcp_worker.h | 2
-rw-r--r--  src/include/pcp/recovery.h | 6
-rw-r--r--  src/include/pool.h | 136
-rw-r--r--  src/include/pool_config.h | 250
-rw-r--r--  src/include/pool_config_variables.h | 38
-rw-r--r--  src/include/pool_type.h | 6
-rw-r--r--  src/include/protocol/pool_connection_pool.h | 12
-rw-r--r--  src/include/protocol/pool_pg_utils.h | 31
-rw-r--r--  src/include/protocol/pool_process_query.h | 82
-rw-r--r--  src/include/protocol/pool_proto_modules.h | 182
-rw-r--r--  src/include/query_cache/pool_memqcache.h | 68
-rw-r--r--  src/include/rewrite/pool_lobj.h | 2
-rw-r--r--  src/include/rewrite/pool_timestamp.h | 4
-rw-r--r--  src/include/utils/elog.h | 54
-rw-r--r--  src/include/utils/fe_ports.h | 14
-rw-r--r--  src/include/utils/getopt_long.h | 6
-rw-r--r--  src/include/utils/json.h | 22
-rw-r--r--  src/include/utils/json_writer.h | 48
-rw-r--r--  src/include/utils/memutils.h | 24
-rw-r--r--  src/include/utils/palloc.h | 4
-rw-r--r--  src/include/utils/pool_ip.h | 24
-rw-r--r--  src/include/utils/pool_params.h | 16
-rw-r--r--  src/include/utils/pool_process_reporting.h | 38
-rw-r--r--  src/include/utils/pool_relcache.h | 18
-rw-r--r--  src/include/utils/pool_select_walker.h | 6
-rw-r--r--  src/include/utils/pool_ssl.h | 14
-rw-r--r--  src/include/utils/pool_stream.h | 40
-rw-r--r--  src/include/utils/ps_status.h | 9
-rw-r--r--  src/include/utils/regex_array.h | 14
-rw-r--r--  src/include/utils/sha2.h | 8
-rw-r--r--  src/include/utils/ssl_utils.h | 8
-rw-r--r--  src/include/utils/statistics.h | 30
-rw-r--r--  src/include/utils/timestamp.h | 2
-rw-r--r--  src/include/version.h | 1
-rw-r--r--  src/include/watchdog/watchdog.h | 63
-rw-r--r--  src/include/watchdog/wd_commands.h | 10
-rw-r--r--  src/include/watchdog/wd_internal_commands.h | 6
-rw-r--r--  src/include/watchdog/wd_ipc_conn.h | 8
-rw-r--r--  src/include/watchdog/wd_ipc_defines.h | 4
-rw-r--r--  src/include/watchdog/wd_json_data.h | 33
-rw-r--r--  src/include/watchdog/wd_lifecheck.h | 14
-rw-r--r--  src/include/watchdog/wd_utils.h | 8
-rw-r--r--  src/libs/pcp/pcp.c | 97
-rw-r--r--  src/main/health_check.c | 73
-rw-r--r--  src/main/main.c | 11
-rw-r--r--  src/main/pgpool_logger.c | 43
-rw-r--r--  src/main/pgpool_main.c | 659
-rw-r--r--  src/main/pool_globals.c | 41
-rw-r--r--  src/main/pool_internal_comms.c | 5
-rw-r--r--  src/parser/copyfuncs.c | 236
-rw-r--r--  src/parser/keywords.c | 2
-rw-r--r--  src/parser/list.c | 28
-rw-r--r--  src/parser/makefuncs.c | 46
-rw-r--r--  src/parser/outfuncs.c | 88
-rw-r--r--  src/parser/parser.c | 66
-rw-r--r--  src/parser/snprintf.c | 58
-rw-r--r--  src/parser/wchar.c | 648
-rw-r--r--  src/pcp_con/pcp_child.c | 35
-rw-r--r--  src/pcp_con/pcp_worker.c | 164
-rw-r--r--  src/pcp_con/recovery.c | 26
-rw-r--r--  src/protocol/CommandComplete.c | 94
-rw-r--r--  src/protocol/child.c | 161
-rw-r--r--  src/protocol/pool_connection_pool.c | 32
-rw-r--r--  src/protocol/pool_pg_utils.c | 94
-rw-r--r--  src/protocol/pool_process_query.c | 318
-rw-r--r--  src/protocol/pool_proto2.c | 40
-rw-r--r--  src/protocol/pool_proto_modules.c | 451
-rw-r--r--  src/query_cache/pool_memqcache.c | 286
-rw-r--r--  src/rewrite/pool_lobj.c | 6
-rw-r--r--  src/rewrite/pool_timestamp.c | 81
-rw-r--r--  src/sql/pgpool-recovery/pgpool-recovery.c | 15
-rw-r--r--  src/sql/pgpool_adm/pgpool_adm.c | 8
-rw-r--r--  src/streaming_replication/pool_worker_child.c | 85
-rw-r--r--  src/test/regression/tests/010.rewrite_timestamp/timestamp/main.c | 18
-rw-r--r--  src/tools/fe_port.c | 3
-rw-r--r--  src/tools/pcp/pcp_frontend_client.c | 65
-rw-r--r--  src/tools/pgenc/pg_enc.c | 60
-rw-r--r--  src/tools/pgmd5/pg_md5.c | 26
-rw-r--r--  src/tools/pgproto/extended_query.c | 8
-rw-r--r--  src/tools/pgproto/main.c | 2
-rw-r--r--  src/tools/pgproto/read.c | 13
-rw-r--r--  src/tools/pgproto/send.c | 9
-rw-r--r--  src/tools/watchdog/wd_cli.c | 192
-rw-r--r--  src/utils/error/elog.c | 13
-rw-r--r--  src/utils/json.c | 26
-rw-r--r--  src/utils/json_writer.c | 55
-rw-r--r--  src/utils/mmgr/aset.c | 2
-rw-r--r--  src/utils/mmgr/mcxt.c | 4
-rw-r--r--  src/utils/pcp/pcp_stream.c | 20
-rw-r--r--  src/utils/pool_health_check_stats.c | 14
-rw-r--r--  src/utils/pool_ip.c | 30
-rw-r--r--  src/utils/pool_params.c | 12
-rw-r--r--  src/utils/pool_path.c | 4
-rw-r--r--  src/utils/pool_process_reporting.c | 198
-rw-r--r--  src/utils/pool_relcache.c | 104
-rw-r--r--  src/utils/pool_select_walker.c | 81
-rw-r--r--  src/utils/pool_shmem.c | 19
-rw-r--r--  src/utils/pool_ssl.c | 102
-rw-r--r--  src/utils/pool_stream.c | 54
-rw-r--r--  src/utils/pqexpbuffer.c | 1
-rw-r--r--  src/utils/ps_status.c | 5
-rw-r--r--  src/utils/regex_array.c | 8
-rw-r--r--  src/utils/ssl_utils.c | 8
-rw-r--r--  src/utils/statistics.c | 34
-rw-r--r--  src/watchdog/watchdog.c | 1135
-rw-r--r--  src/watchdog/wd_commands.c | 42
-rw-r--r--  src/watchdog/wd_heartbeat.c | 34
-rw-r--r--  src/watchdog/wd_if.c | 15
-rw-r--r--  src/watchdog/wd_internal_commands.c | 47
-rw-r--r--  src/watchdog/wd_ipc_conn.c | 14
-rw-r--r--  src/watchdog/wd_json_data.c | 55
-rw-r--r--  src/watchdog/wd_lifecheck.c | 41
-rw-r--r--  src/watchdog/wd_ping.c | 12
-rw-r--r--  src/watchdog/wd_utils.c | 5
148 files changed, 4880 insertions, 4446 deletions
diff --git a/src/auth/auth-scram.c b/src/auth/auth-scram.c
index 3a0aa71c8..334e28f33 100644
--- a/src/auth/auth-scram.c
+++ b/src/auth/auth-scram.c
@@ -212,8 +212,8 @@ static char *build_client_first_message(fe_scram_state *state);
static char *build_client_final_message(fe_scram_state *state);
static bool verify_server_signature(fe_scram_state *state);
static void calculate_client_proof(fe_scram_state *state,
- const char *client_final_message_without_proof,
- uint8 *result);
+ const char *client_final_message_without_proof,
+ uint8 *result);
static void read_client_first_message(scram_state *state, char *input);
static void read_client_final_message(scram_state *state, char *input);
@@ -222,9 +222,9 @@ static char *build_server_final_message(scram_state *state);
static bool verify_client_proof(scram_state *state);
static bool verify_final_nonce(scram_state *state);
static bool parse_scram_verifier(const char *verifier, int *iterations,
- char **salt, uint8 *stored_key, uint8 *server_key);
+ char **salt, uint8 *stored_key, uint8 *server_key);
static void mock_scram_verifier(const char *username, int *iterations,
- char **salt, uint8 *stored_key, uint8 *server_key);
+ char **salt, uint8 *stored_key, uint8 *server_key);
static bool is_scram_printable(char *p);
static char *sanitize_char(char c);
static char *GetMockAuthenticationNonce(void);
@@ -1138,7 +1138,7 @@ build_server_final_message(scram_state *state)
char *server_signature_base64;
int siglen;
scram_HMAC_ctx ctx;
- char *res;
+ char *res;
/* calculate ServerSignature */
scram_HMAC_init(&ctx, state->ServerKey, SCRAM_KEY_LEN);
diff --git a/src/auth/pool_auth.c b/src/auth/pool_auth.c
index 198d8c99e..8c17db4a7 100644
--- a/src/auth/pool_auth.c
+++ b/src/auth/pool_auth.c
@@ -58,36 +58,36 @@
#define MAX_SASL_PAYLOAD_LEN 1024
-static void pool_send_backend_key_data(POOL_CONNECTION * frontend, int pid,
+static void pool_send_backend_key_data(POOL_CONNECTION *frontend, int pid,
char *key, int32 keylen, int protoMajor);
-static int do_clear_text_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor);
-static void pool_send_auth_fail(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp);
-static int do_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor,
- char *storedPassword, PasswordType passwordType);
-static void send_md5auth_request(POOL_CONNECTION * frontend, int protoMajor, char *salt);
-static int read_password_packet(POOL_CONNECTION * frontend, int protoMajor, char *password, int *pwdSize);
-static int send_password_packet(POOL_CONNECTION * backend, int protoMajor, char *password);
-static int send_auth_ok(POOL_CONNECTION * frontend, int protoMajor);
-static void sendAuthRequest(POOL_CONNECTION * frontend, int protoMajor, int32 auth_req_type, char *extradata, int extralen);
-
-static int pg_SASL_continue(POOL_CONNECTION * backend, char *payload, int payloadlen, void *sasl_state, bool final);
-static void *pg_SASL_init(POOL_CONNECTION * backend, char *payload, int payloadlen, char *username, char *storedPassword);
-static bool do_SCRAM(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, int protoMajor, int message_length,
- char *username, char *storedPassword, PasswordType passwordType);
-static void authenticate_frontend_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor);
-static void authenticate_frontend_cert(POOL_CONNECTION * frontend);
-static void authenticate_frontend_SCRAM(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth);
-static void authenticate_frontend_clear_text(POOL_CONNECTION * frontend);
-static bool get_auth_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth,
- char **password, PasswordType *passwordType);
-static void ProcessNegotiateProtocol(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp);
+static int do_clear_text_password(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor);
+static void pool_send_auth_fail(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp);
+static int do_md5(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor,
+ char *storedPassword, PasswordType passwordType);
+static void send_md5auth_request(POOL_CONNECTION *frontend, int protoMajor, char *salt);
+static int read_password_packet(POOL_CONNECTION *frontend, int protoMajor, char *password, int *pwdSize);
+static int send_password_packet(POOL_CONNECTION *backend, int protoMajor, char *password);
+static int send_auth_ok(POOL_CONNECTION *frontend, int protoMajor);
+static void sendAuthRequest(POOL_CONNECTION *frontend, int protoMajor, int32 auth_req_type, char *extradata, int extralen);
+
+static int pg_SASL_continue(POOL_CONNECTION *backend, char *payload, int payloadlen, void *sasl_state, bool final);
+static void *pg_SASL_init(POOL_CONNECTION *backend, char *payload, int payloadlen, char *username, char *storedPassword);
+static bool do_SCRAM(POOL_CONNECTION *frontend, POOL_CONNECTION *backend, int protoMajor, int message_length,
+ char *username, char *storedPassword, PasswordType passwordType);
+static void authenticate_frontend_md5(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor);
+static void authenticate_frontend_cert(POOL_CONNECTION *frontend);
+static void authenticate_frontend_SCRAM(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth);
+static void authenticate_frontend_clear_text(POOL_CONNECTION *frontend);
+static bool get_auth_password(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth,
+ char **password, PasswordType *passwordType);
+static void ProcessNegotiateProtocol(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp);
/*
* Do authentication. Assuming the only caller is
* make_persistent_db_connection().
*/
void
-connection_do_auth(POOL_CONNECTION_POOL_SLOT * cp, char *password)
+connection_do_auth(POOL_CONNECTION_POOL_SLOT *cp, char *password)
{
char kind;
int length;
@@ -243,8 +243,8 @@ connection_do_auth(POOL_CONNECTION_POOL_SLOT * cp, char *password)
switch (kind)
{
- char *p;
- int32 keylen;
+ char *p;
+ int32 keylen;
case 'K': /* backend key data */
keydata_done = true;
@@ -259,7 +259,7 @@ connection_do_auth(POOL_CONNECTION_POOL_SLOT * cp, char *password)
{
ereport(ERROR,
(errmsg("failed to authenticate"),
- errdetail("invalid backend key data length. received %d bytes exceeding %d",
+ errdetail("invalid backend key data length. received %d bytes exceeding %d",
ntohl(length), MAX_CANCELKEY_LENGTH)));
}
@@ -332,7 +332,7 @@ connection_do_auth(POOL_CONNECTION_POOL_SLOT * cp, char *password)
* 0.
*/
int
-pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp)
+pool_do_auth(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
{
signed char kind;
int pid;
@@ -342,9 +342,9 @@ pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp)
int i;
int message_length = 0;
StartupPacket *sp;
- int32 keylen; /* cancel key length */
- char cancel_key[MAX_CANCELKEY_LENGTH];
- char *p;
+ int32 keylen; /* cancel key length */
+ char cancel_key[MAX_CANCELKEY_LENGTH];
+ char *p;
protoMajor = MAIN_CONNECTION(cp)->sp->major;
@@ -484,9 +484,8 @@ read_kind:
&password, &passwordType) == false)
{
/*
- * We do not have any password, we can still get the password
- * from client using plain text authentication if it is
- * allowed by user
+ * We do not have any password, we can still get the password from
+ * client using plain text authentication if it is allowed by user
*/
if (frontend->pool_hba == NULL && pool_config->allow_clear_text_frontend_auth)
{
@@ -741,7 +740,7 @@ read_kind:
}
}
else
- keylen = 4;
+ keylen = 4;
elog(DEBUG1, "cancel key length: %d", keylen);
@@ -821,11 +820,11 @@ read_kind:
* throws ereport.
*/
int
-pool_do_reauth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp)
+pool_do_reauth(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
{
int protoMajor;
int msglen;
- POOL_CONNECTION_POOL_SLOT *sp;
+ POOL_CONNECTION_POOL_SLOT *sp;
protoMajor = MAJOR(cp);
@@ -900,7 +899,7 @@ pool_do_reauth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp)
* send authentication failure message text to frontend
*/
static void
-pool_send_auth_fail(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp)
+pool_send_auth_fail(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
{
int messagelen;
char *errmessage;
@@ -925,7 +924,7 @@ pool_send_auth_fail(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp)
* Send backend key data to frontend.
*/
static void
-pool_send_backend_key_data(POOL_CONNECTION * frontend, int pid,
+pool_send_backend_key_data(POOL_CONNECTION *frontend, int pid,
char *key, int32 keylen, int protoMajor)
{
char kind;
@@ -946,7 +945,7 @@ pool_send_backend_key_data(POOL_CONNECTION * frontend, int pid,
}
static void
-authenticate_frontend_clear_text(POOL_CONNECTION * frontend)
+authenticate_frontend_clear_text(POOL_CONNECTION *frontend)
{
static int size;
char password[MAX_PASSWORD_SIZE];
@@ -1033,7 +1032,7 @@ authenticate_frontend_clear_text(POOL_CONNECTION * frontend)
* perform clear text password authentication
*/
static int
-do_clear_text_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor)
+do_clear_text_password(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor)
{
static int size;
char *pwd = NULL;
@@ -1066,17 +1065,16 @@ do_clear_text_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, in
else if (!rtn || frontend->pwd_size == 0)
{
/*
- * We do not have any password, we can still get the password
- * from client using plain text authentication if it is
- * allowed by user
+ * We do not have any password, we can still get the password from
+ * client using plain text authentication if it is allowed by user
*/
if (frontend->pool_hba == NULL ||
frontend->pool_hba->auth_method == uaPassword ||
- pool_config->allow_clear_text_frontend_auth )
+ pool_config->allow_clear_text_frontend_auth)
{
ereport(DEBUG1,
- (errmsg("using clear text authentication with frontend"),
+ (errmsg("using clear text authentication with frontend"),
errdetail("backend is using password authentication")));
authenticate_frontend_clear_text(frontend);
@@ -1086,8 +1084,8 @@ do_clear_text_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, in
{
ereport(FATAL,
(return_code(2),
- errmsg("clear text password authentication failed"),
- errdetail("unable to get the password for user: \"%s\"", frontend->username)));
+ errmsg("clear text password authentication failed"),
+ errdetail("unable to get the password for user: \"%s\"", frontend->username)));
}
}
}
@@ -1157,7 +1155,7 @@ do_clear_text_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, in
* password in the pool_passwd file.
*/
static void
-authenticate_frontend_SCRAM(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth)
+authenticate_frontend_SCRAM(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth)
{
void *scram_opaq;
char *output = NULL;
@@ -1172,7 +1170,7 @@ authenticate_frontend_SCRAM(POOL_CONNECTION * backend, POOL_CONNECTION * fronten
PasswordType storedPasswordType = PASSWORD_TYPE_UNKNOWN;
char *storedPassword = NULL;
- if (get_auth_password(backend, frontend, reauth,&storedPassword, &storedPasswordType) == false)
+ if (get_auth_password(backend, frontend, reauth, &storedPassword, &storedPasswordType) == false)
{
ereport(FATAL,
(return_code(2),
@@ -1341,7 +1339,7 @@ authenticate_frontend_SCRAM(POOL_CONNECTION * backend, POOL_CONNECTION * fronten
* Authenticate frontend using pool_hba.conf
*/
void
-authenticate_frontend(POOL_CONNECTION * frontend)
+authenticate_frontend(POOL_CONNECTION *frontend)
{
switch (frontend->pool_hba->auth_method)
{
@@ -1379,7 +1377,7 @@ authenticate_frontend(POOL_CONNECTION * frontend)
#ifdef USE_SSL
static void
-authenticate_frontend_cert(POOL_CONNECTION * frontend)
+authenticate_frontend_cert(POOL_CONNECTION *frontend)
{
if (frontend->client_cert_loaded == true && frontend->cert_cn)
{
@@ -1406,7 +1404,7 @@ authenticate_frontend_cert(POOL_CONNECTION * frontend)
}
#else
static void
-authenticate_frontend_cert(POOL_CONNECTION * frontend)
+authenticate_frontend_cert(POOL_CONNECTION *frontend)
{
ereport(ERROR,
(errmsg("CERT authentication failed"),
@@ -1415,7 +1413,7 @@ authenticate_frontend_cert(POOL_CONNECTION * frontend)
#endif
static void
-authenticate_frontend_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor)
+authenticate_frontend_md5(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor)
{
char salt[4];
static int size;
@@ -1427,7 +1425,7 @@ authenticate_frontend_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend,
PasswordType storedPasswordType = PASSWORD_TYPE_UNKNOWN;
char *storedPassword = NULL;
- if (get_auth_password(backend, frontend, reauth,&storedPassword, &storedPasswordType) == false)
+ if (get_auth_password(backend, frontend, reauth, &storedPassword, &storedPasswordType) == false)
{
ereport(FATAL,
(return_code(2),
@@ -1503,7 +1501,7 @@ authenticate_frontend_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend,
* it.
*/
static bool
-get_auth_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth,
+get_auth_password(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth,
char **password, PasswordType *passwordType)
{
/* First preference is to use the pool_passwd file */
@@ -1552,7 +1550,7 @@ get_auth_password(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int rea
* perform MD5 authentication
*/
static int
-do_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int protoMajor,
+do_md5(POOL_CONNECTION *backend, POOL_CONNECTION *frontend, int reauth, int protoMajor,
char *storedPassword, PasswordType passwordType)
{
char salt[4];
@@ -1669,7 +1667,7 @@ do_md5(POOL_CONNECTION * backend, POOL_CONNECTION * frontend, int reauth, int pr
* Send an authentication request packet to the frontend.
*/
static void
-sendAuthRequest(POOL_CONNECTION * frontend, int protoMajor, int32 auth_req_type, char *extradata, int extralen)
+sendAuthRequest(POOL_CONNECTION *frontend, int protoMajor, int32 auth_req_type, char *extradata, int extralen)
{
int kind = htonl(auth_req_type);
@@ -1692,7 +1690,7 @@ sendAuthRequest(POOL_CONNECTION * frontend, int protoMajor, int32 auth_req_type,
* Send md5 authentication request packet to frontend
*/
static void
-send_md5auth_request(POOL_CONNECTION * frontend, int protoMajor, char *salt)
+send_md5auth_request(POOL_CONNECTION *frontend, int protoMajor, char *salt)
{
sendAuthRequest(frontend, protoMajor, AUTH_REQ_MD5, salt, 4);
}
@@ -1702,7 +1700,7 @@ send_md5auth_request(POOL_CONNECTION * frontend, int protoMajor, char *salt)
* Read password packet from frontend
*/
static int
-read_password_packet(POOL_CONNECTION * frontend, int protoMajor, char *password, int *pwdSize)
+read_password_packet(POOL_CONNECTION *frontend, int protoMajor, char *password, int *pwdSize)
{
int size;
@@ -1755,7 +1753,7 @@ read_password_packet(POOL_CONNECTION * frontend, int protoMajor, char *password,
* "password" must be null-terminated.
*/
static int
-send_password_packet(POOL_CONNECTION * backend, int protoMajor, char *password)
+send_password_packet(POOL_CONNECTION *backend, int protoMajor, char *password)
{
int size;
int len;
@@ -1811,7 +1809,7 @@ send_password_packet(POOL_CONNECTION * backend, int protoMajor, char *password)
* Send auth ok to frontend
*/
static int
-send_auth_ok(POOL_CONNECTION * frontend, int protoMajor)
+send_auth_ok(POOL_CONNECTION *frontend, int protoMajor)
{
int msglen;
@@ -1850,7 +1848,7 @@ pool_random_salt(char *md5Salt)
}
static bool
-do_SCRAM(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, int protoMajor, int message_length,
+do_SCRAM(POOL_CONNECTION *frontend, POOL_CONNECTION *backend, int protoMajor, int message_length,
char *username, char *storedPassword, PasswordType passwordType)
{
/* read the packet first */
@@ -1979,7 +1977,7 @@ do_SCRAM(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, int protoMajor,
}
static void *
-pg_SASL_init(POOL_CONNECTION * backend, char *payload, int payloadlen, char *username, char *storedPassword)
+pg_SASL_init(POOL_CONNECTION *backend, char *payload, int payloadlen, char *username, char *storedPassword)
{
char *initialresponse = NULL;
int initialresponselen;
@@ -2082,7 +2080,7 @@ pg_SASL_init(POOL_CONNECTION * backend, char *payload, int payloadlen, char *use
* the protocol.
*/
static int
-pg_SASL_continue(POOL_CONNECTION * backend, char *payload, int payloadlen, void *sasl_state, bool final)
+pg_SASL_continue(POOL_CONNECTION *backend, char *payload, int payloadlen, void *sasl_state, bool final)
{
char *output;
int outputlen;
@@ -2141,21 +2139,21 @@ pg_SASL_continue(POOL_CONNECTION * backend, char *payload, int payloadlen, void
static void
ProcessNegotiateProtocol(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
{
- int32 len;
- int32 savelen;
- int32 protoMajor;
- int32 protoMinor;
- int32 protov;
- bool forwardMsg = false;
- int i;
+ int32 len;
+ int32 savelen;
+ int32 protoMajor;
+ int32 protoMinor;
+ int32 protov;
+ bool forwardMsg = false;
+ int i;
elog(DEBUG1, "Forwarding NegotiateProtocol message to frontend");
pool_write(frontend, "v", 1); /* forward message kind */
- savelen = len = pool_read_int(cp); /* message length including self */
+ savelen = len = pool_read_int(cp); /* message length including self */
pool_write(frontend, &len, 4); /* forward message length */
- len = ntohl(len) - 4; /* length of rest of the message */
- protov = pool_read_int(cp); /* read protocol version */
- protoMajor = PG_PROTOCOL_MAJOR(ntohl(protov)); /* protocol major version */
+ len = ntohl(len) - 4; /* length of rest of the message */
+ protov = pool_read_int(cp); /* read protocol version */
+ protoMajor = PG_PROTOCOL_MAJOR(ntohl(protov)); /* protocol major version */
protoMinor = PG_PROTOCOL_MINOR(ntohl(protov)); /* protocol minor version */
pool_write(frontend, &protov, 4); /* forward protocol version */
elog(DEBUG1, "protocol verion offered: major: %d minor: %d", protoMajor, protoMinor);
@@ -2164,15 +2162,16 @@ ProcessNegotiateProtocol(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
{
if (VALID_BACKEND(i))
{
- POOL_CONNECTION_POOL_SLOT *sp;
- char *p;
- char *np;
- Size nplen;
+ POOL_CONNECTION_POOL_SLOT *sp;
+ char *p;
+ char *np;
+ Size nplen;
p = pool_read2(CONNECTION(cp, i), len);
if (!forwardMsg)
{
- pool_write_and_flush(frontend, p, len); /* forward rest of message */
+ pool_write_and_flush(frontend, p, len); /* forward rest of
+ * message */
forwardMsg = true;
}
/* save negatiate protocol version */
@@ -2181,10 +2180,10 @@ ProcessNegotiateProtocol(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp)
sp->negotiated_minor = protoMinor;
/* save negatiate protocol message */
- nplen = 1 + /* message kind */
+ nplen = 1 + /* message kind */
sizeof(savelen) + /* message length */
sizeof(protov) + /* protocol version */
- len; /* rest of message */
+ len; /* rest of message */
/* allocate message area */
sp->negotiateProtocolMsg = MemoryContextAlloc(TopMemoryContext, nplen);
np = sp->negotiateProtocolMsg;
diff --git a/src/auth/pool_hba.c b/src/auth/pool_hba.c
index 202195b43..8daf74a97 100644
--- a/src/auth/pool_hba.c
+++ b/src/auth/pool_hba.c
@@ -109,37 +109,37 @@ static HbaToken *copy_hba_token(HbaToken *in);
static HbaToken *make_hba_token(const char *token, bool quoted);
static bool parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
- int elevel, char **err_msg);
+ int elevel, char **err_msg);
static MemoryContext tokenize_file(const char *filename, FILE *file,
- List **tok_lines, int elevel);
-static void sendAuthRequest(POOL_CONNECTION * frontend, AuthRequest areq);
-static void auth_failed(POOL_CONNECTION * frontend);
-static bool hba_getauthmethod(POOL_CONNECTION * frontend);
-static bool check_hba(POOL_CONNECTION * frontend);
+ List **tok_lines, int elevel);
+static void sendAuthRequest(POOL_CONNECTION *frontend, AuthRequest areq);
+static void auth_failed(POOL_CONNECTION *frontend);
+static bool hba_getauthmethod(POOL_CONNECTION *frontend);
+static bool check_hba(POOL_CONNECTION *frontend);
static bool check_user(char *user, List *tokens);
static bool check_db(const char *dbname, const char *user, List *tokens);
static List *tokenize_inc_file(List *tokens,
- const char *outer_filename,
- const char *inc_filename,
- int elevel,
- char **err_msg);
+ const char *outer_filename,
+ const char *inc_filename,
+ int elevel,
+ char **err_msg);
static bool
- check_hostname(POOL_CONNECTION * frontend, const char *hostname);
+ check_hostname(POOL_CONNECTION *frontend, const char *hostname);
static bool
check_ip(SockAddr *raddr, struct sockaddr *addr, struct sockaddr *mask);
static bool
check_same_host_or_net(SockAddr *raddr, IPCompareMethod method);
static void check_network_callback(struct sockaddr *addr, struct sockaddr *netmask,
- void *cb_data);
+ void *cb_data);
static HbaLine *parse_hba_line(TokenizedLine *tok_line, int elevel);
static bool pg_isblank(const char c);
static bool next_token(char **lineptr, char *buf, int bufsz,
- bool *initial_quote, bool *terminating_comma,
- int elevel, char **err_msg);
+ bool *initial_quote, bool *terminating_comma,
+ int elevel, char **err_msg);
static List *next_field_expand(const char *filename, char **lineptr,
- int elevel, char **err_msg);
+ int elevel, char **err_msg);
#ifdef NOT_USED
static POOL_STATUS CheckUserExist(char *username);
#endif
@@ -154,7 +154,7 @@ static POOL_STATUS CheckUserExist(char *username);
#define PGPOOL_PAM_SERVICE "pgpool" /* Service name passed to PAM */
-static POOL_STATUS CheckPAMAuth(POOL_CONNECTION * frontend, char *user, char *password);
+static POOL_STATUS CheckPAMAuth(POOL_CONNECTION *frontend, char *user, char *password);
static int pam_passwd_conv_proc(int num_msg, const struct pam_message **msg, struct pam_response **resp, void *appdata_ptr);
static struct pam_conv pam_passwd_conv = {
@@ -163,7 +163,7 @@ static struct pam_conv pam_passwd_conv = {
};
static char *pam_passwd = NULL; /* Workaround for Solaris 2.6 brokenness */
-static POOL_CONNECTION * pam_frontend_kludge; /* Workaround for passing
+static POOL_CONNECTION *pam_frontend_kludge; /* Workaround for passing
* POOL_CONNECTION *frontend
* into pam_passwd_conv_proc */
#endif /* USE_PAM */
@@ -189,7 +189,7 @@ static POOL_STATUS CheckLDAPAuth(POOL_CONNECTION *frontend);
* so declare a prototype here in "#if defined(USE_PAM or USE_LDAP)" to avoid
* compilation warning.
*/
-static char *recv_password_packet(POOL_CONNECTION * frontend);
+static char *recv_password_packet(POOL_CONNECTION *frontend);
#endif /* USE_PAM or USE_LDAP */
/*
@@ -761,6 +761,7 @@ parse_hba_line(TokenizedLine *tok_line, int elevel)
}
#ifdef USE_LDAP
+
/*
* Check if the selected authentication method has any mandatory arguments
* that are not set.
@@ -1166,7 +1167,7 @@ parse_hba_auth_opt(char *name, char *val, HbaLine *hbaline,
* do frontend <-> pgpool authentication based on pool_hba.conf
*/
void
-ClientAuthentication(POOL_CONNECTION * frontend)
+ClientAuthentication(POOL_CONNECTION *frontend)
{
POOL_STATUS status = POOL_END;
MemoryContext oldContext;
@@ -1182,10 +1183,9 @@ ClientAuthentication(POOL_CONNECTION * frontend)
/*
* Get the password for the user if it is stored in the pool_password
- * file
- * authentication process is called in the temporary memory
- * context, but password mappings has to live till the life time
- * of frontend connection, so call the pool_get_user_credentials in
+ * file authentication process is called in the temporary memory
+ * context, but password mappings has to live till the life time of
+ * frontend connection, so call the pool_get_user_credentials in
* ProcessLoopContext memory context
*/
oldContext = MemoryContextSwitchTo(ProcessLoopContext);
@@ -1215,7 +1215,7 @@ ClientAuthentication(POOL_CONNECTION * frontend)
#ifdef USE_SSL
ereport(FATAL,
- (return_code(2),
+ (return_code(2),
errmsg("client authentication failed"),
errdetail("no pool_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
hostinfo, frontend->username, frontend->database,
@@ -1323,7 +1323,7 @@ ClientAuthentication(POOL_CONNECTION * frontend)
static void
-sendAuthRequest(POOL_CONNECTION * frontend, AuthRequest areq)
+sendAuthRequest(POOL_CONNECTION *frontend, AuthRequest areq)
{
int wsize; /* number of bytes to write */
int areq_nbo; /* areq in network byte order */
@@ -1371,7 +1371,7 @@ sendAuthRequest(POOL_CONNECTION * frontend, AuthRequest areq)
* Returns NULL if couldn't get password, else palloc'd string.
*/
static char *
-recv_password_packet(POOL_CONNECTION * frontend)
+recv_password_packet(POOL_CONNECTION *frontend)
{
int rsize;
char *passwd;
@@ -1421,7 +1421,7 @@ recv_password_packet(POOL_CONNECTION * frontend)
* Tell the user the authentication failed.
*/
static void
-auth_failed(POOL_CONNECTION * frontend)
+auth_failed(POOL_CONNECTION *frontend)
{
int messagelen;
char *errmessage;
@@ -1521,7 +1521,7 @@ auth_failed(POOL_CONNECTION * frontend)
* we return true and method = uaReject.
*/
static bool
-hba_getauthmethod(POOL_CONNECTION * frontend)
+hba_getauthmethod(POOL_CONNECTION *frontend)
{
if (check_hba(frontend))
return true;
@@ -1535,7 +1535,7 @@ hba_getauthmethod(POOL_CONNECTION * frontend)
* request.
*/
static bool
-check_hba(POOL_CONNECTION * frontend)
+check_hba(POOL_CONNECTION *frontend)
{
ListCell *line;
HbaLine *hba;
@@ -1672,7 +1672,7 @@ hostname_match(const char *pattern, const char *actual_hostname)
* Check to see if a connecting IP matches a given host name.
*/
static bool
-check_hostname(POOL_CONNECTION * frontend, const char *hostname)
+check_hostname(POOL_CONNECTION *frontend, const char *hostname)
{
struct addrinfo *gai_result,
*gai;
@@ -2363,7 +2363,8 @@ pam_passwd_conv_proc(int num_msg, const struct pam_message **msg,
/*
* Check authentication against PAM.
*/
-static POOL_STATUS CheckPAMAuth(POOL_CONNECTION * frontend, char *user, char *password)
+static POOL_STATUS
+CheckPAMAuth(POOL_CONNECTION *frontend, char *user, char *password)
{
int retval;
pam_handle_t *pamh = NULL;
@@ -2379,8 +2380,8 @@ static POOL_STATUS CheckPAMAuth(POOL_CONNECTION * frontend, char *user, char *pa
* later used inside the PAM conversation to pass the password to the
* authentication module.
*/
- pam_passwd_conv.appdata_ptr = (char *) password; /* from password above,
- * not allocated */
+ pam_passwd_conv.appdata_ptr = (char *) password; /* from password above,
+ * not allocated */
/* Optionally, one can set the service name in pool_hba.conf */
if (frontend->pool_hba->pamservice && frontend->pool_hba->pamservice[0] != '\0')
@@ -2673,7 +2674,8 @@ FormatSearchFilter(const char *pattern, const char *user_name)
/*
* Check authentication against LDAP.
*/
-static POOL_STATUS CheckLDAPAuth(POOL_CONNECTION * frontend)
+static POOL_STATUS
+CheckLDAPAuth(POOL_CONNECTION *frontend)
{
char *passwd;
LDAP *ldap;
@@ -2722,7 +2724,7 @@ static POOL_STATUS CheckLDAPAuth(POOL_CONNECTION * frontend)
passwd = recv_password_packet(frontend);
if (passwd == NULL)
- return -2; /* client wouldn't send password */
+ return -2; /* client wouldn't send password */
if (InitializeLDAPConnection(frontend, &ldap) == -1)
{
@@ -2925,7 +2927,8 @@ static POOL_STATUS CheckLDAPAuth(POOL_CONNECTION * frontend)
#endif /* USE_LDAP */
#ifdef NOT_USED
-static POOL_STATUS CheckUserExist(char *username)
+static POOL_STATUS
+CheckUserExist(char *username)
{
char *passwd;
diff --git a/src/auth/pool_passwd.c b/src/auth/pool_passwd.c
index 41bd38f07..91b0efd00 100644
--- a/src/auth/pool_passwd.c
+++ b/src/auth/pool_passwd.c
@@ -58,7 +58,8 @@ pool_init_pool_passwd(char *pool_passwd_filename, POOL_PASSWD_MODE mode)
if (pool_passwd_filename == NULL)
{
- saved_passwd_filename[0] = '\0'; /* indicate pool_passwd is disabled */
+ saved_passwd_filename[0] = '\0'; /* indicate pool_passwd is
+ * disabled */
return;
}
@@ -102,10 +103,10 @@ pool_create_passwdent(char *username, char *passwd)
{
#define LINE_LEN \
MAX_USER_NAME_LEN + 1 + MAX_POOL_PASSWD_LEN + 2
- char linebuf[LINE_LEN];
- char *writebuf = NULL;
- int len;
- bool updated = false;
+ char linebuf[LINE_LEN];
+ char *writebuf = NULL;
+ int len;
+ bool updated = false;
if (!passwd_fd)
ereport(ERROR,
@@ -124,8 +125,8 @@ pool_create_passwdent(char *username, char *passwd)
while (!feof(passwd_fd) && !ferror(passwd_fd))
{
- char *t = linebuf;
- int len;
+ char *t = linebuf;
+ int len;
if (fgets(linebuf, sizeof(linebuf), passwd_fd) == NULL)
break;
@@ -162,7 +163,7 @@ pool_create_passwdent(char *username, char *passwd)
strcat(writebuf, linebuf);
}
- if(!writebuf)
+ if (!writebuf)
return 0;
fclose(passwd_fd);
@@ -203,7 +204,7 @@ pool_get_passwd(char *username)
{
if (strlen(saved_passwd_filename))
ereport(ERROR,
- (errmsg("unable to get password, password file descriptor is NULL")));
+ (errmsg("unable to get password, password file descriptor is NULL")));
else
return NULL;
}
@@ -355,7 +356,7 @@ pool_get_user_credentials(char *username)
{
if (strlen(saved_passwd_filename))
ereport(WARNING,
- (errmsg("unable to get password, password file descriptor is NULL")));
+ (errmsg("unable to get password, password file descriptor is NULL")));
return NULL;
}
@@ -416,7 +417,7 @@ pool_get_user_credentials(char *username)
}
void
-delete_passwordMapping(PasswordMapping * pwdMapping)
+delete_passwordMapping(PasswordMapping *pwdMapping)
{
if (!pwdMapping)
return;
@@ -476,8 +477,8 @@ get_pgpool_config_user_password(char *username, char *password_in_config)
PasswordMapping *password_mapping = NULL;
/*
- * if the password specified in config is empty string or NULL look for the
- * password in pool_passwd file
+ * if the password specified in config is empty string or NULL look for
+ * the password in pool_passwd file
*/
if (password_in_config == NULL || strlen(password_in_config) == 0)
{
@@ -525,7 +526,7 @@ get_pgpool_config_user_password(char *username, char *password_in_config)
/* convert the TEXT prefixed password to plain text password */
passwordType = PASSWORD_TYPE_PLAINTEXT;
if (password)
- password = (char*)(password + strlen(PASSWORD_TEXT_PREFIX));
+ password = (char *) (password + strlen(PASSWORD_TEXT_PREFIX));
}
if (password && strlen(password) && (passwordType != PASSWORD_TYPE_PLAINTEXT &&
@@ -634,11 +635,11 @@ read_pool_key(char *key_file_path)
return NULL;
/*
- * To prevent file-swapping due to file race conditions,
- * we open the key file before checking it by stat().
+ * To prevent file-swapping due to file race conditions, we open the key
+ * file before checking it by stat().
*/
/* If password file cannot be opened, ignore it. */
- if ( (fp = fopen(key_file_path, "r")) == NULL)
+ if ((fp = fopen(key_file_path, "r")) == NULL)
return NULL;
if (fstat(fileno(fp), &stat_buf) != 0)
@@ -707,12 +708,13 @@ check_password_type_is_not_md5(char *username, char *password_in_config)
PasswordType passwordType = PASSWORD_TYPE_UNKNOWN;
/*
- * if the password specified in config is empty string or NULL look for the
- * password in pool_passwd file
+ * if the password specified in config is empty string or NULL look for
+ * the password in pool_passwd file
*/
if (password_in_config == NULL || strlen(password_in_config) == 0)
{
PasswordMapping *password_mapping = NULL;
+
password_mapping = pool_get_user_credentials(username);
if (password_mapping == NULL)
{
@@ -726,7 +728,7 @@ check_password_type_is_not_md5(char *username, char *password_in_config)
passwordType = get_password_type(password_in_config);
}
- /* if the password type is MD5 hash return -1*/
+ /* if the password type is MD5 hash return -1 */
if (passwordType == PASSWORD_TYPE_MD5)
{
return -1;
diff --git a/src/config/pool_config_variables.c b/src/config/pool_config_variables.c
index 83ce0c2bf..31f42caa9 100644
--- a/src/config/pool_config_variables.c
+++ b/src/config/pool_config_variables.c
@@ -85,22 +85,22 @@ static bool config_post_processor(ConfigContext context, int elevel);
static void sort_config_vars(void);
static bool setConfigOptionArrayVarWithConfigDefault(struct config_generic *record, const char *name,
- const char *value, ConfigContext context, int elevel);
+ const char *value, ConfigContext context, int elevel);
static bool setConfigOption(const char *name, const char *value,
- ConfigContext context, GucSource source, int elevel);
+ ConfigContext context, GucSource source, int elevel);
static bool setConfigOptionVar(struct config_generic *record, const char *name, int index_val,
- const char *value, ConfigContext context, GucSource source, int elevel);
+ const char *value, ConfigContext context, GucSource source, int elevel);
static bool get_index_in_var_name(struct config_generic *record,
- const char *name, int *index, int elevel);
+ const char *name, int *index, int elevel);
static bool MakeUserRedirectListRegex(char *newval, int elevel);
static bool MakeDBRedirectListRegex(char *newval, int elevel);
static bool MakeAppRedirectListRegex(char *newval, int elevel);
static bool MakeDMLAdaptiveObjectRelationList(char *newval, int elevel);
-static char* getParsedToken(char *token, DBObjectTypes *object_type);
+static char *getParsedToken(char *token, DBObjectTypes *object_type);
static bool check_redirect_node_spec(char *node_spec);
static char **get_list_from_string(const char *str, const char *delimi, int *n);
@@ -180,7 +180,7 @@ static bool convert_to_base_unit(double value, const char *unit,
#ifndef POOL_PRIVATE
static void convert_int_from_base_unit(int64 base_value, int base_unit,
- int64 *value, const char **unit);
+ int64 *value, const char **unit);
/* These functions are used to provide Hints for enum type config parameters and
@@ -194,12 +194,12 @@ static const char *config_enum_lookup_by_value(struct config_enum *record, int v
static char *ShowOption(struct config_generic *record, int index, int elevel);
static char *config_enum_get_options(struct config_enum *record, const char *prefix,
- const char *suffix, const char *separator);
-static void send_row_description_for_detail_view(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-static int send_grouped_type_variable_to_frontend(struct config_grouped_array_var *grouped_record,
- POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-static int send_array_type_variable_to_frontend(struct config_generic *record,
- POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+ const char *suffix, const char *separator);
+static void send_row_description_for_detail_view(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+static int send_grouped_type_variable_to_frontend(struct config_grouped_array_var *grouped_record,
+ POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+static int send_array_type_variable_to_frontend(struct config_generic *record,
+ POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
#endif
@@ -300,17 +300,20 @@ static const struct config_enum_entry relcache_query_target_options[] = {
};
static const struct config_enum_entry check_temp_table_options[] = {
- {"catalog", CHECK_TEMP_CATALOG, false}, /* search system catalogs */
- {"trace", CHECK_TEMP_TRACE, false}, /* tracing temp tables */
- {"none", CHECK_TEMP_NONE, false}, /* do not check temp tables */
- {"on", CHECK_TEMP_ON, false}, /* same as CHECK_TEMP_CATALOG. Just for backward compatibility. */
- {"off", CHECK_TEMP_OFF, false}, /* same as CHECK_TEMP_NONE. Just for backward compatibility. */
+ {"catalog", CHECK_TEMP_CATALOG, false}, /* search system catalogs */
+ {"trace", CHECK_TEMP_TRACE, false}, /* tracing temp tables */
+ {"none", CHECK_TEMP_NONE, false}, /* do not check temp tables */
+ {"on", CHECK_TEMP_ON, false}, /* same as CHECK_TEMP_CATALOG. Just for
+ * backward compatibility. */
+ {"off", CHECK_TEMP_OFF, false}, /* same as CHECK_TEMP_NONE. Just for
+ * backward compatibility. */
{NULL, 0, false}
};
static const struct config_enum_entry log_backend_messages_options[] = {
- {"none", BGMSG_NONE, false}, /* turn off logging */
- {"terse", BGMSG_TERSE, false}, /* terse logging (repeated messages are collapsed into count */
+ {"none", BGMSG_NONE, false}, /* turn off logging */
+ {"terse", BGMSG_TERSE, false}, /* terse logging (repeated messages are
+ * collapsed into count */
{"verbose", BGMSG_VERBOSE, false}, /* always log each message */
{NULL, 0, false}
};
@@ -810,8 +813,8 @@ static struct config_bool ConfigureNamesBool[] =
{
{"health_check_test", CFGCXT_INIT, HEALTH_CHECK_CONFIG,
- "If on, enable health check testing.",
- CONFIG_VAR_TYPE_BOOL, false, 0
+ "If on, enable health check testing.",
+ CONFIG_VAR_TYPE_BOOL, false, 0
},
&g_pool_config.health_check_test,
false,
@@ -1825,7 +1828,7 @@ static struct config_string_array ConfigureNamesStringArray[] =
CONFIG_VAR_TYPE_STRING_ARRAY, true, VAR_PART_OF_GROUP, MAX_NUM_BACKENDS
},
NULL,
- "", /* for ALWAYS_PRIMARY */
+ "", /* for ALWAYS_PRIMARY */
EMPTY_CONFIG_STRING,
BackendFlagsAssignFunc, NULL, BackendFlagsShowFunc, BackendSlotEmptyCheckFunc
},
@@ -2285,7 +2288,7 @@ static struct config_int ConfigureNamesInt[] =
CONFIG_VAR_TYPE_INT, false, GUC_UNIT_MIN
},
&g_pool_config.log_rotation_age,
- 1440,/*1 day*/
+ 1440, /* 1 day */
0, INT_MAX,
NULL, NULL, NULL
},
@@ -2296,7 +2299,7 @@ static struct config_int ConfigureNamesInt[] =
},
&g_pool_config.log_rotation_size,
10 * 1024,
- 0, INT_MAX/1024,
+ 0, INT_MAX / 1024,
NULL, NULL, NULL
},
{
@@ -3106,7 +3109,7 @@ get_list_from_string(const char *str, const char *delimi, int *n)
for (token = strtok(temp_string, delimi); token != NULL; token = strtok(NULL, delimi))
{
- int i;
+ int i;
/* skip leading whitespace */
while (isspace(*token))
@@ -3114,7 +3117,8 @@ get_list_from_string(const char *str, const char *delimi, int *n)
/* skip trailing whitespace */
i = strlen(token) - 1;
- while (i >= 0 && isspace(token[i])) {
+ while (i >= 0 && isspace(token[i]))
+ {
token[i] = '\0';
i--;
}
@@ -3186,7 +3190,8 @@ get_list_from_string_regex_delim(const char *input, const char *delimi, int *n)
}
else if (*str_temp == *delimi)
{
- char *output = (char *) palloc(j + 1);
+ char *output = (char *) palloc(j + 1);
+
StrNCpy(output, buf, j + 1);
/* replace escape character of "'" */
@@ -3642,7 +3647,7 @@ setConfigOptionVar(struct config_generic *record, const char *name, int index_va
if (value != NULL)
{
- int64 newval64;
+ int64 newval64;
const char *hintmsg;
if (!parse_int(value, &newval64,
@@ -3654,7 +3659,7 @@ setConfigOptionVar(struct config_generic *record, const char *name, int index_va
hintmsg ? errhint("%s", _(hintmsg)) : 0));
return false;
}
- newval = (int)newval64;
+ newval = (int) newval64;
}
else if (source == PGC_S_DEFAULT)
{
@@ -3752,7 +3757,7 @@ setConfigOptionVar(struct config_generic *record, const char *name, int index_va
if (value != NULL)
{
- int64 newval64;
+ int64 newval64;
const char *hintmsg;
if (!parse_int(value, &newval64,
@@ -3764,7 +3769,7 @@ setConfigOptionVar(struct config_generic *record, const char *name, int index_va
hintmsg ? errhint("%s", _(hintmsg)) : 0));
return false;
}
- newval = (int)newval64;
+ newval = (int) newval64;
}
else if (source == PGC_S_DEFAULT)
{
@@ -4551,7 +4556,7 @@ BackendFlagsShowFunc(int index)
if (*buffer == '\0')
snprintf(buffer, sizeof(buffer), "ALWAYS_PRIMARY");
else
- snprintf(buffer+strlen(buffer), sizeof(buffer), "|ALWAYS_PRIMARY");
+ snprintf(buffer + strlen(buffer), sizeof(buffer), "|ALWAYS_PRIMARY");
}
return buffer;
}
@@ -4577,7 +4582,7 @@ WdSlotEmptyCheckFunc(int index)
static bool
WdIFSlotEmptyCheckFunc(int index)
{
- return (g_pool_config.hb_ifs[index].dest_port == 0);
+ return (g_pool_config.hb_ifs[index].dest_port == 0);
}
static const char *
@@ -4809,6 +4814,7 @@ FailOverOnBackendErrorAssignMessage(ConfigContext scontext, bool newval, int ele
g_pool_config.failover_on_backend_error = newval;
return true;
}
+
/*
* Throws warning for if someone uses the removed delegate_IP
* configuration parameter and set the value to delegate_ip
@@ -4823,6 +4829,7 @@ DelegateIPAssignMessage(ConfigContext scontext, char *newval, int elevel)
g_pool_config.delegate_ip = newval;
return true;
}
+
/*
* Check DB node spec. node spec should be either "primary", "standby" or
* numeric DB node id.
@@ -4864,9 +4871,9 @@ check_redirect_node_spec(char *node_spec)
static bool
config_post_processor(ConfigContext context, int elevel)
{
- double total_weight = 0.0;
+ double total_weight = 0.0;
sig_atomic_t local_num_backends = 0;
- int i;
+ int i;
/* read from pgpool_node_id */
SetPgpoolNodeId(elevel);
@@ -4987,9 +4994,9 @@ config_post_processor(ConfigContext context, int elevel)
/*
* Quarantine state in native replication mode is dangerous and it can
- * potentially cause data inconsistency.
- * So as per the discussions, we agreed on disallowing setting
- * failover_when_quorum_exists in native replication mode
+ * potentially cause data inconsistency. So as per the discussions, we
+ * agreed on disallowing setting failover_when_quorum_exists in native
+ * replication mode
*/
if (pool_config->failover_when_quorum_exists && pool_config->replication_mode)
@@ -5001,8 +5008,8 @@ config_post_processor(ConfigContext context, int elevel)
}
/*
- * Verify the minimum and maximum number of spare children configuration when
- * dynamic process management is enabled
+ * Verify the minimum and maximum number of spare children configuration
+ * when dynamic process management is enabled
*/
if (g_pool_config.process_management == PM_DYNAMIC)
@@ -5011,14 +5018,14 @@ config_post_processor(ConfigContext context, int elevel)
{
ereport(elevel,
(errmsg("invalid configuration, max_spare_children:%d must be greater than min_spare_children:%d",
- pool_config->max_spare_children,pool_config->min_spare_children)));
+ pool_config->max_spare_children, pool_config->min_spare_children)));
return false;
}
if (pool_config->num_init_children < pool_config->max_spare_children)
{
ereport(elevel,
(errmsg("invalid configuration, max_spare_children:%d can't be greater than num_init_children:%d",
- pool_config->max_spare_children,pool_config->num_init_children)));
+ pool_config->max_spare_children, pool_config->num_init_children)));
return false;
}
}
@@ -5028,9 +5035,9 @@ config_post_processor(ConfigContext context, int elevel)
static bool
MakeDMLAdaptiveObjectRelationList(char *newval, int elevel)
{
- int i;
- int elements_count = 0;
- char **rawList = get_list_from_string(newval, ",", &elements_count);
+ int i;
+ int elements_count = 0;
+ char **rawList = get_list_from_string(newval, ",", &elements_count);
if (rawList == NULL || elements_count == 0)
{
@@ -5041,21 +5048,21 @@ MakeDMLAdaptiveObjectRelationList(char *newval, int elevel)
for (i = 0; i < elements_count; i++)
{
- char *kvstr = rawList[i];
- char *left_token = strtok(kvstr, ":");
- char *right_token = strtok(NULL, ":");
+ char *kvstr = rawList[i];
+ char *left_token = strtok(kvstr, ":");
+ char *right_token = strtok(NULL, ":");
DBObjectTypes object_type;
ereport(DEBUG5,
(errmsg("dml_adaptive_init"),
- errdetail("%s -- left_token[%s] right_token[%s]", kvstr, left_token, right_token)));
+ errdetail("%s -- left_token[%s] right_token[%s]", kvstr, left_token, right_token)));
pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.name =
- getParsedToken(left_token, &object_type);
+ getParsedToken(left_token, &object_type);
pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.object_type = object_type;
pool_config->parsed_dml_adaptive_object_relationship_list[i].right_token.name =
- getParsedToken(right_token,&object_type);
+ getParsedToken(right_token, &object_type);
pool_config->parsed_dml_adaptive_object_relationship_list[i].right_token.object_type = object_type;
pfree(kvstr);
}
@@ -5075,10 +5082,11 @@ MakeDMLAdaptiveObjectRelationList(char *newval, int elevel)
* We also remove the trailing spaces from the function type token
* and return the palloc'd copy of token in new_token
*/
-static char*
+static char *
getParsedToken(char *token, DBObjectTypes *object_type)
{
- int len;
+ int len;
+
*object_type = OBJECT_TYPE_UNKNOWN;
if (!token)
@@ -5087,18 +5095,19 @@ getParsedToken(char *token, DBObjectTypes *object_type)
len = strlen(token);
if (len > strlen("*()"))
{
- int namelen = len - 2;
+ int namelen = len - 2;
+
/* check if token ends with () */
- if (strcmp(token + namelen,"()") == 0)
+ if (strcmp(token + namelen, "()") == 0)
{
/*
- * Remove the Parentheses from end of
- * token name
+ * Remove the Parentheses from end of token name
*/
- char *new_token;
- int new_len = strlen(token) - 2;
+ char *new_token;
+ int new_len = strlen(token) - 2;
+
new_token = palloc(new_len + 1);
- strncpy(new_token,token,new_len);
+ strncpy(new_token, token, new_len);
new_token[new_len] = '\0';
*object_type = OBJECT_TYPE_FUNCTION;
return new_token;
@@ -5241,16 +5250,16 @@ static bool
SetPgpoolNodeId(int elevel)
{
char pgpool_node_id_file[POOLMAXPATHLEN + 1];
- FILE *fd;
- int length;
- int i;
+ FILE *fd;
+ int length;
+ int i;
if (g_pool_config.use_watchdog)
{
snprintf(pgpool_node_id_file, sizeof(pgpool_node_id_file), "%s/%s", config_file_dir, NODE_ID_FILE_NAME);
#define MAXLINE 10
- char readbuf[MAXLINE];
+ char readbuf[MAXLINE];
fd = fopen(pgpool_node_id_file, "r");
if (!fd)
@@ -5326,12 +5335,13 @@ SetPgpoolNodeId(int elevel)
static bool
SetHBDestIfFunc(int elevel)
{
- int idx = 0;
- char **addrs;
- char **if_names;
- int i, j,
- n_addr,
- n_if_name;
+ int idx = 0;
+ char **addrs;
+ char **if_names;
+ int i,
+ j,
+ n_addr,
+ n_if_name;
g_pool_config.num_hb_dest_if = 0;
@@ -5342,10 +5352,10 @@ SetHBDestIfFunc(int elevel)
/*
* g_pool_config.hb_ifs is the information for sending/receiving heartbeat
- * for all nodes specified in pgpool.conf.
- * If it is local pgpool node information, set dest_port to g_pool_config.wd_heartbeat_port
- * and ignore addr and if_name.
- * g_pool_config.hb_dest_if is the heartbeat destination information.
+ * for all nodes specified in pgpool.conf. If it is local pgpool node
+ * information, set dest_port to g_pool_config.wd_heartbeat_port and
+ * ignore addr and if_name. g_pool_config.hb_dest_if is the heartbeat
+ * destination information.
*/
for (i = 0; i < WD_MAX_IF_NUM; i++)
{
@@ -5358,7 +5368,7 @@ SetHBDestIfFunc(int elevel)
continue;
}
- WdHbIf *hbNodeInfo = &g_pool_config.hb_ifs[i];
+ WdHbIf *hbNodeInfo = &g_pool_config.hb_ifs[i];
addrs = get_list_from_string(hbNodeInfo->addr, ";", &n_addr);
if_names = get_list_from_string(hbNodeInfo->if_name, ";", &n_if_name);
@@ -5383,7 +5393,7 @@ SetHBDestIfFunc(int elevel)
{
strlcpy(g_pool_config.hb_dest_if[idx].addr, addrs[j], WD_MAX_HOST_NAMELEN - 1);
g_pool_config.hb_dest_if[idx].dest_port = hbNodeInfo->dest_port;
- if (n_if_name > j )
+ if (n_if_name > j)
{
strlcpy(g_pool_config.hb_dest_if[idx].if_name, if_names[j], WD_MAX_IF_NAME_LEN - 1);
pfree(if_names[j]);
@@ -5537,6 +5547,7 @@ parse_int(const char *value, int64 *result, int flags, const char **hintmsg, int
*result = (int64) val;
return true;
}
+
/*
* Convert a value from one of the human-friendly units ("kB", "min" etc.)
* to the given base unit. 'value' and 'unit' are the input value and unit
@@ -5935,7 +5946,7 @@ value_slot_for_config_record_is_empty(struct config_generic *record, int index)
}
bool
-set_config_option_for_session(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *name, const char *value)
+set_config_option_for_session(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *name, const char *value)
{
bool ret;
MemoryContext oldCxt = MemoryContextSwitchTo(TopMemoryContext);
@@ -5950,7 +5961,7 @@ set_config_option_for_session(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL *
}
bool
-reset_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+reset_all_variables(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
int i;
int elevel = (frontend == NULL) ? FATAL : FRONTEND_ONLY_ERROR;
@@ -6019,7 +6030,7 @@ reset_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
* Handle "pgpool show all" command.
*/
bool
-report_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+report_all_variables(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
int i;
int num_rows = 0;
@@ -6077,7 +6088,7 @@ report_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
* Handle "pgpool show" command.
*/
bool
-report_config_variable(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *var_name)
+report_config_variable(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *var_name)
{
int index = 0;
char *value;
@@ -6151,7 +6162,7 @@ report_config_variable(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
}
static int
-send_array_type_variable_to_frontend(struct config_generic *record, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+send_array_type_variable_to_frontend(struct config_generic *record, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
if (record->dynamic_array_var)
{
@@ -6192,7 +6203,7 @@ send_array_type_variable_to_frontend(struct config_generic *record, POOL_CONNECT
}
static int
-send_grouped_type_variable_to_frontend(struct config_grouped_array_var *grouped_record, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+send_grouped_type_variable_to_frontend(struct config_grouped_array_var *grouped_record, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
int k,
index;
@@ -6243,7 +6254,7 @@ send_grouped_type_variable_to_frontend(struct config_grouped_array_var *grouped_
}
static void
-send_row_description_for_detail_view(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+send_row_description_for_detail_view(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
static char *field_names[] = {"item", "value", "description"};
diff --git a/src/context/pool_process_context.c b/src/context/pool_process_context.c
index 00a04ff8d..da2913b6d 100644
--- a/src/context/pool_process_context.c
+++ b/src/context/pool_process_context.c
@@ -30,7 +30,7 @@
#include "pool_config.h" /* remove me afterwards */
static POOL_PROCESS_CONTEXT process_context_d;
-static POOL_PROCESS_CONTEXT * process_context;
+static POOL_PROCESS_CONTEXT *process_context;
/*
* Initialize per process context
@@ -108,7 +108,7 @@ pool_increment_local_session_id(void)
size_t
pool_coninfo_size(void)
{
- size_t size;
+ size_t size;
size = pool_config->num_init_children *
pool_config->max_pool *
@@ -264,13 +264,13 @@ pool_coninfo_backend_pid(int backend_pid, int *backend_node_id)
* This flag is used to handle pg_terminate_backend()
*/
void
-pool_set_connection_will_be_terminated(ConnectionInfo * connInfo)
+pool_set_connection_will_be_terminated(ConnectionInfo *connInfo)
{
connInfo->swallow_termination = 1;
}
void
-pool_unset_connection_will_be_terminated(ConnectionInfo * connInfo)
+pool_unset_connection_will_be_terminated(ConnectionInfo *connInfo)
{
connInfo->swallow_termination = 0;
}
diff --git a/src/context/pool_query_context.c b/src/context/pool_query_context.c
index d398bee6d..a4800d94b 100644
--- a/src/context/pool_query_context.c
+++ b/src/context/pool_query_context.c
@@ -48,7 +48,7 @@ typedef enum
POOL_STANDBY,
POOL_EITHER,
POOL_BOTH
-} POOL_DEST;
+} POOL_DEST;
#define CHECK_QUERY_CONTEXT_IS_VALID \
do { \
@@ -58,9 +58,9 @@ typedef enum
} while (0)
static POOL_DEST send_to_where(Node *node);
-static void where_to_send_deallocate(POOL_QUERY_CONTEXT * query_context, Node *node);
-static void where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node *node);
-static void where_to_send_native_replication(POOL_QUERY_CONTEXT * query_context, char *query, Node *node);
+static void where_to_send_deallocate(POOL_QUERY_CONTEXT *query_context, Node *node);
+static void where_to_send_main_replica(POOL_QUERY_CONTEXT *query_context, char *query, Node *node);
+static void where_to_send_native_replication(POOL_QUERY_CONTEXT *query_context, char *query, Node *node);
static char *remove_read_write(int len, const char *contents, int *rewritten_len);
static void set_virtual_main_node(POOL_QUERY_CONTEXT *query_context);
@@ -70,8 +70,8 @@ static bool is_in_list(char *name, List *list);
static bool is_select_object_in_temp_write_list(Node *node, void *context);
static bool add_object_into_temp_write_list(Node *node, void *context);
static void dml_adaptive(Node *node, char *query);
-static char* get_associated_object_from_dml_adaptive_relations
- (char *left_token, DBObjectTypes object_type);
+static char *get_associated_object_from_dml_adaptive_relations
+ (char *left_token, DBObjectTypes object_type);
/*
* Create and initialize per query session context
@@ -98,7 +98,7 @@ pool_init_query_context(void)
* Destroy query context
*/
void
-pool_query_context_destroy(POOL_QUERY_CONTEXT * query_context)
+pool_query_context_destroy(POOL_QUERY_CONTEXT *query_context)
{
POOL_SESSION_CONTEXT *session_context;
@@ -130,7 +130,7 @@ pool_query_context_destroy(POOL_QUERY_CONTEXT * query_context)
* Perform shallow copy of given query context. Used in parse_before_bind.
*/
POOL_QUERY_CONTEXT *
-pool_query_context_shallow_copy(POOL_QUERY_CONTEXT * query_context)
+pool_query_context_shallow_copy(POOL_QUERY_CONTEXT *query_context)
{
POOL_QUERY_CONTEXT *qc;
MemoryContext memory_context;
@@ -146,7 +146,7 @@ pool_query_context_shallow_copy(POOL_QUERY_CONTEXT * query_context)
* Start query
*/
void
-pool_start_query(POOL_QUERY_CONTEXT * query_context, char *query, int len, Node *node)
+pool_start_query(POOL_QUERY_CONTEXT *query_context, char *query, int len, Node *node)
{
POOL_SESSION_CONTEXT *session_context;
@@ -180,7 +180,7 @@ pool_start_query(POOL_QUERY_CONTEXT * query_context, char *query, int len, Node
* Specify DB node to send query
*/
void
-pool_set_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id)
+pool_set_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id)
{
CHECK_QUERY_CONTEXT_IS_VALID;
@@ -198,7 +198,7 @@ pool_set_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id)
* Unspecified DB node to send query
*/
void
-pool_unset_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id)
+pool_unset_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id)
{
CHECK_QUERY_CONTEXT_IS_VALID;
@@ -216,7 +216,7 @@ pool_unset_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id)
* Clear DB node map
*/
void
-pool_clear_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
+pool_clear_node_to_be_sent(POOL_QUERY_CONTEXT *query_context)
{
CHECK_QUERY_CONTEXT_IS_VALID;
@@ -228,7 +228,7 @@ pool_clear_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
* Set all DB node map entry
*/
void
-pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
+pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT *query_context)
{
int i;
POOL_SESSION_CONTEXT *sc;
@@ -245,8 +245,9 @@ pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
if (SL_MODE)
{
/*
- * If load balance mode is disabled, only send to the primary node.
- * If primary node does not exist, send to the main node.
+ * If load balance mode is disabled, only send to the primary
+ * node. If primary node does not exist, send to the main
+ * node.
*/
if (!pool_config->load_balance_mode)
{
@@ -259,6 +260,7 @@ pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
continue;
}
else
+
/*
* If the node is not primary node nor load balance node,
* there's no point to send query except statement level
@@ -266,7 +268,7 @@ pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
*/
if (!pool_config->statement_level_load_balance &&
i != PRIMARY_NODE_ID && i != sc->load_balance_node_id)
- continue;
+ continue;
}
query_context->where_to_send[i] = true;
}
@@ -278,7 +280,7 @@ pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
* Return true if multiple nodes are targets
*/
bool
-pool_multi_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
+pool_multi_node_to_be_sent(POOL_QUERY_CONTEXT *query_context)
{
int i;
int cnt = 0;
@@ -305,7 +307,7 @@ pool_multi_node_to_be_sent(POOL_QUERY_CONTEXT * query_context)
* Return if the DB node is needed to send query
*/
bool
-pool_is_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id)
+pool_is_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id)
{
CHECK_QUERY_CONTEXT_IS_VALID;
@@ -346,12 +348,12 @@ pool_is_node_to_be_sent_in_current_query(int node_id)
int
pool_virtual_main_db_node_id(void)
{
- volatile POOL_REQUEST_INFO *my_req;
+ volatile POOL_REQUEST_INFO *my_req;
POOL_SESSION_CONTEXT *sc;
/*
- * Check whether failover is in progress and we are child process.
- * If so, we will wait for failover to finish.
+ * Check whether failover is in progress and we are child process. If so,
+ * we will wait for failover to finish.
*/
my_req = Req_info;
if (processType == PT_CHILD && my_req->switching)
@@ -360,17 +362,19 @@ pool_virtual_main_db_node_id(void)
POOL_SETMASK(&BlockSig);
ereport(WARNING,
(errmsg("failover/failback is in progress"),
- errdetail("executing failover or failback on backend"),
+ errdetail("executing failover or failback on backend"),
errhint("In a moment you should be able to reconnect to the database")));
POOL_SETMASK(&UnBlockSig);
#endif
+
/*
* Wait for failover to finish
*/
if (wait_for_failover_to_finish() == -2)
+
/*
- * Waiting for failover/failback to finish was timed out.
- * Time to exit this process (and session disconnection).
+ * Waiting for failover/failback to finish was timed out. Time to
+ * exit this process (and session disconnection).
*/
child_exit(POOL_EXIT_AND_RESTART);
}
@@ -380,9 +384,9 @@ pool_virtual_main_db_node_id(void)
{
/*
* We used to return REAL_MAIN_NODE_ID here. Problem with it is, it
- * is possible that REAL_MAIN_NODE_ID could be changed
- * anytime. Suppose REAL_MAIN_NODE_ID == my_main_node_id == 1. Then
- * due to failback, REAL_MAIN_NODE_ID is changed to 0. Then
+ * is possible that REAL_MAIN_NODE_ID could be changed anytime.
+ * Suppose REAL_MAIN_NODE_ID == my_main_node_id == 1. Then due to
+ * failback, REAL_MAIN_NODE_ID is changed to 0. Then
* MAIN_CONNECTION(cp) will return NULL and any reference to it will
* cause segmentation fault. To prevent the issue we should return
* my_main_node_id instead.
@@ -430,9 +434,9 @@ pool_virtual_main_db_node_id(void)
}
/*
- * No query context exists. If in streaming replication mode, returns primary node
- * if exists. Otherwise returns my_main_node_id, which represents the
- * last REAL_MAIN_NODE_ID.
+ * No query context exists. If in streaming replication mode, returns
+ * primary node if exists. Otherwise returns my_main_node_id, which
+ * represents the last REAL_MAIN_NODE_ID.
*/
if (MAIN_REPLICA)
{
@@ -445,7 +449,7 @@ pool_virtual_main_db_node_id(void)
* Set the destination for the current query to the specific backend node.
*/
void
-pool_force_query_node_to_backend(POOL_QUERY_CONTEXT * query_context, int backend_id)
+pool_force_query_node_to_backend(POOL_QUERY_CONTEXT *query_context, int backend_id)
{
CHECK_QUERY_CONTEXT_IS_VALID;
@@ -460,7 +464,7 @@ pool_force_query_node_to_backend(POOL_QUERY_CONTEXT * query_context, int backend
* Decide where to send queries(thus expecting response)
*/
void
-pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
+pool_where_to_send(POOL_QUERY_CONTEXT *query_context, char *query, Node *node)
{
CHECK_QUERY_CONTEXT_IS_VALID;
@@ -481,14 +485,15 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
if (query_context->is_multi_statement)
{
/*
- * If we are in streaming replication mode and we have multi statement query,
- * we should send it to primary server only. Otherwise it is possible
- * to send a write query to standby servers because we only use the
- * first element of the multi statement query and don't care about the
- * rest. Typical situation where we are bugged by this is,
- * "BEGIN;DELETE FROM table;END". Note that from pgpool-II 3.1.0
- * transactional statements such as "BEGIN" is unconditionally sent to
- * all nodes(see send_to_where() for more details). Someday we might
+ * If we are in streaming replication mode and we have multi
+ * statement query, we should send it to primary server only.
+ * Otherwise it is possible to send a write query to standby
+ * servers because we only use the first element of the multi
+ * statement query and don't care about the rest. Typical
+ * situation where we are bugged by this is, "BEGIN;DELETE FROM
+ * table;END". Note that from pgpool-II 3.1.0 transactional
+ * statements such as "BEGIN" is unconditionally sent to all
+ * nodes(see send_to_where() for more details). Someday we might
* be able to understand all part of multi statement queries, but
* until that day we need this band aid.
*/
@@ -535,7 +540,7 @@ pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
* >0: send to this node_id
*/
POOL_STATUS
-pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
+pool_send_and_wait(POOL_QUERY_CONTEXT *query_context,
int send_type, int node_id)
{
POOL_SESSION_CONTEXT *session_context;
@@ -556,10 +561,10 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
string = NULL;
/*
- * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in
- * streaming replication mode, we send BEGIN to standbys instead.
- * The original_query which is BEGIN READ WRITE is sent to primary.
- * The rewritten_query BEGIN is sent to standbys.
+ * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in streaming
+ * replication mode, we send BEGIN to standbys instead. The original_query
+ * which is BEGIN READ WRITE is sent to primary. The rewritten_query BEGIN
+ * is sent to standbys.
*/
if (pool_need_to_treat_as_if_default_transaction(query_context))
{
@@ -590,8 +595,9 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
continue;
/*
- * If we are in streaming replication mode or logical replication mode,
- * we do not send COMMIT/ABORT to standbys if it's in I (idle) state.
+ * If we are in streaming replication mode or logical replication
+ * mode, we do not send COMMIT/ABORT to standbys if it's in I (idle)
+ * state.
*/
if (is_commit && MAIN_REPLICA && !IS_MAIN_NODE_ID(i) && TSTATE(backend, i) == 'I')
{
@@ -692,7 +698,7 @@ pool_send_and_wait(POOL_QUERY_CONTEXT * query_context,
* >0: send to this node_id
*/
POOL_STATUS
-pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context,
+pool_extended_send_and_wait(POOL_QUERY_CONTEXT *query_context,
char *kind, int len, char *contents,
int send_type, int node_id, bool nowait)
{
@@ -718,10 +724,10 @@ pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context,
rewritten_begin = NULL;
/*
- * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in
- * streaming replication mode, we send BEGIN to standbys instead.
- * The original_query which is BEGIN READ WRITE is sent to primary.
- * The rewritten_query BEGIN is sent to standbys.
+ * If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE in streaming
+ * replication mode, we send BEGIN to standbys instead. The original_query
+ * which is BEGIN READ WRITE is sent to primary. The rewritten_query BEGIN
+ * is sent to standbys.
*/
if (pool_need_to_treat_as_if_default_transaction(query_context))
{
@@ -903,7 +909,8 @@ pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context,
* From syntactically analysis decide the statement to be sent to the
* primary, the standby or either or both in native replication+HR/SR mode.
*/
-static POOL_DEST send_to_where(Node *node)
+static POOL_DEST
+send_to_where(Node *node)
{
/* From storage/lock.h */
@@ -981,15 +988,15 @@ static POOL_DEST send_to_where(Node *node)
if (is_start_transaction_query(node))
{
/*
- * But actually, we send BEGIN to standby if it's BEGIN READ
- * WRITE or START TRANSACTION READ WRITE
+ * But actually, we send BEGIN to standby if it's BEGIN READ WRITE
+ * or START TRANSACTION READ WRITE
*/
if (is_read_write((TransactionStmt *) node))
return POOL_BOTH;
/*
- * Other TRANSACTION start commands are sent to both primary
- * and standby
+ * Other TRANSACTION start commands are sent to both primary and
+ * standby
*/
else
return POOL_BOTH;
@@ -1009,6 +1016,7 @@ static POOL_DEST send_to_where(Node *node)
}
return POOL_BOTH;
}
+
/*
* 2PC commands
*/
@@ -1059,8 +1067,8 @@ static POOL_DEST send_to_where(Node *node)
/*
* SET TRANSACTION ISOLATION LEVEL SERIALIZABLE or SET SESSION
- * CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE or
- * SET transaction_isolation TO 'serializable' SET
+ * CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL SERIALIZABLE or SET
+ * transaction_isolation TO 'serializable' SET
* default_transaction_isolation TO 'serializable'
*/
else if (is_set_transaction_serializable(node))
@@ -1069,8 +1077,8 @@ static POOL_DEST send_to_where(Node *node)
}
/*
- * Check "SET TRANSACTION READ WRITE" "SET SESSION CHARACTERISTICS
- * AS TRANSACTION READ WRITE"
+ * Check "SET TRANSACTION READ WRITE" "SET SESSION CHARACTERISTICS AS
+ * TRANSACTION READ WRITE"
*/
else if (((VariableSetStmt *) node)->kind == VAR_SET_MULTI &&
(!strcmp(((VariableSetStmt *) node)->name, "TRANSACTION") ||
@@ -1165,11 +1173,11 @@ static POOL_DEST send_to_where(Node *node)
*/
static
void
-where_to_send_deallocate(POOL_QUERY_CONTEXT * query_context, Node *node)
+where_to_send_deallocate(POOL_QUERY_CONTEXT *query_context, Node *node)
{
DeallocateStmt *d = NULL;
ExecuteStmt *e = NULL;
- char *name;
+ char *name;
POOL_SENT_MESSAGE *msg;
if (IsA(node, DeallocateStmt))
@@ -1217,12 +1225,10 @@ where_to_send_deallocate(POOL_QUERY_CONTEXT * query_context, Node *node)
else
{
/*
- * prepared statement was not found.
- * There are two cases when this could happen.
- * (1) mistakes by client. In this case backend will return ERROR
- * anyway.
- * (2) previous query was issued as multi-statement query. e.g.
- * SELECT 1\;PREPARE foo AS SELECT 1;
+ * prepared statement was not found. There are two cases when this
+ * could happen. (1) mistakes by client. In this case backend will
+ * return ERROR anyway. (2) previous query was issued as
+ * multi-statement query. e.g. SELECT 1\;PREPARE foo AS SELECT 1;
* In this case pgpool does not know anything about the prepared
* statement "foo".
*/
@@ -1431,7 +1437,7 @@ is_serializable(TransactionStmt *node)
* The rewritten_query BEGIN is sent to standbys.
*/
bool
-pool_need_to_treat_as_if_default_transaction(POOL_QUERY_CONTEXT * query_context)
+pool_need_to_treat_as_if_default_transaction(POOL_QUERY_CONTEXT *query_context)
{
return (MAIN_REPLICA &&
is_start_transaction_query(query_context->parse_tree) &&
@@ -1471,7 +1477,7 @@ is_2pc_transaction_query(Node *node)
* Set query state, if a current state is before it than the specified state.
*/
void
-pool_set_query_state(POOL_QUERY_CONTEXT * query_context, POOL_QUERY_STATE state)
+pool_set_query_state(POOL_QUERY_CONTEXT *query_context, POOL_QUERY_STATE state)
{
int i;
@@ -1755,7 +1761,7 @@ pool_is_transaction_read_only(Node *node)
static void
set_virtual_main_node(POOL_QUERY_CONTEXT *query_context)
{
- int i;
+ int i;
for (i = 0; i < NUM_BACKENDS; i++)
{
@@ -1774,6 +1780,7 @@ static void
set_load_balance_info(POOL_QUERY_CONTEXT *query_context)
{
POOL_SESSION_CONTEXT *session_context;
+
session_context = pool_get_session_context(false);
if (pool_config->statement_level_load_balance)
@@ -1794,10 +1801,12 @@ is_in_list(char *name, List *list)
if (name == NULL || list == NIL)
return false;
- ListCell *cell;
- foreach (cell, list)
+ ListCell *cell;
+
+ foreach(cell, list)
{
- char *cell_name = (char *)lfirst(cell);
+ char *cell_name = (char *) lfirst(cell);
+
if (strcasecmp(name, cell_name) == 0)
{
ereport(DEBUG1,
@@ -1826,7 +1835,7 @@ is_select_object_in_temp_write_list(Node *node, void *context)
if (pool_config->disable_load_balance_on_write == DLBOW_DML_ADAPTIVE && session_context->is_in_transaction)
{
ereport(DEBUG1,
- (errmsg("is_select_object_in_temp_write_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname)));
+ (errmsg("is_select_object_in_temp_write_list: \"%s\", found relation \"%s\"", (char *) context, rgv->relname)));
return is_in_list(rgv->relname, session_context->transaction_temp_write_list);
}
@@ -1835,15 +1844,15 @@ is_select_object_in_temp_write_list(Node *node, void *context)
return raw_expression_tree_walker(node, is_select_object_in_temp_write_list, context);
}
-static char*
-get_associated_object_from_dml_adaptive_relations
- (char *left_token, DBObjectTypes object_type)
+static char *get_associated_object_from_dml_adaptive_relations
+ (char *left_token, DBObjectTypes object_type)
{
- int i;
- char *right_token = NULL;
+ int i;
+ char *right_token = NULL;
+
if (!pool_config->parsed_dml_adaptive_object_relationship_list)
return NULL;
- for (i=0 ;; i++)
+ for (i = 0;; i++)
{
if (pool_config->parsed_dml_adaptive_object_relationship_list[i].left_token.name == NULL)
break;
@@ -1873,13 +1882,14 @@ check_object_relationship_list(char *name, bool is_func_name)
if (session_context->is_in_transaction)
{
- char *right_token =
- get_associated_object_from_dml_adaptive_relations
- (name, is_func_name? OBJECT_TYPE_FUNCTION : OBJECT_TYPE_RELATION);
+ char *right_token =
+ get_associated_object_from_dml_adaptive_relations
+ (name, is_func_name ? OBJECT_TYPE_FUNCTION : OBJECT_TYPE_RELATION);
if (right_token)
{
MemoryContext old_context = MemoryContextSwitchTo(session_context->memory_context);
+
session_context->transaction_temp_write_list =
lappend(session_context->transaction_temp_write_list, pstrdup(right_token));
MemoryContextSwitchTo(old_context);
@@ -1903,7 +1913,7 @@ add_object_into_temp_write_list(Node *node, void *context)
RangeVar *rgv = (RangeVar *) node;
ereport(DEBUG5,
- (errmsg("add_object_into_temp_write_list: \"%s\", found relation \"%s\"", (char*)context, rgv->relname)));
+ (errmsg("add_object_into_temp_write_list: \"%s\", found relation \"%s\"", (char *) context, rgv->relname)));
POOL_SESSION_CONTEXT *session_context = pool_get_session_context(false);
MemoryContext old_context = MemoryContextSwitchTo(session_context->memory_context);
@@ -1947,7 +1957,7 @@ dml_adaptive(Node *node, char *query)
session_context->transaction_temp_write_list = NIL;
}
- else if(is_commit_or_rollback_query(node))
+ else if (is_commit_or_rollback_query(node))
{
session_context->is_in_transaction = false;
@@ -1961,7 +1971,10 @@ dml_adaptive(Node *node, char *query)
return;
}
- /* If non-selectStmt, find the relname and add it to the transaction temp write list. */
+ /*
+ * If non-selectStmt, find the relname and add it to the transaction
+ * temp write list.
+ */
if (!is_select_query(node, query))
add_object_into_temp_write_list(node, query);
@@ -1973,7 +1986,7 @@ dml_adaptive(Node *node, char *query)
* replication mode and slony mode. Called by pool_where_to_send.
*/
static void
-where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
+where_to_send_main_replica(POOL_QUERY_CONTEXT *query_context, char *query, Node *node)
{
POOL_DEST dest;
POOL_SESSION_CONTEXT *session_context;
@@ -2000,11 +2013,10 @@ where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node
if (is_tx_started_by_multi_statement_query())
{
/*
- * If we are in an explicit transaction and the transaction
- * was started by a multi statement query, we should send
- * query to primary node only (which was supposed to be sent
- * to all nodes) until the transaction gets committed or
- * aborted.
+ * If we are in an explicit transaction and the transaction was
+ * started by a multi statement query, we should send query to
+ * primary node only (which was supposed to be sent to all nodes)
+ * until the transaction gets committed or aborted.
*/
pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
}
@@ -2030,9 +2042,9 @@ where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node
{
/*
* If (we are outside of an explicit transaction) OR (the
- * transaction has not issued a write query yet, AND
- * transaction isolation level is not SERIALIZABLE) we might
- * be able to load balance.
+ * transaction has not issued a write query yet, AND transaction
+ * isolation level is not SERIALIZABLE) we might be able to load
+ * balance.
*/
ereport(DEBUG1,
@@ -2053,14 +2065,14 @@ where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node
*/
/*
- * If system catalog is used in the SELECT, we prefer to
- * send to the primary. Example: SELECT * FROM pg_class
- * WHERE relname = 't1'; Because 't1' is a constant, it's
- * hard to recognize as table name. Most use case such
- * query is against system catalog, and the table name can
- * be a temporary table, it's best to query against
- * primary system catalog. Please note that this test must
- * be done *before* test using pool_has_temp_table.
+ * If system catalog is used in the SELECT, we prefer to send
+ * to the primary. Example: SELECT * FROM pg_class WHERE
+ * relname = 't1'; Because 't1' is a constant, it's hard to
+ * recognize as table name. Most use case such query is
+ * against system catalog, and the table name can be a
+ * temporary table, it's best to query against primary system
+ * catalog. Please note that this test must be done *before*
+ * test using pool_has_temp_table.
*/
if (pool_has_system_catalog(node))
{
@@ -2072,8 +2084,8 @@ where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node
}
/*
- * If temporary table is used in the SELECT, we prefer to
- * send to the primary.
+ * If temporary table is used in the SELECT, we prefer to send
+ * to the primary.
*/
else if (pool_config->check_temp_table && pool_has_temp_table(node))
{
@@ -2085,8 +2097,8 @@ where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node
}
/*
- * If unlogged table is used in the SELECT, we prefer to
- * send to the primary.
+ * If unlogged table is used in the SELECT, we prefer to send
+ * to the primary.
*/
else if (pool_config->check_unlogged_table && pool_has_unlogged_table(node))
{
@@ -2096,17 +2108,20 @@ where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node
pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
}
+
/*
- * When query match the query patterns in primary_routing_query_pattern_list, we
- * send only to main node.
+ * When query match the query patterns in
+ * primary_routing_query_pattern_list, we send only to main
+ * node.
*/
else if (pattern_compare(query, WRITELIST, "primary_routing_query_pattern_list") == 1)
{
pool_set_node_to_be_sent(query_context, PRIMARY_NODE_ID);
}
+
/*
- * If a writing function call is used, we prefer to send
- * to the primary.
+ * If a writing function call is used, we prefer to send to
+ * the primary.
*/
else if (pool_has_function_call(node))
{
@@ -2129,9 +2144,9 @@ where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node
/*
* As streaming replication delay is too much, if
- * prefer_lower_delay_standby is true then elect new
- * load balance node which is lowest delayed,
- * false then send to the primary.
+ * prefer_lower_delay_standby is true then elect new load
+ * balance node which is lowest delayed, false then send
+ * to the primary.
*/
if (STREAM && check_replication_delay(session_context->load_balance_node_id))
{
@@ -2141,7 +2156,7 @@ where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node
if (pool_config->prefer_lower_delay_standby)
{
- int new_load_balancing_node = select_load_balancing_node();
+ int new_load_balancing_node = select_load_balancing_node();
session_context->load_balance_node_id = new_load_balancing_node;
session_context->query_context->load_balance_node_id = session_context->load_balance_node_id;
@@ -2180,7 +2195,7 @@ where_to_send_main_replica(POOL_QUERY_CONTEXT * query_context, char *query, Node
* Called by pool_where_to_send.
*/
static void
-where_to_send_native_replication(POOL_QUERY_CONTEXT * query_context, char *query, Node *node)
+where_to_send_native_replication(POOL_QUERY_CONTEXT *query_context, char *query, Node *node)
{
POOL_SESSION_CONTEXT *session_context;
POOL_CONNECTION_POOL *backend;
@@ -2193,7 +2208,7 @@ where_to_send_native_replication(POOL_QUERY_CONTEXT * query_context, char *query
* from syntactical point of view).
*/
elog(DEBUG1, "Maybe: load balance mode: %d is_select_query: %d",
- pool_config->load_balance_mode, is_select_query(node, query));
+ pool_config->load_balance_mode, is_select_query(node, query));
if (pool_config->load_balance_mode &&
is_select_query(node, query) &&
@@ -2223,12 +2238,12 @@ where_to_send_native_replication(POOL_QUERY_CONTEXT * query_context, char *query
else if (TSTATE(backend, MAIN_NODE_ID) == 'I')
{
/*
- * We are out side transaction. If default transaction is read only,
- * we can load balance.
+ * We are out side transaction. If default transaction is read
+ * only, we can load balance.
*/
- static char *si_query = "SELECT current_setting('transaction_read_only')";
+ static char *si_query = "SELECT current_setting('transaction_read_only')";
POOL_SELECT_RESULT *res;
- bool load_balance = false;
+ bool load_balance = false;
do_query(CONNECTION(backend, MAIN_NODE_ID), si_query, &res, MAJOR(backend));
if (res)
@@ -2251,7 +2266,7 @@ where_to_send_native_replication(POOL_QUERY_CONTEXT * query_context, char *query
}
}
}
-
+
/*
* If a writing function call is used or replicate_select is true, we
* have to send to all nodes since the function may modify database.
@@ -2264,10 +2279,9 @@ where_to_send_native_replication(POOL_QUERY_CONTEXT * query_context, char *query
}
/*
- * If (we are outside of an explicit transaction) OR (the
- * transaction has not issued a write query yet, AND transaction
- * isolation level is not SERIALIZABLE) we might be able to load
- * balance.
+ * If (we are outside of an explicit transaction) OR (the transaction
+ * has not issued a write query yet, AND transaction isolation level
+ * is not SERIALIZABLE) we might be able to load balance.
*/
else if (TSTATE(backend, MAIN_NODE_ID) == 'I' ||
(!pool_is_writing_transaction() &&
@@ -2314,22 +2328,23 @@ where_to_send_native_replication(POOL_QUERY_CONTEXT * query_context, char *query
int
wait_for_failover_to_finish(void)
{
-#define MAX_FAILOVER_WAIT 30 /* waiting for failover finish timeout in seconds */
+#define MAX_FAILOVER_WAIT 30 /* waiting for failover finish timeout in
+ * seconds */
- volatile POOL_REQUEST_INFO *my_req;
- int ret = 0;
- int i;
+ volatile POOL_REQUEST_INFO *my_req;
+ int ret = 0;
+ int i;
/*
* Wait for failover to finish
*/
- for (i = 0;i < MAX_FAILOVER_WAIT; i++)
+ for (i = 0; i < MAX_FAILOVER_WAIT; i++)
{
my_req = Req_info;
if (my_req->switching == 0)
return ret;
- ret = -1; /* failover/failback finished */
+ ret = -1; /* failover/failback finished */
sleep(1);
}
- return -2; /* timed out */
+ return -2; /* timed out */
}
diff --git a/src/context/pool_session_context.c b/src/context/pool_session_context.c
index eea8df136..6ed64b2de 100644
--- a/src/context/pool_session_context.c
+++ b/src/context/pool_session_context.c
@@ -34,11 +34,11 @@
#include "context/pool_session_context.h"
static POOL_SESSION_CONTEXT session_context_d;
-static POOL_SESSION_CONTEXT * session_context = NULL;
+static POOL_SESSION_CONTEXT *session_context = NULL;
static void GetTranIsolationErrorCb(void *arg);
static void init_sent_message_list(void);
-static POOL_PENDING_MESSAGE * copy_pending_message(POOL_PENDING_MESSAGE * message);
-static void dump_sent_message(char *caller, POOL_SENT_MESSAGE * m);
+static POOL_PENDING_MESSAGE *copy_pending_message(POOL_PENDING_MESSAGE *message);
+static void dump_sent_message(char *caller, POOL_SENT_MESSAGE *m);
static void dml_adaptive_init(void);
static void dml_adaptive_destroy(void);
@@ -52,7 +52,7 @@ static int Elevel = DEBUG2;
* Initialize per session context
*/
void
-pool_init_session_context(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+pool_init_session_context(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
session_context = &session_context_d;
ProcessInfo *process_info;
@@ -161,7 +161,7 @@ pool_init_session_context(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * bac
/* Initialize temp tables */
pool_temp_tables_init();
-
+
/* Snapshot isolation state */
session_context->si_state = SI_NO_SNAPSHOT;
@@ -434,7 +434,7 @@ pool_remove_sent_messages(char kind)
* Destroy sent message
*/
void
-pool_sent_message_destroy(POOL_SENT_MESSAGE * message)
+pool_sent_message_destroy(POOL_SENT_MESSAGE *message)
{
bool in_progress;
POOL_QUERY_CONTEXT *qc = NULL;
@@ -522,7 +522,7 @@ pool_zap_query_context_in_sent_messages(POOL_QUERY_CONTEXT *query_context)
}
static void
-dump_sent_message(char *caller, POOL_SENT_MESSAGE * m)
+dump_sent_message(char *caller, POOL_SENT_MESSAGE *m)
{
ereport(DEBUG5,
(errmsg("called by %s: sent message: address: %p kind: %c name: =%s= state:%d",
@@ -560,7 +560,7 @@ dml_adaptive_destroy(void)
POOL_SENT_MESSAGE *
pool_create_sent_message(char kind, int len, char *contents,
int num_tsparams, const char *name,
- POOL_QUERY_CONTEXT * query_context)
+ POOL_QUERY_CONTEXT *query_context)
{
POOL_SENT_MESSAGE *msg;
@@ -589,7 +589,7 @@ pool_create_sent_message(char kind, int len, char *contents,
* Add a sent message to sent message list
*/
void
-pool_add_sent_message(POOL_SENT_MESSAGE * message)
+pool_add_sent_message(POOL_SENT_MESSAGE *message)
{
POOL_SENT_MESSAGE *old_msg;
POOL_SENT_MESSAGE_LIST *msglist;
@@ -682,7 +682,7 @@ pool_get_sent_message(char kind, const char *name, POOL_SENT_MESSAGE_STATE state
* Find a sent message by query context.
*/
POOL_SENT_MESSAGE *
-pool_get_sent_message_by_query_context(POOL_QUERY_CONTEXT * query_context)
+pool_get_sent_message_by_query_context(POOL_QUERY_CONTEXT *query_context)
{
int i;
POOL_SENT_MESSAGE_LIST *msglist;
@@ -705,7 +705,7 @@ pool_get_sent_message_by_query_context(POOL_QUERY_CONTEXT * query_context)
* Set message state to POOL_SENT_MESSAGE_STATE to POOL_SENT_MESSAGE_CLOSED.
*/
void
-pool_set_sent_message_state(POOL_SENT_MESSAGE * message)
+pool_set_sent_message_state(POOL_SENT_MESSAGE *message)
{
ereport(DEBUG5,
(errmsg("pool_set_sent_message_state: name:%s kind:%c previous state: %d",
@@ -738,8 +738,8 @@ void
pool_set_writing_transaction(void)
{
/*
- * If disable_transaction_on_write is 'off' or 'dml_adaptive', then never turn on writing
- * transaction flag.
+ * If disable_transaction_on_write is 'off' or 'dml_adaptive', then never
+ * turn on writing transaction flag.
*/
if (pool_config->disable_load_balance_on_write != DLBOW_OFF && pool_config->disable_load_balance_on_write != DLBOW_DML_ADAPTIVE)
{
@@ -968,7 +968,7 @@ init_sent_message_list(void)
* is used. Returns true if it is not used.
*/
bool
-can_query_context_destroy(POOL_QUERY_CONTEXT * qc)
+can_query_context_destroy(POOL_QUERY_CONTEXT *qc)
{
int i;
int count = 0;
@@ -1125,7 +1125,7 @@ pool_pending_message_create(char kind, int len, char *contents)
* message was sent.
*/
void
-pool_pending_message_dest_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context)
+pool_pending_message_dest_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context)
{
memcpy(message->node_ids, query_context->where_to_send, sizeof(message->node_ids));
@@ -1142,7 +1142,7 @@ pool_pending_message_dest_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT
* which indicates which backend nodes the message was sent.
*/
void
-pool_pending_message_query_context_dest_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context)
+pool_pending_message_query_context_dest_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context)
{
int i;
@@ -1168,7 +1168,7 @@ pool_pending_message_query_context_dest_set(POOL_PENDING_MESSAGE * message, POOL
* Set query field of message.
*/
void
-pool_pending_message_query_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context)
+pool_pending_message_query_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context)
{
StrNCpy(message->query, query_context->original_query, sizeof(message->query));
}
@@ -1177,7 +1177,7 @@ pool_pending_message_query_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEX
* Add one message to the tail of the list.
*/
void
-pool_pending_message_add(POOL_PENDING_MESSAGE * message)
+pool_pending_message_add(POOL_PENDING_MESSAGE *message)
{
MemoryContext old_context;
@@ -1354,7 +1354,7 @@ pool_pending_message_get(POOL_MESSAGE_TYPE type)
* close message.
*/
char
-pool_get_close_message_spec(POOL_PENDING_MESSAGE * msg)
+pool_get_close_message_spec(POOL_PENDING_MESSAGE *msg)
{
return *msg->contents;
}
@@ -1364,7 +1364,7 @@ pool_get_close_message_spec(POOL_PENDING_MESSAGE * msg)
* The returned pointer is within "msg".
*/
char *
-pool_get_close_message_name(POOL_PENDING_MESSAGE * msg)
+pool_get_close_message_name(POOL_PENDING_MESSAGE *msg)
{
return (msg->contents) + 1;
}
@@ -1373,7 +1373,8 @@ pool_get_close_message_name(POOL_PENDING_MESSAGE * msg)
* Perform deep copy of POOL_PENDING_MESSAGE object in the current memory
* context except the query context.
*/
-static POOL_PENDING_MESSAGE * copy_pending_message(POOL_PENDING_MESSAGE * message)
+static POOL_PENDING_MESSAGE *
+copy_pending_message(POOL_PENDING_MESSAGE *message)
{
POOL_PENDING_MESSAGE *msg;
@@ -1390,7 +1391,7 @@ static POOL_PENDING_MESSAGE * copy_pending_message(POOL_PENDING_MESSAGE * messag
* context except the query context.
*/
void
-pool_pending_message_free_pending_message(POOL_PENDING_MESSAGE * message)
+pool_pending_message_free_pending_message(POOL_PENDING_MESSAGE *message)
{
if (message == NULL)
return;
@@ -1420,7 +1421,7 @@ pool_pending_message_reset_previous_message(void)
* Set previous message.
*/
void
-pool_pending_message_set_previous_message(POOL_PENDING_MESSAGE * message)
+pool_pending_message_set_previous_message(POOL_PENDING_MESSAGE *message)
{
if (!session_context)
{
@@ -1521,7 +1522,7 @@ pool_check_pending_message_and_reply(POOL_MESSAGE_TYPE type, char kind)
* pool_pending_message_free_pending_message.
*/
POOL_PENDING_MESSAGE *
-pool_pending_message_find_lastest_by_query_context(POOL_QUERY_CONTEXT * qc)
+pool_pending_message_find_lastest_by_query_context(POOL_QUERY_CONTEXT *qc)
{
List *msgs;
POOL_PENDING_MESSAGE *msg;
@@ -1573,7 +1574,7 @@ pool_pending_message_find_lastest_by_query_context(POOL_QUERY_CONTEXT * qc)
* the pending message is one of primary or standby node.
*/
int
-pool_pending_message_get_target_backend_id(POOL_PENDING_MESSAGE * msg)
+pool_pending_message_get_target_backend_id(POOL_PENDING_MESSAGE *msg)
{
int backend_id = -1;
int i;
@@ -1602,8 +1603,8 @@ pool_pending_message_get_message_num_by_backend_id(int backend_id)
{
ListCell *cell;
ListCell *next;
- int cnt = 0;
- int i;
+ int cnt = 0;
+ int i;
if (!session_context)
{
@@ -1641,6 +1642,7 @@ pool_pending_message_set_flush_request(void)
foreach(msg_item, session_context->pending_messages)
{
POOL_PENDING_MESSAGE *msg = (POOL_PENDING_MESSAGE *) lfirst(msg_item);
+
msg->flush_pending = true;
ereport(DEBUG5,
(errmsg("pool_pending_message_set_flush_request: msg: %s",
@@ -1799,10 +1801,10 @@ pool_temp_tables_destroy(void)
* If the table already exists, just replace state.
*/
void
-pool_temp_tables_add(char * tablename, POOL_TEMP_TABLE_STATE state)
+pool_temp_tables_add(char *tablename, POOL_TEMP_TABLE_STATE state)
{
MemoryContext old_context;
- POOL_TEMP_TABLE * table;
+ POOL_TEMP_TABLE *table;
if (!session_context)
ereport(ERROR,
@@ -1832,7 +1834,7 @@ pool_temp_tables_add(char * tablename, POOL_TEMP_TABLE_STATE state)
*/
POOL_TEMP_TABLE *
-pool_temp_tables_find(char * tablename)
+pool_temp_tables_find(char *tablename)
{
ListCell *cell;
@@ -1842,7 +1844,8 @@ pool_temp_tables_find(char * tablename)
foreach(cell, session_context->temp_tables)
{
- POOL_TEMP_TABLE * table = (POOL_TEMP_TABLE *)lfirst(cell);
+ POOL_TEMP_TABLE *table = (POOL_TEMP_TABLE *) lfirst(cell);
+
if (strcmp(tablename, table->tablename) == 0)
return table;
}
@@ -1855,9 +1858,9 @@ pool_temp_tables_find(char * tablename)
* the table state.
*/
void
-pool_temp_tables_delete(char * tablename, POOL_TEMP_TABLE_STATE state)
+pool_temp_tables_delete(char *tablename, POOL_TEMP_TABLE_STATE state)
{
- POOL_TEMP_TABLE * table;
+ POOL_TEMP_TABLE *table;
MemoryContext old_context;
if (!session_context)
@@ -1914,7 +1917,7 @@ pool_temp_tables_commit_pending(void)
Retry:
foreach(cell, session_context->temp_tables)
{
- POOL_TEMP_TABLE * table = (POOL_TEMP_TABLE *)lfirst(cell);
+ POOL_TEMP_TABLE *table = (POOL_TEMP_TABLE *) lfirst(cell);
if (table->state == TEMP_TABLE_CREATING)
{
@@ -1957,7 +1960,7 @@ pool_temp_tables_remove_pending(void)
Retry:
foreach(cell, session_context->temp_tables)
{
- POOL_TEMP_TABLE * table = (POOL_TEMP_TABLE *)lfirst(cell);
+ POOL_TEMP_TABLE *table = (POOL_TEMP_TABLE *) lfirst(cell);
if (table->state == TEMP_TABLE_CREATING || table->state == TEMP_TABLE_DROPPING)
{
@@ -1985,7 +1988,8 @@ pool_temp_tables_dump(void)
foreach(cell, session_context->temp_tables)
{
- POOL_TEMP_TABLE * table = (POOL_TEMP_TABLE *)lfirst(cell);
+ POOL_TEMP_TABLE *table = (POOL_TEMP_TABLE *) lfirst(cell);
+
ereport(DEBUG1,
(errmsg("pool_temp_tables_dump: table %s state: %d",
table->tablename, table->state)));
diff --git a/src/include/auth/pool_auth.h b/src/include/auth/pool_auth.h
index 08a21c8a7..a5ee9a183 100644
--- a/src/include/auth/pool_auth.h
+++ b/src/include/auth/pool_auth.h
@@ -22,12 +22,12 @@
#ifndef pool_auth_h
#define pool_auth_h
-extern void connection_do_auth(POOL_CONNECTION_POOL_SLOT * cp, char *password);
-extern int pool_do_auth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern int pool_do_reauth(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * cp);
-extern void authenticate_frontend(POOL_CONNECTION * frontend);
+extern void connection_do_auth(POOL_CONNECTION_POOL_SLOT *cp, char *password);
+extern int pool_do_auth(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern int pool_do_reauth(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *cp);
+extern void authenticate_frontend(POOL_CONNECTION *frontend);
extern void pool_random_salt(char *md5Salt);
extern void pool_random(void *buf, size_t len);
-#endif /* pool_auth_h */
+#endif /* pool_auth_h */
diff --git a/src/include/auth/pool_hba.h b/src/include/auth/pool_hba.h
index a6e0a1533..e92404fd9 100644
--- a/src/include/auth/pool_hba.h
+++ b/src/include/auth/pool_hba.h
@@ -101,10 +101,11 @@ struct HbaLine
char *ldapprefix;
char *ldapsuffix;
/* Additional LDAPl option with pgpool */
- bool backend_use_passwd; /* If true, pgpool use same password to auth backend */
+ bool backend_use_passwd; /* If true, pgpool use same password to
+ * auth backend */
};
extern bool load_hba(char *hbapath);
-extern void ClientAuthentication(POOL_CONNECTION * frontend);
+extern void ClientAuthentication(POOL_CONNECTION *frontend);
#endif /* POOL_HBA_H */
diff --git a/src/include/auth/pool_passwd.h b/src/include/auth/pool_passwd.h
index ace61160c..17b07b456 100644
--- a/src/include/auth/pool_passwd.h
+++ b/src/include/auth/pool_passwd.h
@@ -29,7 +29,8 @@
#define POOL_PASSWD_FILENAME "pool_passwd"
#define POOL_PASSWD_LEN 35
-#define MAX_POOL_PASSWD_LEN 132 /* In case of TEXT prefix(4byte) and plain text password(128byte)*/
+#define MAX_POOL_PASSWD_LEN 132 /* In case of TEXT prefix(4byte) and plain
+ * text password(128byte) */
#define MAX_USER_NAME_LEN 128
#define MAX_PGPASS_LEN 128
@@ -47,7 +48,7 @@ typedef enum
* pgpool-II child main process */
POOL_PASSWD_RW, /* open pool_passwd in read/write mode. used
* by pg_md5 command */
-} POOL_PASSWD_MODE;
+} POOL_PASSWD_MODE;
typedef enum PasswordType
{
@@ -64,16 +65,16 @@ typedef struct UserPassword
char *userName;
char *password;
PasswordType passwordType;
-} UserPassword;
+} UserPassword;
typedef struct PasswordMapping
{
UserPassword pgpoolUser;
UserPassword backendUser;
bool mappedUser;
-} PasswordMapping;
+} PasswordMapping;
-extern PasswordMapping * pool_get_user_credentials(char *username);
+extern PasswordMapping *pool_get_user_credentials(char *username);
extern PasswordType get_password_type(const char *shadow_pass);
extern void pool_init_pool_passwd(char *pool_passwd_filename, POOL_PASSWD_MODE mode);
extern int pool_create_passwdent(char *username, char *passwd);
@@ -84,6 +85,6 @@ extern void pool_reopen_passwd_file(void);
extern char *get_decrypted_password(const char *shadow_pass);
extern char *read_pool_key(char *key_file_path);
extern char *get_pgpool_config_user_password(char *username, char *password_in_config);
-extern void delete_passwordMapping(PasswordMapping * pwdMapping);
-extern int check_password_type_is_not_md5(char *username, char *password_in_config);
+extern void delete_passwordMapping(PasswordMapping *pwdMapping);
+extern int check_password_type_is_not_md5(char *username, char *password_in_config);
#endif /* POOL_PASSWD_H */
diff --git a/src/include/auth/scram-common.h b/src/include/auth/scram-common.h
index 9852f48e6..56ca862c0 100644
--- a/src/include/auth/scram-common.h
+++ b/src/include/auth/scram-common.h
@@ -82,12 +82,12 @@ extern void scram_HMAC_update(scram_HMAC_ctx *ctx, const char *str, int slen);
extern void scram_HMAC_final(uint8 *result, scram_HMAC_ctx *ctx);
extern void scram_SaltedPassword(const char *password, const char *salt,
- int saltlen, int iterations, uint8 *result);
+ int saltlen, int iterations, uint8 *result);
extern void scram_H(const uint8 *str, int len, uint8 *result);
extern void scram_ClientKey(const uint8 *salted_password, uint8 *result);
extern void scram_ServerKey(const uint8 *salted_password, uint8 *result);
extern char *scram_build_verifier(const char *salt, int saltlen, int iterations,
- const char *password);
+ const char *password);
#endif /* SCRAM_COMMON_H */
diff --git a/src/include/auth/scram.h b/src/include/auth/scram.h
index 883ab1442..2eaf0c06c 100644
--- a/src/include/auth/scram.h
+++ b/src/include/auth/scram.h
@@ -48,17 +48,17 @@
/* Routines dedicated to authentication */
extern void *pg_be_scram_init(const char *username, const char *shadow_pass);
-extern int pg_be_scram_exchange(void *opaq, char *input, int inputlen,
- char **output, int *outputlen, char **logdetail);
+extern int pg_be_scram_exchange(void *opaq, char *input, int inputlen,
+ char **output, int *outputlen, char **logdetail);
/* Routines to handle and check SCRAM-SHA-256 verifier */
extern char *pg_be_scram_build_verifier(const char *password);
extern bool scram_verify_plain_password(const char *username,
- const char *password, const char *verifier);
+ const char *password, const char *verifier);
extern void *pg_fe_scram_init(const char *username, const char *password);
extern void pg_fe_scram_exchange(void *opaq, char *input, int inputlen,
- char **output, int *outputlen,
- bool *done, bool *success);
+ char **output, int *outputlen,
+ bool *done, bool *success);
extern void pg_fe_scram_free(void *opaq);
extern char *pg_fe_scram_build_verifier(const char *password);
diff --git a/src/include/context/pool_process_context.h b/src/include/context/pool_process_context.h
index a9f9e1002..194b6220a 100644
--- a/src/include/context/pool_process_context.h
+++ b/src/include/context/pool_process_context.h
@@ -26,7 +26,7 @@
#ifndef POOL_PROCESS_CONTEXT_H
#define POOL_PROCESS_CONTEXT_H
-//#include "pool.h"
+/* #include "pool.h" */
#include "pcp/libpcp_ext.h"
#include "utils/pool_signal.h"
@@ -57,22 +57,22 @@ typedef struct
unsigned int last_alarm_second;
unsigned int undo_alarm_second;
-} POOL_PROCESS_CONTEXT;
+} POOL_PROCESS_CONTEXT;
extern void pool_init_process_context(void);
-extern POOL_PROCESS_CONTEXT * pool_get_process_context(void);
-extern ProcessInfo * pool_get_my_process_info(void);
+extern POOL_PROCESS_CONTEXT *pool_get_process_context(void);
+extern ProcessInfo *pool_get_my_process_info(void);
extern void pool_increment_local_session_id(void);
-extern size_t pool_coninfo_size(void);
+extern size_t pool_coninfo_size(void);
extern int pool_coninfo_num(void);
-extern ConnectionInfo * pool_coninfo(int child, int connection_pool, int backend);
-extern ConnectionInfo * pool_coninfo_pid(int pid, int connection_pool, int backend);
+extern ConnectionInfo *pool_coninfo(int child, int connection_pool, int backend);
+extern ConnectionInfo *pool_coninfo_pid(int pid, int connection_pool, int backend);
extern void pool_coninfo_set_frontend_connected(int proc_id, int pool_index);
extern void pool_coninfo_unset_frontend_connected(int proc_id, int pool_index);
-extern ConnectionInfo * pool_coninfo_backend_pid(int backend_pid, int *backend_node_id);
-extern void pool_set_connection_will_be_terminated(ConnectionInfo * connInfo);
-extern void pool_unset_connection_will_be_terminated(ConnectionInfo * connInfo);
+extern ConnectionInfo *pool_coninfo_backend_pid(int backend_pid, int *backend_node_id);
+extern void pool_set_connection_will_be_terminated(ConnectionInfo *connInfo);
+extern void pool_unset_connection_will_be_terminated(ConnectionInfo *connInfo);
extern void pool_alarm(pool_sighandler_t handler, unsigned int second);
extern void pool_undo_alarm(void);
diff --git a/src/include/context/pool_query_context.h b/src/include/context/pool_query_context.h
index 8ca7fb336..1d1ef13eb 100644
--- a/src/include/context/pool_query_context.h
+++ b/src/include/context/pool_query_context.h
@@ -44,7 +44,7 @@ typedef enum
POOL_PARSE_COMPLETE,
POOL_BIND_COMPLETE,
POOL_EXECUTE_COMPLETE
-} POOL_QUERY_STATE;
+} POOL_QUERY_STATE;
/*
* Query context:
@@ -62,8 +62,8 @@ typedef struct
Node *rewritten_parse_tree; /* rewritten raw parser output if any */
bool where_to_send[MAX_NUM_BACKENDS]; /* DB node map to send
* query */
- int load_balance_node_id; /* load balance node id per statement */
- int virtual_main_node_id; /* the 1st DB node to send query */
+ int load_balance_node_id; /* load balance node id per statement */
+ int virtual_main_node_id; /* the 1st DB node to send query */
POOL_QUERY_STATE query_state[MAX_NUM_BACKENDS]; /* for extended query
* protocol */
bool is_cache_safe; /* true if SELECT is safe to cache */
@@ -90,39 +90,39 @@ typedef struct
* extended query, do not commit cache if
* this flag is true. */
- bool atEnd; /* if true all rows have been already
- * fetched from the portal */
+ bool atEnd; /* if true all rows have been already fetched
+ * from the portal */
- bool partial_fetch; /* if true some rows have been fetched by
- * an execute with non 0 row option */
+ bool partial_fetch; /* if true some rows have been fetched by an
+ * execute with non 0 row option */
MemoryContext memory_context; /* memory context for query context */
-} POOL_QUERY_CONTEXT;
+} POOL_QUERY_CONTEXT;
-extern POOL_QUERY_CONTEXT * pool_init_query_context(void);
-extern void pool_query_context_destroy(POOL_QUERY_CONTEXT * query_context);
-extern POOL_QUERY_CONTEXT * pool_query_context_shallow_copy(POOL_QUERY_CONTEXT * query_context);
-extern void pool_start_query(POOL_QUERY_CONTEXT * query_context, char *query, int len, Node *node);
-extern void pool_set_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id);
-extern bool pool_is_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id);
-extern void pool_set_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id);
-extern void pool_unset_node_to_be_sent(POOL_QUERY_CONTEXT * query_context, int node_id);
-extern void pool_clear_node_to_be_sent(POOL_QUERY_CONTEXT * query_context);
-extern void pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT * query_context);
-extern bool pool_multi_node_to_be_sent(POOL_QUERY_CONTEXT * query_context);
-extern void pool_where_to_send(POOL_QUERY_CONTEXT * query_context, char *query, Node *node);
-extern POOL_STATUS pool_send_and_wait(POOL_QUERY_CONTEXT * query_context, int send_type, int node_id);
-extern POOL_STATUS pool_extended_send_and_wait(POOL_QUERY_CONTEXT * query_context, char *kind, int len, char *contents, int send_type, int node_id, bool nowait);
+extern POOL_QUERY_CONTEXT *pool_init_query_context(void);
+extern void pool_query_context_destroy(POOL_QUERY_CONTEXT *query_context);
+extern POOL_QUERY_CONTEXT *pool_query_context_shallow_copy(POOL_QUERY_CONTEXT *query_context);
+extern void pool_start_query(POOL_QUERY_CONTEXT *query_context, char *query, int len, Node *node);
+extern void pool_set_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id);
+extern bool pool_is_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id);
+extern void pool_set_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id);
+extern void pool_unset_node_to_be_sent(POOL_QUERY_CONTEXT *query_context, int node_id);
+extern void pool_clear_node_to_be_sent(POOL_QUERY_CONTEXT *query_context);
+extern void pool_setall_node_to_be_sent(POOL_QUERY_CONTEXT *query_context);
+extern bool pool_multi_node_to_be_sent(POOL_QUERY_CONTEXT *query_context);
+extern void pool_where_to_send(POOL_QUERY_CONTEXT *query_context, char *query, Node *node);
+extern POOL_STATUS pool_send_and_wait(POOL_QUERY_CONTEXT *query_context, int send_type, int node_id);
+extern POOL_STATUS pool_extended_send_and_wait(POOL_QUERY_CONTEXT *query_context, char *kind, int len, char *contents, int send_type, int node_id, bool nowait);
extern Node *pool_get_parse_tree(void);
extern char *pool_get_query_string(void);
extern bool is_set_transaction_serializable(Node *node);
extern bool is_start_transaction_query(Node *node);
extern bool is_read_write(TransactionStmt *node);
extern bool is_serializable(TransactionStmt *node);
-extern bool pool_need_to_treat_as_if_default_transaction(POOL_QUERY_CONTEXT * query_context);
+extern bool pool_need_to_treat_as_if_default_transaction(POOL_QUERY_CONTEXT *query_context);
extern bool is_savepoint_query(Node *node);
extern bool is_2pc_transaction_query(Node *node);
-extern void pool_set_query_state(POOL_QUERY_CONTEXT * query_context, POOL_QUERY_STATE state);
+extern void pool_set_query_state(POOL_QUERY_CONTEXT *query_context, POOL_QUERY_STATE state);
extern int statecmp(POOL_QUERY_STATE s1, POOL_QUERY_STATE s2);
extern bool pool_is_cache_safe(void);
extern void pool_set_cache_safe(void);
@@ -131,8 +131,8 @@ extern bool pool_is_cache_exceeded(void);
extern void pool_set_cache_exceeded(void);
extern void pool_unset_cache_exceeded(void);
extern bool pool_is_transaction_read_only(Node *node);
-extern void pool_force_query_node_to_backend(POOL_QUERY_CONTEXT * query_context, int backend_id);
+extern void pool_force_query_node_to_backend(POOL_QUERY_CONTEXT *query_context, int backend_id);
extern void check_object_relationship_list(char *name, bool is_func_name);
-extern int wait_for_failover_to_finish(void);
+extern int wait_for_failover_to_finish(void);
#endif /* POOL_QUERY_CONTEXT_H */
diff --git a/src/include/context/pool_session_context.h b/src/include/context/pool_session_context.h
index a662aa01c..e86966e2e 100644
--- a/src/include/context/pool_session_context.h
+++ b/src/include/context/pool_session_context.h
@@ -43,7 +43,7 @@ typedef enum
POOL_READ_COMMITTED, /* Read committed */
POOL_REPEATABLE_READ, /* Repeatable read */
POOL_SERIALIZABLE /* Serializable */
-} POOL_TRANSACTION_ISOLATION;
+} POOL_TRANSACTION_ISOLATION;
/*
* Return values for pool_use_sync_map
@@ -63,7 +63,7 @@ typedef enum
POOL_SENT_MESSAGE_CREATED, /* initial state of sent message */
POOL_SENT_MESSAGE_CLOSED /* sent message closed but close complete
* message has not arrived yet */
-} POOL_SENT_MESSAGE_STATE;
+} POOL_SENT_MESSAGE_STATE;
/*
* Message content of extended query
@@ -90,7 +90,7 @@ typedef struct
int param_offset; /* Offset from contents where actual bind
* parameters are stored. This is meaningful
* only when is_cache_safe is true. */
-} POOL_SENT_MESSAGE;
+} POOL_SENT_MESSAGE;
/*
* List of POOL_SENT_MESSAGE (XXX this should have been implemented using a
@@ -101,7 +101,7 @@ typedef struct
int capacity; /* capacity of list */
int size; /* number of elements */
POOL_SENT_MESSAGE **sent_messages;
-} POOL_SENT_MESSAGE_LIST;
+} POOL_SENT_MESSAGE_LIST;
/*
* Received message queue used in extended query/streaming replication mode.
@@ -123,7 +123,7 @@ typedef enum
POOL_DESCRIBE,
POOL_CLOSE,
POOL_SYNC
-} POOL_MESSAGE_TYPE;
+} POOL_MESSAGE_TYPE;
typedef struct
{
@@ -140,29 +140,35 @@ typedef struct
bool not_forward_to_frontend; /* Do not forward response from
* backend to frontend. This is
* used by parse_before_bind() */
- bool node_ids[MAX_NUM_BACKENDS]; /* backend node map which this message was sent to */
+ bool node_ids[MAX_NUM_BACKENDS]; /* backend node map which this
+ * message was sent to */
POOL_QUERY_CONTEXT *query_context; /* query context */
+
/*
* If "flush" message arrives, this flag is set to true until all buffered
* message for frontend are sent out.
*/
bool flush_pending;
- bool is_tx_started_by_multi_statement; /* true if an explicit transaction has been started by
- multi statement query */
-} POOL_PENDING_MESSAGE;
+ bool is_tx_started_by_multi_statement; /* true if an explicit
+ * transaction has been
+ * started by multi
+ * statement query */
+} POOL_PENDING_MESSAGE;
-typedef enum {
- TEMP_TABLE_CREATING = 1, /* temp table creating, not committed yet. */
- TEMP_TABLE_DROPPING, /* temp table dropping, not committed yet. */
- TEMP_TABLE_CREATE_COMMITTED, /* temp table created and committed. */
- TEMP_TABLE_DROP_COMMITTED, /* temp table dropped and committed. */
-} POOL_TEMP_TABLE_STATE;
+typedef enum
+{
+ TEMP_TABLE_CREATING = 1, /* temp table creating, not committed yet. */
+ TEMP_TABLE_DROPPING, /* temp table dropping, not committed yet. */
+ TEMP_TABLE_CREATE_COMMITTED, /* temp table created and committed. */
+ TEMP_TABLE_DROP_COMMITTED, /* temp table dropped and committed. */
+} POOL_TEMP_TABLE_STATE;
-typedef struct {
+typedef struct
+{
char tablename[MAX_IDENTIFIER_LEN]; /* temporary table name */
- POOL_TEMP_TABLE_STATE state; /* see above */
-} POOL_TEMP_TABLE;
+ POOL_TEMP_TABLE_STATE state; /* see above */
+} POOL_TEMP_TABLE;
typedef enum
@@ -306,7 +312,10 @@ typedef struct
int preferred_main_node_id;
#endif
- /* Whether snapshot is acquired in this transaction. Only used by Snapshot Isolation mode. */
+ /*
+ * Whether snapshot is acquired in this transaction. Only used by Snapshot
+ * Isolation mode.
+ */
SI_STATE si_state;
/* Whether transaction is read only. Only used by Snapshot Isolation mode. */
SI_STATE transaction_read_only;
@@ -321,22 +330,24 @@ typedef struct
* transaction has been
* started by a
* multi-statement-query */
+
/*
- * True if query cache feature disabled until session ends.
- * This is set when SET ROLE/SET SESSION AUTHORIZATION executed.
+ * True if query cache feature disabled until session ends. This is set
+ * when SET ROLE/SET SESSION AUTHORIZATION executed.
*/
- bool query_cache_disabled;
+ bool query_cache_disabled;
+
/*
* True if query cache feature disabled until current transaction ends.
* This is set when REVOKE executed in a transaction.
*/
- bool query_cache_disabled_tx;
+ bool query_cache_disabled_tx;
-} POOL_SESSION_CONTEXT;
+} POOL_SESSION_CONTEXT;
-extern void pool_init_session_context(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+extern void pool_init_session_context(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
extern void pool_session_context_destroy(void);
-extern POOL_SESSION_CONTEXT * pool_get_session_context(bool noerror);
+extern POOL_SESSION_CONTEXT *pool_get_session_context(bool noerror);
extern int pool_get_local_session_id(void);
extern bool pool_is_query_in_progress(void);
extern void pool_set_query_in_progress(void);
@@ -350,18 +361,18 @@ extern void pool_unset_doing_extended_query_message(void);
extern bool pool_is_ignore_till_sync(void);
extern void pool_set_ignore_till_sync(void);
extern void pool_unset_ignore_till_sync(void);
-extern POOL_SENT_MESSAGE * pool_create_sent_message(char kind, int len, char *contents,
- int num_tsparams, const char *name,
- POOL_QUERY_CONTEXT * query_context);
-extern void pool_add_sent_message(POOL_SENT_MESSAGE * message);
+extern POOL_SENT_MESSAGE *pool_create_sent_message(char kind, int len, char *contents,
+ int num_tsparams, const char *name,
+ POOL_QUERY_CONTEXT *query_context);
+extern void pool_add_sent_message(POOL_SENT_MESSAGE *message);
extern bool pool_remove_sent_message(char kind, const char *name);
extern void pool_remove_sent_messages(char kind);
extern void pool_clear_sent_message_list(void);
-extern void pool_sent_message_destroy(POOL_SENT_MESSAGE * message);
-extern POOL_SENT_MESSAGE * pool_get_sent_message(char kind, const char *name, POOL_SENT_MESSAGE_STATE state);
-extern void pool_set_sent_message_state(POOL_SENT_MESSAGE * message);
+extern void pool_sent_message_destroy(POOL_SENT_MESSAGE *message);
+extern POOL_SENT_MESSAGE *pool_get_sent_message(char kind, const char *name, POOL_SENT_MESSAGE_STATE state);
+extern void pool_set_sent_message_state(POOL_SENT_MESSAGE *message);
extern void pool_zap_query_context_in_sent_messages(POOL_QUERY_CONTEXT *query_context);
-extern POOL_SENT_MESSAGE * pool_get_sent_message_by_query_context(POOL_QUERY_CONTEXT * query_context);
+extern POOL_SENT_MESSAGE *pool_get_sent_message_by_query_context(POOL_QUERY_CONTEXT *query_context);
extern void pool_unset_writing_transaction(void);
extern void pool_set_writing_transaction(void);
extern bool pool_is_writing_transaction(void);
@@ -375,28 +386,28 @@ extern void pool_unset_command_success(void);
extern void pool_set_command_success(void);
extern bool pool_is_command_success(void);
extern void pool_copy_prep_where(bool *src, bool *dest);
-extern bool can_query_context_destroy(POOL_QUERY_CONTEXT * qc);
+extern bool can_query_context_destroy(POOL_QUERY_CONTEXT *qc);
extern void pool_pending_messages_init(void);
extern void pool_pending_messages_destroy(void);
-extern POOL_PENDING_MESSAGE * pool_pending_message_create(char kind, int len, char *contents);
-extern void pool_pending_message_free_pending_message(POOL_PENDING_MESSAGE * message);
-extern void pool_pending_message_dest_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context);
-extern void pool_pending_message_query_context_dest_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context);
-extern void pool_pending_message_query_set(POOL_PENDING_MESSAGE * message, POOL_QUERY_CONTEXT * query_context);
-extern void pool_pending_message_add(POOL_PENDING_MESSAGE * message);
-extern POOL_PENDING_MESSAGE * pool_pending_message_head_message(void);
-extern POOL_PENDING_MESSAGE * pool_pending_message_pull_out(void);
-extern POOL_PENDING_MESSAGE * pool_pending_message_get(POOL_MESSAGE_TYPE type);
-extern char pool_get_close_message_spec(POOL_PENDING_MESSAGE * msg);
-extern char *pool_get_close_message_name(POOL_PENDING_MESSAGE * msg);
+extern POOL_PENDING_MESSAGE *pool_pending_message_create(char kind, int len, char *contents);
+extern void pool_pending_message_free_pending_message(POOL_PENDING_MESSAGE *message);
+extern void pool_pending_message_dest_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context);
+extern void pool_pending_message_query_context_dest_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context);
+extern void pool_pending_message_query_set(POOL_PENDING_MESSAGE *message, POOL_QUERY_CONTEXT *query_context);
+extern void pool_pending_message_add(POOL_PENDING_MESSAGE *message);
+extern POOL_PENDING_MESSAGE *pool_pending_message_head_message(void);
+extern POOL_PENDING_MESSAGE *pool_pending_message_pull_out(void);
+extern POOL_PENDING_MESSAGE *pool_pending_message_get(POOL_MESSAGE_TYPE type);
+extern char pool_get_close_message_spec(POOL_PENDING_MESSAGE *msg);
+extern char *pool_get_close_message_name(POOL_PENDING_MESSAGE *msg);
extern void pool_pending_message_reset_previous_message(void);
-extern void pool_pending_message_set_previous_message(POOL_PENDING_MESSAGE * message);
-extern POOL_PENDING_MESSAGE * pool_pending_message_get_previous_message(void);
+extern void pool_pending_message_set_previous_message(POOL_PENDING_MESSAGE *message);
+extern POOL_PENDING_MESSAGE *pool_pending_message_get_previous_message(void);
extern bool pool_pending_message_exists(void);
extern const char *pool_pending_message_type_to_string(POOL_MESSAGE_TYPE type);
extern void pool_check_pending_message_and_reply(POOL_MESSAGE_TYPE type, char kind);
-extern POOL_PENDING_MESSAGE * pool_pending_message_find_lastest_by_query_context(POOL_QUERY_CONTEXT * qc);
-extern int pool_pending_message_get_target_backend_id(POOL_PENDING_MESSAGE * msg);
+extern POOL_PENDING_MESSAGE *pool_pending_message_find_lastest_by_query_context(POOL_QUERY_CONTEXT *qc);
+extern int pool_pending_message_get_target_backend_id(POOL_PENDING_MESSAGE *msg);
extern int pool_pending_message_get_message_num_by_backend_id(int backend_id);
extern void pool_pending_message_set_flush_request(void);
extern void dump_pending_message(void);
@@ -409,12 +420,12 @@ extern void pool_unset_suspend_reading_from_frontend(void);
extern void pool_temp_tables_init(void);
extern void pool_temp_tables_destroy(void);
-extern void pool_temp_tables_add(char * tablename, POOL_TEMP_TABLE_STATE state);
-extern POOL_TEMP_TABLE * pool_temp_tables_find(char * tablename);
-extern void pool_temp_tables_delete(char * tablename, POOL_TEMP_TABLE_STATE state);
-extern void pool_temp_tables_commit_pending(void);
-extern void pool_temp_tables_remove_pending(void);
-extern void pool_temp_tables_dump(void);
+extern void pool_temp_tables_add(char *tablename, POOL_TEMP_TABLE_STATE state);
+extern POOL_TEMP_TABLE *pool_temp_tables_find(char *tablename);
+extern void pool_temp_tables_delete(char *tablename, POOL_TEMP_TABLE_STATE state);
+extern void pool_temp_tables_commit_pending(void);
+extern void pool_temp_tables_remove_pending(void);
+extern void pool_temp_tables_dump(void);
extern bool is_tx_started_by_multi_statement_query(void);
extern void set_tx_started_by_multi_statement_query(void);
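
The pool_temp_tables_* API above tracks temporary tables as a small state machine (POOL_TEMP_TABLE_STATE). Below is a minimal sketch of how a caller might drive it around a transaction, assuming src/include is on the include path; the wrapper functions are hypothetical, not pgpool code, and the exact semantics of the *_pending calls are assumed from the enum comments.

#include "context/pool_session_context.h"

/* Hypothetical wrappers showing the intended lifecycle of the API above. */
static void
on_create_temp_table(char *tablename)
{
	/* CREATE TEMP TABLE seen inside a transaction: not committed yet. */
	pool_temp_tables_add(tablename, TEMP_TABLE_CREATING);
}

static void
on_transaction_end(bool committed)
{
	if (committed)
		pool_temp_tables_commit_pending();	/* presumably promotes pending
											 * entries to *_COMMITTED */
	else
		pool_temp_tables_remove_pending();	/* forget uncommitted changes */
	pool_temp_tables_dump();				/* debug listing */
}
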
diff --git a/src/include/main/health_check.h b/src/include/main/health_check.h
index ad26f2f0a..29f59b7fd 100644
--- a/src/include/main/health_check.h
+++ b/src/include/main/health_check.h
@@ -25,26 +25,36 @@
/*
* Health check statistics per node
*/
-typedef struct {
- uint64 total_count; /* total count of health check */
- uint64 success_count; /* total count of successful health check */
- uint64 fail_count; /* total count of failed health check */
- uint64 skip_count; /* total count of skipped health check */
- uint64 retry_count; /* total count of health check retries */
- uint32 max_retry_count; /* max retry count in a health check session */
- uint64 total_health_check_duration; /* sum of health check duration */
- int32 max_health_check_duration; /* maximum duration spent for a health check session in milli seconds */
- int32 min_health_check_duration; /* minimum duration spent for a health check session in milli seconds */
- time_t last_health_check; /* last health check timestamp */
- time_t last_successful_health_check; /* last successful health check timestamp */
- time_t last_skip_health_check; /* last skipped health check timestamp */
- time_t last_failed_health_check; /* last failed health check timestamp */
+typedef struct
+{
+ uint64 total_count; /* total count of health check */
+ uint64 success_count; /* total count of successful health check */
+ uint64 fail_count; /* total count of failed health check */
+ uint64 skip_count; /* total count of skipped health check */
+ uint64 retry_count; /* total count of health check retries */
+ uint32 max_retry_count; /* max retry count in a health check
+ * session */
+ uint64 total_health_check_duration; /* sum of health check
+ * duration */
+ int32 max_health_check_duration; /* maximum duration spent for a
+ * health check session in milli
+ * seconds */
+ int32 min_health_check_duration; /* minimum duration spent for a
+ * health check session in milli
+ * seconds */
+ time_t last_health_check; /* last health check timestamp */
+ time_t last_successful_health_check; /* last successful health
+ * check timestamp */
+ time_t last_skip_health_check; /* last skipped health check timestamp */
+ time_t last_failed_health_check; /* last failed health check
+ * timestamp */
} POOL_HEALTH_CHECK_STATISTICS;
-extern volatile POOL_HEALTH_CHECK_STATISTICS *health_check_stats; /* health check stats area in shared memory */
+extern volatile POOL_HEALTH_CHECK_STATISTICS *health_check_stats; /* health check stats
+ * area in shared memory */
extern void do_health_check_child(int *node_id);
-extern size_t health_check_stats_shared_memory_size(void);
-extern void health_check_stats_init(POOL_HEALTH_CHECK_STATISTICS *addr);
+extern size_t health_check_stats_shared_memory_size(void);
+extern void health_check_stats_init(POOL_HEALTH_CHECK_STATISTICS *addr);
-#endif /* health_check_h */
+#endif /* health_check_h */
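
The POOL_HEALTH_CHECK_STATISTICS block above is kept per node in shared memory. A minimal sketch of deriving averages from it follows, under two assumptions not shown in this hunk: that health_check_stats is indexed by backend node id, and that total_count is the divisor used for averages.

#include <stdio.h>
#include "main/health_check.h"

/* Assumption: health_check_stats is laid out as one entry per backend node. */
static void
print_node_health_stats(int node_id)
{
	volatile POOL_HEALTH_CHECK_STATISTICS *s = &health_check_stats[node_id];
	double		avg_ms = 0.0;

	if (s->total_count > 0)
		avg_ms = (double) s->total_health_check_duration / (double) s->total_count;

	printf("node %d: %llu checks, %llu failed, %llu retries, avg %.1f ms (min %d, max %d)\n",
		   node_id,
		   (unsigned long long) s->total_count,
		   (unsigned long long) s->fail_count,
		   (unsigned long long) s->retry_count,
		   avg_ms,
		   s->min_health_check_duration,
		   s->max_health_check_duration);
}
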
diff --git a/src/include/main/pgpool_logger.h b/src/include/main/pgpool_logger.h
index 0b2fc089c..82adbd2e1 100644
--- a/src/include/main/pgpool_logger.h
+++ b/src/include/main/pgpool_logger.h
@@ -67,7 +67,7 @@ typedef struct
int32 pid; /* writer's pid */
char is_last; /* last chunk of message? 't' or 'f' ('T' or
* 'F' for CSV case) */
- char data[]; /* data payload starts here */
+ char data[]; /* data payload starts here */
} PipeProtoHeader;
typedef union
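
PipeProtoHeader carries chunked log messages from child processes to the logger process; is_last marks the final chunk ('t'/'f', or 'T'/'F' for CSV payloads). A tiny sketch of the terminator check, assuming pgpool_logger.h is on the include path; the payload-length field needed for full reassembly sits outside this hunk, so only the flag test is shown.

#include "main/pgpool_logger.h"

/* Returns non-zero when this chunk terminates a logical log message. */
static int
is_final_chunk(const PipeProtoHeader *hdr)
{
	/* 't'/'f' for plain log lines, 'T'/'F' when the payload is CSV. */
	return hdr->is_last == 't' || hdr->is_last == 'T';
}
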
diff --git a/src/include/main/pool_internal_comms.h b/src/include/main/pool_internal_comms.h
index 6b4dc60d8..b20b85d6f 100644
--- a/src/include/main/pool_internal_comms.h
+++ b/src/include/main/pool_internal_comms.h
@@ -42,4 +42,4 @@ extern void register_backend_state_sync_req_interrupt(void);
extern void register_inform_quarantine_nodes_req(void);
extern bool register_node_operation_request(POOL_REQUEST_KIND kind,
int *node_id_set, int count, unsigned char flags);
-#endif /* pool_internal_comms_h */
+#endif /* pool_internal_comms_h */
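
register_node_operation_request() queues a node operation for the main process. A hedged sketch of requesting a standby promotion follows; the POOL_REQUEST_KIND value and REQ_DETAIL_* flags come from pool.h (diffed further below), but whether pgpool itself combines these particular flags this way is an assumption.

#include "pool.h"
#include "main/pool_internal_comms.h"

/* Illustrative only: ask the main process to promote a standby node. */
static bool
request_promote_standby(int standby_node_id)
{
	int			node_id = standby_node_id;

	/* REQ_DETAIL_PROMOTE: promote this standby; the current primary is detached. */
	return register_node_operation_request(PROMOTE_NODE_REQUEST,
										   &node_id, 1,
										   REQ_DETAIL_PROMOTE | REQ_DETAIL_SWITCHOVER);
}
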
diff --git a/src/include/parser/explain.h b/src/include/parser/explain.h
index 1a44ae216..b2255b01e 100644
--- a/src/include/parser/explain.h
+++ b/src/include/parser/explain.h
@@ -19,7 +19,7 @@ typedef enum ExplainFormat
EXPLAIN_FORMAT_XML,
EXPLAIN_FORMAT_JSON,
EXPLAIN_FORMAT_YAML
-} ExplainFormat;
+} ExplainFormat;
typedef struct ExplainState
{
@@ -41,12 +41,12 @@ typedef struct ExplainState
List *rtable_names; /* alias names for RTEs */
List *deparse_cxt; /* context list for deparsing expressions */
Bitmapset *printed_subplans; /* ids of SubPlans we've printed */
-} ExplainState;
+} ExplainState;
/* Hook for plugins to get control in ExplainOneQuery() */
typedef void (*ExplainOneQuery_hook_type) (Query *query,
IntoClause *into,
- ExplainState *es,
+ ExplainState * es,
const char *queryString,
ParamListInfo params);
extern PGDLLIMPORT ExplainOneQuery_hook_type ExplainOneQuery_hook;
@@ -57,42 +57,42 @@ extern PGDLLIMPORT explain_get_index_name_hook_type explain_get_index_name_hook;
extern void ExplainQuery(ExplainStmt *stmt, const char *queryString,
- ParamListInfo params, DestReceiver *dest);
+ ParamListInfo params, DestReceiver * dest);
-extern ExplainState *NewExplainState(void);
+extern ExplainState * NewExplainState(void);
extern TupleDesc ExplainResultDesc(ExplainStmt *stmt);
extern void ExplainOneUtility(Node *utilityStmt, IntoClause *into,
- ExplainState *es,
- const char *queryString, ParamListInfo params);
+ ExplainState * es,
+ const char *queryString, ParamListInfo params);
-extern void ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into,
- ExplainState *es, const char *queryString,
- ParamListInfo params, const instr_time *planduration);
+extern void ExplainOnePlan(PlannedStmt * plannedstmt, IntoClause *into,
+ ExplainState * es, const char *queryString,
+ ParamListInfo params, const instr_time * planduration);
-extern void ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc);
-extern void ExplainPrintTriggers(ExplainState *es, QueryDesc *queryDesc);
+extern void ExplainPrintPlan(ExplainState * es, QueryDesc * queryDesc);
+extern void ExplainPrintTriggers(ExplainState * es, QueryDesc * queryDesc);
-extern void ExplainQueryText(ExplainState *es, QueryDesc *queryDesc);
+extern void ExplainQueryText(ExplainState * es, QueryDesc * queryDesc);
-extern void ExplainBeginOutput(ExplainState *es);
-extern void ExplainEndOutput(ExplainState *es);
-extern void ExplainSeparatePlans(ExplainState *es);
+extern void ExplainBeginOutput(ExplainState * es);
+extern void ExplainEndOutput(ExplainState * es);
+extern void ExplainSeparatePlans(ExplainState * es);
extern void ExplainPropertyList(const char *qlabel, List *data,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyListNested(const char *qlabel, List *data,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyText(const char *qlabel, const char *value,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyInteger(const char *qlabel, int value,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyLong(const char *qlabel, long value,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyFloat(const char *qlabel, double value, int ndigits,
- ExplainState *es);
+ ExplainState * es);
extern void ExplainPropertyBool(const char *qlabel, bool value,
- ExplainState *es);
+ ExplainState * es);
#endif /* EXPLAIN_H */
diff --git a/src/include/parser/extensible.h b/src/include/parser/extensible.h
index b11b0a58e..03089137e 100644
--- a/src/include/parser/extensible.h
+++ b/src/include/parser/extensible.h
@@ -27,7 +27,7 @@ typedef struct ExtensibleNode
{
NodeTag type;
const char *extnodename; /* identifier of ExtensibleNodeMethods */
-} ExtensibleNode;
+} ExtensibleNode;
/*
* node_size is the size of an extensible node of this type in bytes.
@@ -62,11 +62,11 @@ typedef struct ExtensibleNodeMethods
void (*nodeOut) (struct StringInfoData *str,
const struct ExtensibleNode *node);
void (*nodeRead) (struct ExtensibleNode *node);
-} ExtensibleNodeMethods;
+} ExtensibleNodeMethods;
-extern void RegisterExtensibleNodeMethods(const ExtensibleNodeMethods *method);
+extern void RegisterExtensibleNodeMethods(const ExtensibleNodeMethods * method);
extern const ExtensibleNodeMethods *GetExtensibleNodeMethods(const char *name,
- bool missing_ok);
+ bool missing_ok);
/*
* Flags for custom paths, indicating what capabilities the resulting scan
diff --git a/src/include/parser/gramparse.h b/src/include/parser/gramparse.h
index db644c278..9571a5ee3 100644
--- a/src/include/parser/gramparse.h
+++ b/src/include/parser/gramparse.h
@@ -69,10 +69,10 @@ typedef struct base_yy_extra_type
/* from parser.c */
-extern int base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp,
- core_yyscan_t yyscanner);
-extern int minimal_base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp,
+extern int base_yylex(YYSTYPE *lvalp, YYLTYPE * llocp,
core_yyscan_t yyscanner);
+extern int minimal_base_yylex(YYSTYPE *lvalp, YYLTYPE * llocp,
+ core_yyscan_t yyscanner);
/* from gram.y */
extern void parser_init(base_yy_extra_type *yyext);
diff --git a/src/include/parser/nodes.h b/src/include/parser/nodes.h
index 1d5bb3ed6..d0d4f3cce 100644
--- a/src/include/parser/nodes.h
+++ b/src/include/parser/nodes.h
@@ -32,9 +32,9 @@ typedef enum NodeTag
T_Invalid = 0,
/* pgpool Extension */
- T_PgpoolVariableSetStmt,
- T_PgpoolVariableShowStmt,
- T_PgpoolQueryCacheStmt,
+ T_PgpoolVariableSetStmt,
+ T_PgpoolVariableShowStmt,
+ T_PgpoolQueryCacheStmt,
#include "nodetags.h"
} NodeTag;
@@ -201,7 +201,7 @@ extern void outToken(struct StringInfoData *str, const char *s);
extern void outBitmapset(struct StringInfoData *str,
const struct Bitmapset *bms);
extern void outDatum(struct StringInfoData *str, uintptr_t value,
- int typlen, bool typbyval);
+ int typlen, bool typbyval);
extern char *nodeToString(const void *obj);
extern char *nodeToStringWithLocations(const void *obj);
extern char *bmsToString(const struct Bitmapset *bms);
@@ -362,7 +362,7 @@ typedef enum AggStrategy
AGG_SORTED, /* grouped agg, input must be sorted */
AGG_HASHED, /* grouped agg, use internal hashtable */
AGG_MIXED, /* grouped agg, hash and sort both used */
-} AggStrategy;
+} AggStrategy;
/*
* AggSplit -
@@ -406,13 +406,13 @@ typedef enum SetOpCmd
SETOPCMD_INTERSECT_ALL,
SETOPCMD_EXCEPT,
SETOPCMD_EXCEPT_ALL,
-} SetOpCmd;
+} SetOpCmd;
typedef enum SetOpStrategy
{
SETOP_SORTED, /* input must be sorted */
SETOP_HASHED, /* use internal hashtable */
-} SetOpStrategy;
+} SetOpStrategy;
/*
* OnConflictAction -
diff --git a/src/include/parser/parsenodes.h b/src/include/parser/parsenodes.h
index 1593bad65..cf0ee5bcd 100644
--- a/src/include/parser/parsenodes.h
+++ b/src/include/parser/parsenodes.h
@@ -916,7 +916,7 @@ typedef struct PartitionBoundSpec
List *lowerdatums; /* List of PartitionRangeDatums */
List *upperdatums; /* List of PartitionRangeDatums */
- ParseLoc location; /* token location, or -1 if unknown */
+ ParseLoc location; /* token location, or -1 if unknown */
} PartitionBoundSpec;
/*
@@ -950,7 +950,7 @@ typedef struct PartitionRangeDatum
typedef struct SinglePartitionSpec
{
NodeTag type;
-} SinglePartitionSpec;
+} SinglePartitionSpec;
/*
* PartitionCmd - info for ALTER TABLE/INDEX ATTACH/DETACH PARTITION commands
@@ -1299,7 +1299,7 @@ typedef struct RTEPermissionInfo
Bitmapset *selectedCols; /* columns needing SELECT permission */
Bitmapset *insertedCols; /* columns needing INSERT permission */
Bitmapset *updatedCols; /* columns needing UPDATE permission */
-} RTEPermissionInfo;
+} RTEPermissionInfo;
/*
* RangeTblFunction -
@@ -2498,7 +2498,7 @@ typedef struct GrantStmt
NodeTag type;
bool is_grant; /* true = GRANT, false = REVOKE */
GrantTargetType targtype; /* type of the grant target */
- ObjectType objtype; /* kind of object being operated on */
+ ObjectType objtype; /* kind of object being operated on */
List *objects; /* list of RangeVar nodes, ObjectWithArgs
* nodes, or plain names (as String values) */
List *privileges; /* list of AccessPriv nodes */
@@ -3491,7 +3491,7 @@ typedef struct InlineCodeBlock
Oid langOid; /* OID of selected language */
bool langIsTrusted; /* trusted property of the language */
bool atomic; /* atomic execution context */
-} InlineCodeBlock;
+} InlineCodeBlock;
/* ----------------------
* CALL statement
@@ -3521,7 +3521,7 @@ typedef struct CallContext
NodeTag type;
bool atomic;
-} CallContext;
+} CallContext;
/* ----------------------
* Alter Object Rename Statement
diff --git a/src/include/parser/parser.h b/src/include/parser/parser.h
index 9e9384d86..e412c438e 100644
--- a/src/include/parser/parser.h
+++ b/src/include/parser/parser.h
@@ -77,5 +77,5 @@ extern Node *makeIntConst(int val, int location);
extern List *get_dummy_write_query_tree(void);
extern List *get_dummy_read_query_tree(void);
-extern Node * get_dummy_insert_query_node(void);
+extern Node *get_dummy_insert_query_node(void);
#endif /* PARSER_H */
diff --git a/src/include/parser/pg_class.h b/src/include/parser/pg_class.h
index eb5c48ccf..7ebf4c439 100644
--- a/src/include/parser/pg_class.h
+++ b/src/include/parser/pg_class.h
@@ -145,7 +145,7 @@ CATALOG(pg_class,1259,RelationRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(83,Relat
* the format of pg_class relation.
* ----------------
*/
-typedef FormData_pg_class *Form_pg_class;
+typedef FormData_pg_class * Form_pg_class;
DECLARE_UNIQUE_INDEX_PKEY(pg_class_oid_index, 2662, ClassOidIndexId, pg_class, btree(oid oid_ops));
DECLARE_UNIQUE_INDEX(pg_class_relname_nsp_index, 2663, ClassNameNspIndexId, pg_class, btree(relname name_ops, relnamespace oid_ops));
@@ -164,7 +164,7 @@ MAKE_SYSCACHE(RELNAMENSP, pg_class_relname_nsp_index, 128);
#define RELKIND_COMPOSITE_TYPE 'c' /* composite type */
#define RELKIND_FOREIGN_TABLE 'f' /* foreign table */
#define RELKIND_PARTITIONED_TABLE 'p' /* partitioned table */
-#define RELKIND_PARTITIONED_INDEX 'I' /* partitioned index */
+#define RELKIND_PARTITIONED_INDEX 'I' /* partitioned index */
#endif /* NOT_USED_IN_PGPOOL */
diff --git a/src/include/parser/pg_list.h b/src/include/parser/pg_list.h
index 738185c19..4a63a466d 100644
--- a/src/include/parser/pg_list.h
+++ b/src/include/parser/pg_list.h
@@ -97,7 +97,7 @@ typedef struct ForBothCellState
const List *l2;
int i1; /* current element indexes */
int i2;
-} ForBothCellState;
+} ForBothCellState;
typedef struct ForThreeState
{
@@ -105,7 +105,7 @@ typedef struct ForThreeState
const List *l2;
const List *l3;
int i; /* common element index */
-} ForThreeState;
+} ForThreeState;
typedef struct ForFourState
{
@@ -114,7 +114,7 @@ typedef struct ForFourState
const List *l3;
const List *l4;
int i; /* common element index */
-} ForFourState;
+} ForFourState;
typedef struct ForFiveState
{
@@ -124,7 +124,7 @@ typedef struct ForFiveState
const List *l4;
const List *l5;
int i; /* common element index */
-} ForFiveState;
+} ForFiveState;
/*
* These routines are small enough, and used often enough, to justify being
diff --git a/src/include/parser/pg_trigger.h b/src/include/parser/pg_trigger.h
index 8756f7560..745fe049a 100644
--- a/src/include/parser/pg_trigger.h
+++ b/src/include/parser/pg_trigger.h
@@ -79,7 +79,7 @@ CATALOG(pg_trigger,2620,TriggerRelationId)
* the format of pg_trigger relation.
* ----------------
*/
-typedef FormData_pg_trigger *Form_pg_trigger;
+typedef FormData_pg_trigger * Form_pg_trigger;
DECLARE_TOAST(pg_trigger, 2336, 2337);
diff --git a/src/include/parser/pg_wchar.h b/src/include/parser/pg_wchar.h
index 807e2df83..59582d1ff 100644
--- a/src/include/parser/pg_wchar.h
+++ b/src/include/parser/pg_wchar.h
@@ -486,7 +486,7 @@ typedef struct
uint8 b4_4_lower; /* min/max allowed value for 4th input byte */
uint8 b4_4_upper;
-} pg_mb_radix_tree;
+} pg_mb_radix_tree;
/*
* UTF-8 to local code conversion map (for combined characters)
@@ -496,7 +496,7 @@ typedef struct
uint32 utf1; /* UTF-8 code 1 */
uint32 utf2; /* UTF-8 code 2 */
uint32 code; /* local code */
-} pg_utf_to_local_combined;
+} pg_utf_to_local_combined;
/*
* local code to UTF-8 conversion map (for combined characters)
@@ -506,7 +506,7 @@ typedef struct
uint32 code; /* local code */
uint32 utf1; /* UTF-8 code 1 */
uint32 utf2; /* UTF-8 code 2 */
-} pg_local_to_utf_combined;
+} pg_local_to_utf_combined;
/*
* callback function for algorithmic encoding conversions (in either direction)
@@ -745,14 +745,14 @@ extern unsigned short CNStoBIG5(unsigned short cns, unsigned char lc);
extern int UtfToLocal(const unsigned char *utf, int len,
unsigned char *iso,
- const pg_mb_radix_tree *map,
- const pg_utf_to_local_combined *cmap, int cmapsize,
+ const pg_mb_radix_tree * map,
+ const pg_utf_to_local_combined * cmap, int cmapsize,
utf_local_conversion_func conv_func,
int encoding, bool noError);
extern int LocalToUtf(const unsigned char *iso, int len,
unsigned char *utf,
- const pg_mb_radix_tree *map,
- const pg_local_to_utf_combined *cmap, int cmapsize,
+ const pg_mb_radix_tree * map,
+ const pg_local_to_utf_combined * cmap, int cmapsize,
utf_local_conversion_func conv_func,
int encoding, bool noError);
@@ -787,7 +787,7 @@ extern int mic2latin_with_table(const unsigned char *mic, unsigned char *p,
const unsigned char *tab, bool noError);
#ifdef WIN32
-extern WCHAR *pgwin32_message_to_UTF16(const char *str, int len, int *utf16len);
+extern WCHAR * pgwin32_message_to_UTF16(const char *str, int len, int *utf16len);
#endif
#endif /* PG_WCHAR_H */
diff --git a/src/include/parser/pool_parser.h b/src/include/parser/pool_parser.h
index 1c1e46c25..f10a9f8da 100644
--- a/src/include/parser/pool_parser.h
+++ b/src/include/parser/pool_parser.h
@@ -186,6 +186,6 @@ typedef int16 AttrNumber;
* for portability. Don't use "offsetof(struct s, f[0])", as this doesn't
* work with MSVC and with C++ compilers.
*/
-#define FLEXIBLE_ARRAY_MEMBER /* empty */
+#define FLEXIBLE_ARRAY_MEMBER /* empty */
#endif /* POOL_PARSER_H */
diff --git a/src/include/parser/scanner.h b/src/include/parser/scanner.h
index 4c6953a31..8c78b0a57 100644
--- a/src/include/parser/scanner.h
+++ b/src/include/parser/scanner.h
@@ -136,12 +136,12 @@ extern PGDLLIMPORT const uint16 ScanKeywordTokens[];
/* Entry points in parser/scan.l */
extern core_yyscan_t scanner_init(const char *str,
- int slen,
+ int slen,
core_yy_extra_type *yyext,
const ScanKeywordList *keywordlist,
const uint16 *keyword_tokens);
extern void scanner_finish(core_yyscan_t yyscanner);
-extern int core_yylex(core_YYSTYPE *yylval_param, YYLTYPE *yylloc_param,
+extern int core_yylex(core_YYSTYPE *yylval_param, YYLTYPE * yylloc_param,
core_yyscan_t yyscanner);
extern int scanner_errposition(int location, core_yyscan_t yyscanner);
extern void setup_scanner_errposition_callback(ScannerCallbackState *scbstate,
diff --git a/src/include/pcp/libpcp_ext.h b/src/include/pcp/libpcp_ext.h
index 3a6d87858..fe5d9a362 100644
--- a/src/include/pcp/libpcp_ext.h
+++ b/src/include/pcp/libpcp_ext.h
@@ -65,7 +65,7 @@ typedef enum
CON_CONNECT_WAIT, /* waiting for connection starting */
CON_UP, /* up and running */
CON_DOWN /* down, disconnected */
-} BACKEND_STATUS;
+} BACKEND_STATUS;
/* backend status name strings */
#define BACKEND_STATUS_CON_UNUSED "unused"
@@ -80,7 +80,7 @@ typedef enum
typedef struct
{
BACKEND_STATUS status[MAX_NUM_BACKENDS];
-} BackendStatusRecord;
+} BackendStatusRecord;
typedef enum
{
@@ -88,7 +88,7 @@ typedef enum
ROLE_REPLICA,
ROLE_PRIMARY,
ROLE_STANDBY
-} SERVER_ROLE;
+} SERVER_ROLE;
/*
* PostgreSQL backend descriptor. Placed on shared memory area.
@@ -98,24 +98,31 @@ typedef struct
char backend_hostname[MAX_DB_HOST_NAMELEN]; /* backend host name */
int backend_port; /* backend port numbers */
BACKEND_STATUS backend_status; /* backend status */
- char pg_backend_status[NAMEDATALEN]; /* backend status examined by show pool_nodes and pcp_node_info*/
+ char pg_backend_status[NAMEDATALEN]; /* backend status examined by
+ * show pool_nodes and
+ * pcp_node_info */
time_t status_changed_time; /* backend status changed time */
double backend_weight; /* normalized backend load balance ratio */
double unnormalized_weight; /* described parameter */
char backend_data_directory[MAX_PATH_LENGTH];
- char backend_application_name[NAMEDATALEN]; /* application_name for walreceiver */
+ char backend_application_name[NAMEDATALEN]; /* application_name for
+ * walreceiver */
unsigned short flag; /* various flags */
bool quarantine; /* true if node is CON_DOWN because of
* quarantine */
uint64 standby_delay; /* The replication delay against the primary */
- bool standby_delay_by_time; /* true if standby_delay is measured in microseconds, not bytes */
+ bool standby_delay_by_time; /* true if standby_delay is measured
+ * in microseconds, not bytes */
SERVER_ROLE role; /* Role of server. used by pcp_node_info and
* failover() to keep track of quarantined
* primary node */
- char pg_role[NAMEDATALEN]; /* backend role examined by show pool_nodes and pcp_node_info*/
- char replication_state [NAMEDATALEN]; /* "state" from pg_stat_replication */
- char replication_sync_state [NAMEDATALEN]; /* "sync_state" from pg_stat_replication */
-} BackendInfo;
+ char pg_role[NAMEDATALEN]; /* backend role examined by show
+ * pool_nodes and pcp_node_info */
+ char replication_state[NAMEDATALEN]; /* "state" from
+ * pg_stat_replication */
+ char replication_sync_state[NAMEDATALEN]; /* "sync_state" from
+ * pg_stat_replication */
+} BackendInfo;
typedef struct
{
@@ -125,7 +132,7 @@ typedef struct
* reloading pgpool.conf. */
BackendInfo backend_info[MAX_NUM_BACKENDS];
-} BackendDesc;
+} BackendDesc;
typedef enum
{
@@ -134,7 +141,7 @@ typedef enum
IDLE,
IDLE_IN_TRANS,
CONNECTING
-} ProcessStatus;
+} ProcessStatus;
/*
 * maximum cancel key length
@@ -152,11 +159,11 @@ typedef struct
int major; /* protocol major version */
int minor; /* protocol minor version */
int pid; /* backend process id */
- char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
+ char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
int32 keylen; /* cancel key length */
int counter; /* used counter */
time_t create_time; /* connection creation time */
- time_t client_connection_time; /* client connection time */
+ time_t client_connection_time; /* client connection time */
time_t client_disconnection_time; /* client last disconnection time */
int client_idle_duration; /* client idle duration time (s) */
int load_balancing_node; /* load balancing node */
@@ -175,7 +182,7 @@ typedef struct
* it should not be treated as a backend node failure. This flag is used
* to handle pg_terminate_backend()
*/
-} ConnectionInfo;
+} ConnectionInfo;
/*
* process information
@@ -188,24 +195,27 @@ typedef struct
{
pid_t pid; /* OS's process id */
time_t start_time; /* fork() time */
- char connected; /* if not 0 this process is already used*/
+ char connected; /* if not 0 this process is already used */
int wait_for_connect; /* waiting time for client connection (s) */
ConnectionInfo *connection_info; /* head of the connection info for
* this process */
- int client_connection_count; /* how many times clients used this process */
- ProcessStatus status;
- char client_host[NI_MAXHOST]; /* client host. Only valid if status != WAIT_FOR_CONNECT */
- char client_port[NI_MAXSERV]; /* client port. Only valid if status != WAIT_FOR_CONNECT */
- char statement[MAXSTMTLEN]; /* the last statement sent to backend */
- uint64 node_ids[2]; /* "statement" is sent to the node id (bitmap) */
+ int client_connection_count; /* how many times clients used
+ * this process */
+ ProcessStatus status;
+ char client_host[NI_MAXHOST]; /* client host. Only valid if
+ * status != WAIT_FOR_CONNECT */
+ char client_port[NI_MAXSERV]; /* client port. Only valid if
+ * status != WAIT_FOR_CONNECT */
+ char statement[MAXSTMTLEN]; /* the last statement sent to backend */
+ uint64 node_ids[2]; /* "statement" is sent to the node id (bitmap) */
bool need_to_restart; /* If non 0, exit this child process as
* soon as current session ends. Typical
* case this flag being set is failback a
* node in streaming replication mode. */
bool exit_if_idle;
- int pooled_connections; /* Total number of pooled connections
- * by this child */
-} ProcessInfo;
+ int pooled_connections; /* Total number of pooled connections by
+ * this child */
+} ProcessInfo;
/*
* reporting types
@@ -229,7 +239,7 @@ typedef struct
char name[POOLCONFIG_MAXNAMELEN + 1];
char value[POOLCONFIG_MAXVALLEN + 1];
char desc[POOLCONFIG_MAXDESCLEN + 1];
-} POOL_REPORT_CONFIG;
+} POOL_REPORT_CONFIG;
/* nodes report struct */
typedef struct
@@ -248,7 +258,7 @@ typedef struct
char rep_state[POOLCONFIG_MAXWEIGHTLEN + 1];
char rep_sync_state[POOLCONFIG_MAXWEIGHTLEN + 1];
char last_status_change[POOLCONFIG_MAXDATELEN];
-} POOL_REPORT_NODES;
+} POOL_REPORT_NODES;
/* processes report struct */
typedef struct
@@ -261,7 +271,7 @@ typedef struct
char backend_connection_time[POOLCONFIG_MAXDATELEN + 1];
char pool_counter[POOLCONFIG_MAXCOUNTLEN + 1];
char status[POOLCONFIG_MAXPROCESSSTATUSLEN + 1];
-} POOL_REPORT_PROCESSES;
+} POOL_REPORT_PROCESSES;
/* pools reporting struct */
typedef struct
@@ -287,13 +297,13 @@ typedef struct
char client_host[NI_MAXHOST];
char client_port[NI_MAXSERV];
char statement[MAXSTMTLEN];
-} POOL_REPORT_POOLS;
+} POOL_REPORT_POOLS;
/* version struct */
typedef struct
{
char version[POOLCONFIG_MAXVALLEN + 1];
-} POOL_REPORT_VERSION;
+} POOL_REPORT_VERSION;
/* health check statistics report struct */
typedef struct
@@ -304,21 +314,21 @@ typedef struct
char status[POOLCONFIG_MAXSTATLEN + 1];
char role[POOLCONFIG_MAXWEIGHTLEN + 1];
char last_status_change[POOLCONFIG_MAXDATELEN];
- char total_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char success_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char fail_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char skip_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char retry_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char average_retry_count[POOLCONFIG_MAXLONGCOUNTLEN+1];
- char max_retry_count[POOLCONFIG_MAXCOUNTLEN+1];
- char max_health_check_duration[POOLCONFIG_MAXCOUNTLEN+1];
- char min_health_check_duration[POOLCONFIG_MAXCOUNTLEN+1];
- char average_health_check_duration[POOLCONFIG_MAXLONGCOUNTLEN+1];
+ char total_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char success_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char fail_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char skip_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char retry_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char average_retry_count[POOLCONFIG_MAXLONGCOUNTLEN + 1];
+ char max_retry_count[POOLCONFIG_MAXCOUNTLEN + 1];
+ char max_health_check_duration[POOLCONFIG_MAXCOUNTLEN + 1];
+ char min_health_check_duration[POOLCONFIG_MAXCOUNTLEN + 1];
+ char average_health_check_duration[POOLCONFIG_MAXLONGCOUNTLEN + 1];
char last_health_check[POOLCONFIG_MAXDATELEN];
char last_successful_health_check[POOLCONFIG_MAXDATELEN];
char last_skip_health_check[POOLCONFIG_MAXDATELEN];
char last_failed_health_check[POOLCONFIG_MAXDATELEN];
-} POOL_HEALTH_CHECK_STATS;
+} POOL_HEALTH_CHECK_STATS;
/* show backend statistics report struct */
typedef struct
@@ -333,11 +343,11 @@ typedef struct
char update_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
char delete_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
char ddl_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
- char other_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
- char panic_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
- char fatal_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
- char error_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
-} POOL_BACKEND_STATS;
+ char other_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
+ char panic_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
+ char fatal_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
+ char error_cnt[POOLCONFIG_MAXWEIGHTLEN + 1];
+} POOL_BACKEND_STATS;
typedef enum
{
@@ -401,8 +411,8 @@ extern PCPResultInfo * pcp_node_info(PCPConnInfo * pcpCon, int nid);
extern PCPResultInfo * pcp_health_check_stats(PCPConnInfo * pcpCon, int nid);
extern PCPResultInfo * pcp_process_count(PCPConnInfo * pcpConn);
extern PCPResultInfo * pcp_process_info(PCPConnInfo * pcpConn, int pid);
-extern PCPResultInfo * pcp_reload_config(PCPConnInfo * pcpConn,char command_scope);
-extern PCPResultInfo * pcp_log_rotate(PCPConnInfo * pcpConn,char command_scope);
+extern PCPResultInfo * pcp_reload_config(PCPConnInfo * pcpConn, char command_scope);
+extern PCPResultInfo * pcp_log_rotate(PCPConnInfo * pcpConn, char command_scope);
extern PCPResultInfo * pcp_invalidate_query_cache(PCPConnInfo * pcpConn);
extern PCPResultInfo * pcp_detach_node(PCPConnInfo * pcpConn, int nid);
@@ -430,8 +440,8 @@ extern int pcp_result_is_empty(PCPResultInfo * res);
extern char *role_to_str(SERVER_ROLE role);
-extern int * pool_health_check_stats_offsets(int *n);
-extern int * pool_report_pools_offsets(int *n);
+extern int *pool_health_check_stats_offsets(int *n);
+extern int *pool_report_pools_offsets(int *n);
/* ------------------------------
* pcp_error.c
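
BackendInfo.standby_delay is interpreted according to standby_delay_by_time: microseconds when true, a byte offset otherwise. A small illustrative formatter using only fields shown in the struct above:

#include <stdio.h>
#include "pcp/libpcp_ext.h"

static void
print_standby_delay(const BackendInfo *bi)
{
	if (bi->standby_delay_by_time)
		/* standby_delay holds microseconds in this mode */
		printf("%s:%d lagging by %.3f ms\n",
			   bi->backend_hostname, bi->backend_port,
			   (double) bi->standby_delay / 1000.0);
	else
		/* otherwise it is a WAL byte-offset difference */
		printf("%s:%d lagging by %llu bytes\n",
			   bi->backend_hostname, bi->backend_port,
			   (unsigned long long) bi->standby_delay);
}
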
diff --git a/src/include/pcp/pcp.h b/src/include/pcp/pcp.h
index 2aab91a10..e40b96bdc 100644
--- a/src/include/pcp/pcp.h
+++ b/src/include/pcp/pcp.h
@@ -42,7 +42,8 @@ typedef struct PCPWDNodeInfo
char nodeName[WD_MAX_HOST_NAMELEN];
char hostName[WD_MAX_HOST_NAMELEN]; /* host name */
char stateName[WD_MAX_HOST_NAMELEN]; /* state name */
- char membership_status_string[WD_MAX_HOST_NAMELEN]; /* membership status of this node */
+ char membership_status_string[WD_MAX_HOST_NAMELEN]; /* membership status of
+ * this node */
int wd_port; /* watchdog port */
int wd_priority; /* node priority in leader election */
int pgpool_port; /* pgpool port */
diff --git a/src/include/pcp/pcp_stream.h b/src/include/pcp/pcp_stream.h
index b718b64fd..ad006b4f5 100644
--- a/src/include/pcp/pcp_stream.h
+++ b/src/include/pcp/pcp_stream.h
@@ -41,13 +41,13 @@ typedef struct
int po; /* pending data offset */
int bufsz; /* pending data buffer size */
int len; /* pending data length */
-} PCP_CONNECTION;
+} PCP_CONNECTION;
-extern PCP_CONNECTION * pcp_open(int fd);
-extern void pcp_close(PCP_CONNECTION * pc);
-extern int pcp_read(PCP_CONNECTION * pc, void *buf, int len);
-extern int pcp_write(PCP_CONNECTION * pc, void *buf, int len);
-extern int pcp_flush(PCP_CONNECTION * pc);
+extern PCP_CONNECTION *pcp_open(int fd);
+extern void pcp_close(PCP_CONNECTION *pc);
+extern int pcp_read(PCP_CONNECTION *pc, void *buf, int len);
+extern int pcp_write(PCP_CONNECTION *pc, void *buf, int len);
+extern int pcp_flush(PCP_CONNECTION *pc);
#define UNIX_DOMAIN_PATH "/tmp"
diff --git a/src/include/pcp/pcp_worker.h b/src/include/pcp/pcp_worker.h
index 6b5710fcd..90e6f82a3 100644
--- a/src/include/pcp/pcp_worker.h
+++ b/src/include/pcp/pcp_worker.h
@@ -29,4 +29,4 @@ extern void pcp_mark_recovery_finished(void);
extern bool pcp_mark_recovery_in_progress(void);
-#endif /* pcp_worker_h */
+#endif /* pcp_worker_h */
diff --git a/src/include/pcp/recovery.h b/src/include/pcp/recovery.h
index bbf069033..2280a766c 100644
--- a/src/include/pcp/recovery.h
+++ b/src/include/pcp/recovery.h
@@ -24,7 +24,7 @@
extern void start_recovery(int recovery_node);
extern void finish_recovery(void);
-extern int wait_connection_closed(void);
-extern int ensure_conn_counter_validity(void);
+extern int wait_connection_closed(void);
+extern int ensure_conn_counter_validity(void);
-#endif /* recovery_h */
+#endif /* recovery_h */
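
The prototypes above outline the online recovery flow. The sketch below assumes that wait_connection_closed() returns non-zero when clients fail to disconnect in time and that the caller invokes it before start_recovery(); both points, and the omission of ereport()-based error handling, are assumptions for illustration.

#include "pcp/recovery.h"

static void
recover_node(int node_id)
{
	if (wait_connection_closed() != 0)
		return;					/* give up: clients still connected */

	start_recovery(node_id);	/* errors are assumed to be raised via ereport() */
	finish_recovery();
}
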
diff --git a/src/include/pool.h b/src/include/pool.h
index c34a06d2a..233d36608 100644
--- a/src/include/pool.h
+++ b/src/include/pool.h
@@ -95,7 +95,7 @@ typedef enum
POOL_ERROR,
POOL_FATAL,
POOL_DEADLOCK
-} POOL_STATUS;
+} POOL_STATUS;
typedef enum
{
@@ -103,7 +103,7 @@ typedef enum
POOL_SOCKET_VALID,
POOL_SOCKET_ERROR,
POOL_SOCKET_EOF
-} POOL_SOCKET_STATE;
+} POOL_SOCKET_STATE;
/*
* Imported from src/include/libpq/pqcomm.h as of PostgreSQL 18.
@@ -164,7 +164,7 @@ typedef struct StartupPacket_v2
char options[SM_OPTIONS]; /* Optional additional args */
char unused[SM_UNUSED]; /* Unused */
char tty[SM_TTY]; /* Tty for debug output */
-} StartupPacket_v2;
+} StartupPacket_v2;
/* startup packet info */
typedef struct
@@ -185,8 +185,8 @@ typedef struct CancelPacket
{
int protoVersion; /* Protocol version */
int pid; /* backend process id */
- char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
-} CancelPacket;
+ char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
+} CancelPacket;
#define MAX_PASSWORD_SIZE 1024
@@ -285,7 +285,7 @@ typedef struct
PasswordMapping *passwordMapping;
ConnectionInfo *con_info; /* shared memory coninfo used for handling the
* query containing pg_terminate_backend */
-} POOL_CONNECTION;
+} POOL_CONNECTION;
/*
* connection pool structure
@@ -294,26 +294,28 @@ typedef struct
{
StartupPacket *sp; /* startup packet info */
int pid; /* backend pid */
- char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
+ char key[MAX_CANCELKEY_LENGTH]; /* cancel key */
+
/*
- * Cancel key length. In protocol version 3.0, it is 4.
- * In 3.2 or later, the maximum length is 256.
+ * Cancel key length. In protocol version 3.0, it is 4. In 3.2 or later,
+ * the maximum length is 256.
*/
int32 keylen;
POOL_CONNECTION *con;
time_t closetime; /* absolute time in second when the connection
* closed if 0, that means the connection is
* under use. */
+
/*
* Protocol version after negotiation. If nplen == 0, no negotiation has
* been done.
*/
int negotiated_major;
int negotiated_minor;
- char *negotiateProtocolMsg; /* Raw NegotiateProtocol messag */
+ char *negotiateProtocolMsg; /* Raw NegotiateProtocol message */
 int32 nplen; /* message length of NegotiateProtocol message */
-} POOL_CONNECTION_POOL_SLOT;
+} POOL_CONNECTION_POOL_SLOT;
typedef struct
{
@@ -324,7 +326,7 @@ typedef struct
*/
ConnectionInfo *info;
POOL_CONNECTION_POOL_SLOT *slots[MAX_NUM_BACKENDS];
-} POOL_CONNECTION_POOL;
+} POOL_CONNECTION_POOL;
/* Defined in pool_session_context.h */
@@ -343,7 +345,7 @@ extern int pool_get_major_version(void);
extern bool pool_is_node_to_be_sent_in_current_query(int node_id);
extern int pool_virtual_main_db_node_id(void);
-extern BACKEND_STATUS * my_backend_status[];
+extern BACKEND_STATUS *my_backend_status[];
extern int my_main_node_id;
#define VALID_BACKEND(backend_id) \
@@ -402,7 +404,7 @@ typedef enum
POOL_NODE_STATUS_PRIMARY, /* primary node */
POOL_NODE_STATUS_STANDBY, /* standby node */
POOL_NODE_STATUS_INVALID /* invalid node (split brain, stand alone) */
-} POOL_NODE_STATUS;
+} POOL_NODE_STATUS;
/* Clustering mode macros */
#define REPLICATION (pool_config->backend_clustering_mode == CM_NATIVE_REPLICATION || \
@@ -432,10 +434,11 @@ typedef enum
#define ACCEPT_FD_SEM 4
#define SI_CRITICAL_REGION_SEM 5
#define FOLLOW_PRIMARY_SEM 6
-#define MAIN_EXIT_HANDLER_SEM 7 /* used in exit_hander in pgpool main process */
+#define MAIN_EXIT_HANDLER_SEM 7 /* used in exit_handler in pgpool main
+ * process */
#define MAX_REQUEST_QUEUE_SIZE 10
-#define MAX_SEC_WAIT_FOR_CLUSTER_TRANSACTION 10 /* time in seconds to keep
+#define MAX_SEC_WAIT_FOR_CLUSTER_TRANSACTION 10 /* time in seconds to keep
* retrying for a watchdog
* command if the cluster is
* not in stable state */
@@ -465,7 +468,7 @@ typedef enum
CLOSE_IDLE_REQUEST,
PROMOTE_NODE_REQUEST,
NODE_QUARANTINE_REQUEST
-} POOL_REQUEST_KIND;
+} POOL_REQUEST_KIND;
#define REQ_DETAIL_SWITCHOVER 0x00000001 /* failover due to switch over */
#define REQ_DETAIL_WATCHDOG 0x00000002 /* failover req from watchdog */
@@ -473,8 +476,10 @@ typedef enum
* require majority vote */
#define REQ_DETAIL_UPDATE 0x00000008 /* failover req is just an update
* node status request */
-#define REQ_DETAIL_PROMOTE 0x00000010 /* failover req is actually promoting the specified standby node.
- * current primary will be detached */
+#define REQ_DETAIL_PROMOTE 0x00000010 /* failover req is actually
+ * promoting the specified standby
+ * node. current primary will be
+ * detached */
typedef struct
{
@@ -482,36 +487,46 @@ typedef struct
unsigned char request_details; /* option flags kind */
int node_id[MAX_NUM_BACKENDS]; /* request node id */
int count; /* request node ids count */
-} POOL_REQUEST_NODE;
+} POOL_REQUEST_NODE;
typedef struct
{
POOL_REQUEST_NODE request[MAX_REQUEST_QUEUE_SIZE];
int request_queue_head;
int request_queue_tail;
- int main_node_id; /* the youngest node id which is not in down
+ int main_node_id; /* the youngest node id which is not in down
* status */
int primary_node_id; /* the primary node id in streaming
* replication mode */
- int conn_counter; /* number of connections from clients to pgpool */
+ int conn_counter; /* number of connections from clients to
+ * pgpool */
bool switching; /* it true, failover or failback is in
* progress */
- /* greater than 0 if follow primary command or detach_false_primary in
- * execution */
+
+ /*
+ * greater than 0 if follow primary command or detach_false_primary in
+ * execution
+ */
bool follow_primary_count;
- bool follow_primary_lock_pending; /* watchdog process can't wait
- * for follow_primary lock acquisition
- * in case it is held at the time of
- * request.
- * This flag indicates that lock was requested
- * by watchdog coordinator and next contender should
- * wait for the coordinator to release the lock
- */
- bool follow_primary_lock_held_remotely; /* true when lock is held by
- watchdog coordinator*/
- bool follow_primary_ongoing; /* true if follow primary command is ongoing */
- bool query_cache_invalidate_request; /* true if pcp_invalidate_query_cache requested */
-} POOL_REQUEST_INFO;
+ bool follow_primary_lock_pending; /* watchdog process can't wait
+ * for follow_primary lock
+ * acquisition in case it is
+ * held at the time of
+ * request. This flag
+ * indicates that lock was
+ * requested by watchdog
+ * coordinator and next
+ * contender should wait for
+ * the coordinator to release
+ * the lock */
+ bool follow_primary_lock_held_remotely; /* true when lock is held
+ * by watchdog coordinator */
+ bool follow_primary_ongoing; /* true if follow primary command is
+ * ongoing */
+ bool query_cache_invalidate_request; /* true if
+ * pcp_invalidate_query_cache
+ * requested */
+} POOL_REQUEST_INFO;
/* description of row. corresponding to RowDescription message */
typedef struct
@@ -523,13 +538,13 @@ typedef struct
int typeoid; /* data type oid */
int size; /* data length minus means variable data type */
int mod; /* data type modifier */
-} AttrInfo;
+} AttrInfo;
typedef struct
{
int num_attrs; /* number of attributes */
AttrInfo *attrinfo;
-} RowDesc;
+} RowDesc;
typedef struct
{
@@ -539,7 +554,7 @@ typedef struct
* excluding termination null */
char **data; /* actual row character data terminated with
* null */
-} POOL_SELECT_RESULT;
+} POOL_SELECT_RESULT;
/*
* recovery mode
@@ -572,8 +587,9 @@ typedef enum
PT_PCP_WORKER,
PT_HEALTH_CHECK,
PT_LOGGER,
- PT_LAST_PTYPE /* last ptype marker. any ptype must be above this. */
-} ProcessType;
+ PT_LAST_PTYPE /* last ptype marker. any ptype must be above
+ * this. */
+} ProcessType;
typedef enum
@@ -585,17 +601,17 @@ typedef enum
BACKEND_CONNECTING,
PROCESSING,
EXITING
-} ProcessState;
+} ProcessState;
/*
* Snapshot isolation manage area in shared memory
*/
typedef struct
{
- uint32 commit_counter; /* number of committing children */
+ uint32 commit_counter; /* number of committing children */
uint32 snapshot_counter; /* number of snapshot acquiring children */
- pid_t *snapshot_waiting_children; /* array size is num_init_children */
- pid_t *commit_waiting_children; /* array size is num_init_children */
+ pid_t *snapshot_waiting_children; /* array size is num_init_children */
+ pid_t *commit_waiting_children; /* array size is num_init_children */
} SI_ManageInfo;
/*
@@ -606,14 +622,14 @@ extern pid_t mypid; /* parent pid */
extern pid_t myProcPid; /* process pid */
extern ProcessType processType;
extern ProcessState processState;
-extern bool reset_query_error; /* true if error occurs in reset queries */
+extern bool reset_query_error; /* true if error occurs in reset queries */
extern void set_application_name(ProcessType ptype);
extern void set_application_name_with_string(char *string);
extern void set_application_name_with_suffix(ProcessType ptype, int suffix);
extern char *get_application_name(void);
extern char *get_application_name_for_process(ProcessType ptype);
-void SetProcessGlobalVariables(ProcessType pType);
+void SetProcessGlobalVariables(ProcessType pType);
extern volatile SI_ManageInfo *si_manage_info;
extern volatile sig_atomic_t sigusr2_received;
@@ -623,9 +639,9 @@ extern volatile sig_atomic_t backend_timer_expired; /* flag for connection
extern volatile sig_atomic_t health_check_timer_expired; /* non 0 if health check
* timer expired */
extern int my_proc_id; /* process table id (!= UNIX's PID) */
-extern ProcessInfo * process_info; /* shmem process information table */
-extern ConnectionInfo * con_info; /* shmem connection info table */
-extern POOL_REQUEST_INFO * Req_info;
+extern ProcessInfo *process_info; /* shmem process information table */
+extern ConnectionInfo *con_info; /* shmem connection info table */
+extern POOL_REQUEST_INFO *Req_info;
extern volatile sig_atomic_t *InRecovery;
extern volatile sig_atomic_t got_sighup;
extern volatile sig_atomic_t exit_request;
@@ -660,7 +676,7 @@ extern void pcp_main(int *fds);
extern void do_child(int *fds);
extern void child_exit(int code);
-extern void cancel_request(CancelPacket * sp, int32 len);
+extern void cancel_request(CancelPacket *sp, int32 len);
extern void check_stop_request(void);
extern void pool_initialize_private_backend_status(void);
extern int send_to_pg_frontend(char *data, int len, bool flush);
@@ -673,22 +689,22 @@ extern void set_process_status(ProcessStatus status);
extern void *pool_shared_memory_create(size_t size);
extern void pool_shmem_exit(int code);
extern void initialize_shared_memory_main_segment(size_t size);
-extern void * pool_shared_memory_segment_get_chunk(size_t size);
+extern void *pool_shared_memory_segment_get_chunk(size_t size);
/* pgpool_main.c*/
-extern BackendInfo * pool_get_node_info(int node_number);
+extern BackendInfo *pool_get_node_info(int node_number);
extern int pool_get_node_count(void);
extern int *pool_get_process_list(int *array_size);
-extern ProcessInfo * pool_get_process_info(pid_t pid);
+extern ProcessInfo *pool_get_process_info(pid_t pid);
extern void pool_sleep(unsigned int second);
extern int PgpoolMain(bool discard_status, bool clear_memcache_oidmaps);
extern int pool_send_to_frontend(char *data, int len, bool flush);
extern int pool_frontend_exists(void);
extern pid_t pool_waitpid(int *status);
extern int write_status_file(void);
-extern POOL_NODE_STATUS * verify_backend_node_status(POOL_CONNECTION_POOL_SLOT * *slots);
-extern POOL_NODE_STATUS * pool_get_node_status(void);
+extern POOL_NODE_STATUS *verify_backend_node_status(POOL_CONNECTION_POOL_SLOT **slots);
+extern POOL_NODE_STATUS *pool_get_node_status(void);
extern void pool_set_backend_status_changed_time(int backend_id);
extern int get_next_main_node(void);
extern bool pool_acquire_follow_primary_lock(bool block, bool remote_reques);
@@ -702,10 +718,10 @@ extern size_t strlcpy(char *dst, const char *src, size_t siz);
/* pool_worker_child.c */
extern void do_worker_child(void);
-extern int get_query_result(POOL_CONNECTION_POOL_SLOT * *slots, int backend_id, char *query, POOL_SELECT_RESULT * *res);
+extern int get_query_result(POOL_CONNECTION_POOL_SLOT **slots, int backend_id, char *query, POOL_SELECT_RESULT **res);
/* utils/pg_strong_random.c */
-void pg_strong_random_init(void);
-bool pg_strong_random(void *buf, size_t len);
+void pg_strong_random_init(void);
+bool pg_strong_random(void *buf, size_t len);
#endif /* POOL_H */
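
POOL_CONNECTION_POOL_SLOT stores the backend's cancel key together with its length (4 bytes in protocol 3.0, up to 256 from 3.2 on). An illustrative helper, not pgpool code, for matching a client's CancelPacket against a slot; since CancelPacket itself carries no length field, the key length arrives separately, as in cancel_request().

#include <string.h>
#include "pool.h"

static bool
cancel_key_matches(const CancelPacket *cp, int32 cp_keylen,
				   const POOL_CONNECTION_POOL_SLOT *slot)
{
	/* 4-byte keys in protocol 3.0; up to MAX_CANCELKEY_LENGTH from 3.2. */
	if (cp_keylen <= 0 || cp_keylen > MAX_CANCELKEY_LENGTH)
		return false;
	if (cp->pid != slot->pid || cp_keylen != slot->keylen)
		return false;
	return memcmp(cp->key, slot->key, cp_keylen) == 0;
}
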
diff --git a/src/include/pool_config.h b/src/include/pool_config.h
index 4dece394e..3d12c0a9f 100644
--- a/src/include/pool_config.h
+++ b/src/include/pool_config.h
@@ -55,20 +55,20 @@ typedef struct
int type;
int flag;
regex_t regexv;
-} RegPattern;
+} RegPattern;
typedef enum ProcessManagementModes
{
PM_STATIC = 1,
PM_DYNAMIC
-} ProcessManagementModes;
+} ProcessManagementModes;
typedef enum ProcessManagementSstrategies
{
PM_STRATEGY_AGGRESSIVE = 1,
PM_STRATEGY_GENTLE,
PM_STRATEGY_LAZY
-} ProcessManagementSstrategies;
+} ProcessManagementSstrategies;
typedef enum NativeReplicationSubModes
{
@@ -85,28 +85,28 @@ typedef enum ClusteringModes
CM_SLONY,
CM_RAW,
CM_SNAPSHOT_ISOLATION
-} ClusteringModes;
+} ClusteringModes;
typedef enum LogStandbyDelayModes
{
LSD_ALWAYS = 1,
LSD_OVER_THRESHOLD,
LSD_NONE
-} LogStandbyDelayModes;
+} LogStandbyDelayModes;
typedef enum MemCacheMethod
{
SHMEM_CACHE = 1,
MEMCACHED_CACHE
-} MemCacheMethod;
+} MemCacheMethod;
typedef enum WdLifeCheckMethod
{
LIFECHECK_BY_QUERY = 1,
LIFECHECK_BY_HB,
LIFECHECK_BY_EXTERNAL
-} WdLifeCheckMethod;
+} WdLifeCheckMethod;
typedef enum DLBOW_OPTION
{
@@ -115,13 +115,13 @@ typedef enum DLBOW_OPTION
DLBOW_TRANS_TRANSACTION,
DLBOW_ALWAYS,
DLBOW_DML_ADAPTIVE
-} DLBOW_OPTION;
+} DLBOW_OPTION;
typedef enum RELQTARGET_OPTION
{
RELQTARGET_PRIMARY = 1,
RELQTARGET_LOAD_BALANCE_NODE
-} RELQTARGET_OPTION;
+} RELQTARGET_OPTION;
typedef enum CHECK_TEMP_TABLE_OPTION
{
@@ -130,7 +130,7 @@ typedef enum CHECK_TEMP_TABLE_OPTION
CHECK_TEMP_NONE,
CHECK_TEMP_ON,
CHECK_TEMP_OFF,
-} CHECK_TEMP_TABLE_OPTION;
+} CHECK_TEMP_TABLE_OPTION;
/* log_backend_messages */
typedef enum BGMSG_OPTION
@@ -156,13 +156,13 @@ typedef struct WdNodeInfo
char hostname[WD_MAX_HOST_NAMELEN]; /* host name */
int pgpool_port; /* pgpool port */
int wd_port; /* watchdog port */
-} WdNodeInfo;
+} WdNodeInfo;
typedef struct WdNodesConfig
{
int num_wd; /* number of watchdogs */
- WdNodeInfo wd_node_info[MAX_WATCHDOG_NUM];
-} WdNodesConfig;
+ WdNodeInfo wd_node_info[MAX_WATCHDOG_NUM];
+} WdNodesConfig;
typedef struct
@@ -170,7 +170,7 @@ typedef struct
char addr[WD_MAX_HOST_NAMELEN];
char if_name[WD_MAX_IF_NAME_LEN];
int dest_port;
-} WdHbIf;
+} WdHbIf;
#define WD_INFO(wd_id) (pool_config->wd_nodes.wd_node_info[(wd_id)])
#define WD_HB_IF(if_id) (pool_config->hb_dest_if[(if_id)])
@@ -190,7 +190,7 @@ typedef struct
* retries */
int connect_timeout; /* timeout value before giving up
* connecting to backend */
-} HealthCheckParams;
+} HealthCheckParams;
/*
* For dml adaptive object relations
@@ -203,41 +203,41 @@ typedef enum
OBJECT_TYPE_FUNCTION,
OBJECT_TYPE_RELATION,
OBJECT_TYPE_UNKNOWN
-} DBObjectTypes;
+} DBObjectTypes;
typedef struct
{
- char *name;
+ char *name;
DBObjectTypes object_type;
-} DBObject;
+} DBObject;
typedef struct
{
DBObject left_token;
DBObject right_token;
-} DBObjectRelation;
+} DBObjectRelation;
/*
* configuration parameters
*/
typedef struct
{
- ClusteringModes backend_clustering_mode; /* Backend clustering mode */
- ProcessManagementModes process_management;
+ ClusteringModes backend_clustering_mode; /* Backend clustering mode */
+ ProcessManagementModes process_management;
ProcessManagementSstrategies process_management_strategy;
- char **listen_addresses; /* hostnames/IP addresses to listen on */
+ char **listen_addresses; /* hostnames/IP addresses to listen on */
int port; /* port # to bind */
- char **pcp_listen_addresses; /* PCP listen address to listen on */
+ char **pcp_listen_addresses; /* PCP listen address to listen on */
int pcp_port; /* PCP port # to bind */
- char **unix_socket_directories; /* pgpool socket directories */
- char *unix_socket_group; /* owner group of pgpool sockets */
+ char **unix_socket_directories; /* pgpool socket directories */
+ char *unix_socket_group; /* owner group of pgpool sockets */
int unix_socket_permissions; /* pgpool sockets permissions */
char *wd_ipc_socket_dir; /* watchdog command IPC socket directory */
- char **pcp_socket_dir; /* PCP socket directory */
- int num_init_children; /* Maximum number of child to
- * accept connections */
- int min_spare_children; /* Minimum number of idle children */
- int max_spare_children; /* Minimum number of idle children */
+ char **pcp_socket_dir; /* PCP socket directory */
+ int num_init_children; /* Maximum number of children to accept
+ * connections */
+ int min_spare_children; /* Minimum number of idle children */
+ int max_spare_children; /* Maximum number of idle children */
int listen_backlog_multiplier; /* determines the size of the
* connection queue */
int reserved_connections; /* # of reserved connections */
@@ -270,7 +270,7 @@ typedef struct
char *pid_file_name; /* pid file name */
bool replication_mode; /* replication mode */
bool log_connections; /* logs incoming connections */
- bool log_disconnections; /* logs closing connections */
+ bool log_disconnections; /* logs closing connections */
bool log_pcp_processes; /* logs pcp processes */
bool log_hostname; /* resolve hostname */
bool enable_pool_hba; /* enables pool_hba.conf file
@@ -295,19 +295,21 @@ typedef struct
* false, just abort the
* transaction to keep
* the consistency. */
- bool auto_failback; /* If true, backend node reattach,
- * when backend node detached and
+ bool auto_failback; /* If true, backend node reattach, when
+ * backend node detached and
* replication_status is 'stream' */
- int auto_failback_interval; /* min interval of executing auto_failback */
+ int auto_failback_interval; /* min interval of executing
+ * auto_failback */
bool replicate_select; /* replicate SELECT statement when load
* balancing is disabled. */
char **reset_query_list; /* comma separated list of queries to be
* issued at the end of session */
char **read_only_function_list; /* list of functions with no side
- * effects */
+ * effects */
char **write_function_list; /* list of functions with side effects */
- char **primary_routing_query_pattern_list; /* list of query patterns that
- * should be sent to primary node */
+ char **primary_routing_query_pattern_list; /* list of query patterns
+ * that should be sent to
+ * primary node */
char *log_line_prefix; /* printf-style string to output at
* beginning of each log line */
int log_error_verbosity; /* controls how much detail about
@@ -320,8 +322,8 @@ typedef struct
bool logging_collector;
int log_rotation_age;
int log_rotation_size;
- char *log_directory;
- char *log_filename;
+ char *log_directory;
+ char *log_filename;
bool log_truncate_on_rotation;
int log_file_mode;
@@ -333,15 +335,19 @@ typedef struct
* greater than 0 to enable the
* functionality. */
- int delay_threshold_by_time; /* If the standby server delays more than
- * delay_threshold_in_time, any query goes to the
- * primary only. The unit is in seconds. 0
- * disables the check. Default is 0.
- * If delay_threshold_in_time is greater than 0,
- * delay_threshold will be ignored.
- * Note that health_check_period required to be
- * greater than 0 to enable the
- * functionality. */
+ int delay_threshold_by_time; /* If the standby server delays
+ * more than
+ * delay_threshold_by_time, any
+ * query goes to the primary only.
+ * The unit is in seconds. 0
+ * disables the check. Default is
+ * 0. If delay_threshold_by_time
+ * is greater than 0,
+ * delay_threshold will be
+ * ignored. Note that
+ * health_check_period is
+ * required to be greater than 0
+ * to enable the functionality. */
bool prefer_lower_delay_standby;
@@ -366,11 +372,11 @@ typedef struct
char *sr_check_database; /* PostgreSQL database name for streaming
* replication check */
char *failover_command; /* execute command when failover happens */
- char *follow_primary_command; /* execute command when failover is
+ char *follow_primary_command; /* execute command when failover is
* ended */
char *failback_command; /* execute command when failback happens */
- bool failover_on_backend_error; /* If true, trigger fail over when
+ bool failover_on_backend_error; /* If true, trigger fail over when
* writing to the backend
* communication socket fails.
* This is the same behavior of
@@ -378,8 +384,8 @@ typedef struct
* set to false, pgpool will
* report an error and disconnect
* the session. */
- bool failover_on_backend_shutdown; /* If true, trigger fail over
- when backend is going down */
+ bool failover_on_backend_shutdown; /* If true, trigger fail over
+ * when backend is going down */
bool detach_false_primary; /* If true, detach false primary */
char *recovery_user; /* PostgreSQL user name for online recovery */
char *recovery_password; /* PostgreSQL user password for online
@@ -406,8 +412,8 @@ typedef struct
bool log_statement; /* logs all SQL statements */
bool log_per_node_statement; /* logs per node detailed SQL
* statements */
- bool notice_per_node_statement; /* logs notice message for per node detailed SQL
- * statements */
+ bool notice_per_node_statement; /* logs notice message for per
+ * node detailed SQL statements */
bool log_client_messages; /* If true, logs any client messages */
int log_backend_messages; /* logs any backend messages */
char *lobj_lock_table; /* table name to lock for rewriting
@@ -422,20 +428,27 @@ typedef struct
/* followings till syslog, does not exist in the configuration file */
int num_reset_queries; /* number of queries in reset_query_list */
- int num_listen_addresses; /* number of entries in listen_addresses */
- int num_pcp_listen_addresses; /* number of entries in pcp_listen_addresses */
- int num_unix_socket_directories; /* number of entries in unix_socket_directories */
- int num_pcp_socket_directories; /* number of entries in pcp_socket_dir */
+ int num_listen_addresses; /* number of entries in
+ * listen_addresses */
+ int num_pcp_listen_addresses; /* number of entries in
+ * pcp_listen_addresses */
+ int num_unix_socket_directories; /* number of entries in
+ * unix_socket_directories */
+ int num_pcp_socket_directories; /* number of entries in
+ * pcp_socket_dir */
int num_read_only_function_list; /* number of functions in
- * read_only_function_list */
+ * read_only_function_list */
int num_write_function_list; /* number of functions in
* write_function_list */
- int num_cache_safe_memqcache_table_list; /* number of functions in
- * cache_safe_memqcache_table_list */
- int num_cache_unsafe_memqcache_table_list; /* number of functions in
- * cache_unsafe_memqcache_table_list */
- int num_primary_routing_query_pattern_list; /* number of query patterns in
- * primary_routing_query_pattern_list */
+ int num_cache_safe_memqcache_table_list; /* number of functions
+ * in
+ * cache_safe_memqcache_table_list */
+ int num_cache_unsafe_memqcache_table_list; /* number of functions
+ * in
+ * cache_unsafe_memqcache_table_list */
+ int num_primary_routing_query_pattern_list; /* number of query
+ * patterns in
+ * primary_routing_query_pattern_list */
int num_wd_monitoring_interfaces_list; /* number of items in
* wd_monitoring_interfaces_list */
/* ssl configuration */
@@ -446,25 +459,32 @@ typedef struct
char *ssl_ca_cert; /* path to root (CA) certificate */
char *ssl_ca_cert_dir; /* path to directory containing CA
* certificates */
- char *ssl_crl_file; /* path to the SSL certificate revocation list file */
+ char *ssl_crl_file; /* path to the SSL certificate revocation list
+ * file */
char *ssl_ciphers; /* allowed ssl ciphers */
- bool ssl_prefer_server_ciphers; /*Use SSL cipher preferences, rather than the client's*/
+ bool ssl_prefer_server_ciphers; /* Use SSL cipher preferences,
+ * rather than the client's */
char *ssl_ecdh_curve; /* the curve to use in ECDH key exchange */
- char *ssl_dh_params_file; /* path to the Diffie-Hellman parameters contained file */
- char *ssl_passphrase_command; /* path to the Diffie-Hellman parameters contained file */
+ char *ssl_dh_params_file; /* path to the file containing Diffie-Hellman
+ * parameters */
+ char *ssl_passphrase_command; /* command used to obtain the passphrase
+ * for SSL key files */
int64 relcache_expire; /* relation cache life time in seconds */
int relcache_size; /* number of relation cache life entry */
- CHECK_TEMP_TABLE_OPTION check_temp_table; /* how to check temporary table */
+ CHECK_TEMP_TABLE_OPTION check_temp_table; /* how to check temporary
+ * table */
bool check_unlogged_table; /* enable unlogged table check */
- bool enable_shared_relcache; /* If true, relation cache stored in memory cache */
- RELQTARGET_OPTION relcache_query_target; /* target node to send relcache queries */
+ bool enable_shared_relcache; /* If true, relation cache stored in
+ * memory cache */
+ RELQTARGET_OPTION relcache_query_target; /* target node to send
+ * relcache queries */
/*
* followings are for regex support and do not exist in the configuration
* file
*/
- RegPattern *lists_patterns; /* Precompiled regex patterns for write/readonly
- * lists */
+ RegPattern *lists_patterns; /* Precompiled regex patterns for
+ * write/readonly lists */
int pattc; /* number of regexp pattern */
int current_pattern_size; /* size of the regex pattern array */
@@ -502,8 +522,10 @@ typedef struct
* by default */
char *memqcache_oiddir; /* Temporary work directory to record
* table oids */
- char **cache_safe_memqcache_table_list; /* list of tables to memqcache */
- char **cache_unsafe_memqcache_table_list; /* list of tables not to memqcache */
+ char **cache_safe_memqcache_table_list; /* list of tables to
+ * memqcache */
+ char **cache_unsafe_memqcache_table_list; /* list of tables not to
+ * memqcache */
RegPattern *lists_memqcache_table_patterns; /* Precompiled regex patterns
* for cache safe/unsafe lists */
@@ -515,12 +537,11 @@ typedef struct
* user_redirect_preference_list =
* 'postgres:primary,user[0-4]:1,user[5-9]:2'
*/
- char *user_redirect_preference_list; /* raw string in
- * pgpool.conf */
- RegArray *redirect_usernames; /* Precompiled regex patterns for db
+ char *user_redirect_preference_list; /* raw string in pgpool.conf */
+ RegArray *redirect_usernames; /* Precompiled regex patterns for db
* preference list */
- Left_right_tokens *user_redirect_tokens; /* db redirect for dbname and node
- * string */
+ Left_right_tokens *user_redirect_tokens; /* db redirect for dbname and
+ * node string */
/*
* database_redirect_preference_list =
@@ -571,10 +592,13 @@ typedef struct
* will not be load balanced
* until the session ends. */
- char *dml_adaptive_object_relationship_list; /* objects relationship list*/
+ char *dml_adaptive_object_relationship_list; /* objects relationship
+ * list */
DBObjectRelation *parsed_dml_adaptive_object_relationship_list;
- bool statement_level_load_balance; /* if on, select load balancing node per statement */
+ bool statement_level_load_balance; /* if on, select load
+ * balancing node per
+ * statement */
/*
* add for watchdog
@@ -589,23 +613,26 @@ typedef struct
* failover requests to
* build consensus */
bool enable_consensus_with_half_votes;
- /* apply majority rule for consensus
- * and quorum computation at 50% of
- * votes in a cluster with an even
- * number of nodes.
- */
+
+ /*
+ * apply majority rule for consensus and quorum computation at 50% of
+ * votes in a cluster with an even number of nodes.
+ */
bool wd_remove_shutdown_nodes;
- /* revoke membership of properly shutdown watchdog
- * nodes.
- */
- int wd_lost_node_removal_timeout;
- /* timeout in seconds to revoke membership of
- * LOST watchdog nodes
- */
- int wd_no_show_node_removal_timeout;
- /* time in seconds to revoke membership of
- * NO-SHOW watchdog node
- */
+
+ /*
+ * revoke membership of properly shutdown watchdog nodes.
+ */
+ int wd_lost_node_removal_timeout;
+
+ /*
+ * timeout in seconds to revoke membership of LOST watchdog nodes
+ */
+ int wd_no_show_node_removal_timeout;
+
+ /*
+ * time in seconds to revoke membership of NO-SHOW watchdog node
+ */
WdLifeCheckMethod wd_lifecheck_method; /* method of lifecheck.
* 'heartbeat' or 'query' */
@@ -617,10 +644,11 @@ typedef struct
* leader pgpool goes down. */
int wd_priority; /* watchdog node priority, during leader
* election */
- int pgpool_node_id; /* pgpool (watchdog) node id */
+ int pgpool_node_id; /* pgpool (watchdog) node id */
WdNodesConfig wd_nodes; /* watchdog lists */
char *trusted_servers; /* icmp reachable server list (A,B,C) */
- char *trusted_server_command; /* Executes this command when upper servers are observed */
+ char *trusted_server_command; /* Executes this command when upper
+ * servers are observed */
char *delegate_ip; /* delegate IP address */
int wd_interval; /* lifecheck interval (sec) */
char *wd_authkey; /* Authentication key for watchdog
@@ -642,17 +670,19 @@ typedef struct
* signal (sec) */
int wd_heartbeat_deadtime; /* Deadtime interval for heartbeat
* signal (sec) */
- WdHbIf hb_ifs[WD_MAX_IF_NUM]; /* heartbeat interfaces of all watchdog nodes */
- WdHbIf hb_dest_if[WD_MAX_IF_NUM]; /* heartbeat destination interfaces */
- int num_hb_dest_if; /* number of interface devices */
+ WdHbIf hb_ifs[WD_MAX_IF_NUM]; /* heartbeat interfaces of all
+ * watchdog nodes */
+ WdHbIf hb_dest_if[WD_MAX_IF_NUM]; /* heartbeat destination
+ * interfaces */
+ int num_hb_dest_if; /* number of interface devices */
char **wd_monitoring_interfaces_list; /* network interface name list
* to be monitored by watchdog */
- bool health_check_test; /* if on, enable health check testing */
+ bool health_check_test; /* if on, enable health check testing */
-} POOL_CONFIG;
+} POOL_CONFIG;
-extern POOL_CONFIG * pool_config;
-extern char config_file_dir[]; /* directory path of config file pgpool.conf */
+extern POOL_CONFIG *pool_config;
+extern char config_file_dir[]; /* directory path of config file pgpool.conf */
typedef enum
{
@@ -661,7 +691,7 @@ typedef enum
CFGCXT_RELOAD,
CFGCXT_PCP,
CFGCXT_SESSION
-} ConfigContext;
+} ConfigContext;
typedef struct ConfigVariable
{
@@ -675,7 +705,7 @@ extern int pool_init_config(void);
extern bool pool_get_config(const char *config_file, ConfigContext context);
extern int eval_logical(const char *str);
extern char *pool_flag_to_str(unsigned short flag);
-extern char *backend_status_to_str(BackendInfo * bi);
+extern char *backend_status_to_str(BackendInfo *bi);
/* methods used for regexp support */
extern int add_regex_pattern(const char *type, char *s);
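Aside on the reflowed delay_threshold_by_time comment above: the precedence it documents (a non-zero time-based threshold causes delay_threshold to be ignored, and health_check_period must be greater than 0 for either check to run) amounts to the small check below. This is a minimal sketch only; the helper name is hypothetical and not part of pgpool-II.

    #include "pool.h"    /* POOL_CONFIG and the pool_config global */

    /* Hypothetical helper illustrating the documented precedence only. */
    static bool
    standby_delay_check_enabled(bool *use_time_based)
    {
        if (pool_config->health_check_period <= 0)
            return false;            /* neither delay check can run */

        /* a non-zero time-based threshold overrides delay_threshold */
        *use_time_based = (pool_config->delay_threshold_by_time > 0);
        return *use_time_based || pool_config->delay_threshold > 0;
    }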
diff --git a/src/include/pool_config_variables.h b/src/include/pool_config_variables.h
index 7aaa65052..1c6a2955e 100644
--- a/src/include/pool_config_variables.h
+++ b/src/include/pool_config_variables.h
@@ -43,7 +43,7 @@ typedef enum
WATCHDOG_LIFECHECK,
GENERAL_CONFIG,
CACHE_CONFIG
-} config_group;
+} config_group;
typedef enum
{
@@ -58,7 +58,7 @@ typedef enum
CONFIG_VAR_TYPE_DOUBLE_ARRAY,
CONFIG_VAR_TYPE_STRING_ARRAY,
CONFIG_VAR_TYPE_GROUP
-} config_type;
+} config_type;
/*
* The possible values of an enum variable are specified by an array of
@@ -96,17 +96,17 @@ typedef enum
#define DEFAULT_FOR_NO_VALUE_ARRAY_VAR 0x0020
/* From PG's src/include/utils/guc.h */
-#define GUC_UNIT_KB 0x1000 /* value is in kilobytes */
-#define GUC_UNIT_BLOCKS 0x2000 /* value is in blocks */
-#define GUC_UNIT_XBLOCKS 0x3000 /* value is in xlog blocks */
-#define GUC_UNIT_MB 0x4000 /* value is in megabytes */
-#define GUC_UNIT_BYTE 0x8000 /* value is in bytes */
-#define GUC_UNIT_MEMORY 0xF000 /* mask for size-related units */
-
-#define GUC_UNIT_MS 0x10000 /* value is in milliseconds */
-#define GUC_UNIT_S 0x20000 /* value is in seconds */
-#define GUC_UNIT_MIN 0x30000 /* value is in minutes */
-#define GUC_UNIT_TIME 0xF0000 /* mask for time-related units */
+#define GUC_UNIT_KB 0x1000 /* value is in kilobytes */
+#define GUC_UNIT_BLOCKS 0x2000 /* value is in blocks */
+#define GUC_UNIT_XBLOCKS 0x3000 /* value is in xlog blocks */
+#define GUC_UNIT_MB 0x4000 /* value is in megabytes */
+#define GUC_UNIT_BYTE 0x8000 /* value is in bytes */
+#define GUC_UNIT_MEMORY 0xF000 /* mask for size-related units */
+
+#define GUC_UNIT_MS 0x10000 /* value is in milliseconds */
+#define GUC_UNIT_S 0x20000 /* value is in seconds */
+#define GUC_UNIT_MIN 0x30000 /* value is in minutes */
+#define GUC_UNIT_TIME 0xF0000 /* mask for time-related units */
#define GUC_UNIT (GUC_UNIT_MEMORY | GUC_UNIT_TIME)
/*
@@ -335,17 +335,17 @@ struct config_grouped_array_var
extern void InitializeConfigOptions(void);
extern bool set_one_config_option(const char *name, const char *value,
- ConfigContext context, GucSource source, int elevel);
+ ConfigContext context, GucSource source, int elevel);
extern bool set_config_options(ConfigVariable *head_p,
- ConfigContext context, GucSource source, int elevel);
+ ConfigContext context, GucSource source, int elevel);
#ifndef POOL_PRIVATE
-extern bool report_config_variable(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *var_name);
-extern bool report_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern bool set_config_option_for_session(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *name, const char *value);
-bool reset_all_variables(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+extern bool report_config_variable(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *var_name);
+extern bool report_all_variables(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern bool set_config_option_for_session(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *name, const char *value);
+bool reset_all_variables(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
#endif
#endif /* POOL_CONFIG_VARIABLES_H */
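For reference, the GUC_UNIT_* values above are bit-encoded the same way as in PostgreSQL's guc.h: GUC_UNIT_MEMORY and GUC_UNIT_TIME act as masks over an option's flags word. A minimal sketch (the function is illustrative, not pgpool-II code):

    #include "pool_config_variables.h"

    /* Map an option's unit flags to a display suffix; returns "" when the
     * option carries no unit. */
    static const char *
    unit_suffix(int flags)
    {
        switch (flags & GUC_UNIT_MEMORY)
        {
            case GUC_UNIT_KB:   return "kB";
            case GUC_UNIT_MB:   return "MB";
            case GUC_UNIT_BYTE: return "B";
        }
        switch (flags & GUC_UNIT_TIME)
        {
            case GUC_UNIT_MS:  return "ms";
            case GUC_UNIT_S:   return "s";
            case GUC_UNIT_MIN: return "min";
        }
        return "";
    }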
diff --git a/src/include/pool_type.h b/src/include/pool_type.h
index 8db8ed000..6e8fe85dc 100644
--- a/src/include/pool_type.h
+++ b/src/include/pool_type.h
@@ -108,12 +108,12 @@ typedef enum
{
LOAD_UNSELECTED = 0,
LOAD_SELECTED
-} LOAD_BALANCE_STATUS;
+} LOAD_BALANCE_STATUS;
extern int assert_enabled;
extern void ExceptionalCondition(const char *conditionName,
- const char *errorType,
- const char *fileName, int lineNumber) __attribute__((noreturn));
+ const char *errorType,
+ const char *fileName, int lineNumber) __attribute__((noreturn));
#define MAXIMUM_ALIGNOF 8
diff --git a/src/include/protocol/pool_connection_pool.h b/src/include/protocol/pool_connection_pool.h
index b7f35ce7e..225b9c09d 100644
--- a/src/include/protocol/pool_connection_pool.h
+++ b/src/include/protocol/pool_connection_pool.h
@@ -22,14 +22,14 @@
#ifndef pool_connection_pool_h
#define pool_connection_pool_h
-extern POOL_CONNECTION_POOL * pool_connection_pool; /* connection pool */
+extern POOL_CONNECTION_POOL *pool_connection_pool; /* connection pool */
-extern int pool_init_cp(void);
-extern POOL_CONNECTION_POOL * pool_create_cp(void);
-extern POOL_CONNECTION_POOL * pool_get_cp(char *user, char *database, int protoMajor, int check_socket);
+extern int pool_init_cp(void);
+extern POOL_CONNECTION_POOL *pool_create_cp(void);
+extern POOL_CONNECTION_POOL *pool_get_cp(char *user, char *database, int protoMajor, int check_socket);
extern void pool_discard_cp(char *user, char *database, int protoMajor);
extern void pool_backend_timer(void);
-extern void pool_connection_pool_timer(POOL_CONNECTION_POOL * backend);
+extern void pool_connection_pool_timer(POOL_CONNECTION_POOL *backend);
extern RETSIGTYPE pool_backend_timer_handler(int sig);
extern int connect_inet_domain_socket(int slot, bool retry);
extern int connect_unix_domain_socket(int slot, bool retry);
@@ -40,4 +40,4 @@ extern void close_all_backend_connections(void);
extern void update_pooled_connection_count(void);
extern int in_use_backend_id(POOL_CONNECTION_POOL *pool);
-#endif /* pool_connection_pool_h */
+#endif /* pool_connection_pool_h */
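The connection-pool API above suggests a lookup-then-create pattern. A hedged sketch of how the two calls could be paired, assuming pool_get_cp() returns NULL when no pooled connection matches; the wrapper is illustrative and error handling is omitted:

    #include "pool.h"
    #include "protocol/pool_connection_pool.h"

    /* Reuse a pooled backend connection for (user, database) if present,
     * otherwise create a new one. */
    static POOL_CONNECTION_POOL *
    get_or_create_backend(char *user, char *database, int protoMajor)
    {
        POOL_CONNECTION_POOL *backend;

        backend = pool_get_cp(user, database, protoMajor, 1 /* check_socket */);
        if (backend == NULL)
            backend = pool_create_cp();
        return backend;
    }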
diff --git a/src/include/protocol/pool_pg_utils.h b/src/include/protocol/pool_pg_utils.h
index bd9493572..7a9117788 100644
--- a/src/include/protocol/pool_pg_utils.h
+++ b/src/include/protocol/pool_pg_utils.h
@@ -31,27 +31,26 @@
*/
typedef struct
{
- short major; /* major version number in up to 3 digits decimal.
- * Examples: 120, 110, 100, 96.
- */
- short minor; /* minor version number in up to 2 digits decimal.
- * Examples: 0, 1, 2, 10, 23.
- */
- char version_string[MAX_PG_VERSION_STRING+1]; /* original version string */
-} PGVersion;
+ short major; /* major version number in up to 3 digits
+ * decimal. Examples: 120, 110, 100, 96. */
+ short minor; /* minor version number in up to 2 digits
+ * decimal. Examples: 0, 1, 2, 10, 23. */
+ char version_string[MAX_PG_VERSION_STRING + 1]; /* original version
+ * string */
+} PGVersion;
-extern void send_startup_packet(POOL_CONNECTION_POOL_SLOT * cp);
+extern void send_startup_packet(POOL_CONNECTION_POOL_SLOT *cp);
extern void pool_free_startup_packet(StartupPacket *sp);
-extern POOL_CONNECTION_POOL_SLOT * make_persistent_db_connection(
- int db_node_id, char *hostname, int port, char *dbname, char *user, char *password, bool retry);
-extern POOL_CONNECTION_POOL_SLOT * make_persistent_db_connection_noerror(
- int db_node_id, char *hostname, int port, char *dbname, char *user, char *password, bool retry);
-extern void discard_persistent_db_connection(POOL_CONNECTION_POOL_SLOT * cp);
+extern POOL_CONNECTION_POOL_SLOT *make_persistent_db_connection(
+ int db_node_id, char *hostname, int port, char *dbname, char *user, char *password, bool retry);
+extern POOL_CONNECTION_POOL_SLOT *make_persistent_db_connection_noerror(
+ int db_node_id, char *hostname, int port, char *dbname, char *user, char *password, bool retry);
+extern void discard_persistent_db_connection(POOL_CONNECTION_POOL_SLOT *cp);
extern int select_load_balancing_node(void);
-extern PGVersion *Pgversion(POOL_CONNECTION_POOL * backend);
+extern PGVersion *Pgversion(POOL_CONNECTION_POOL *backend);
/* pool_pg_utils.c */
extern bool si_snapshot_prepared(void);
@@ -62,4 +61,4 @@ extern void si_commit_request(void);
extern void si_commit_done(void);
extern int check_replication_delay(int node_id);
-#endif /* pool_pg_utils_h */
+#endif /* pool_pg_utils_h */
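Given the PGVersion encoding spelled out above (major in up to three decimal digits, e.g. 96 for 9.6 and 120 for 12), a server-version test reduces to an integer comparison. A sketch; the wrapper is illustrative and assumes Pgversion() may return NULL on failure:

    #include "pool.h"
    #include "protocol/pool_pg_utils.h"

    static bool
    backend_is_pg12_or_newer(POOL_CONNECTION_POOL *backend)
    {
        PGVersion  *v = Pgversion(backend);

        return v != NULL && v->major >= 120;
    }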
diff --git a/src/include/protocol/pool_process_query.h b/src/include/protocol/pool_process_query.h
index e799b4d9d..f54b694a1 100644
--- a/src/include/protocol/pool_process_query.h
+++ b/src/include/protocol/pool_process_query.h
@@ -30,57 +30,57 @@
extern void reset_variables(void);
extern void reset_connection(void);
-extern void per_node_statement_log(POOL_CONNECTION_POOL * backend,
+extern void per_node_statement_log(POOL_CONNECTION_POOL *backend,
int node_id, char *query);
-extern int pool_extract_error_message(bool read_kind, POOL_CONNECTION * backend,
+extern int pool_extract_error_message(bool read_kind, POOL_CONNECTION *backend,
int major, bool unread, char **message);
-extern POOL_STATUS do_command(POOL_CONNECTION * frontend, POOL_CONNECTION * backend,
+extern POOL_STATUS do_command(POOL_CONNECTION *frontend, POOL_CONNECTION *backend,
char *query, int protoMajor, int pid, char *key, int keylen, int no_ready_for_query);
-extern void do_query(POOL_CONNECTION * backend, char *query, POOL_SELECT_RESULT * *result, int major);
-extern void free_select_result(POOL_SELECT_RESULT * result);
+extern void do_query(POOL_CONNECTION *backend, char *query, POOL_SELECT_RESULT **result, int major);
+extern void free_select_result(POOL_SELECT_RESULT *result);
extern int compare(const void *p1, const void *p2);
-extern void do_error_execute_command(POOL_CONNECTION_POOL * backend, int node_id, int major);
-extern POOL_STATUS pool_discard_packet_contents(POOL_CONNECTION_POOL * cp);
+extern void do_error_execute_command(POOL_CONNECTION_POOL *backend, int node_id, int major);
+extern POOL_STATUS pool_discard_packet_contents(POOL_CONNECTION_POOL *cp);
extern void pool_dump_valid_backend(int backend_id);
-extern bool pool_push_pending_data(POOL_CONNECTION * backend);
+extern bool pool_push_pending_data(POOL_CONNECTION *backend);
-extern void pool_send_frontend_exits(POOL_CONNECTION_POOL * backend);
-extern POOL_STATUS ParameterStatus(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern void pool_send_frontend_exits(POOL_CONNECTION_POOL *backend);
+extern POOL_STATUS ParameterStatus(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern void pool_send_error_message(POOL_CONNECTION * frontend, int protoMajor,
- char *code,
- char *message,
- char *detail,
- char *hint,
- char *file,
- int line);
-extern void pool_send_fatal_message(POOL_CONNECTION * frontend, int protoMajor,
- char *code,
- char *message,
- char *detail,
- char *hint,
- char *file,
- int line);
-extern void pool_send_severity_message(POOL_CONNECTION * frontend, int protoMajor,
- char *code,
- char *message,
- char *detail,
- char *hint,
- char *file,
- char *severity,
- int line);
+extern void pool_send_error_message(POOL_CONNECTION *frontend, int protoMajor,
+ char *code,
+ char *message,
+ char *detail,
+ char *hint,
+ char *file,
+ int line);
+extern void pool_send_fatal_message(POOL_CONNECTION *frontend, int protoMajor,
+ char *code,
+ char *message,
+ char *detail,
+ char *hint,
+ char *file,
+ int line);
+extern void pool_send_severity_message(POOL_CONNECTION *frontend, int protoMajor,
+ char *code,
+ char *message,
+ char *detail,
+ char *hint,
+ char *file,
+ char *severity,
+ int line);
-extern POOL_STATUS SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern POOL_STATUS SimpleForwardToBackend(char kind, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int len, char *contents);
+extern POOL_STATUS SimpleForwardToFrontend(char kind, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern POOL_STATUS SimpleForwardToBackend(char kind, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, int len, char *contents);
-extern POOL_STATUS pool_process_query(POOL_CONNECTION * frontend,
-POOL_CONNECTION_POOL * backend,
-int reset_request);
-extern bool is_backend_cache_empty(POOL_CONNECTION_POOL * backend);
-extern void pool_send_readyforquery(POOL_CONNECTION * frontend);
+extern POOL_STATUS pool_process_query(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ int reset_request);
+extern bool is_backend_cache_empty(POOL_CONNECTION_POOL *backend);
+extern void pool_send_readyforquery(POOL_CONNECTION *frontend);
extern char *extract_error_kind(char *message, int major);
-#endif /* pool_process_query_h */
+#endif /* pool_process_query_h */
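The do_query()/free_select_result() pair declared above appears to be the internal way to run a query on a single backend connection and collect its result. A minimal sketch of the pairing; the query and wrapper are illustrative, and PROTO_MAJOR_V3 is pgpool's v3-protocol constant:

    #include "pool.h"
    #include "protocol/pool_process_query.h"

    static void
    run_internal_query(POOL_CONNECTION *backend)
    {
        POOL_SELECT_RESULT *res = NULL;

        do_query(backend, "SELECT pg_is_in_recovery()", &res, PROTO_MAJOR_V3);
        if (res)
            free_select_result(res);
    }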
diff --git a/src/include/protocol/pool_proto_modules.h b/src/include/protocol/pool_proto_modules.h
index 28668aa6e..ae6ec5269 100644
--- a/src/include/protocol/pool_proto_modules.h
+++ b/src/include/protocol/pool_proto_modules.h
@@ -48,112 +48,112 @@ extern char *parsed_query;
/*
* modules defined in pool_proto_modules.c
*/
-extern POOL_STATUS SimpleQuery(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS SimpleQuery(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Execute(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Execute(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Parse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Parse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Bind(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Bind(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Describe(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Describe(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS Close(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS Close(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS FunctionCall3(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS FunctionCall3(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents);
-extern POOL_STATUS ReadyForQuery(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend, bool send_ready, bool cache_commit);
+extern POOL_STATUS ReadyForQuery(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend, bool send_ready, bool cache_commit);
-extern POOL_STATUS ParseComplete(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ParseComplete(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS BindComplete(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS BindComplete(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CloseComplete(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CloseComplete(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS ParameterDescription(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ParameterDescription(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS ErrorResponse3(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ErrorResponse3(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CopyInResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CopyInResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CopyOutResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CopyOutResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CopyDataRows(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend, int copyin);
+extern POOL_STATUS CopyDataRows(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend, int copyin);
-extern POOL_STATUS FunctionCall(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS FunctionCall(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS ProcessFrontendResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ProcessFrontendResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS ProcessBackendResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS ProcessBackendResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int *state, short *num_fields);
-extern void handle_query_context(POOL_CONNECTION_POOL * backend);;
+extern void handle_query_context(POOL_CONNECTION_POOL *backend);
extern void pool_emit_log_for_message_length_diff(int *length_array, char *name);
-extern void per_node_statement_notice(POOL_CONNECTION_POOL * backend, int node_id, char *query);
+extern void per_node_statement_notice(POOL_CONNECTION_POOL *backend, int node_id, char *query);
extern void log_backend_messages(unsigned char kind, int backend_id);
/*
* modules defined in pool_proto2.c
*/
-extern POOL_STATUS AsciiRow(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS AsciiRow(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
short num_fields);
-extern POOL_STATUS BinaryRow(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS BinaryRow(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
short num_fields);
-extern POOL_STATUS CompletedResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CompletedResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS CursorResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS CursorResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern void EmptyQueryResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern void EmptyQueryResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS FunctionResultResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS FunctionResultResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern POOL_STATUS NotificationResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS NotificationResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern int RowDescription(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
- short *result);
+extern int RowDescription(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ short *result);
-extern void wait_for_query_response_with_trans_cleanup(POOL_CONNECTION * frontend, POOL_CONNECTION * backend,
+extern void wait_for_query_response_with_trans_cleanup(POOL_CONNECTION *frontend, POOL_CONNECTION *backend,
int protoVersion, int pid, char *key, int keylen);
-extern POOL_STATUS wait_for_query_response(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, int protoVersion);
+extern POOL_STATUS wait_for_query_response(POOL_CONNECTION *frontend, POOL_CONNECTION *backend, int protoVersion);
extern bool is_select_query(Node *node, char *sql);
extern bool is_commit_query(Node *node);
extern bool is_rollback_query(Node *node);
@@ -161,57 +161,57 @@ extern bool is_commit_or_rollback_query(Node *node);
extern bool is_rollback_to_query(Node *node);
extern bool is_strict_query(Node *node); /* returns non 0 if this is strict
* query */
-extern int need_insert_lock(POOL_CONNECTION_POOL * backend, char *query, Node *node);
-extern POOL_STATUS insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *query, InsertStmt *node, int lock_kind);
+extern int need_insert_lock(POOL_CONNECTION_POOL *backend, char *query, Node *node);
+extern POOL_STATUS insert_lock(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *query, InsertStmt *node, int lock_kind);
extern char *parse_copy_data(char *buf, int len, char delimiter, int col_id);
extern int check_copy_from_stdin(Node *node); /* returns non 0 if this is a
* COPY FROM STDIN */
-extern void query_ps_status(char *query, POOL_CONNECTION_POOL * backend); /* show ps status */
-extern POOL_STATUS start_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node *node);
-extern POOL_STATUS end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern int detect_deadlock_error(POOL_CONNECTION * backend, int major);
-extern int detect_serialization_error(POOL_CONNECTION * backend, int major, bool unread);
-extern int detect_active_sql_transaction_error(POOL_CONNECTION * backend, int major);
-extern int detect_query_cancel_error(POOL_CONNECTION * backend, int major);
-extern int detect_idle_in_transaction_session_timeout_error(POOL_CONNECTION * backend, int major);
-extern int detect_idle_session_timeout_error(POOL_CONNECTION * backend, int major);
-extern bool is_partition_table(POOL_CONNECTION_POOL * backend, Node *node);
-extern POOL_STATUS pool_discard_packet(POOL_CONNECTION_POOL * cp);
-extern void query_cache_register(char kind, POOL_CONNECTION * frontend, char *database, char *data, int data_len);
+extern void query_ps_status(char *query, POOL_CONNECTION_POOL *backend); /* show ps status */
+extern POOL_STATUS start_internal_transaction(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, Node *node);
+extern POOL_STATUS end_internal_transaction(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern int detect_deadlock_error(POOL_CONNECTION *backend, int major);
+extern int detect_serialization_error(POOL_CONNECTION *backend, int major, bool unread);
+extern int detect_active_sql_transaction_error(POOL_CONNECTION *backend, int major);
+extern int detect_query_cancel_error(POOL_CONNECTION *backend, int major);
+extern int detect_idle_in_transaction_session_timeout_error(POOL_CONNECTION *backend, int major);
+extern int detect_idle_session_timeout_error(POOL_CONNECTION *backend, int major);
+extern bool is_partition_table(POOL_CONNECTION_POOL *backend, Node *node);
+extern POOL_STATUS pool_discard_packet(POOL_CONNECTION_POOL *cp);
+extern void query_cache_register(char kind, POOL_CONNECTION *frontend, char *database, char *data, int data_len);
extern int is_drop_database(Node *node); /* returns non 0 if this is a DROP
* DATABASE command */
-extern void send_simplequery_message(POOL_CONNECTION * backend, int len, char *string, int major);
-extern POOL_STATUS send_extended_protocol_message(POOL_CONNECTION_POOL * backend,
+extern void send_simplequery_message(POOL_CONNECTION *backend, int len, char *string, int major);
+extern POOL_STATUS send_extended_protocol_message(POOL_CONNECTION_POOL *backend,
int node_id, char *kind,
int len, char *string);
-extern int synchronize(POOL_CONNECTION * cp);
-extern void read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *decided_kind);
-extern void read_kind_from_one_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *kind, int node);
-extern void do_error_command(POOL_CONNECTION * backend, int major);
-extern void raise_intentional_error_if_need(POOL_CONNECTION_POOL * backend);
+extern int synchronize(POOL_CONNECTION *cp);
+extern void read_kind_from_backend(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *decided_kind);
+extern void read_kind_from_one_backend(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *kind, int node);
+extern void do_error_command(POOL_CONNECTION *backend, int major);
+extern void raise_intentional_error_if_need(POOL_CONNECTION_POOL *backend);
-extern void pool_at_command_success(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+extern void pool_at_command_success(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
/*
* modules defined in CommandComplete.c
*/
-extern POOL_STATUS CommandComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, bool command_complete);
+extern POOL_STATUS CommandComplete(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, bool command_complete);
-extern int pool_read_message_length(POOL_CONNECTION_POOL * cp);
-extern int *pool_read_message_length2(POOL_CONNECTION_POOL * cp);
-extern signed char pool_read_kind(POOL_CONNECTION_POOL * cp);
-extern int pool_read_int(POOL_CONNECTION_POOL * cp);
+extern int pool_read_message_length(POOL_CONNECTION_POOL *cp);
+extern int *pool_read_message_length2(POOL_CONNECTION_POOL *cp);
+extern signed char pool_read_kind(POOL_CONNECTION_POOL *cp);
+extern int pool_read_int(POOL_CONNECTION_POOL *cp);
/* pool_proto2.c */
-extern POOL_STATUS ErrorResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern POOL_STATUS ErrorResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern void NoticeResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+extern void NoticeResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
-extern char per_node_error_log(POOL_CONNECTION_POOL * backend, int node_id,
+extern char per_node_error_log(POOL_CONNECTION_POOL *backend, int node_id,
char *query, char *prefix, bool unread);
extern void init_pi_set(void);
diff --git a/src/include/query_cache/pool_memqcache.h b/src/include/query_cache/pool_memqcache.h
index 0e6b1aba8..8c809b37e 100644
--- a/src/include/query_cache/pool_memqcache.h
+++ b/src/include/query_cache/pool_memqcache.h
@@ -42,7 +42,7 @@
* block". Each block is assigned a "cache block id", which is
* starting with 0.
*/
-typedef char *POOL_CACHE_BLOCK; /* pointer to cache block */
+typedef char *POOL_CACHE_BLOCK; /* pointer to cache block */
typedef unsigned int POOL_CACHE_BLOCKID; /* cache block id */
typedef unsigned int POOL_CACHE_ITEMID; /* cache item id */
@@ -53,7 +53,7 @@ typedef struct
{
POOL_CACHE_BLOCKID blockid;
POOL_CACHE_ITEMID itemid;
-} POOL_CACHEID; /* cache id */
+} POOL_CACHEID; /* cache id */
/*
* Each block has management space called "cache block header" at the
@@ -70,12 +70,12 @@ typedef struct
unsigned char flags; /* flags. see above */
unsigned int num_items; /* number of items */
unsigned int free_bytes; /* total free space in bytes */
-} POOL_CACHE_BLOCK_HEADER;
+} POOL_CACHE_BLOCK_HEADER;
typedef struct
{
char query_hash[POOL_MD5_HASHKEYLEN];
-} POOL_QUERY_HASH;
+} POOL_QUERY_HASH;
#define POOL_ITEM_USED 0x0001 /* is this item used? */
#define POOL_ITEM_HAS_NEXT 0x0002 /* is this item has "next" item? */
@@ -90,7 +90,7 @@ typedef struct
POOL_CACHEID next; /* next cache item if any */
unsigned int offset; /* item offset in this block */
unsigned char flags; /* flags. see above */
-} POOL_CACHE_ITEM_POINTER;
+} POOL_CACHE_ITEM_POINTER;
/*
* Each block holds several "cache item", which consists of variable
@@ -114,13 +114,13 @@ typedef struct
unsigned int total_length; /* total length in bytes including myself */
time_t timestamp; /* cache creation time */
int64 expire; /* cache expire duration in seconds */
-} POOL_CACHE_ITEM_HEADER;
+} POOL_CACHE_ITEM_HEADER;
typedef struct
{
POOL_CACHE_ITEM_HEADER header; /* cache item header */
char data[1]; /* variable length data follows */
-} POOL_CACHE_ITEM;
+} POOL_CACHE_ITEM;
/*
* Possible the largest free space size in bytes
@@ -134,7 +134,7 @@ typedef struct
extern int memcached_connect(void);
extern void memcached_disconnect(void);
-extern void memqcache_register(char kind, POOL_CONNECTION * frontend, char *data, int data_len);
+extern void memqcache_register(char kind, POOL_CONNECTION *frontend, char *data, int data_len);
/*
* Cache key
@@ -144,7 +144,7 @@ typedef union
POOL_CACHEID cacheid; /* cache key (shmem configuration) */
char hashkey[POOL_MD5_HASHKEYLEN]; /* cache key (memcached
* configuration) */
-} POOL_CACHEKEY;
+} POOL_CACHEKEY;
/*
* Internal buffer structure
@@ -154,7 +154,7 @@ typedef struct
size_t bufsize; /* buffer size */
size_t buflen; /* used length */
char *buf; /* buffer */
-} POOL_INTERNAL_BUFFER;
+} POOL_INTERNAL_BUFFER;
/*
* Temporary query cache buffer
@@ -168,7 +168,7 @@ typedef struct
POOL_INTERNAL_BUFFER *buffer;
int num_oids;
POOL_INTERNAL_BUFFER *oids;
-} POOL_TEMP_QUERY_CACHE;
+} POOL_TEMP_QUERY_CACHE;
/*
* Temporary query cache buffer array
@@ -178,7 +178,7 @@ typedef struct
int num_caches;
int array_size;
POOL_TEMP_QUERY_CACHE *caches[1]; /* actual data continues... */
-} POOL_QUERY_CACHE_ARRAY;
+} POOL_QUERY_CACHE_ARRAY;
/*
* Query cache statistics structure. This area must be placed on shared
@@ -189,7 +189,7 @@ typedef struct
time_t start_time; /* start time when the statistics begins */
long long int num_selects; /* number of successful SELECTs */
long long int num_cache_hits; /* number of SELECTs extracted from cache */
-} POOL_QUERY_CACHE_STATS;
+} POOL_QUERY_CACHE_STATS;
/*
* Shared memory cache stats interface.
@@ -207,7 +207,7 @@ typedef struct
* fragment(unusable) cache
* entries */
POOL_QUERY_CACHE_STATS cache_stats;
-} POOL_SHMEM_STATS;
+} POOL_SHMEM_STATS;
/*--------------------------------------------------------------------------------
* On shared memory hash table implementation
@@ -220,7 +220,7 @@ typedef struct POOL_HASH_ELEMENT
struct POOL_HASH_ELEMENT *next; /* link to next entry */
POOL_QUERY_HASH hashkey; /* MD5 hash key */
POOL_CACHEID cacheid; /* logical location of this cache element */
-} POOL_HASH_ELEMENT;
+} POOL_HASH_ELEMENT;
typedef uint32 POOL_HASH_KEY;
@@ -229,7 +229,7 @@ typedef struct
{
POOL_HASH_KEY hashkey; /* hash key */
POOL_HASH_ELEMENT *element; /* hash element */
-} POOL_HEADER_ELEMENT;
+} POOL_HEADER_ELEMENT;
/* Hash header */
typedef struct
@@ -237,7 +237,7 @@ typedef struct
long nhash; /* number of hash keys (power of 2) */
uint32 mask; /* mask for hash function */
POOL_HEADER_ELEMENT elements[1]; /* actual hash elements follows */
-} POOL_HASH_HEADER;
+} POOL_HASH_HEADER;
typedef enum
{
@@ -247,16 +247,16 @@ typedef enum
extern int pool_hash_init(int nelements);
extern size_t pool_hash_size(int nelements);
-extern POOL_CACHEID * pool_hash_search(POOL_QUERY_HASH * key);
-extern int pool_hash_delete(POOL_QUERY_HASH * key);
+extern POOL_CACHEID *pool_hash_search(POOL_QUERY_HASH *key);
+extern int pool_hash_delete(POOL_QUERY_HASH *key);
extern uint32 hash_any(unsigned char *k, int keylen);
-extern POOL_STATUS pool_fetch_from_memory_cache(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+extern POOL_STATUS pool_fetch_from_memory_cache(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
char *contents, bool use_fake_cache, bool *foundp);
-extern int pool_fetch_cache(POOL_CONNECTION_POOL * backend, const char *query, char **buf, size_t *len);
-extern int pool_catalog_commit_cache(POOL_CONNECTION_POOL * backend, char *query, char *data, size_t datalen);
+extern int pool_fetch_cache(POOL_CONNECTION_POOL *backend, const char *query, char **buf, size_t *len);
+extern int pool_catalog_commit_cache(POOL_CONNECTION_POOL *backend, char *query, char *data, size_t datalen);
extern bool pool_is_likely_select(char *query);
extern bool pool_is_table_in_unsafe_list(const char *table_name);
@@ -276,26 +276,26 @@ extern size_t pool_shared_memory_fsmm_size(void);
extern int pool_init_fsmm(size_t size);
extern void pool_allocate_fsmm_clock_hand(void);
-extern POOL_QUERY_CACHE_ARRAY * pool_create_query_cache_array(void);
-extern void pool_discard_query_cache_array(POOL_QUERY_CACHE_ARRAY * cache_array);
+extern POOL_QUERY_CACHE_ARRAY *pool_create_query_cache_array(void);
+extern void pool_discard_query_cache_array(POOL_QUERY_CACHE_ARRAY *cache_array);
-extern POOL_TEMP_QUERY_CACHE * pool_create_temp_query_cache(char *query);
-extern void pool_handle_query_cache(POOL_CONNECTION_POOL * backend, char *query, Node *node, char state,
- bool partial_fetch);
+extern POOL_TEMP_QUERY_CACHE *pool_create_temp_query_cache(char *query);
+extern void pool_handle_query_cache(POOL_CONNECTION_POOL *backend, char *query, Node *node, char state,
+ bool partial_fetch);
extern int pool_init_memqcache_stats(void);
-extern POOL_QUERY_CACHE_STATS * pool_get_memqcache_stats(void);
+extern POOL_QUERY_CACHE_STATS *pool_get_memqcache_stats(void);
extern void pool_reset_memqcache_stats(void);
extern long long int pool_stats_count_up_num_selects(long long int num);
extern long long int pool_stats_count_up_num_cache_hits(void);
extern long long int pool_tmp_stats_count_up_num_selects(void);
extern long long int pool_tmp_stats_get_num_selects(void);
extern void pool_tmp_stats_reset_num_selects(void);
-extern POOL_SHMEM_STATS * pool_get_shmem_storage_stats(void);
+extern POOL_SHMEM_STATS *pool_get_shmem_storage_stats(void);
-extern POOL_TEMP_QUERY_CACHE * pool_get_current_cache(void);
-extern POOL_TEMP_QUERY_CACHE * pool_get_current_cache(void);
-extern void pool_discard_temp_query_cache(POOL_TEMP_QUERY_CACHE * temp_cache);
+extern POOL_TEMP_QUERY_CACHE *pool_get_current_cache(void);
+extern POOL_TEMP_QUERY_CACHE *pool_get_current_cache(void);
+extern void pool_discard_temp_query_cache(POOL_TEMP_QUERY_CACHE *temp_cache);
extern void pool_discard_current_temp_query_cache(void);
extern void pool_shmem_lock(POOL_MEMQ_LOCK_TYPE type);
@@ -308,6 +308,6 @@ extern void pool_init_whole_cache_blocks(void);
extern void clear_query_cache(void);
-extern bool query_cache_delete_by_stmt(char *query, POOL_CONNECTION_POOL * backend);
+extern bool query_cache_delete_by_stmt(char *query, POOL_CONNECTION_POOL *backend);
#endif /* POOL_MEMQCACHE_H */
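The hash-table entry points above key the shared-memory cache by the MD5 text of a query (POOL_MD5_HASHKEYLEN bytes) and resolve it to a (blockid, itemid) pair. A hedged sketch of a lookup; producing the MD5 digest is out of scope here, and the wrapper is illustrative:

    #include <string.h>
    #include "pool.h"
    #include "query_cache/pool_memqcache.h"

    static bool
    query_is_cached(const char *md5_of_query)
    {
        POOL_QUERY_HASH key;
        POOL_CACHEID   *cacheid;

        memcpy(key.query_hash, md5_of_query, POOL_MD5_HASHKEYLEN);
        cacheid = pool_hash_search(&key);
        return cacheid != NULL;        /* non-NULL: cached item located */
    }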
diff --git a/src/include/rewrite/pool_lobj.h b/src/include/rewrite/pool_lobj.h
index edd1373eb..192fab2b1 100644
--- a/src/include/rewrite/pool_lobj.h
+++ b/src/include/rewrite/pool_lobj.h
@@ -27,6 +27,6 @@
#define POOL_LOBJ_H
#include "pool.h"
-extern char *pool_rewrite_lo_creat(char kind, char *packet, int packet_len, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int *len);
+extern char *pool_rewrite_lo_creat(char kind, char *packet, int packet_len, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, int *len);
#endif /* POOL_LOBJ_H */
diff --git a/src/include/rewrite/pool_timestamp.h b/src/include/rewrite/pool_timestamp.h
index e58ddfb0b..d8472ab58 100644
--- a/src/include/rewrite/pool_timestamp.h
+++ b/src/include/rewrite/pool_timestamp.h
@@ -31,8 +31,8 @@
#include "parser/nodes.h"
#include "context/pool_session_context.h"
-extern char *rewrite_timestamp(POOL_CONNECTION_POOL * backend, Node *node, bool rewrite_to_params, POOL_SENT_MESSAGE * message);
-extern char *bind_rewrite_timestamp(POOL_CONNECTION_POOL * backend, POOL_SENT_MESSAGE * message, const char *orig_msg, int *len);
+extern char *rewrite_timestamp(POOL_CONNECTION_POOL *backend, Node *node, bool rewrite_to_params, POOL_SENT_MESSAGE *message);
+extern char *bind_rewrite_timestamp(POOL_CONNECTION_POOL *backend, POOL_SENT_MESSAGE *message, const char *orig_msg, int *len);
extern bool isSystemType(Node *node, const char *name);
#endif /* POOL_TIMESTAMP_H */
diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h
index 92eb298fa..70960fd08 100644
--- a/src/include/utils/elog.h
+++ b/src/include/utils/elog.h
@@ -209,7 +209,7 @@ typedef enum
extern bool message_level_is_interesting(int elevel);
extern bool errstart(int elevel, const char *filename, int lineno,
- const char *funcname, const char *domain);
+ const char *funcname, const char *domain);
extern void errfinish(int dummy,...);
#define errcode(sqlerrcode) \
@@ -222,56 +222,56 @@ extern int return_code(int retcode);
extern int get_return_code(void);
extern int
-errmsg(const char *fmt,...)
+ errmsg(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errmsg_internal(const char *fmt,...)
+ errmsg_internal(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errmsg_plural(const char *fmt_singular, const char *fmt_plural,
- unsigned long n,...)
+ errmsg_plural(const char *fmt_singular, const char *fmt_plural,
+ unsigned long n,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 4)))
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 4)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 4)))
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 4)));
extern int
-errdetail(const char *fmt,...)
+ errdetail(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errdetail_internal(const char *fmt,...)
+ errdetail_internal(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errdetail_log(const char *fmt,...)
+ errdetail_log(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int
-errdetail_plural(const char *fmt_singular, const char *fmt_plural,
- unsigned long n,...)
+ errdetail_plural(const char *fmt_singular, const char *fmt_plural,
+ unsigned long n,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 4)))
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 4)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 4)))
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 4)));
extern int
-errhint(const char *fmt,...)
+ errhint(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
/*
* errcontext() is typically called in error context callback functions, not
@@ -285,10 +285,10 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int set_errcontext_domain(const char *domain);
extern int
-errcontext_msg(const char *fmt,...)
+ errcontext_msg(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
extern int errhidestmt(bool hide_stmt);
@@ -342,10 +342,10 @@ extern int getinternalerrposition(void);
extern void elog_start(const char *filename, int lineno, const char *funcname);
extern void
-elog_finish(int elevel, const char *fmt,...)
+ elog_finish(int elevel, const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
/* Support for attaching context information to error reports */
@@ -516,10 +516,10 @@ extern void set_syslog_parameters(const char *ident, int facility);
* safely (memory context, GUC load etc)
*/
extern void
-write_stderr(const char *fmt,...)
+ write_stderr(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+ __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
void shmem_exit(int code);
void on_exit_reset(void);
diff --git a/src/include/utils/fe_ports.h b/src/include/utils/fe_ports.h
index 5a5811e5c..d7c783024 100644
--- a/src/include/utils/fe_ports.h
+++ b/src/include/utils/fe_ports.h
@@ -54,16 +54,16 @@ void *repalloc(void *pointer, Size size);
#ifdef __GNUC__
extern int
-errhint(const char *fmt,...)
-__attribute__((format(printf, 1, 2)));
+ errhint(const char *fmt,...)
+ __attribute__((format(printf, 1, 2)));
extern int
-errdetail(const char *fmt,...)
-__attribute__((format(printf, 1, 2)));
+ errdetail(const char *fmt,...)
+ __attribute__((format(printf, 1, 2)));
extern void
-errmsg(const char *fmt,...)
-__attribute__((format(printf, 1, 2)));
+ errmsg(const char *fmt,...)
+ __attribute__((format(printf, 1, 2)));
#else
extern int errhint(const char *fmt,...);
extern int errdetail(const char *fmt,...);
@@ -71,7 +71,7 @@ extern void errmsg(const char *fmt,...);
#endif
extern bool errstart(int elevel, const char *filename, int lineno,
- const char *funcname, const char *domain);
+ const char *funcname, const char *domain);
extern void errfinish(int dummy,...);
/*
diff --git a/src/include/utils/getopt_long.h b/src/include/utils/getopt_long.h
index dd5d123e2..a5432c712 100644
--- a/src/include/utils/getopt_long.h
+++ b/src/include/utils/getopt_long.h
@@ -36,9 +36,9 @@ struct option
#ifndef HAVE_GETOPT_LONG
-extern int getopt_long(int argc, char *const argv[],
- const char *optstring,
- const struct option *longopts, int *longindex);
+extern int getopt_long(int argc, char *const argv[],
+ const char *optstring,
+ const struct option *longopts, int *longindex);
#endif
#endif /* GETOPT_LONG_H */
diff --git a/src/include/utils/json.h b/src/include/utils/json.h
index 67cc0255a..cb4378a9c 100644
--- a/src/include/utils/json.h
+++ b/src/include/utils/json.h
@@ -95,7 +95,7 @@ extern "C"
size_t value_extra; /* how much extra space to allocate for
* values? */
- } json_settings;
+ } json_settings;
#define json_enable_comments 0x01
@@ -110,7 +110,7 @@ extern "C"
json_boolean,
json_null
- } json_type;
+ } json_type;
extern const struct _json_value json_value_none;
@@ -121,7 +121,7 @@ extern "C"
struct _json_value *value;
- } json_object_entry;
+ } json_object_entry;
typedef struct _json_value
{
@@ -285,13 +285,13 @@ extern "C"
#endif
- } json_value;
+ } json_value;
json_value *json_parse(const json_char * json,
size_t length);
#define json_error_max 128
- json_value *json_parse_ex(json_settings * settings,
+ json_value *json_parse_ex(json_settings *settings,
const json_char * json,
size_t length,
char *error);
@@ -302,7 +302,7 @@ extern "C"
/* Not usually necessary, unless you used a custom mem_alloc and now want to
* use a custom mem_free.
*/
- void json_value_free_ex(json_settings * settings,
+ void json_value_free_ex(json_settings *settings,
json_value *);
@@ -311,10 +311,10 @@ extern "C"
#endif
/* pgpool-II extensions */
-json_value *json_get_value_for_key(json_value * source, const char *key);
-int json_get_int_value_for_key(json_value * source, const char *key, int *value);
-int json_get_long_value_for_key(json_value * source, const char *key, long *value);
-char *json_get_string_value_for_key(json_value * source, const char *key);
-int json_get_bool_value_for_key(json_value * source, const char *key, bool *value);
+json_value *json_get_value_for_key(json_value *source, const char *key);
+int json_get_int_value_for_key(json_value *source, const char *key, int *value);
+int json_get_long_value_for_key(json_value *source, const char *key, long *value);
+char *json_get_string_value_for_key(json_value *source, const char *key);
+int json_get_bool_value_for_key(json_value *source, const char *key, bool *value);
#endif
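The pgpool-II extensions at the end of this header wrap the bundled json-parser library. A sketch of parsing a buffer and reading one integer field; the key name is illustrative, and json_value_free() is assumed to be the parser's standard cleanup call:

    #include <string.h>
    #include "utils/json.h"

    static int
    get_node_count(const char *json_text)
    {
        int         count = -1;
        json_value *root = json_parse(json_text, strlen(json_text));

        if (root)
        {
            json_get_int_value_for_key(root, "node_count", &count);
            json_value_free(root);
        }
        return count;
    }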
diff --git a/src/include/utils/json_writer.h b/src/include/utils/json_writer.h
index bc9c78b07..028e03234 100644
--- a/src/include/utils/json_writer.h
+++ b/src/include/utils/json_writer.h
@@ -28,13 +28,13 @@ typedef enum JWElementType
{
JWOBJECT,
JWARRAY
-} JWElementType;
+} JWElementType;
typedef struct JWStack
{
JWElementType elementType;
int elementCount;
-} JWStack;
+} JWStack;
typedef struct JsonNode
{
@@ -42,28 +42,28 @@ typedef struct JsonNode
bool pretty;
int stack_ptr;
JWStack stack[MAX_STACK_DEPTH];
-} JsonNode;
+} JsonNode;
-extern JsonNode * jw_create(JWElementType rootElement, bool pretty_output);
-extern JsonNode * jw_create_with_array(bool pretty_output);
-extern JsonNode * jw_create_with_object(bool pretty_output);
-extern bool jw_put_string(JsonNode * jNode, char *key, char *value);
-extern bool jw_put_int(JsonNode * jNode, char *key, int value);
-extern bool jw_put_bool(JsonNode * jNode, char *key, bool value);
-extern bool jw_put_long(JsonNode * jNode, char *key, long value);
-extern bool jw_put_null(JsonNode * jNode, char *key);
-extern bool jw_put_string_value(JsonNode * jNode, char *value);
-extern bool jw_put_int_value(JsonNode * jNode, int value);
-extern bool jw_put_bool_value(JsonNode * jNode, bool value);
-extern bool jw_put_long_value(JsonNode * jNode, long value);
-extern bool jw_put_null_value(JsonNode * jNode);
-extern bool jw_start_element(JsonNode * jNode, JWElementType element, char *key);
-extern bool jw_start_array(JsonNode * jNode, char *key);
-extern bool jw_start_object(JsonNode * jNode, char *key);
-extern bool jw_end_element(JsonNode * jNode);
-extern bool jw_finish_document(JsonNode * jNode);
-extern char *jw_get_json_string(JsonNode * jNode);
-extern int jw_get_json_length(JsonNode * jNode);
-extern void jw_destroy(JsonNode * jNode);
+extern JsonNode *jw_create(JWElementType rootElement, bool pretty_output);
+extern JsonNode *jw_create_with_array(bool pretty_output);
+extern JsonNode *jw_create_with_object(bool pretty_output);
+extern bool jw_put_string(JsonNode *jNode, char *key, char *value);
+extern bool jw_put_int(JsonNode *jNode, char *key, int value);
+extern bool jw_put_bool(JsonNode *jNode, char *key, bool value);
+extern bool jw_put_long(JsonNode *jNode, char *key, long value);
+extern bool jw_put_null(JsonNode *jNode, char *key);
+extern bool jw_put_string_value(JsonNode *jNode, char *value);
+extern bool jw_put_int_value(JsonNode *jNode, int value);
+extern bool jw_put_bool_value(JsonNode *jNode, bool value);
+extern bool jw_put_long_value(JsonNode *jNode, long value);
+extern bool jw_put_null_value(JsonNode *jNode);
+extern bool jw_start_element(JsonNode *jNode, JWElementType element, char *key);
+extern bool jw_start_array(JsonNode *jNode, char *key);
+extern bool jw_start_object(JsonNode *jNode, char *key);
+extern bool jw_end_element(JsonNode *jNode);
+extern bool jw_finish_document(JsonNode *jNode);
+extern char *jw_get_json_string(JsonNode *jNode);
+extern int jw_get_json_length(JsonNode *jNode);
+extern void jw_destroy(JsonNode *jNode);
#endif
diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h
index 0df8cd138..bdf93ea2a 100644
--- a/src/include/utils/memutils.h
+++ b/src/include/utils/memutils.h
@@ -73,14 +73,14 @@ extern void MemoryContextResetOnly(MemoryContext context);
extern void MemoryContextResetChildren(MemoryContext context);
extern void MemoryContextDeleteChildren(MemoryContext context);
extern void MemoryContextSetParent(MemoryContext context,
- MemoryContext new_parent);
+ MemoryContext new_parent);
extern Size GetMemoryChunkSpace(void *pointer);
extern MemoryContext MemoryContextGetParent(MemoryContext context);
extern bool MemoryContextIsEmpty(MemoryContext context);
extern void MemoryContextStats(MemoryContext context);
extern void MemoryContextStatsDetail(MemoryContext context, int max_children);
extern void MemoryContextAllowInCriticalSection(MemoryContext context,
- bool allow);
+ bool allow);
#ifdef MEMORY_CONTEXT_CHECKING
extern void MemoryContextCheck(MemoryContext context);
@@ -129,9 +129,9 @@ GetMemoryChunkContext(void *pointer)
* specific creation routines, and noplace else.
*/
extern MemoryContext MemoryContextCreate(NodeTag tag, Size size,
- MemoryContextMethods *methods,
- MemoryContext parent,
- const char *name);
+ MemoryContextMethods *methods,
+ MemoryContext parent,
+ const char *name);
/*
@@ -140,16 +140,16 @@ extern MemoryContext MemoryContextCreate(NodeTag tag, Size size,
/* aset.c */
extern MemoryContext AllocSetContextCreate(MemoryContext parent,
- const char *name,
- Size minContextSize,
- Size initBlockSize,
- Size maxBlockSize);
+ const char *name,
+ Size minContextSize,
+ Size initBlockSize,
+ Size maxBlockSize);
/* slab.c */
extern MemoryContext SlabContextCreate(MemoryContext parent,
- const char *name,
- Size blockSize,
- Size chunkSize);
+ const char *name,
+ Size blockSize,
+ Size chunkSize);
/*
* Recommended default alloc parameters, suitable for "ordinary" contexts
diff --git a/src/include/utils/palloc.h b/src/include/utils/palloc.h
index b6d0718fd..541e8e57e 100644
--- a/src/include/utils/palloc.h
+++ b/src/include/utils/palloc.h
@@ -76,7 +76,7 @@ extern MemoryContext CurrentMemoryContext;
extern void *MemoryContextAlloc(MemoryContext context, Size size);
extern void *MemoryContextAllocZero(MemoryContext context, Size size);
extern void *MemoryContextAllocExtended(MemoryContext context,
- Size size, int flags);
+ Size size, int flags);
extern void *palloc(Size size);
extern void *palloc0(Size size);
@@ -108,7 +108,7 @@ MemoryContextSwitchTo(MemoryContext context)
/* Registration of memory context reset/delete callbacks */
extern void MemoryContextRegisterResetCallback(MemoryContext context,
- MemoryContextCallback *cb);
+ MemoryContextCallback *cb);
/*
* These are like standard strdup() except the copied string is
diff --git a/src/include/utils/pool_ip.h b/src/include/utils/pool_ip.h
index beccb2766..caa1a7cc8 100644
--- a/src/include/utils/pool_ip.h
+++ b/src/include/utils/pool_ip.h
@@ -32,24 +32,24 @@
#include "pool_type.h"
-extern int SockAddr_cidr_mask(struct sockaddr_storage *mask,
- char *numbits, int family);
+extern int SockAddr_cidr_mask(struct sockaddr_storage *mask,
+ char *numbits, int family);
typedef void (*PgIfAddrCallback) (struct sockaddr *addr, struct sockaddr *netmask, void *cb_data);
-extern int getaddrinfo_all(const char *hostname, const char *servname,
- const struct addrinfo *hintp,
- struct addrinfo **result);
+extern int getaddrinfo_all(const char *hostname, const char *servname,
+ const struct addrinfo *hintp,
+ struct addrinfo **result);
extern void freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai);
-extern int getnameinfo_all(const struct sockaddr_storage *addr, int salen,
- char *node, int nodelen,
- char *service, int servicelen,
- int flags);
+extern int getnameinfo_all(const struct sockaddr_storage *addr, int salen,
+ char *node, int nodelen,
+ char *service, int servicelen,
+ int flags);
-extern int rangeSockAddr(const struct sockaddr_storage *addr,
- const struct sockaddr_storage *netaddr,
- const struct sockaddr_storage *netmask);
+extern int rangeSockAddr(const struct sockaddr_storage *addr,
+ const struct sockaddr_storage *netaddr,
+ const struct sockaddr_storage *netmask);
/* imported from PostgreSQL getaddrinfo.c */
diff --git a/src/include/utils/pool_params.h b/src/include/utils/pool_params.h
index c50eca60f..7a95ede28 100644
--- a/src/include/utils/pool_params.h
+++ b/src/include/utils/pool_params.h
@@ -26,14 +26,14 @@ typedef struct
int num; /* number of entries */
char **names; /* parameter names */
char **values; /* values */
-} ParamStatus;
+} ParamStatus;
-extern int pool_init_params(ParamStatus * params);
-extern void pool_discard_params(ParamStatus * params);
-extern char *pool_find_name(ParamStatus * params, char *name, int *pos);
-extern int pool_get_param(ParamStatus * params, int index, char **name, char **value);
-extern int pool_add_param(ParamStatus * params, char *name, char *value);
-extern void pool_param_debug_print(ParamStatus * params);
+extern int pool_init_params(ParamStatus *params);
+extern void pool_discard_params(ParamStatus *params);
+extern char *pool_find_name(ParamStatus *params, char *name, int *pos);
+extern int pool_get_param(ParamStatus *params, int index, char **name, char **value);
+extern int pool_add_param(ParamStatus *params, char *name, char *value);
+extern void pool_param_debug_print(ParamStatus *params);
-#endif /* pool_params_h */
+#endif /* pool_params_h */
diff --git a/src/include/utils/pool_process_reporting.h b/src/include/utils/pool_process_reporting.h
index daed4e62d..087a93c93 100644
--- a/src/include/utils/pool_process_reporting.h
+++ b/src/include/utils/pool_process_reporting.h
@@ -26,30 +26,30 @@
#ifndef POOL_PROCESS_REPORTING_H
#define POOL_PROCESS_REPORTING_H
-extern void send_row_description(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
- short num_fields, char **field_names);
-extern void send_complete_and_ready(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *message, const int num_rows);
-extern POOL_REPORT_CONFIG * get_config(int *nrows);
-extern POOL_REPORT_POOLS * get_pools(int *nrows);
-extern POOL_REPORT_PROCESSES * get_processes(int *nrows);
-extern POOL_REPORT_NODES * get_nodes(int *nrows, int node_id);
-extern POOL_REPORT_VERSION * get_version(void);
+extern void send_row_description(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
+ short num_fields, char **field_names);
+extern void send_complete_and_ready(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *message, const int num_rows);
+extern POOL_REPORT_CONFIG *get_config(int *nrows);
+extern POOL_REPORT_POOLS *get_pools(int *nrows);
+extern POOL_REPORT_PROCESSES *get_processes(int *nrows);
+extern POOL_REPORT_NODES *get_nodes(int *nrows, int node_id);
+extern POOL_REPORT_VERSION *get_version(void);
extern POOL_HEALTH_CHECK_STATS *get_health_check_stats(int *nrows);
extern POOL_BACKEND_STATS *get_backend_stats(int *nrows);
-extern void config_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void pools_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void processes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void nodes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void version_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void cache_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void show_health_check_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
-extern void show_backend_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+extern void config_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void pools_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void processes_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void nodes_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void version_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void cache_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void show_health_check_stats(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
+extern void show_backend_stats(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
-extern void send_config_var_detail_row(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *name, const char *value, const char *description);
-extern void send_config_var_value_only_row(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *value);
+extern void send_config_var_detail_row(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *name, const char *value, const char *description);
+extern void send_config_var_value_only_row(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *value);
extern char *get_backend_status_string(BACKEND_STATUS status);
-extern int * pool_report_pools_offsets(int *n);
+extern int *pool_report_pools_offsets(int *n);
#endif
diff --git a/src/include/utils/pool_relcache.h b/src/include/utils/pool_relcache.h
index 70b7d3143..16f2fcd61 100644
--- a/src/include/utils/pool_relcache.h
+++ b/src/include/utils/pool_relcache.h
@@ -44,7 +44,7 @@ typedef struct
int refcnt; /* reference count */
int session_id; /* LocalSessionId */
time_t expire; /* cache expiration absolute time in seconds */
-} PoolRelCache;
+} PoolRelCache;
#define MAX_QUERY_LENGTH 1500
typedef struct
@@ -69,17 +69,17 @@ typedef struct
bool no_cache_if_zero; /* if register func returns 0, do not
* cache the data */
PoolRelCache *cache; /* cache data */
-} POOL_RELCACHE;
+} POOL_RELCACHE;
-extern POOL_RELCACHE * pool_create_relcache(int cachesize, char *sql,
- func_ptr register_func, func_ptr unregister_func,
- bool issessionlocal);
-extern void pool_discard_relcache(POOL_RELCACHE * relcache);
-extern void *pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, char *table);
+extern POOL_RELCACHE *pool_create_relcache(int cachesize, char *sql,
+ func_ptr register_func, func_ptr unregister_func,
+ bool issessionlocal);
+extern void pool_discard_relcache(POOL_RELCACHE *relcache);
+extern void *pool_search_relcache(POOL_RELCACHE *relcache, POOL_CONNECTION_POOL *backend, char *table);
extern char *remove_quotes_and_schema_from_relname(char *table);
-extern void *int_register_func(POOL_SELECT_RESULT * res);
+extern void *int_register_func(POOL_SELECT_RESULT *res);
extern void *int_unregister_func(void *data);
-extern void *string_register_func(POOL_SELECT_RESULT * res);
+extern void *string_register_func(POOL_SELECT_RESULT *res);
extern void *string_unregister_func(void *data);
extern bool SplitIdentifierString(char *rawstring, char separator, Node **namelist);
diff --git a/src/include/utils/pool_select_walker.h b/src/include/utils/pool_select_walker.h
index 0ae7ffe7c..9eb9760c8 100644
--- a/src/include/utils/pool_select_walker.h
+++ b/src/include/utils/pool_select_walker.h
@@ -51,8 +51,8 @@ typedef struct
bool row_security; /* true if row security enabled */
int num_oids; /* number of oids */
int table_oids[POOL_MAX_SELECT_OIDS]; /* table oids */
- char table_names[POOL_MAX_SELECT_OIDS][NAMEDATALEN]; /* table names */
-} SelectContext;
+ char table_names[POOL_MAX_SELECT_OIDS][NAMEDATALEN]; /* table names */
+} SelectContext;
extern int pool_get_terminate_backend_pid(Node *node);
extern bool pool_has_function_call(Node *node);
@@ -68,7 +68,7 @@ extern bool pool_has_pgpool_regclass(void);
extern bool pool_has_to_regclass(void);
extern bool raw_expression_tree_walker(Node *node, bool (*walker) (), void *context);
extern int pool_table_name_to_oid(char *table_name);
-extern int pool_extract_table_oids_from_select_stmt(Node *node, SelectContext * ctx);
+extern int pool_extract_table_oids_from_select_stmt(Node *node, SelectContext *ctx);
extern RangeVar *makeRangeVarFromNameList(List *names);
extern char *make_table_name_from_rangevar(RangeVar *rangevar);
extern char *make_function_name_from_funccall(FuncCall *fcall);
diff --git a/src/include/utils/pool_ssl.h b/src/include/utils/pool_ssl.h
index 8ea6398cf..3420e2842 100644
--- a/src/include/utils/pool_ssl.h
+++ b/src/include/utils/pool_ssl.h
@@ -69,13 +69,13 @@ CD1mpF1Bn5x8vYlLIhkmuquiXsNV6TILOwIBAg==\n\
#endif
-extern void pool_ssl_negotiate_serverclient(POOL_CONNECTION * cp);
-extern void pool_ssl_negotiate_clientserver(POOL_CONNECTION * cp);
-extern void pool_ssl_close(POOL_CONNECTION * cp);
-extern int pool_ssl_read(POOL_CONNECTION * cp, void *buf, int size);
-extern int pool_ssl_write(POOL_CONNECTION * cp, const void *buf, int size);
-extern bool pool_ssl_pending(POOL_CONNECTION * cp);
+extern void pool_ssl_negotiate_serverclient(POOL_CONNECTION *cp);
+extern void pool_ssl_negotiate_clientserver(POOL_CONNECTION *cp);
+extern void pool_ssl_close(POOL_CONNECTION *cp);
+extern int pool_ssl_read(POOL_CONNECTION *cp, void *buf, int size);
+extern int pool_ssl_write(POOL_CONNECTION *cp, const void *buf, int size);
+extern bool pool_ssl_pending(POOL_CONNECTION *cp);
extern int SSL_ServerSide_init(void);
-#endif /* pool_ssl_h */
+#endif /* pool_ssl_h */
diff --git a/src/include/utils/pool_stream.h b/src/include/utils/pool_stream.h
index 8ddba327b..6d639af15 100644
--- a/src/include/utils/pool_stream.h
+++ b/src/include/utils/pool_stream.h
@@ -44,30 +44,30 @@
(connection)->len = 0; \
} while (0)
-extern POOL_CONNECTION * pool_open(int fd, bool backend_connection);
-extern void pool_close(POOL_CONNECTION * cp);
-extern int pool_read(POOL_CONNECTION * cp, void *buf, int len);
-extern void pool_read_with_error(POOL_CONNECTION * cp, void *buf, int len,
- const char *err_context);
+extern POOL_CONNECTION *pool_open(int fd, bool backend_connection);
+extern void pool_close(POOL_CONNECTION *cp);
+extern int pool_read(POOL_CONNECTION *cp, void *buf, int len);
+extern void pool_read_with_error(POOL_CONNECTION *cp, void *buf, int len,
+ const char *err_context);
-extern char *pool_read2(POOL_CONNECTION * cp, int len);
-extern int pool_write(POOL_CONNECTION * cp, void *buf, int len);
-extern int pool_write_noerror(POOL_CONNECTION * cp, void *buf, int len);
-extern int pool_flush(POOL_CONNECTION * cp);
-extern int pool_flush_noerror(POOL_CONNECTION * cp);
-extern int pool_flush_it(POOL_CONNECTION * cp);
-extern void pool_write_and_flush(POOL_CONNECTION * cp, void *buf, int len);
-extern int pool_write_and_flush_noerror(POOL_CONNECTION * cp, void *buf, int len);
-extern char *pool_read_string(POOL_CONNECTION * cp, int *len, int line);
-extern int pool_unread(POOL_CONNECTION * cp, void *data, int len);
-extern int pool_push(POOL_CONNECTION * cp, void *data, int len);
-extern void pool_pop(POOL_CONNECTION * cp, int *len);
-extern int pool_stacklen(POOL_CONNECTION * cp);
+extern char *pool_read2(POOL_CONNECTION *cp, int len);
+extern int pool_write(POOL_CONNECTION *cp, void *buf, int len);
+extern int pool_write_noerror(POOL_CONNECTION *cp, void *buf, int len);
+extern int pool_flush(POOL_CONNECTION *cp);
+extern int pool_flush_noerror(POOL_CONNECTION *cp);
+extern int pool_flush_it(POOL_CONNECTION *cp);
+extern void pool_write_and_flush(POOL_CONNECTION *cp, void *buf, int len);
+extern int pool_write_and_flush_noerror(POOL_CONNECTION *cp, void *buf, int len);
+extern char *pool_read_string(POOL_CONNECTION *cp, int *len, int line);
+extern int pool_unread(POOL_CONNECTION *cp, void *data, int len);
+extern int pool_push(POOL_CONNECTION *cp, void *data, int len);
+extern void pool_pop(POOL_CONNECTION *cp, int *len);
+extern int pool_stacklen(POOL_CONNECTION *cp);
-extern void pool_set_db_node_id(POOL_CONNECTION * con, int db_node_id);
+extern void pool_set_db_node_id(POOL_CONNECTION *con, int db_node_id);
extern void pool_set_timeout(int timeoutval);
extern int pool_get_timeout(void);
-extern int pool_check_fd(POOL_CONNECTION * cp);
+extern int pool_check_fd(POOL_CONNECTION *cp);
#endif /* POOL_STREAM_H */
diff --git a/src/include/utils/ps_status.h b/src/include/utils/ps_status.h
index a8fb3f1a5..12f9f22d5 100644
--- a/src/include/utils/ps_status.h
+++ b/src/include/utils/ps_status.h
@@ -24,14 +24,15 @@
#include "pool.h"
#include <netdb.h>
-extern char remote_ps_data[NI_MAXHOST + NI_MAXSERV + 2]; /* used for set_ps_display */
+extern char remote_ps_data[NI_MAXHOST + NI_MAXSERV + 2]; /* used for
+ * set_ps_display */
extern char **save_ps_display_args(int argc, char **argv);
extern void init_ps_display(const char *username, const char *dbname,
- const char *host_info, const char *initial_str);
+ const char *host_info, const char *initial_str);
extern void set_ps_display(const char *activity, bool force);
extern const char *get_ps_display(int *displen);
-extern void pool_ps_idle_display(POOL_CONNECTION_POOL * backend);
+extern void pool_ps_idle_display(POOL_CONNECTION_POOL *backend);
-#endif /* ps_status_h */
+#endif /* ps_status_h */
diff --git a/src/include/utils/regex_array.h b/src/include/utils/regex_array.h
index 2317a56d7..1ad9d7283 100644
--- a/src/include/utils/regex_array.h
+++ b/src/include/utils/regex_array.h
@@ -34,12 +34,12 @@ typedef struct
int size; /* regex array size */
int pos; /* next regex array index position */
regex_t **regex; /* regular expression array */
-} RegArray;
+} RegArray;
RegArray *create_regex_array(void);
-int add_regex_array(RegArray * ar, char *pattern);
-int regex_array_match(RegArray * ar, char *pattern);
-void destroy_regex_array(RegArray * ar);
+int add_regex_array(RegArray *ar, char *pattern);
+int regex_array_match(RegArray *ar, char *pattern);
+void destroy_regex_array(RegArray *ar);
/*
* String left-right token type
@@ -49,16 +49,16 @@ typedef struct
char *left_token;
char *right_token;
double weight_token;
-} Left_right_token;
+} Left_right_token;
typedef struct
{
int pos;
int size;
Left_right_token *token;
-} Left_right_tokens;
+} Left_right_tokens;
Left_right_tokens *create_lrtoken_array(void);
-void extract_string_tokens2(char *str, char *delimi, char delimi2, Left_right_tokens * lrtokens);
+void extract_string_tokens2(char *str, char *delimi, char delimi2, Left_right_tokens *lrtokens);
#endif
diff --git a/src/include/utils/sha2.h b/src/include/utils/sha2.h
index 85104b3b6..487b26dbc 100644
--- a/src/include/utils/sha2.h
+++ b/src/include/utils/sha2.h
@@ -95,22 +95,22 @@ typedef struct pg_sha512_ctx pg_sha384_ctx;
/* Interface routines for SHA224/256/384/512 */
extern void pg_sha224_init(pg_sha224_ctx *ctx);
extern void pg_sha224_update(pg_sha224_ctx *ctx, const uint8 *input0,
- size_t len);
+ size_t len);
extern void pg_sha224_final(pg_sha224_ctx *ctx, uint8 *dest);
extern void pg_sha256_init(pg_sha256_ctx *ctx);
extern void pg_sha256_update(pg_sha256_ctx *ctx, const uint8 *input0,
- size_t len);
+ size_t len);
extern void pg_sha256_final(pg_sha256_ctx *ctx, uint8 *dest);
extern void pg_sha384_init(pg_sha384_ctx *ctx);
extern void pg_sha384_update(pg_sha384_ctx *ctx,
- const uint8 *, size_t len);
+ const uint8 *, size_t len);
extern void pg_sha384_final(pg_sha384_ctx *ctx, uint8 *dest);
extern void pg_sha512_init(pg_sha512_ctx *ctx);
extern void pg_sha512_update(pg_sha512_ctx *ctx, const uint8 *input0,
- size_t len);
+ size_t len);
extern void pg_sha512_final(pg_sha512_ctx *ctx, uint8 *dest);
#endif /* _PG_SHA2_H_ */
diff --git a/src/include/utils/ssl_utils.h b/src/include/utils/ssl_utils.h
index e6aabcac7..39effa1f9 100644
--- a/src/include/utils/ssl_utils.h
+++ b/src/include/utils/ssl_utils.h
@@ -26,9 +26,9 @@
#define SSL_UTILS_H
extern void calculate_hmac_sha256(const char *data, int len, char *buf);
-extern int aes_decrypt_with_password(unsigned char *ciphertext, int ciphertext_len,
- const char *password, unsigned char *plaintext);
-extern int aes_encrypt_with_password(unsigned char *plaintext, int plaintext_len,
- const char *password, unsigned char *ciphertext);
+extern int aes_decrypt_with_password(unsigned char *ciphertext, int ciphertext_len,
+ const char *password, unsigned char *plaintext);
+extern int aes_encrypt_with_password(unsigned char *plaintext, int plaintext_len,
+ const char *password, unsigned char *ciphertext);
#endif
diff --git a/src/include/utils/statistics.h b/src/include/utils/statistics.h
index 8691b9b25..6e12a79eb 100644
--- a/src/include/utils/statistics.h
+++ b/src/include/utils/statistics.h
@@ -21,19 +21,19 @@
#ifndef statistics_h
#define statistics_h
-extern size_t stat_shared_memory_size(void);
-extern void stat_set_stat_area(void *address);
-extern void stat_init_stat_area(void);
-extern void stat_count_up(int backend_node_id, Node *parsetree);
-extern void error_stat_count_up(int backend_node_id, char *str);
-extern uint64 stat_get_select_count(int backend_node_id);
-extern uint64 stat_get_insert_count(int backend_node_id);
-extern uint64 stat_get_update_count(int backend_node_id);
-extern uint64 stat_get_delete_count(int backend_node_id);
-extern uint64 stat_get_ddl_count(int backend_node_id);
-extern uint64 stat_get_other_count(int backend_node_id);
-extern uint64 stat_get_panic_count(int backend_node_id);
-extern uint64 stat_get_fatal_count(int backend_node_id);
-extern uint64 stat_get_error_count(int backend_node_id);
+extern size_t stat_shared_memory_size(void);
+extern void stat_set_stat_area(void *address);
+extern void stat_init_stat_area(void);
+extern void stat_count_up(int backend_node_id, Node *parsetree);
+extern void error_stat_count_up(int backend_node_id, char *str);
+extern uint64 stat_get_select_count(int backend_node_id);
+extern uint64 stat_get_insert_count(int backend_node_id);
+extern uint64 stat_get_update_count(int backend_node_id);
+extern uint64 stat_get_delete_count(int backend_node_id);
+extern uint64 stat_get_ddl_count(int backend_node_id);
+extern uint64 stat_get_other_count(int backend_node_id);
+extern uint64 stat_get_panic_count(int backend_node_id);
+extern uint64 stat_get_fatal_count(int backend_node_id);
+extern uint64 stat_get_error_count(int backend_node_id);
-#endif /* statistics_h */
+#endif /* statistics_h */
diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h
index 88123b672..d4be6e31d 100644
--- a/src/include/utils/timestamp.h
+++ b/src/include/utils/timestamp.h
@@ -63,7 +63,7 @@ typedef struct
* years */
int32 day; /* days, after time for alignment */
int32 month; /* months and years, after time for alignment */
-} Interval;
+} Interval;
/* Limits on the "precision" option (typmod) for these data types */
diff --git a/src/include/version.h b/src/include/version.h
index 1c8796c3b..0af58a638 100644
--- a/src/include/version.h
+++ b/src/include/version.h
@@ -1,2 +1 @@
#define PGPOOLVERSION "tasukiboshi"
-
diff --git a/src/include/watchdog/watchdog.h b/src/include/watchdog/watchdog.h
index 7a1a2e9f3..8803283f5 100644
--- a/src/include/watchdog/watchdog.h
+++ b/src/include/watchdog/watchdog.h
@@ -83,7 +83,7 @@ typedef enum
WD_SHUTDOWN,
WD_ADD_MESSAGE_SENT,
WD_NETWORK_ISOLATION
-} WD_STATES;
+} WD_STATES;
typedef enum
{
@@ -93,7 +93,7 @@ typedef enum
WD_SOCK_CONNECTED,
WD_SOCK_ERROR,
WD_SOCK_CLOSED
-} WD_SOCK_STATE;
+} WD_SOCK_STATE;
typedef enum
{
@@ -120,13 +120,14 @@ typedef enum
WD_EVENT_WD_STATE_REQUIRE_RELOAD,
WD_EVENT_I_AM_APPEARING_LOST,
WD_EVENT_I_AM_APPEARING_FOUND
-} WD_EVENTS;
+} WD_EVENTS;
/*
* If you add a new lost reason. Remember to add entry in
* wd_node_lost_reasons (watchdog.c)
*/
-typedef enum {
+typedef enum
+{
NODE_LOST_UNKNOWN_REASON = 0,
NODE_LOST_BY_LIFECHECK,
NODE_LOST_BY_SEND_FAILURE,
@@ -141,12 +142,13 @@ typedef enum {
* wd_cluster_membership_status (watchdog.c)
*/
-typedef enum {
+typedef enum
+{
WD_NODE_MEMBERSHIP_ACTIVE,
WD_NODE_REVOKED_SHUTDOWN,
WD_NODE_REVOKED_NO_SHOW,
WD_NODE_REVOKED_LOST
-}WD_NODE_MEMBERSHIP_STATUS;
+} WD_NODE_MEMBERSHIP_STATUS;
typedef struct SocketConnection
{
@@ -154,7 +156,7 @@ typedef struct SocketConnection
struct timeval tv; /* connect time of socket */
char addr[48]; /* ip address of socket connection */
WD_SOCK_STATE sock_state; /* current state of socket */
-} SocketConnection;
+} SocketConnection;
typedef struct WatchdogNode
{
@@ -166,28 +168,24 @@ typedef struct WatchdogNode
* from the node */
struct timeval last_sent_time; /* timestamp when last packet was sent on
* the node */
- struct timeval lost_time; /* timestamp when the remote node was lost on coordinator
- */
- WD_NODE_MEMBERSHIP_STATUS membership_status; /* status of node membership
- *in watchdog cluster
- Only valid for remote nodes */
- bool has_lost_us; /*
- * True when this remote node thinks
- * we are lost
- */
- int sending_failures_count; /* number of times we have failed
- * to send message to the node.
- * Gets reset after successful sent
- */
- int missed_beacon_count; /* number of times the node has
- * failed to reply for beacon.
- * message
- */
+ struct timeval lost_time; /* timestamp when the remote node was lost on
+ * coordinator */
+ WD_NODE_MEMBERSHIP_STATUS membership_status; /* status of node
+ * membership in watchdog
+ * cluster Only valid for
+ * remote nodes */
+ bool has_lost_us; /* True when this remote node thinks we are
+ * lost */
+ int sending_failures_count; /* number of times we have failed to
+ * send message to the node. Gets
+ * reset after successful sent */
+ int missed_beacon_count; /* number of times the node has failed
+ * to reply for beacon. message */
WD_NODE_LOST_REASONS node_lost_reason;
- char pgp_version[MAX_VERSION_STR_LEN]; /* Pgpool-II version */
- int wd_data_major_version; /* watchdog messaging version major*/
- int wd_data_minor_version; /* watchdog messaging version minor*/
+ char pgp_version[MAX_VERSION_STR_LEN]; /* Pgpool-II version */
+ int wd_data_major_version; /* watchdog messaging version major */
+ int wd_data_minor_version; /* watchdog messaging version minor */
char nodeName[WD_MAX_NODE_NAMELEN]; /* name of this node */
char hostname[WD_MAX_HOST_NAMELEN]; /* host name */
@@ -195,7 +193,8 @@ typedef struct WatchdogNode
int pgpool_port; /* pgpool port */
int wd_priority; /* watchdog priority */
char delegate_ip[WD_MAX_HOST_NAMELEN]; /* delegate IP */
- int pgpool_node_id; /* pgpool node id specified in pgpool_node_id file */
+ int pgpool_node_id; /* pgpool node id specified in pgpool_node_id
+ * file */
int standby_nodes_count; /* number of standby nodes joined the
* cluster only applicable when this
* WatchdogNode is the
@@ -207,7 +206,7 @@ typedef struct WatchdogNode
* initiated by remote */
SocketConnection client_socket; /* socket connections for this node
* initiated by local */
-} WatchdogNode;
+} WatchdogNode;
/*
* Argument for WD Exec cluster command
@@ -217,9 +216,9 @@ typedef struct WatchdogNode
typedef struct WDExecCommandArg
{
- char arg_name[WD_MAX_ARG_NAME_LEN];
- char arg_value[WD_MAX_ARG_VALUE_LEN];
-} WDExecCommandArg;
+ char arg_name[WD_MAX_ARG_NAME_LEN];
+ char arg_value[WD_MAX_ARG_VALUE_LEN];
+} WDExecCommandArg;
extern pid_t initialize_watchdog(void);
diff --git a/src/include/watchdog/wd_commands.h b/src/include/watchdog/wd_commands.h
index 34c5b9ac6..61326137d 100644
--- a/src/include/watchdog/wd_commands.h
+++ b/src/include/watchdog/wd_commands.h
@@ -42,7 +42,7 @@ typedef struct WDNodeInfo
int wd_priority; /* node priority */
char delegate_ip[WD_MAX_HOST_NAMELEN]; /* delegate IP */
int id;
-} WDNodeInfo;
+} WDNodeInfo;
typedef struct WDGenericData
{
@@ -54,15 +54,15 @@ typedef struct WDGenericData
bool boolVal;
long longVal;
} data;
-} WDGenericData;
+} WDGenericData;
-extern WDGenericData * get_wd_runtime_variable_value(char *wd_authkey, char *varName);
+extern WDGenericData *get_wd_runtime_variable_value(char *wd_authkey, char *varName);
extern WD_STATES get_watchdog_local_node_state(char *wd_authkey);
extern int get_watchdog_quorum_state(char *wd_authkey);
extern char *wd_get_watchdog_nodes_json(char *wd_authkey, int nodeID);
extern void set_wd_command_timeout(int sec);
-extern char* get_request_json(char *key, char *value, char *authKey);
-extern WDNodeInfo *parse_watchdog_node_info_from_wd_node_json(json_value * source);
+extern char *get_request_json(char *key, char *value, char *authKey);
+extern WDNodeInfo *parse_watchdog_node_info_from_wd_node_json(json_value *source);
#endif /* WD_COMMANDS_H */
diff --git a/src/include/watchdog/wd_internal_commands.h b/src/include/watchdog/wd_internal_commands.h
index 414446372..9bde69115 100644
--- a/src/include/watchdog/wd_internal_commands.h
+++ b/src/include/watchdog/wd_internal_commands.h
@@ -42,7 +42,7 @@ typedef enum WD_LOCK_STANDBY_TYPE
WD_INVALID_LOCK,
/* currently we have only one lock */
WD_FOLLOW_PRIMARY_LOCK
-}WD_LOCK_STANDBY_TYPE;
+} WD_LOCK_STANDBY_TYPE;
extern WdCommandResult wd_start_recovery(void);
@@ -51,9 +51,9 @@ extern WDFailoverCMDResults wd_send_failback_request(int node_id, unsigned char
extern WDFailoverCMDResults wd_degenerate_backend_set(int *node_id_set, int count, unsigned char flags);
extern WDFailoverCMDResults wd_promote_backend(int node_id, unsigned char flags);
-extern WdCommandResult wd_execute_cluster_command(char* clusterCommand,List *argsList);
+extern WdCommandResult wd_execute_cluster_command(char *clusterCommand, List *argsList);
-extern WDPGBackendStatus * get_pg_backend_status_from_leader_wd_node(void);
+extern WDPGBackendStatus *get_pg_backend_status_from_leader_wd_node(void);
extern WD_STATES wd_internal_get_watchdog_local_node_state(void);
extern int wd_internal_get_watchdog_quorum_state(void);
diff --git a/src/include/watchdog/wd_ipc_conn.h b/src/include/watchdog/wd_ipc_conn.h
index e60986774..abc50ddee 100644
--- a/src/include/watchdog/wd_ipc_conn.h
+++ b/src/include/watchdog/wd_ipc_conn.h
@@ -35,7 +35,7 @@ typedef enum WdCommandResult
COMMAND_OK,
COMMAND_FAILED,
COMMAND_TIMEOUT
-} WdCommandResult;
+} WdCommandResult;
typedef struct WDIPCCmdResult
@@ -43,7 +43,7 @@ typedef struct WDIPCCmdResult
char type;
int length;
char *data;
-} WDIPCCmdResult;
+} WDIPCCmdResult;
extern void wd_ipc_conn_initialize(void);
@@ -51,8 +51,8 @@ extern void wd_set_ipc_address(char *socket_dir, int port);
extern size_t estimate_ipc_socket_addr_len(void);
extern char *get_watchdog_ipc_address(void);
-extern WDIPCCmdResult * issue_command_to_watchdog(char type, int timeout_sec, char *data, int data_len, bool blocking);
+extern WDIPCCmdResult *issue_command_to_watchdog(char type, int timeout_sec, char *data, int data_len, bool blocking);
-extern void FreeCmdResult(WDIPCCmdResult * res);
+extern void FreeCmdResult(WDIPCCmdResult *res);
#endif /* WD_IPC_CONN_H */
diff --git a/src/include/watchdog/wd_ipc_defines.h b/src/include/watchdog/wd_ipc_defines.h
index 654f48715..7546bfa7e 100644
--- a/src/include/watchdog/wd_ipc_defines.h
+++ b/src/include/watchdog/wd_ipc_defines.h
@@ -42,7 +42,7 @@ typedef enum WDFailoverCMDResults
FAILOVER_RES_BUILDING_CONSENSUS,
FAILOVER_RES_CONSENSUS_MAY_FAIL,
FAILOVER_RES_TIMEOUT
-} WDFailoverCMDResults;
+} WDFailoverCMDResults;
typedef enum WDValueDataType
{
@@ -50,7 +50,7 @@ typedef enum WDValueDataType
VALUE_DATA_TYPE_STRING,
VALUE_DATA_TYPE_BOOL,
VALUE_DATA_TYPE_LONG
-} WDValueDataType;
+} WDValueDataType;
/* IPC MESSAGES TYPES */
#define WD_REGISTER_FOR_NOTIFICATION '0'
diff --git a/src/include/watchdog/wd_json_data.h b/src/include/watchdog/wd_json_data.h
index 7b53999ee..1016dcae8 100644
--- a/src/include/watchdog/wd_json_data.h
+++ b/src/include/watchdog/wd_json_data.h
@@ -40,30 +40,30 @@ typedef struct WDPGBackendStatus
BACKEND_STATUS backend_status[MAX_NUM_BACKENDS];
char nodeName[WD_MAX_HOST_NAMELEN]; /* name of the watchdog node
* that sent the data */
-} WDPGBackendStatus;
+} WDPGBackendStatus;
-extern WatchdogNode * get_watchdog_node_from_json(char *json_data, int data_len, char **authkey);
-extern char *get_watchdog_node_info_json(WatchdogNode * wdNode, char *authkey);
-extern POOL_CONFIG * get_pool_config_from_json(char *json_data, int data_len);
+extern WatchdogNode *get_watchdog_node_from_json(char *json_data, int data_len, char **authkey);
+extern char *get_watchdog_node_info_json(WatchdogNode *wdNode, char *authkey);
+extern POOL_CONFIG *get_pool_config_from_json(char *json_data, int data_len);
extern char *get_pool_config_json(void);
extern char *get_lifecheck_node_status_change_json(int nodeID, int nodeStatus, char *message, char *authKey);
extern bool parse_node_status_json(char *json_data, int data_len, int *nodeID, int *nodeStatus, char **message);
extern bool parse_beacon_message_json(char *json_data, int data_len, int *state,
- long *seconds_since_node_startup,
- long *seconds_since_current_state,
- int *quorumStatus,
- int *standbyNodesCount,
- bool *escalated);
-extern char *get_beacon_message_json(WatchdogNode * wdNode);
+ long *seconds_since_node_startup,
+ long *seconds_since_current_state,
+ int *quorumStatus,
+ int *standbyNodesCount,
+ bool *escalated);
+extern char *get_beacon_message_json(WatchdogNode *wdNode);
extern char *get_wd_node_function_json(char *func_name, int *node_id_set, int count, unsigned char flags, unsigned int sharedKey, char *authKey);
extern bool parse_wd_node_function_json(char *json_data, int data_len, char **func_name, int **node_id_set, int *count, unsigned char *flags);
extern char *get_wd_simple_message_json(char *message);
-extern WDPGBackendStatus * get_pg_backend_node_status_from_json(char *json_data, int data_len);
-extern char *get_backend_node_status_json(WatchdogNode * wdNode);
+extern WDPGBackendStatus *get_pg_backend_node_status_from_json(char *json_data, int data_len);
+extern char *get_backend_node_status_json(WatchdogNode *wdNode);
extern char *get_simple_request_json(char *key, char *value, unsigned int sharedKey, char *authKey);
@@ -71,11 +71,10 @@ extern bool parse_data_request_json(char *json_data, int data_len, char **reques
extern char *get_data_request_json(char *request_type, unsigned int sharedKey, char *authKey);
extern bool
-parse_wd_exec_cluster_command_json(char *json_data, int data_len,
- char **clusterCommand, List **args_list);
+ parse_wd_exec_cluster_command_json(char *json_data, int data_len,
+ char **clusterCommand, List **args_list);
-extern char *
-get_wd_exec_cluster_command_json(char *clusterCommand,List *args_list,
- unsigned int sharedKey, char *authKey);
+extern char *get_wd_exec_cluster_command_json(char *clusterCommand, List *args_list,
+ unsigned int sharedKey, char *authKey);
#endif
diff --git a/src/include/watchdog/wd_lifecheck.h b/src/include/watchdog/wd_lifecheck.h
index 8594027bf..9460dc346 100644
--- a/src/include/watchdog/wd_lifecheck.h
+++ b/src/include/watchdog/wd_lifecheck.h
@@ -34,12 +34,12 @@ typedef enum NodeState
NODE_EMPTY,
NODE_DEAD,
NODE_ALIVE
-} NodeStates;
+} NodeStates;
typedef struct LifeCheckNode
{
NodeStates nodeState;
- int ID;
+ int ID;
WD_STATES wdState;
char stateName[128];
char hostName[WD_MAX_HOST_NAMELEN];
@@ -49,15 +49,15 @@ typedef struct LifeCheckNode
int retry_lives;
struct timeval hb_send_time; /* send time */
struct timeval hb_last_recv_time; /* recv time */
-} LifeCheckNode;
+} LifeCheckNode;
typedef struct lifeCheckCluster
{
int nodeCount;
struct LifeCheckNode *lifeCheckNodes;
-} LifeCheckCluster;
+} LifeCheckCluster;
-extern LifeCheckCluster * gslifeCheckCluster; /* lives in shared memory */
+extern LifeCheckCluster *gslifeCheckCluster; /* lives in shared memory */
/* wd_lifecheck.c */
@@ -65,8 +65,8 @@ extern pid_t initialize_watchdog_lifecheck(void);
/* wd_heartbeat.c */
-extern pid_t wd_hb_receiver(int fork_wait_time, WdHbIf * hb_if);
-extern pid_t wd_hb_sender(int fork_wait_time, WdHbIf * hb_if);
+extern pid_t wd_hb_receiver(int fork_wait_time, WdHbIf *hb_if);
+extern pid_t wd_hb_sender(int fork_wait_time, WdHbIf *hb_if);
#endif
diff --git a/src/include/watchdog/wd_utils.h b/src/include/watchdog/wd_utils.h
index 757e4a5a8..0c8ad17ca 100644
--- a/src/include/watchdog/wd_utils.h
+++ b/src/include/watchdog/wd_utils.h
@@ -39,10 +39,10 @@ extern void wd_check_network_command_configurations(void);
extern int watchdog_thread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
extern char *string_replace(const char *string, const char *pattern, const char *replacement);
extern void wd_calc_hash(const char *str, int len, char *buf);
-extern int aes_decrypt_with_password(unsigned char *ciphertext, int ciphertext_len,
- const char *password, unsigned char *plaintext);
-extern int aes_encrypt_with_password(unsigned char *plaintext, int plaintext_len,
- const char *password, unsigned char *ciphertext);
+extern int aes_decrypt_with_password(unsigned char *ciphertext, int ciphertext_len,
+ const char *password, unsigned char *plaintext);
+extern int aes_encrypt_with_password(unsigned char *plaintext, int plaintext_len,
+ const char *password, unsigned char *ciphertext);
/* wd_escalation.c */
extern pid_t fork_escalation_process(void);
diff --git a/src/libs/pcp/pcp.c b/src/libs/pcp/pcp.c
index 7e30f1375..3992eeeb7 100644
--- a/src/libs/pcp/pcp.c
+++ b/src/libs/pcp/pcp.c
@@ -66,7 +66,7 @@ static int setNextResultBinaryData(PCPResultInfo * res, void *value, int datalen
static void setResultIntData(PCPResultInfo * res, unsigned int slotno, int value);
static void process_node_info_response(PCPConnInfo * pcpConn, char *buf, int len);
-static void process_health_check_stats_response(PCPConnInfo * pcpConn, char *buf, int len);
+static void process_health_check_stats_response(PCPConnInfo * pcpConn, char *buf, int len);
static void process_command_complete_response(PCPConnInfo * pcpConn, char *buf, int len);
static void process_watchdog_info_response(PCPConnInfo * pcpConn, char *buf, int len);
static void process_process_info_response(PCPConnInfo * pcpConn, char *buf, int len);
@@ -160,8 +160,8 @@ pcp_connect(char *hostname, int port, char *username, char *password, FILE *Pfde
struct addrinfo hints;
/*
- * getaddrinfo() requires a string because it also accepts service names,
- * such as "http".
+ * getaddrinfo() requires a string because it also accepts service
+ * names, such as "http".
*/
if (asprintf(&portstr, "%d", port) == -1)
{
@@ -217,7 +217,7 @@ pcp_connect(char *hostname, int port, char *username, char *password, FILE *Pfde
pcpConn->connState = PCP_CONNECTION_BAD;
return pcpConn;
}
- break; /* successfully connected */
+ break; /* successfully connected */
}
/* no address available */
@@ -261,7 +261,8 @@ pcp_connect(char *hostname, int port, char *username, char *password, FILE *Pfde
password = password_from_file;
/*
- * If reading password from .pcppass file fails, try to read it from prompt.
+ * If reading password from .pcppass file fails, try to read it from
+ * prompt.
*/
if (password == NULL || *password == '\0')
password = simple_prompt("Password: ", 100, false);
@@ -674,7 +675,7 @@ pcp_terminate_pgpool(PCPConnInfo * pcpConn, char mode, char command_scope)
pcp_internal_error(pcpConn, "invalid PCP connection");
return NULL;
}
- if (command_scope == 'l') /*local only*/
+ if (command_scope == 'l') /* local only */
pcp_write(pcpConn->pcpConn, "T", 1);
else
pcp_write(pcpConn->pcpConn, "t", 1);
@@ -748,7 +749,7 @@ pcp_node_count(PCPConnInfo * pcpConn)
static void
process_node_info_response(PCPConnInfo * pcpConn, char *buf, int len)
{
- char *index;
+ char *index;
BackendInfo *backend_info = NULL;
if (strcmp(buf, "ArraySize") == 0)
@@ -952,17 +953,18 @@ pcp_health_check_stats(PCPConnInfo * pcpConn, int nid)
}
PCPResultInfo *
-pcp_reload_config(PCPConnInfo * pcpConn,char command_scope)
+pcp_reload_config(PCPConnInfo * pcpConn, char command_scope)
{
- int wsize;
+ int wsize;
+
/*
* pcp packet format for pcp_reload_config
* z[size][command_scope]
*/
if (PCPConnectionStatus(pcpConn) != PCP_CONNECTION_OK)
{
- pcp_internal_error(pcpConn, "invalid PCP connection");
- return NULL;
+ pcp_internal_error(pcpConn, "invalid PCP connection");
+ return NULL;
}
pcp_write(pcpConn->pcpConn, "Z", 1);
@@ -970,25 +972,26 @@ pcp_reload_config(PCPConnInfo * pcpConn,char command_scope)
pcp_write(pcpConn->pcpConn, &wsize, sizeof(int));
pcp_write(pcpConn->pcpConn, &command_scope, sizeof(char));
if (PCPFlush(pcpConn) < 0)
- return NULL;
+ return NULL;
if (pcpConn->Pfdebug)
- fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"Z\", len=%d\n", ntohl(wsize));
+ fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"Z\", len=%d\n", ntohl(wsize));
return process_pcp_response(pcpConn, 'Z');
}
PCPResultInfo *
-pcp_log_rotate(PCPConnInfo * pcpConn,char command_scope)
+pcp_log_rotate(PCPConnInfo * pcpConn, char command_scope)
{
- int wsize;
+ int wsize;
+
/*
* pcp packet format for pcp_log_rotate
* v[size][command_scope]
*/
if (PCPConnectionStatus(pcpConn) != PCP_CONNECTION_OK)
{
- pcp_internal_error(pcpConn, "invalid PCP connection");
- return NULL;
+ pcp_internal_error(pcpConn, "invalid PCP connection");
+ return NULL;
}
pcp_write(pcpConn->pcpConn, "V", 1);
@@ -996,9 +999,9 @@ pcp_log_rotate(PCPConnInfo * pcpConn,char command_scope)
pcp_write(pcpConn->pcpConn, &wsize, sizeof(int));
pcp_write(pcpConn->pcpConn, &command_scope, sizeof(char));
if (PCPFlush(pcpConn) < 0)
- return NULL;
+ return NULL;
if (pcpConn->Pfdebug)
- fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"Z\", len=%d\n", ntohl(wsize));
+ fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"Z\", len=%d\n", ntohl(wsize));
return process_pcp_response(pcpConn, 'V');
}
@@ -1006,21 +1009,21 @@ pcp_log_rotate(PCPConnInfo * pcpConn,char command_scope)
PCPResultInfo *
pcp_invalidate_query_cache(PCPConnInfo * pcpConn)
{
- int wsize;
+ int wsize;
if (PCPConnectionStatus(pcpConn) != PCP_CONNECTION_OK)
{
- pcp_internal_error(pcpConn, "invalid PCP connection");
- return NULL;
+ pcp_internal_error(pcpConn, "invalid PCP connection");
+ return NULL;
}
pcp_write(pcpConn->pcpConn, "G", 1);
wsize = htonl(sizeof(int));
pcp_write(pcpConn->pcpConn, &wsize, sizeof(int));
if (PCPFlush(pcpConn) < 0)
- return NULL;
+ return NULL;
if (pcpConn->Pfdebug)
- fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"G\", len=%d\n", ntohl(wsize));
+ fprintf(pcpConn->Pfdebug, "DEBUG: send: tos=\"G\", len=%d\n", ntohl(wsize));
return process_pcp_response(pcpConn, 'G');
}
@@ -1032,16 +1035,16 @@ pcp_invalidate_query_cache(PCPConnInfo * pcpConn)
* len: length of the data
*/
static void
-process_health_check_stats_response
-(PCPConnInfo * pcpConn, char *buf, int len)
+ process_health_check_stats_response
+ (PCPConnInfo * pcpConn, char *buf, int len)
{
POOL_HEALTH_CHECK_STATS *stats;
- int *offsets;
- int n;
- int i;
- char *p;
- int maxstr;
- char c[] = "CommandComplete";
+ int *offsets;
+ int n;
+ int i;
+ char *p;
+ int maxstr;
+ char c[] = "CommandComplete";
if (strcmp(buf, c) != 0)
{
@@ -1054,19 +1057,19 @@ process_health_check_stats_response
/* Allocate health stats memory */
stats = palloc0(sizeof(POOL_HEALTH_CHECK_STATS));
- p = (char *)stats;
+ p = (char *) stats;
/* Calculate total packet length */
offsets = pool_health_check_stats_offsets(&n);
for (i = 0; i < n; i++)
{
- if (i == n -1)
+ if (i == n - 1)
maxstr = sizeof(POOL_HEALTH_CHECK_STATS) - offsets[i];
else
maxstr = offsets[i + 1] - offsets[i];
- StrNCpy(p + offsets[i], buf, maxstr -1);
+ StrNCpy(p + offsets[i], buf, maxstr - 1);
buf += strlen(buf) + 1;
}
@@ -1173,11 +1176,12 @@ static void
process_process_info_response(PCPConnInfo * pcpConn, char *buf, int len)
{
char *index;
- int *offsets;
- int i, n;
+ int *offsets;
+ int i,
+ n;
int maxstr;
- char *p;
- POOL_REPORT_POOLS *pools = NULL;
+ char *p;
+ POOL_REPORT_POOLS *pools = NULL;
offsets = pool_report_pools_offsets(&n);
@@ -1202,17 +1206,17 @@ process_process_info_response(PCPConnInfo * pcpConn, char *buf, int len)
goto INVALID_RESPONSE;
pools = palloc0(sizeof(POOL_REPORT_POOLS));
- p = (char *)pools;
+ p = (char *) pools;
buf += strlen(buf) + 1;
for (i = 0; i < n; i++)
{
- if (i == n -1)
+ if (i == n - 1)
maxstr = sizeof(POOL_REPORT_POOLS) - offsets[i];
else
maxstr = offsets[i + 1] - offsets[i];
- StrNCpy(p + offsets[i], buf, maxstr -1);
+ StrNCpy(p + offsets[i], buf, maxstr - 1);
buf += strlen(buf) + 1;
}
@@ -1523,7 +1527,8 @@ _pcp_promote_node(PCPConnInfo * pcpConn, int nid, bool gracefully, bool switchov
int wsize;
char node_id[16];
char *sendchar;
- char *switchover_option; /* n: just change node status, s: switchover primary */
+ char *switchover_option; /* n: just change node status, s:
+ * switchover primary */
if (PCPConnectionStatus(pcpConn) != PCP_CONNECTION_OK)
{
@@ -1546,10 +1551,10 @@ _pcp_promote_node(PCPConnInfo * pcpConn, int nid, bool gracefully, bool switchov
pcp_write(pcpConn->pcpConn, sendchar, 1);
/* calculate send buffer size */
- wsize = sizeof(char); /* protocol. 'j' or 'J' */
+ wsize = sizeof(char); /* protocol. 'j' or 'J' */
wsize += strlen(node_id); /* node id + space */
- wsize += sizeof(char); /* promote option */
- wsize += sizeof(int); /* buffer length */
+ wsize += sizeof(char); /* promote option */
+ wsize += sizeof(int); /* buffer length */
wsize = htonl(wsize);
pcp_write(pcpConn->pcpConn, &wsize, sizeof(int));
diff --git a/src/main/health_check.c b/src/main/health_check.c
index 9111d3bf9..f32d54ce0 100644
--- a/src/main/health_check.c
+++ b/src/main/health_check.c
@@ -68,9 +68,10 @@
#include "auth/md5.h"
#include "auth/pool_hba.h"
-volatile POOL_HEALTH_CHECK_STATISTICS *health_check_stats; /* health check stats area in shared memory */
+volatile POOL_HEALTH_CHECK_STATISTICS *health_check_stats; /* health check stats
+ * area in shared memory */
-static POOL_CONNECTION_POOL_SLOT * slot;
+static POOL_CONNECTION_POOL_SLOT *slot;
static volatile sig_atomic_t reload_config_request = 0;
static volatile sig_atomic_t restart_request = 0;
volatile POOL_HEALTH_CHECK_STATISTICS *stats;
@@ -113,11 +114,12 @@ do_health_check_child(int *node_id)
sigjmp_buf local_sigjmp_buf;
MemoryContext HealthCheckMemoryContext;
char psbuffer[NI_MAXHOST];
- static struct timeval start_time;
- static struct timeval end_time;
- long diff_t;
+ static struct timeval start_time;
+ static struct timeval end_time;
+ long diff_t;
+
+ POOL_HEALTH_CHECK_STATISTICS mystat;
- POOL_HEALTH_CHECK_STATISTICS mystat;
stats = &health_check_stats[*node_id];
/* Set application name */
@@ -180,13 +182,14 @@ do_health_check_child(int *node_id)
{
MemoryContextSwitchTo(HealthCheckMemoryContext);
MemoryContextResetAndDeleteChildren(HealthCheckMemoryContext);
+
/*
- * Since HealthCheckMemoryContext is used for "slot", we need to clear it
- * so that new slot is allocated later on.
+ * Since HealthCheckMemoryContext is used for "slot", we need to clear
+		 * it so that a new slot is allocated later on.
*/
slot = NULL;
- bool skipped = false;
+ bool skipped = false;
CHECK_REQUEST;
@@ -250,8 +253,9 @@ do_health_check_child(int *node_id)
stats->success_count++;
stats->last_successful_health_check = time(NULL);
- /* The node has become reachable again. Reset
- * the quarantine state
+ /*
+ * The node has become reachable again. Reset the quarantine
+ * state
*/
send_failback_request(*node_id, false, REQ_DETAIL_UPDATE | REQ_DETAIL_WATCHDOG);
}
@@ -273,9 +277,9 @@ do_health_check_child(int *node_id)
discard_persistent_connection(*node_id);
/*
- Update health check duration only if health check was not skipped
- since the duration could be very small (probably 0) if health
- check is skipped.
+ * Update health check duration only if health check was not
+ * skipped since the duration could be very small (probably 0) if
+ * health check is skipped.
*/
if (!skipped)
@@ -296,7 +300,7 @@ do_health_check_child(int *node_id)
stats->min_health_check_duration = diff_t;
}
- memcpy(&mystat, (void *)stats, sizeof(mystat));
+ memcpy(&mystat, (void *) stats, sizeof(mystat));
sleep(pool_config->health_check_params[*node_id].health_check_period);
}
@@ -313,18 +317,19 @@ establish_persistent_connection(int node)
{
BackendInfo *bkinfo;
int retry_cnt;
- static time_t auto_failback_interval = 0; /* resume time of auto_failback */
+ static time_t auto_failback_interval = 0; /* resume time of
+ * auto_failback */
bool check_failback = false;
time_t now;
- char *dbname;
+ char *dbname;
bkinfo = pool_get_node_info(node);
/*
- * If the node is already in down status or unused, do nothing.
- * except when the node state is down because of quarantine operation
- * since we want to detect when the node comes back to life again to
- * remove it from the quarantine state
+	 * If the node is already in down status or unused, do nothing, except
+	 * when the node state is down because of quarantine operation since we
+	 * want to detect when the node comes back to life again to remove it from
+	 * the quarantine state.
*/
if (bkinfo->backend_status == CON_UNUSED ||
(bkinfo->backend_status == CON_DOWN && bkinfo->quarantine == false))
@@ -335,9 +340,9 @@ establish_persistent_connection(int node)
if (pool_config->auto_failback && auto_failback_interval < now &&
STREAM && !strcmp(bkinfo->replication_state, "streaming") && !Req_info->switching)
{
- ereport(DEBUG1,
- (errmsg("health check DB node: %d (status:%d) for auto_failback", node, bkinfo->backend_status)));
- check_failback = true;
+ ereport(DEBUG1,
+ (errmsg("health check DB node: %d (status:%d) for auto_failback", node, bkinfo->backend_status)));
+ check_failback = true;
}
else
return false;
@@ -432,7 +437,7 @@ establish_persistent_connection(int node)
if (retry_cnt != pool_config->health_check_params[node].health_check_max_retries)
{
- int ret_cnt;
+ int ret_cnt;
retry_cnt++;
ret_cnt = pool_config->health_check_params[node].health_check_max_retries - retry_cnt;
@@ -446,13 +451,13 @@ establish_persistent_connection(int node)
if (check_failback && !Req_info->switching && slot)
{
- ereport(LOG,
+ ereport(LOG,
(errmsg("request auto failback, node id:%d", node)));
- /* get current time to use auto_failback_interval */
- now = time(NULL);
- auto_failback_interval = now + pool_config->auto_failback_interval;
+ /* get current time to use auto_failback_interval */
+ now = time(NULL);
+ auto_failback_interval = now + pool_config->auto_failback_interval;
- send_failback_request(node, true, REQ_DETAIL_CONFIRMED);
+ send_failback_request(node, true, REQ_DETAIL_CONFIRMED);
}
}
@@ -553,9 +558,9 @@ static RETSIGTYPE health_check_timer_handler(int sig)
size_t
health_check_stats_shared_memory_size(void)
{
- size_t size;
+ size_t size;
- size = MAXALIGN(sizeof(POOL_HEALTH_CHECK_STATISTICS) * MAX_NUM_BACKENDS);
+ size = MAXALIGN(sizeof(POOL_HEALTH_CHECK_STATISTICS) * MAX_NUM_BACKENDS);
elog(DEBUG1, "health_check_stats_shared_memory_size: requested size: %lu", size);
return size;
}
@@ -566,12 +571,12 @@ health_check_stats_shared_memory_size(void)
void
health_check_stats_init(POOL_HEALTH_CHECK_STATISTICS *addr)
{
- int i;
+ int i;
health_check_stats = addr;
memset((void *) health_check_stats, 0, health_check_stats_shared_memory_size());
- for (i = 0 ;i < MAX_NUM_BACKENDS; i++)
+ for (i = 0; i < MAX_NUM_BACKENDS; i++)
{
health_check_stats[i].min_health_check_duration = INT_MAX;
}
diff --git a/src/main/main.c b/src/main/main.c
index 5d586829f..52a2b81ea 100644
--- a/src/main/main.c
+++ b/src/main/main.c
@@ -355,7 +355,7 @@ main(int argc, char **argv)
}
else
strlcpy(pool_passwd, pool_config->pool_passwd,
- sizeof(pool_passwd));
+ sizeof(pool_passwd));
pool_init_pool_passwd(pool_passwd, POOL_PASSWD_R);
}
@@ -534,7 +534,7 @@ stop_me(void)
for (;;)
{
- int cnt = 5; /* sending signal retry interval */
+ int cnt = 5; /* sending signal retry interval */
if (kill(pid, stop_sig) == -1)
{
@@ -550,8 +550,11 @@ stop_me(void)
fprintf(stderr, ".");
sleep(1);
cnt--;
- /* If pgpool did not stop within 5 seconds, break the loop and try
- * to send the signal again */
+
+ /*
+ * If pgpool did not stop within 5 seconds, break the loop and try
+ * to send the signal again
+ */
if (cnt <= 0)
break;
}
diff --git a/src/main/pgpool_logger.c b/src/main/pgpool_logger.c
index 206086732..9d127183a 100644
--- a/src/main/pgpool_logger.c
+++ b/src/main/pgpool_logger.c
@@ -51,6 +51,7 @@
#define DEVNULL "/dev/null"
typedef int64 pg_time_t;
+
/*
* We read() into a temp buffer twice as big as a chunk, so that any fragment
* left after processing can be moved down to the front and we'll still have
@@ -68,7 +69,7 @@ typedef int64 pg_time_t;
*/
-bool redirection_done = false;
+bool redirection_done = false;
/*
* Private state
@@ -262,8 +263,8 @@ SysLoggerMain(int argc, char *argv[])
/*
* Check if the log directory or filename pattern changed in
- * pgpool.conf. If so, force rotation to make sure we're
- * writing the logfiles in the right place.
+ * pgpool.conf. If so, force rotation to make sure we're writing
+ * the logfiles in the right place.
*/
if (strcmp(pool_config->log_directory, currentLogDir) != 0)
{
@@ -360,9 +361,9 @@ SysLoggerMain(int argc, char *argv[])
* next_rotation_time.
*
* Also note that we need to beware of overflow in calculation of the
- * timeout: with large settings of pool_config->log_rotation_age, next_rotation_time
- * could be more than INT_MAX msec in the future. In that case we'll
- * wait no more than INT_MAX msec, and try again.
+ * timeout: with large settings of pool_config->log_rotation_age,
+ * next_rotation_time could be more than INT_MAX msec in the future.
+ * In that case we'll wait no more than INT_MAX msec, and try again.
*/
timeout.tv_sec = 0;
/* Reset usec everytime before calling sellect */
@@ -384,10 +385,10 @@ SysLoggerMain(int argc, char *argv[])
/*
* Sleep until there's something to do
*/
-
+
FD_ZERO(&rfds);
FD_SET(syslogPipe[0], &rfds);
- rc = select(syslogPipe[0] + 1, &rfds, NULL, NULL, timeout.tv_sec?&timeout:NULL);
+ rc = select(syslogPipe[0] + 1, &rfds, NULL, NULL, timeout.tv_sec ? &timeout : NULL);
if (rc == 1)
{
int bytesRead;
@@ -487,8 +488,8 @@ SysLogger_Start(void)
/*
* The initial logfile is created right in the postmaster, to verify that
- * the pool_config->log_directory is writable. We save the reference time so that the
- * syslogger child process can recompute this file name.
+ * the pool_config->log_directory is writable. We save the reference time
+ * so that the syslogger child process can recompute this file name.
*
* It might look a bit strange to re-do this during a syslogger restart,
* but we must do so since the postmaster closed syslogFile after the
@@ -883,8 +884,8 @@ logfile_open(const char *filename, const char *mode, bool allow_errors)
mode_t oumask;
/*
- * Note we do not let pool_config->log_file_mode disable IWUSR, since we certainly want
- * to be able to write the files ourselves.
+ * Note we do not let pool_config->log_file_mode disable IWUSR, since we
+ * certainly want to be able to write the files ourselves.
*/
oumask = umask((mode_t) ((~(pool_config->log_file_mode | S_IWUSR)) & (S_IRWXU | S_IRWXG | S_IRWXO)));
fh = fopen(filename, mode);
@@ -940,9 +941,9 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
/*
* Decide whether to overwrite or append. We can overwrite if (a)
- * pool_config->log_truncate_on_rotation is set, (b) the rotation was triggered by
- * elapsed time and not something else, and (c) the computed file name is
- * different from what we were previously logging into.
+ * pool_config->log_truncate_on_rotation is set, (b) the rotation was
+ * triggered by elapsed time and not something else, and (c) the computed
+ * file name is different from what we were previously logging into.
*
* Note: last_file_name should never be NULL here, but if it is, append.
*/
@@ -960,8 +961,8 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
/*
* ENFILE/EMFILE are not too surprising on a busy system; just
* keep using the old file till we manage to get a new one.
- * Otherwise, assume something's wrong with pool_config->log_directory and stop
- * trying to create files.
+ * Otherwise, assume something's wrong with
+ * pool_config->log_directory and stop trying to create files.
*/
if (errno != ENFILE && errno != EMFILE)
{
@@ -1010,8 +1011,8 @@ logfile_rotate(bool time_based_rotation, int size_rotation_for)
/*
* ENFILE/EMFILE are not too surprising on a busy system; just
* keep using the old file till we manage to get a new one.
- * Otherwise, assume something's wrong with pool_config->log_directory and stop
- * trying to create files.
+ * Otherwise, assume something's wrong with
+ * pool_config->log_directory and stop trying to create files.
*/
if (errno != ENFILE && errno != EMFILE)
{
@@ -1079,7 +1080,7 @@ logfile_getname(pg_time_t timestamp, const char *suffix)
/* treat pool_config->log_filename as a strftime pattern */
strftime(filename + len, MAXPGPATH - len, pool_config->log_filename,
- localtime(&timestamp));
+ localtime(&timestamp));
if (suffix != NULL)
{
@@ -1099,7 +1100,7 @@ static void
set_next_rotation_time(void)
{
pg_time_t now;
- struct tm *tm;
+ struct tm *tm;
int rotinterval;
/* nothing to do if time-based rotation is disabled */
diff --git a/src/main/pgpool_main.c b/src/main/pgpool_main.c
index e7ffdd8bf..ef2e5dfc1 100644
--- a/src/main/pgpool_main.c
+++ b/src/main/pgpool_main.c
@@ -75,16 +75,16 @@ typedef enum
* required */
SIG_WATCHDOG_QUORUM_CHANGED, /* notify main about cluster quorum change
* of watchdog cluster */
- SIG_INFORM_QUARANTINE_NODES, /* notify main about send degenerate requests
- * for all quarantine nodes */
+ SIG_INFORM_QUARANTINE_NODES, /* notify main to send degenerate
+ * requests for all quarantine nodes */
MAX_INTERRUPTS /* Must be last! */
-} User1SignalReason;
+} User1SignalReason;
typedef struct User1SignalSlot
{
sig_atomic_t signalFlags[MAX_INTERRUPTS];
-} User1SignalSlot;
+} User1SignalSlot;
#ifdef NOT_USED
/*
@@ -128,11 +128,14 @@ typedef struct User1SignalSlot
typedef struct
{
bool all_backend_down; /* true if all backends are down */
- bool search_primary; /* true if we need to seach primary node */
- bool need_to_restart_children; /* true if we need to restart child process */
- bool need_to_restart_pcp; /* true if we need to restart pc process */
+ bool search_primary; /* true if we need to search for the primary node */
+ bool need_to_restart_children; /* true if we need to restart
+ * child process */
+ bool need_to_restart_pcp; /* true if we need to restart pcp
+ * process */
bool partial_restart; /* true if partial restart is needed */
- bool sync_required; /* true if watchdog synchronization is necessary */
+ bool sync_required; /* true if watchdog synchronization is
+ * necessary */
POOL_REQUEST_KIND reqkind;
int node_id_set[MAX_NUM_BACKENDS];
@@ -142,7 +145,7 @@ typedef struct
/*
* An array to hold information about down nodes. Each array member
* corresponds to a node id. If nodes[i] is 1, node i is down.
- */
+ */
int nodes[MAX_NUM_BACKENDS];
} FAILOVER_CONTEXT;
@@ -173,8 +176,8 @@ static RETSIGTYPE reload_config_handler(int sig);
static RETSIGTYPE wakeup_handler(int sig);
static void initialize_shared_mem_objects(bool clear_memcache_oidmaps);
-static int trigger_failover_command(int node, const char *command_line,
- int old_main_node, int new_main_node, int old_primary);
+static int trigger_failover_command(int node, const char *command_line,
+ int old_main_node, int new_main_node, int old_primary);
static int find_primary_node(void);
static int find_primary_node_repeatedly(void);
static void terminate_all_childrens(int sig);
@@ -182,18 +185,18 @@ static void system_will_go_down(int code, Datum arg);
static char *process_name_from_pid(pid_t pid);
static void sync_backend_from_watchdog(void);
static void update_backend_quarantine_status(void);
-static int get_server_version(POOL_CONNECTION_POOL_SLOT * *slots, int node_id);
+static int get_server_version(POOL_CONNECTION_POOL_SLOT **slots, int node_id);
static void get_info_from_conninfo(char *conninfo, char *host, int hostlen, char *port, int portlen);
/*
* Subroutines of failover()
*/
-static int handle_failback_request(FAILOVER_CONTEXT *failover_context, int node_id);
-static int handle_failover_request(FAILOVER_CONTEXT *failover_context, int node_id);
+static int handle_failback_request(FAILOVER_CONTEXT *failover_context, int node_id);
+static int handle_failover_request(FAILOVER_CONTEXT *failover_context, int node_id);
static void kill_failover_children(FAILOVER_CONTEXT *failover_context, int node_id);
static void exec_failover_command(FAILOVER_CONTEXT *failover_context, int new_main_node_id, int promote_node_id);
-static int determine_new_primary_node(FAILOVER_CONTEXT *failover_context, int node_id);
-static int exec_follow_primary_command(FAILOVER_CONTEXT *failover_context, int node_id, int new_primary_node_id);
+static int determine_new_primary_node(FAILOVER_CONTEXT *failover_context, int node_id);
+static int exec_follow_primary_command(FAILOVER_CONTEXT *failover_context, int node_id, int new_primary_node_id);
static void save_node_info(FAILOVER_CONTEXT *failover_context, int new_primary_node_id, int new_main_node_id);
static void exec_child_restart(FAILOVER_CONTEXT *failover_context, int node_id);
static void exec_notice_pcp_child(FAILOVER_CONTEXT *failover_context);
@@ -201,14 +204,14 @@ static void exec_notice_pcp_child(FAILOVER_CONTEXT *failover_context);
static void check_requests(void);
static void print_signal_member(sigset_t *sig);
static void service_child_processes(void);
-static int select_victim_processes(int *process_info_idxs, int count);
+static int select_victim_processes(int *process_info_idxs, int count);
static struct sockaddr_un *un_addrs; /* unix domain socket path */
static struct sockaddr_un *pcp_un_addrs; /* unix domain socket path for PCP */
ProcessInfo *process_info = NULL; /* Per child info table on shmem */
-volatile User1SignalSlot *user1SignalSlot = NULL; /* User 1 signal slot on
- * shmem */
-int current_child_process_count;
+volatile User1SignalSlot *user1SignalSlot = NULL; /* User 1 signal slot on
+ * shmem */
+int current_child_process_count;
/*
* To track health check process ids
@@ -227,18 +230,19 @@ BACKEND_STATUS private_backend_status[MAX_NUM_BACKENDS];
*/
ConnectionInfo *con_info;
-static int *fds = NULL; /* listening file descriptors (UNIX socket,
+static int *fds = NULL; /* listening file descriptors (UNIX socket,
* inet domain sockets) */
-static int *pcp_fds = NULL; /* listening file descriptors for pcp (UNIX socket,
- * inet domain sockets) */
+static int *pcp_fds = NULL; /* listening file descriptors for pcp (UNIX
+ * socket, inet domain sockets) */
extern char *pcp_conf_file; /* path for pcp.conf */
extern char *conf_file;
extern char *hba_file;
-static volatile sig_atomic_t exiting = 0; /* non 0 if I'm exiting */
-static volatile sig_atomic_t switching = 0; /* non 0 if I'm failing over or degenerating */
+static volatile sig_atomic_t exiting = 0; /* non 0 if I'm exiting */
+static volatile sig_atomic_t switching = 0; /* non 0 if I'm failing over or
+ * degenerating */
POOL_REQUEST_INFO *Req_info; /* request info area in shared memory */
volatile sig_atomic_t *InRecovery; /* non 0 if recovery is started */
@@ -269,7 +273,7 @@ int my_main_node_id; /* Main node id buffer */
* Dummy variable to suppress compiler warnings by discarding return values
* from write(2) in signal handlers
*/
-static int dummy_status;
+static int dummy_status;
/*
* Snapshot Isolation manage area
@@ -286,10 +290,10 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
int num_inet_fds = 0;
int num_unix_fds = 0;
int num_pcp_fds = 0;
- int *unix_fds;
- int *inet_fds;
- int *pcp_unix_fds;
- int *pcp_inet_fds;
+ int *unix_fds;
+ int *inet_fds;
+ int *pcp_unix_fds;
+ int *pcp_inet_fds;
int i;
char unix_domain_socket_path[UNIXSOCK_PATH_BUFLEN + 1024];
@@ -318,15 +322,15 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
if (strlen(unix_domain_socket_path) >= UNIXSOCK_PATH_BUFLEN)
{
ereport(WARNING,
- (errmsg("Unix-domain socket path \"%s\" is too long (maximum %d bytes)",
- unix_domain_socket_path,
- (int) (UNIXSOCK_PATH_BUFLEN - 1))));
+ (errmsg("Unix-domain socket path \"%s\" is too long (maximum %d bytes)",
+ unix_domain_socket_path,
+ (int) (UNIXSOCK_PATH_BUFLEN - 1))));
continue;
}
un_addrs = realloc(un_addrs, sizeof(struct sockaddr_un) * (num_unix_fds + 1));
if (un_addrs == NULL)
ereport(FATAL,
- (errmsg("failed to allocate memory in startup process")));
+ (errmsg("failed to allocate memory in startup process")));
snprintf(un_addrs[i].sun_path, sizeof(un_addrs[i].sun_path), "%s", unix_domain_socket_path);
num_unix_fds++;
@@ -335,7 +339,7 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
if (num_unix_fds == 0)
{
ereport(FATAL,
- (errmsg("could not create any Unix-domain sockets")));
+ (errmsg("could not create any Unix-domain sockets")));
}
/* set unix domain socket path for pgpool PCP communication */
@@ -343,21 +347,21 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
{
memset(unix_domain_socket_path, 0, sizeof(unix_domain_socket_path));
snprintf(unix_domain_socket_path, sizeof(unix_domain_socket_path), "%s/.s.PGSQL.%d",
- pool_config->pcp_socket_dir[i],
- pool_config->pcp_port);
+ pool_config->pcp_socket_dir[i],
+ pool_config->pcp_port);
if (strlen(unix_domain_socket_path) >= UNIXSOCK_PATH_BUFLEN)
{
ereport(WARNING,
- (errmsg("PCP Unix-domain socket path \"%s\" is too long (maximum %d bytes)",
- unix_domain_socket_path,
- (int) (UNIXSOCK_PATH_BUFLEN - 1))));
+ (errmsg("PCP Unix-domain socket path \"%s\" is too long (maximum %d bytes)",
+ unix_domain_socket_path,
+ (int) (UNIXSOCK_PATH_BUFLEN - 1))));
continue;
}
pcp_un_addrs = realloc(pcp_un_addrs, sizeof(struct sockaddr_un) * (num_pcp_fds + 1));
if (pcp_un_addrs == NULL)
ereport(FATAL,
- (errmsg("failed to allocate memory in startup process")));
+ (errmsg("failed to allocate memory in startup process")));
snprintf(pcp_un_addrs[i].sun_path, sizeof(pcp_un_addrs[i].sun_path), "%s", unix_domain_socket_path);
num_pcp_fds++;
@@ -366,7 +370,7 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
if (num_pcp_fds == 0)
{
ereport(FATAL,
- (errmsg("could not create any PCP Unix-domain sockets")));
+ (errmsg("could not create any PCP Unix-domain sockets")));
}
/* set up signal handlers */
@@ -374,10 +378,11 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
/* start the log collector if enabled */
pgpool_logger_pid = SysLogger_Start();
- /*
- * If using syslogger, close the read side of the pipe. We don't bother
- * tracking this in fd.c, either.
- */
+
+ /*
+ * If using syslogger, close the read side of the pipe. We don't bother
+ * tracking this in fd.c, either.
+ */
if (syslogPipe[0] >= 0)
close(syslogPipe[0]);
syslogPipe[0] = -1;
@@ -405,18 +410,18 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
wakeup_request = 0;
/*
- * Watchdog process fires SIGUSR2 once in stable state
- * In addition, when watchdog fails to start with FATAL, the process
- * exits and SIGCHLD is fired, so we can also expect SIGCHLD from
- * watchdog process. Finally, we also need to look for the SIGUSR1
- * signal for the failover requests from other watchdog nodes. In
- * case a request arrives at the same time when the watchdog has just
- * been initialized.
+ * The watchdog process fires SIGUSR2 once it is in a stable state. In
+ * addition, when the watchdog fails to start with FATAL, the process
+ * exits and SIGCHLD is fired, so we can also expect SIGCHLD from the
+ * watchdog process. Finally, we also need to look for the SIGUSR1
+ * signal for failover requests from other watchdog nodes, in case a
+ * request arrives just as the watchdog has been initialized.
*
- * So we need to wait until watchdog is in stable state so only
- * wait for SIGUSR1, SIGCHLD, and signals those are necessary to make
- * sure we respond to user requests of shutdown if it arrives while we
- * are in waiting state.
+ * So we need to wait until the watchdog is in a stable state, and only
+ * wait for SIGUSR1, SIGCHLD, and the signals that are necessary to make
+ * sure we respond to a user shutdown request if it arrives while we are
+ * in the waiting state.
*
* Note that SIGUSR1 does not need to be in the wait signal list,
* although its signal handler is already installed, but even if the
@@ -450,7 +455,7 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
ereport(LOG,
(errmsg("watchdog process is initialized"),
- errdetail("watchdog messaging data version: %s",WD_MESSAGE_DATA_VERSION)));
+ errdetail("watchdog messaging data version: %s", WD_MESSAGE_DATA_VERSION)));
/*
* initialize the lifecheck process
@@ -459,7 +464,8 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
if (sigusr1_request)
{
- do {
+ do
+ {
sigusr1_request = 0;
sigusr1_interrupt_processor();
} while (sigusr1_request == 1);
@@ -470,7 +476,7 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
fds = malloc(sizeof(int) * (num_unix_fds + 1));
if (fds == NULL)
ereport(FATAL,
- (errmsg("failed to allocate memory in startup process")));
+ (errmsg("failed to allocate memory in startup process")));
unix_fds = create_unix_domain_sockets_by_list(un_addrs,
pool_config->unix_socket_group,
@@ -497,7 +503,7 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
fds = realloc(fds, sizeof(int) * (num_inet_fds + num_unix_fds + 1));
if (fds == NULL)
ereport(FATAL,
- (errmsg("failed to expand memory for fds")));
+ (errmsg("failed to expand memory for fds")));
memcpy(&fds[num_unix_fds], inet_fds, sizeof(int) * num_inet_fds);
fds[num_unix_fds + num_inet_fds] = -1;
@@ -507,8 +513,8 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
/* For query cache concurrency control */
if (pool_config->memory_cache_enabled)
{
- char path[1024];
- int lfd;
+ char path[1024];
+ int lfd;
snprintf(path, sizeof(path), "%s/QUERY_CACHE_LOCK_FILE", pool_config->logdir);
lfd = open(path, O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR);
@@ -542,7 +548,7 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
else
current_child_process_count = pool_config->num_init_children;
ereport(DEBUG1,
- (errmsg("Spawning %d child processes",current_child_process_count)));
+ (errmsg("Spawning %d child processes", current_child_process_count)));
for (i = 0; i < current_child_process_count; i++)
{
process_info[i].start_time = time(NULL);
@@ -601,7 +607,7 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
pcp_fds = realloc(pcp_fds, sizeof(int) * (num_inet_fds + num_pcp_fds + 1));
if (pcp_fds == NULL)
ereport(FATAL,
- (errmsg("failed to expand memory for pcp_fds")));
+ (errmsg("failed to expand memory for pcp_fds")));
memcpy(&pcp_fds[num_pcp_fds], pcp_inet_fds, sizeof(int) * num_inet_fds);
pcp_fds[num_inet_fds + num_pcp_fds] = -1;
@@ -645,6 +651,7 @@ PgpoolMain(bool discard_status, bool clear_memcache_oidmaps)
#ifdef NOT_USED
CHECK_REQUEST;
#endif
+
/*
* check for child signals to ensure child startup before reporting
* successful start.
@@ -1108,11 +1115,11 @@ create_unix_domain_socket(struct sockaddr_un un_addr_tmp, const char *group, con
if (*group != '\0')
{
- char *endptr;
- gid_t gid;
+ char *endptr;
+ gid_t gid;
unsigned long val;
- /* check group*/
+ /* check group */
val = strtoul(group, &endptr, 10);
if (*endptr == '\0')
{
@@ -1121,8 +1128,9 @@ create_unix_domain_socket(struct sockaddr_un un_addr_tmp, const char *group, con
else
{
struct group *gr;
+
gr = getgrnam(group);
- if(!gr)
+ if (!gr)
{
ereport(FATAL,
(errmsg("unix_socket_group \"%s\" does not exist", group)));
@@ -1167,6 +1175,7 @@ terminate_all_childrens(int sig)
int i;
int killed_count = 0;
int terminated_count = 0;
+
/*
* This is supposed to be called from main process
*/
@@ -1228,7 +1237,7 @@ terminate_all_childrens(int sig)
}
}
- for (i = 0 ; i < MAX_NUM_BACKENDS; i++)
+ for (i = 0; i < MAX_NUM_BACKENDS; i++)
{
if (health_check_pids[i] != 0)
{
@@ -1335,7 +1344,7 @@ static RETSIGTYPE exit_handler(int sig)
*/
if (follow_pid > 0)
{
- ereport(LOG,
+ ereport(LOG,
(errmsg("terminating all child processes of follow child")));
kill(follow_pid, sig);
switch (sig)
@@ -1373,8 +1382,8 @@ get_next_main_node(void)
{
/*
* Do not use VALID_BACKEND macro in raw mode. VALID_BACKEND returns
- * true only if the argument is main node id. In other words,
- * standby nodes are false. So need to check backend status with
+ * true only if the argument is the main node id. In other words, standby
+ * nodes are false. So we need to check the backend status with
* VALID_BACKEND_RAW.
*/
if (RAW_MODE)
@@ -1412,7 +1421,7 @@ static RETSIGTYPE sigusr1_handler(int sig)
if (write(pipe_fds[1], "\0", 1) < 0)
ereport(WARNING,
(errmsg("SIGUSR1 handler: write to pipe failed"),
- errdetail("%m")));
+ errdetail("%m")));
#endif
POOL_SETMASK(&UnBlockSig);
@@ -1478,11 +1487,11 @@ sigusr1_interrupt_processor(void)
(errmsg("we have joined the watchdog cluster as STANDBY node"),
errdetail("syncing the backend states from the LEADER watchdog node")));
sync_backend_from_watchdog();
+
/*
* we also want to release the follow_primary lock if it was held
- * by the remote node.
- * because the change of watchdog coordinator would lead to forever stuck
- * in the the locked state
+ * by the remote node, because the change of watchdog coordinator
+ * would lead to being stuck forever in the locked state.
*/
pool_release_follow_primary_lock(true);
}
@@ -1635,9 +1644,10 @@ failover(void)
(errmsg("failover handler"),
errdetail("starting to select new main node")));
- /* If this is promoting specified node, new_main_node
- * should be replaced by the requested node. The requested
- * node should be REAL_PRIMARY_NODE_ID.
+ /*
+ * If this is promoting the specified node, new_main_node should be
+ * replaced by the requested node. The requested node should be
+ * REAL_PRIMARY_NODE_ID.
*/
if (failover_context.request_details & REQ_DETAIL_PROMOTE)
{
@@ -1675,7 +1685,7 @@ failover(void)
else /* NODE_DOWN_REQUEST &&
* NODE_QUARANTINE_REQUEST */
{
-
+
if (handle_failover_request(&failover_context, node_id) < 0)
continue;
}
@@ -1709,14 +1719,15 @@ failover(void)
exec_failover_command(&failover_context, new_main_node, promote_node);
/*
- * Determine new primary node id. Possibly call find_primary_node_repeatedly().
+ * Determine new primary node id. Possibly call
+ * find_primary_node_repeatedly().
*/
new_primary = determine_new_primary_node(&failover_context, node_id);
-
+
/*
- * If follow_primary_command is provided and in streaming
- * replication mode, we start degenerating all backends as they are
- * not replicated anymore.
+ * If follow_primary_command is provided and in streaming replication
+ * mode, we start degenerating all backends as they are not replicated
+ * anymore.
*/
i = exec_follow_primary_command(&failover_context, node_id, new_primary);
@@ -1725,8 +1736,8 @@ failover(void)
new_main_node = i;
/*
- * Now new primary node and new main node are established.
- * Save them into shared memory. Also update status changed time.
+ * Now new primary node and new main node are established. Save them
+ * into shared memory. Also update status changed time.
*/
save_node_info(&failover_context, new_primary, new_main_node);
@@ -1735,8 +1746,7 @@ failover(void)
}
/*
- * We are almost done.
- * Unlock flags.
+ * We are almost done. Unlock flags.
*/
pool_semaphore_lock(REQUEST_INFO_SEM);
switching = 0;
@@ -1768,7 +1778,7 @@ static RETSIGTYPE reap_handler(int sig)
if (pipe_fds[1] && write(pipe_fds[1], "\0", 1) < 0)
ereport(WARNING,
(errmsg("reap_handler: write to pipe failed"),
- errdetail("%m")));
+ errdetail("%m")));
#endif
POOL_SETMASK(&UnBlockSig);
@@ -2092,7 +2102,7 @@ pool_get_process_list(int *array_size)
int cnt = 0;
int i;
- for (i=0;i < pool_config->num_init_children;i++)
+ for (i = 0; i < pool_config->num_init_children; i++)
{
if (process_info[i].pid != 0)
cnt++;
@@ -2307,8 +2317,8 @@ trigger_failover_command(int node, const char *command_line,
int old_main_node, int new_main_node, int old_primary)
{
int r = 0;
- StringInfoData exec_cmd_data;
- StringInfo exec_cmd = &exec_cmd_data;
+ StringInfoData exec_cmd_data;
+ StringInfo exec_cmd = &exec_cmd_data;
BackendInfo *info;
BackendInfo *newmain;
BackendInfo *oldprimary;
@@ -2445,7 +2455,7 @@ trigger_failover_command(int node, const char *command_line,
static POOL_NODE_STATUS pool_node_status[MAX_NUM_BACKENDS];
POOL_NODE_STATUS *
-verify_backend_node_status(POOL_CONNECTION_POOL_SLOT * *slots)
+verify_backend_node_status(POOL_CONNECTION_POOL_SLOT **slots)
{
POOL_SELECT_RESULT *res;
int num_primaries = 0;
@@ -2661,13 +2671,14 @@ verify_backend_node_status(POOL_CONNECTION_POOL_SLOT * *slots)
/* verify host and port */
if (((*backend_info->backend_hostname == '/' && *host == '\0') ||
- /*
- * It is possible that backend_hostname is Unix
- * domain socket but wal_receiver connects via
- * TCP/IP localhost.
- */
+
+ /*
+ * It is possible that backend_hostname is Unix domain
+ * socket but wal_receiver connects via TCP/IP
+ * localhost.
+ */
(*backend_info->backend_hostname == '/' && !strcmp("localhost", host)) ||
- !strcmp(backend_info->backend_hostname, host)) &&
+ !strcmp(backend_info->backend_hostname, host)) &&
backend_info->backend_port == atoi(port))
{
/* the standby connects to the primary */
@@ -2909,8 +2920,8 @@ find_primary_node_repeatedly(void)
{
ereport(LOG,
(errmsg("failed to find primary node"),
- errdetail("find_primary_node_repeatedly: expired after %d seconds",
- pool_config->search_primary_node_timeout)));
+ errdetail("find_primary_node_repeatedly: expired after %d seconds",
+ pool_config->search_primary_node_timeout)));
break;
}
}
@@ -2955,13 +2966,12 @@ fork_follow_child(int old_main_node, int new_primary, int old_primary)
#endif
SetProcessGlobalVariables(PT_FOLLOWCHILD);
+
/*
- * when the watchdog is enabled, we would come here
- * only on the coordinator node.
- * so before acquiring the local lock, Lock all the
- * standby nodes so that they should stop false primary
- * detection until we are finished with the follow primary
- * command.
+ * When the watchdog is enabled, we would come here only on the
+ * coordinator node. So before acquiring the local lock, lock all the
+ * standby nodes so that they stop false primary detection until we
+ * are finished with the follow primary command.
*/
wd_lock_standby(WD_FOLLOW_PRIMARY_LOCK);
pool_acquire_follow_primary_lock(true, false);
@@ -2983,7 +2993,7 @@ fork_follow_child(int old_main_node, int new_primary, int old_primary)
}
Req_info->follow_primary_ongoing = false;
pool_release_follow_primary_lock(false);
- /* inform standby watchdog nodes to release the lock as well*/
+ /* inform standby watchdog nodes to release the lock as well */
wd_unlock_standby(WD_FOLLOW_PRIMARY_LOCK);
exit(0);
}
@@ -3000,26 +3010,26 @@ fork_follow_child(int old_main_node, int new_primary, int old_primary)
static void
initialize_shared_mem_objects(bool clear_memcache_oidmaps)
{
- BackendDesc* backend_desc;
- Size size;
- int i;
+ BackendDesc *backend_desc;
+ Size size;
+ int i;
/*
* Calculate the size of required shared memory and try to allocate
* everything in single memory segment
*/
- size = 256;/* let us have some extra space */
+ size = 256; /* let us have some extra space */
size += MAXALIGN(sizeof(BackendDesc));
elog(DEBUG1, "BackendDesc: %zu bytes requested for shared memory", MAXALIGN(sizeof(BackendDesc)));
size += MAXALIGN(pool_coninfo_size());
size += MAXALIGN(pool_config->num_init_children * (sizeof(ProcessInfo)));
elog(DEBUG1, "ProcessInfo: num_init_children (%d) * sizeof(ProcessInfo) (%zu) = %zu bytes requested for shared memory",
- pool_config->num_init_children, sizeof(ProcessInfo), pool_config->num_init_children* sizeof(ProcessInfo));
+ pool_config->num_init_children, sizeof(ProcessInfo), pool_config->num_init_children * sizeof(ProcessInfo));
size += MAXALIGN(sizeof(User1SignalSlot));
elog(DEBUG1, "UserSignalSlot: %zu bytes requested for shared memory", MAXALIGN(sizeof(User1SignalSlot)));
size += MAXALIGN(sizeof(POOL_REQUEST_INFO));
elog(DEBUG1, "POOL_REQUEST_INFO: %zu bytes requested for shared memory", MAXALIGN(sizeof(POOL_REQUEST_INFO)));
- size += MAXALIGN(sizeof(int)); /* for InRecovery */
+ size += MAXALIGN(sizeof(int)); /* for InRecovery */
size += MAXALIGN(stat_shared_memory_size());
elog(DEBUG1, "stat_shared_memory_size: %zu bytes requested for shared memory", MAXALIGN(stat_shared_memory_size()));
size += MAXALIGN(health_check_stats_shared_memory_size());
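The size calculation above follows a common pattern: add up MAXALIGN'ed chunk sizes (plus some slack), allocate one segment, then carve aligned chunks out of it in the same order. A minimal standalone sketch of that pattern, with made-up names and sizes rather than the actual pgpool shared memory layout:

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define ALIGNVAL	8
#define MYALIGN(len)	(((size_t) (len) + (ALIGNVAL - 1)) & ~((size_t) (ALIGNVAL - 1)))

static char *seg;				/* the single segment */
static size_t seg_used;			/* bytes handed out so far */

static void *
segment_get_chunk(size_t size)
{
	void	   *p = seg + seg_used;

	seg_used += MYALIGN(size);
	return p;
}

int
main(void)
{
	size_t		size = 256;		/* some extra space, as above */
	int		   *in_recovery;
	double	   *stats;

	size += MYALIGN(sizeof(int));			/* e.g. an InRecovery flag */
	size += MYALIGN(128 * sizeof(double));	/* e.g. a statistics area */

	seg = calloc(1, size);		/* stand-in for the real shared memory segment */
	if (seg == NULL)
		return 1;

	in_recovery = segment_get_chunk(sizeof(int));
	stats = segment_get_chunk(128 * sizeof(double));

	*in_recovery = 0;
	memset(stats, 0, 128 * sizeof(double));

	free(seg);
	return 0;
}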
@@ -3051,25 +3061,25 @@ initialize_shared_mem_objects(bool clear_memcache_oidmaps)
/* Move the backend descriptors to shared memory */
backend_desc = pool_shared_memory_segment_get_chunk(sizeof(BackendDesc));
- memcpy(backend_desc, pool_config->backend_desc,sizeof(BackendDesc));
+ memcpy(backend_desc, pool_config->backend_desc, sizeof(BackendDesc));
pfree(pool_config->backend_desc);
pool_config->backend_desc = backend_desc;
- /* get the shared memory from main segment*/
- con_info = (ConnectionInfo *)pool_shared_memory_segment_get_chunk(pool_coninfo_size());
+ /* get the shared memory from main segment */
+ con_info = (ConnectionInfo *) pool_shared_memory_segment_get_chunk(pool_coninfo_size());
- process_info = (ProcessInfo *)pool_shared_memory_segment_get_chunk(pool_config->num_init_children * (sizeof(ProcessInfo)));
+ process_info = (ProcessInfo *) pool_shared_memory_segment_get_chunk(pool_config->num_init_children * (sizeof(ProcessInfo)));
for (i = 0; i < pool_config->num_init_children; i++)
{
process_info[i].connection_info = pool_coninfo(i, 0, 0);
process_info[i].pid = 0;
}
- user1SignalSlot = (User1SignalSlot *)pool_shared_memory_segment_get_chunk(sizeof(User1SignalSlot));
+ user1SignalSlot = (User1SignalSlot *) pool_shared_memory_segment_get_chunk(sizeof(User1SignalSlot));
- Req_info = (POOL_REQUEST_INFO *)pool_shared_memory_segment_get_chunk(sizeof(POOL_REQUEST_INFO));
+ Req_info = (POOL_REQUEST_INFO *) pool_shared_memory_segment_get_chunk(sizeof(POOL_REQUEST_INFO));
- InRecovery = (int *)pool_shared_memory_segment_get_chunk(sizeof(int));
+ InRecovery = (int *) pool_shared_memory_segment_get_chunk(sizeof(int));
/* Initialize statistics area */
stat_set_stat_area(pool_shared_memory_segment_get_chunk(stat_shared_memory_size()));
@@ -3079,13 +3089,13 @@ initialize_shared_mem_objects(bool clear_memcache_oidmaps)
health_check_stats_init(pool_shared_memory_segment_get_chunk(health_check_stats_shared_memory_size()));
/* Initialize Snapshot Isolation manage area */
- si_manage_info = (SI_ManageInfo*)pool_shared_memory_segment_get_chunk(sizeof(SI_ManageInfo));
+ si_manage_info = (SI_ManageInfo *) pool_shared_memory_segment_get_chunk(sizeof(SI_ManageInfo));
si_manage_info->snapshot_waiting_children =
- (pid_t*)pool_shared_memory_segment_get_chunk(pool_config->num_init_children * sizeof(pid_t));
+ (pid_t *) pool_shared_memory_segment_get_chunk(pool_config->num_init_children * sizeof(pid_t));
si_manage_info->commit_waiting_children =
- (pid_t*)pool_shared_memory_segment_get_chunk(pool_config->num_init_children * sizeof(pid_t));
+ (pid_t *) pool_shared_memory_segment_get_chunk(pool_config->num_init_children * sizeof(pid_t));
/*
* Initialize backend status area. From now on, VALID_BACKEND macro can be
@@ -3164,6 +3174,7 @@ initialize_shared_mem_objects(bool clear_memcache_oidmaps)
}
}
+
/*
* Read the status file
*/
@@ -3503,7 +3514,7 @@ system_will_go_down(int code, Datum arg)
*/
if (follow_pid > 0)
{
- ereport(LOG,
+ ereport(LOG,
(errmsg("terminating all child processes of follow child")));
kill(follow_pid, SIGTERM);
kill(-follow_pid, SIGTERM);
@@ -3662,8 +3673,8 @@ sync_backend_from_watchdog(void)
/*
* Update primary node id info on the shared memory area if it's different
- * from the one on leader watchdog node. This should be done only in streaming
- * or logical replication mode.
+ * from the one on leader watchdog node. This should be done only in
+ * streaming or logical replication mode.
*/
if (SL_MODE && Req_info->primary_node_id != backendStatus->primary_node_id)
{
@@ -3672,6 +3683,7 @@ sync_backend_from_watchdog(void)
ereport(LOG,
(errmsg("primary node:%d on leader watchdog node \"%s\" is different from local primary node:%d",
backendStatus->primary_node_id, backendStatus->nodeName, Req_info->primary_node_id)));
+
/*
* leader node returns primary_node_id = -1 when the primary node is
* in quarantine state on the leader. So we will not update our
@@ -3687,9 +3699,9 @@ sync_backend_from_watchdog(void)
backendStatus->primary_node_id == -1 && BACKEND_INFO(Req_info->primary_node_id).backend_status != CON_DOWN)
{
ereport(LOG,
- (errmsg("primary node:%d on leader watchdog node \"%s\" seems to be quarantined",
- Req_info->primary_node_id, backendStatus->nodeName),
- errdetail("keeping the current primary")));
+ (errmsg("primary node:%d on leader watchdog node \"%s\" seems to be quarantined",
+ Req_info->primary_node_id, backendStatus->nodeName),
+ errdetail("keeping the current primary")));
}
else
{
@@ -3871,7 +3883,7 @@ sync_backend_from_watchdog(void)
* version number is in the static memory area.
*/
static int
-get_server_version(POOL_CONNECTION_POOL_SLOT * *slots, int node_id)
+get_server_version(POOL_CONNECTION_POOL_SLOT **slots, int node_id)
{
static int server_versions[MAX_NUM_BACKENDS];
@@ -3955,7 +3967,7 @@ bool
pool_acquire_follow_primary_lock(bool block, bool remote_request)
{
pool_sigset_t oldmask;
- volatile int follow_primary_count;
+ volatile int follow_primary_count;
for (;;)
{
@@ -3974,7 +3986,8 @@ pool_acquire_follow_primary_lock(bool block, bool remote_request)
{
if (Req_info->follow_primary_lock_held_remotely)
{
- /* The lock was already held by remote node and we only
+ /*
+ * The lock was already held by remote node and we only
* support one remote lock
*/
ereport(LOG,
@@ -4047,25 +4060,26 @@ pool_release_follow_primary_lock(bool remote_request)
else if (Req_info->follow_primary_count)
{
/*
- * we have received the release lock request from remote
- * but the lock is not held by remote node.
- * Just ignore the request
+ * We have received the release lock request from the remote node,
+ * but the lock is not held by a remote node. Just ignore the request.
*/
ereport(DEBUG1,
(errmsg("pool_release_follow_primary_lock is not releasing the lock since it was not held by remote node")));
}
+
/*
- * Silently ignore, if we received the release request from remote while no lock was held.
- * Also clear the pending lock request, As we only support single remote lock
+ * Silently ignore if we received the release request from remote
+ * while no lock was held. Also clear the pending lock request, as we
+ * only support a single remote lock.
*/
Req_info->follow_primary_lock_pending = false;
}
- else /*local request */
+ else /* local request */
{
/*
- * if we have a pending lock request from watchdog
- * do not remove the actual lock, Just clear the pending flag
+ * If we have a pending lock request from watchdog, do not remove the
+ * actual lock; just clear the pending flag.
*/
if (Req_info->follow_primary_lock_pending)
{
@@ -4081,10 +4095,10 @@ pool_release_follow_primary_lock(bool remote_request)
if (Req_info->follow_primary_lock_held_remotely)
{
/*
- * Ideally this should not happen.
- * yet if for some reason our local node is trying to release a lock
- * that is held by remote node. Just produce a LOG message and release
- * the lock
+ * Ideally this should not happen, yet if for some reason our
+ * local node is trying to release a lock that is held by the
+ * remote node, just produce a LOG message and release the
+ * lock.
*/
ereport(LOG,
(errmsg("pool_release_follow_primary_lock is releasing the remote lock by local request")));
@@ -4146,7 +4160,7 @@ handle_failback_request(FAILOVER_CONTEXT *failover_context, int node_id)
{
if (node_id < 0 || node_id >= MAX_NUM_BACKENDS ||
(failover_context->reqkind == NODE_UP_REQUEST && !(RAW_MODE &&
- BACKEND_INFO(node_id).backend_status == CON_DOWN) && VALID_BACKEND(node_id)) ||
+ BACKEND_INFO(node_id).backend_status == CON_DOWN) && VALID_BACKEND(node_id)) ||
(failover_context->reqkind == NODE_DOWN_REQUEST && !VALID_BACKEND(node_id)))
{
if (node_id < 0 || node_id >= MAX_NUM_BACKENDS)
@@ -4176,26 +4190,24 @@ handle_failback_request(FAILOVER_CONTEXT *failover_context, int node_id)
BACKEND_INFO(node_id).quarantine = false;
/*
- * do not search for primary node when handling the quarantine
- * nodes
+ * Do not search for the primary node when handling the quarantine nodes.
*/
failover_context->search_primary = false;
/*
- * recalculate the main node id after setting the backend
- * status of quarantined node, this will bring us to the old
- * main_node_id that was before the quarantine state
+ * Recalculate the main node id after setting the backend status of the
+ * quarantined node; this will bring us back to the old main_node_id
+ * that was in effect before the quarantine state.
*/
Req_info->main_node_id = get_next_main_node();
if (Req_info->primary_node_id == -1 &&
BACKEND_INFO(node_id).role == ROLE_PRIMARY)
{
/*
- * if the failback request is for the quarantined node and
- * that node had a primary role before it was quarantined,
- * restore the primary node status for that node. this is
- * important for the failover script to get the proper
- * value of old primary
+ * If the failback request is for the quarantined node and that
+ * node had a primary role before it was quarantined, restore the
+ * primary node status for that node. This is important for the
+ * failover script to get the proper value of the old primary.
*/
ereport(LOG,
(errmsg("failover: failing back the quarantine node that was primary before it was quarantined"),
@@ -4203,8 +4215,8 @@ handle_failback_request(FAILOVER_CONTEXT *failover_context, int node_id)
Req_info->primary_node_id = node_id;
/*
- * since we changed the primary node so restart of all
- * children is required
+ * Since we changed the primary node, a restart of all children is
+ * required.
*/
failover_context->need_to_restart_children = true;
failover_context->partial_restart = false;
@@ -4227,8 +4239,8 @@ handle_failback_request(FAILOVER_CONTEXT *failover_context, int node_id)
else
{
/*
- * The request is a proper failback request and not because of
- * the update status of quarantined node
+ * The request is a proper failback request, not one caused by
+ * updating the status of a quarantined node.
*/
(void) write_status_file();
@@ -4248,14 +4260,14 @@ handle_failback_request(FAILOVER_CONTEXT *failover_context, int node_id)
static int
handle_failover_request(FAILOVER_CONTEXT *failover_context, int node_id)
{
- int cnt = 0; /* number of down node ids */
- int i;
+ int cnt = 0; /* number of down node ids */
+ int i;
for (i = 0; i < failover_context->node_count; i++)
{
if (failover_context->node_id_set[i] != -1 && (BACKEND_INFO(failover_context->node_id_set[i]).quarantine == true ||
- ((RAW_MODE && VALID_BACKEND_RAW(failover_context->node_id_set[i])) ||
- VALID_BACKEND(failover_context->node_id_set[i]))))
+ ((RAW_MODE && VALID_BACKEND_RAW(failover_context->node_id_set[i])) ||
+ VALID_BACKEND(failover_context->node_id_set[i]))))
{
ereport(LOG,
(errmsg("=== Starting %s. shutdown host %s(%d) ===",
@@ -4263,7 +4275,7 @@ handle_failover_request(FAILOVER_CONTEXT *failover_context, int node_id)
BACKEND_INFO(failover_context->node_id_set[i]).backend_hostname,
BACKEND_INFO(failover_context->node_id_set[i]).backend_port)));
- BACKEND_INFO(failover_context->node_id_set[i]).backend_status = CON_DOWN; /* set down status */
+ BACKEND_INFO(failover_context->node_id_set[i]).backend_status = CON_DOWN; /* set down status */
pool_set_backend_status_changed_time(failover_context->node_id_set[i]);
if (failover_context->reqkind == NODE_QUARANTINE_REQUEST)
{
@@ -4272,12 +4284,11 @@ handle_failover_request(FAILOVER_CONTEXT *failover_context, int node_id)
else
{
/*
- * if the degeneration request is for the quarantined
- * node and that node had a primary role before it was
- * quarantined, Restore the primary node status for
- * that node before degenerating it. This is important
- * for the failover script to get the proper value of
- * old primary
+ * If the degeneration request is for the quarantined node and
+ * that node had a primary role before it was quarantined,
+ * restore the primary node status for that node before
+ * degenerating it. This is important for the failover script
+ * to get the proper value of the old primary.
*/
if (Req_info->primary_node_id == -1 &&
BACKEND_INFO(failover_context->node_id_set[i]).quarantine == true &&
@@ -4313,41 +4324,43 @@ handle_failover_request(FAILOVER_CONTEXT *failover_context, int node_id)
static void
kill_failover_children(FAILOVER_CONTEXT *failover_context, int node_id)
{
- int i, j, k;
+ int i,
+ j,
+ k;
+
/*
* On 2011/5/2 Tatsuo Ishii says: if mode is streaming replication and
- * request is NODE_UP_REQUEST (failback case) we don't need to restart
- * all children. Existing session will not use newly attached node,
- * but load balanced node is not changed until this session ends, so
- * it's harmless anyway.
+ * request is NODE_UP_REQUEST (failback case) we don't need to restart all
+ * children. Existing session will not use newly attached node, but load
+ * balanced node is not changed until this session ends, so it's harmless
+ * anyway.
*/
/*
- * On 2015/9/21 Tatsuo Ishii says: this judgment is not sufficient if
- * all backends were down. Child process has local status in which all
+ * On 2015/9/21 Tatsuo Ishii says: this judgment is not sufficient if all
+ * backends were down. Child process has local status in which all
* backends are down. In this case even if new connection arrives from
- * frontend, the child will not accept it because the local status
- * shows all backends are down. For this purpose we refer to
- * "all_backend_down" variable, which was set before updating backend
- * status.
+ * frontend, the child will not accept it because the local status shows
+ * all backends are down. For this purpose we refer to "all_backend_down"
+ * variable, which was set before updating backend status.
*
* See bug 248 for more details.
*/
/*
- * We also need to think about a case when the former primary node did
- * not exist. In the case we need to restart all children as
- * well. For example when previous primary node id is 0 and then it
- * went down, restarted, re-attached without promotion. Then existing
- * child process loses connection slot to node 0 and keeps on using it
- * when node 0 comes back. This could result in segfault later on in
- * the child process because there's no connection to node id 0.
+ * We also need to think about the case when the former primary node did
+ * not exist. In that case we need to restart all children as well. For
+ * example, when the previous primary node id is 0 and then it went down,
+ * restarted, and re-attached without promotion, the existing child process
+ * loses its connection slot to node 0 and keeps on using it when node 0
+ * comes back. This could result in a segfault later on in the child
+ * process because there's no connection to node id 0.
*
- * Actually we need to think about when ALWAYS_PRIMARY flag is set
- * *but* DISALLOW_TO_FAILOVER flag is not set case. In the case after
- * primary failover Req_info->primary_node_id is set, but connection
- * to the primary node does not exist. So we should do full restart if
- * requested node id is the former primary node.
+ * Actually we need to think about the case where the ALWAYS_PRIMARY flag
+ * is set *but* the DISALLOW_TO_FAILOVER flag is not set. In that case,
+ * after primary failover Req_info->primary_node_id is set, but the
+ * connection to the primary node does not exist. So we should do a full
+ * restart if the requested node id is the former primary node.
*
* See bug 672 for more details.
*/
@@ -4453,7 +4466,7 @@ kill_failover_children(FAILOVER_CONTEXT *failover_context, int node_id)
static void
exec_failover_command(FAILOVER_CONTEXT *failover_context, int new_main_node_id, int promote_node_id)
{
- int i;
+ int i;
if (failover_context->reqkind == NODE_DOWN_REQUEST)
{
@@ -4461,9 +4474,10 @@ exec_failover_command(FAILOVER_CONTEXT *failover_context, int new_main_node_id,
{
if (failover_context->nodes[i])
{
- /* If this is promoting specified node, new_main_node
- * should be replaced by the requested node. The requested
- * node should be REAL_PRIMARY_NODE_ID.
+ /*
+ * If this is promoting the specified node, new_main_node should
+ * be replaced by the requested node. The requested node
+ * should be REAL_PRIMARY_NODE_ID.
*/
if (failover_context->request_details & REQ_DETAIL_PROMOTE)
{
@@ -4487,7 +4501,7 @@ exec_failover_command(FAILOVER_CONTEXT *failover_context, int new_main_node_id,
static int
determine_new_primary_node(FAILOVER_CONTEXT *failover_context, int node_id)
{
- int new_primary;
+ int new_primary;
if (failover_context->reqkind == PROMOTE_NODE_REQUEST && VALID_BACKEND(node_id))
{
@@ -4496,15 +4510,14 @@ determine_new_primary_node(FAILOVER_CONTEXT *failover_context, int node_id)
else if (failover_context->reqkind == NODE_QUARANTINE_REQUEST)
{
/*
- * If the quarantine node was the primary node, set the new primary
- * to -1 (invalid).
+ * If the quarantine node was the primary node, set the new primary to
+ * -1 (invalid).
*/
if (Req_info->primary_node_id == node_id)
{
/*
- * set the role of the node, This will help us restore the
- * primary node id when the node will come out from quarantine
- * state
+ * Set the role of the node; this will help us restore the primary
+ * node id when the node comes out of the quarantine state.
*/
BACKEND_INFO(node_id).role = ROLE_PRIMARY;
new_primary = -1;
@@ -4516,12 +4529,11 @@ determine_new_primary_node(FAILOVER_CONTEXT *failover_context, int node_id)
}
/*
- * If the down node was a standby node in streaming replication mode,
- * we can avoid calling find_primary_node_repeatedly() and recognize
- * the former primary as the new primary node, which will reduce the
- * time to process standby down.
- * This does not apply to the case when no primary node existed
- * (Req_info->primary_node_id < 0). In this case
+ * If the down node was a standby node in streaming replication mode, we
+ * can avoid calling find_primary_node_repeatedly() and recognize the
+ * former primary as the new primary node, which will reduce the time to
+ * process standby down. This does not apply to the case when no primary
+ * node existed (Req_info->primary_node_id < 0). In this case
* find_primary_node_repeatedly() should be called.
*/
else if (SL_MODE &&
@@ -4560,9 +4572,9 @@ determine_new_primary_node(FAILOVER_CONTEXT *failover_context, int node_id)
static int
exec_follow_primary_command(FAILOVER_CONTEXT *failover_context, int node_id, int new_primary_node_id)
{
- int follow_cnt = 0;
- int new_main_node_id = -1;
- int i;
+ int follow_cnt = 0;
+ int new_main_node_id = -1;
+ int i;
if (!STREAM)
return -1;
@@ -4571,10 +4583,9 @@ exec_follow_primary_command(FAILOVER_CONTEXT *failover_context, int node_id, int
failover_context->reqkind == PROMOTE_NODE_REQUEST)
{
/*
- * follow primary command is executed in following cases:
- * - failover against the current primary
- * - no primary exists and new primary is created by failover
- * - promote node request
+ * The follow primary command is executed in the following cases:
+ * - failover against the current primary
+ * - no primary exists and a new primary is created by failover
+ * - promote node request
*/
if (((failover_context->reqkind == NODE_DOWN_REQUEST) &&
Req_info->primary_node_id >= 0 &&
@@ -4668,21 +4679,22 @@ save_node_info(FAILOVER_CONTEXT *failover_context, int new_primary_node_id, int
static void
exec_child_restart(FAILOVER_CONTEXT *failover_context, int node_id)
{
- int i, j, k;
+ int i,
+ j,
+ k;
if (failover_context->need_to_restart_children)
{
for (i = 0; i < pool_config->num_init_children; i++)
{
/*
- * Try to kill pgpool child because previous kill signal may
- * not be received by pgpool child. This could happen if
- * multiple PostgreSQL are going down (or even starting
- * pgpool, without starting PostgreSQL can trigger this).
- * Child calls degenerate_backend() and it tries to acquire
- * semaphore to write a failover request. In this case the
- * signal mask is set as well, thus signals are never
- * received.
+ * Try to kill pgpool child because previous kill signal may not
+ * be received by pgpool child. This could happen if multiple
+ * PostgreSQL are going down (or even starting pgpool, without
+ * starting PostgreSQL can trigger this). Child calls
+ * degenerate_backend() and it tries to acquire semaphore to write
+ * a failover request. In this case the signal mask is set as
+ * well, thus signals are never received.
*/
bool restart = false;
@@ -4734,8 +4746,8 @@ exec_child_restart(FAILOVER_CONTEXT *failover_context, int node_id)
else
{
/*
- * Set restart request to each child. Children will exit(1)
- * whenever they are convenient.
+ * Set a restart request for each child. Children will exit(1) whenever
+ * it is convenient for them.
*/
for (i = 0; i < pool_config->num_init_children; i++)
{
@@ -4783,8 +4795,7 @@ exec_child_restart(FAILOVER_CONTEXT *failover_context, int node_id)
{
#ifdef NOT_USED
/*
- * Temporary black magic. Without this regression 055 does not
- * finish
+ * Temporary black magic. Without this regression 055 does not finish
*/
fprintf(stderr, "=== %s done. shutdown host %s(%d) ===",
(failover_context->reqkind == NODE_DOWN_REQUEST) ? "Failover" : "Quarantine",
@@ -4848,6 +4859,7 @@ exec_notice_pcp_child(FAILOVER_CONTEXT *failover_context)
(errmsg("fork a new PCP child pid %d in failover()", pcp_pid)));
}
}
+
/*
* -------------------------------------------------------------------------
* Subroutines for failover() end
@@ -4865,8 +4877,8 @@ static int *
create_unix_domain_sockets_by_list(struct sockaddr_un *un_addrs,
char *group, int permissions, int n_sockets)
{
- int i;
- int *sockets = NULL;
+ int i;
+ int *sockets = NULL;
if (un_addrs == NULL)
return NULL;
@@ -4874,7 +4886,7 @@ create_unix_domain_sockets_by_list(struct sockaddr_un *un_addrs,
sockets = malloc(sizeof(int) * n_sockets);
if (sockets == NULL)
ereport(FATAL,
- (errmsg("failed to allocate memory in startup process")));
+ (errmsg("failed to allocate memory in startup process")));
for (i = 0; i < n_sockets; i++)
{
@@ -4896,8 +4908,8 @@ create_unix_domain_sockets_by_list(struct sockaddr_un *un_addrs,
static int *
create_inet_domain_sockets_by_list(char **listen_addresses, int n_listen_addresses, int port, int *n_sockets)
{
- int *sockets = NULL;
- int i;
+ int *sockets = NULL;
+ int i;
*n_sockets = 0;
@@ -4908,7 +4920,8 @@ create_inet_domain_sockets_by_list(char **listen_addresses, int n_listen_address
{
int *inet_fds,
*walk;
- int n = 0; /* number of fds returned from create_inet_domain_sockets(). */
+ int n = 0; /* number of fds returned from
+ * create_inet_domain_sockets(). */
ereport(LOG,
(errmsg("listen address[%d]: %s", i, listen_addresses[i])));
@@ -4946,7 +4959,8 @@ create_inet_domain_sockets_by_list(char **listen_addresses, int n_listen_address
* Check and execute pending requests set by signal interrupts.
*/
static
-void check_requests(void)
+void
+check_requests(void)
{
sigset_t sig;
@@ -4964,7 +4978,8 @@ void check_requests(void)
*/
if (sigusr1_request)
{
- do {
+ do
+ {
sigusr1_request = 0;
sigusr1_interrupt_processor();
} while (sigusr1_request == 1);
@@ -4975,8 +4990,8 @@ void check_requests(void)
print_signal_member(&sig);
/*
- * Unblock signals so that SIGQUIT/SIGTERRM/SIGINT can be accepted.
- * They are all shutdown requests.
+ * Unblock signals so that SIGQUIT/SIGTERM/SIGINT can be accepted. They
+ * are all shutdown requests.
*/
POOL_SETMASK(&UnBlockSig);
@@ -5004,7 +5019,8 @@ void check_requests(void)
}
static
-void print_signal_member(sigset_t *sig)
+void
+print_signal_member(sigset_t *sig)
{
if (sigismember(sig, SIGQUIT))
ereport(LOG,
@@ -5023,55 +5039,56 @@ void print_signal_member(sigset_t *sig)
static void
service_child_processes(void)
{
- int connected_children = Req_info->conn_counter;
- int idle_children = current_child_process_count - connected_children;
- static int high_load_counter = 0;
+ int connected_children = Req_info->conn_counter;
+ int idle_children = current_child_process_count - connected_children;
+ static int high_load_counter = 0;
+
ereport(DEBUG2,
- (errmsg("current_children_count = %d idle_children = %d connected_children = %d high_load_counter = %d",
- current_child_process_count, idle_children, connected_children, high_load_counter)));
+ (errmsg("current_children_count = %d idle_children = %d connected_children = %d high_load_counter = %d",
+ current_child_process_count, idle_children, connected_children, high_load_counter)));
if (idle_children > pool_config->max_spare_children)
{
- int ki;
- int victim_count;
- int kill_process_info_idxs[MAX_ONE_SHOT_KILLS];
- int kill_count = idle_children - pool_config->max_spare_children;
- int cycle_skip_count_before_scale_down;
- int cycle_skip_between_scale_down;
- int one_shot_kill_count;
+ int ki;
+ int victim_count;
+ int kill_process_info_idxs[MAX_ONE_SHOT_KILLS];
+ int kill_count = idle_children - pool_config->max_spare_children;
+ int cycle_skip_count_before_scale_down;
+ int cycle_skip_between_scale_down;
+ int one_shot_kill_count;
switch (pool_config->process_management_strategy)
{
- case PM_STRATEGY_AGGRESSIVE:
- cycle_skip_count_before_scale_down = 25; /* roughly 50 seconds */
- cycle_skip_between_scale_down = 2;
- one_shot_kill_count = MAX_ONE_SHOT_KILLS;
- break;
+ case PM_STRATEGY_AGGRESSIVE:
+ cycle_skip_count_before_scale_down = 25; /* roughly 50 seconds */
+ cycle_skip_between_scale_down = 2;
+ one_shot_kill_count = MAX_ONE_SHOT_KILLS;
+ break;
- case PM_STRATEGY_LAZY:
- cycle_skip_count_before_scale_down = 150; /* roughly 300 seconds */
- cycle_skip_between_scale_down = 10;
- one_shot_kill_count = 3;
- break;
+ case PM_STRATEGY_LAZY:
+ cycle_skip_count_before_scale_down = 150; /* roughly 300 seconds */
+ cycle_skip_between_scale_down = 10;
+ one_shot_kill_count = 3;
+ break;
- case PM_STRATEGY_GENTLE:
- cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
- cycle_skip_between_scale_down = 5;
- one_shot_kill_count = 3;
- break;
+ case PM_STRATEGY_GENTLE:
+ cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
+ cycle_skip_between_scale_down = 5;
+ one_shot_kill_count = 3;
+ break;
- default:
- /* should never come here, but if we do use gentle counts*/
- cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
- cycle_skip_between_scale_down = 5;
- one_shot_kill_count = 3;
- break;
+ default:
+ /* should never come here, but if we do, use the gentle counts */
+ cycle_skip_count_before_scale_down = 60; /* roughly 120 seconds */
+ cycle_skip_between_scale_down = 5;
+ one_shot_kill_count = 3;
+ break;
}
/* Do not scale down too quickly */
if (++high_load_counter < cycle_skip_count_before_scale_down || high_load_counter % cycle_skip_between_scale_down)
return;
- memset(kill_process_info_idxs, -1 ,sizeof(kill_process_info_idxs));
+ memset(kill_process_info_idxs, -1, sizeof(kill_process_info_idxs));
if (kill_count > one_shot_kill_count)
kill_count = one_shot_kill_count;
@@ -5080,15 +5097,16 @@ service_child_processes(void)
for (ki = 0; ki < victim_count; ki++)
{
- int index = kill_process_info_idxs[ki];
- if (index >=0)
+ int index = kill_process_info_idxs[ki];
+
+ if (index >= 0)
{
if (process_info[index].pid && process_info[index].status == WAIT_FOR_CONNECT)
{
ereport(DEBUG1,
- (errmsg("asking child process with pid:%d to kill itself to satisfy max_spare_children",
- process_info[index].pid),
- errdetail("child process has %d pooled connections",process_info[index].pooled_connections)));
+ (errmsg("asking child process with pid:%d to kill itself to satisfy max_spare_children",
+ process_info[index].pid),
+ errdetail("child process has %d pooled connections", process_info[index].pooled_connections)));
process_info[index].exit_if_idle = true;
kill(process_info[index].pid, SIGUSR2);
}
@@ -5099,19 +5117,20 @@ service_child_processes(void)
{
/* Reset the high load counter */
high_load_counter = 0;
- /*See if we need to spawn new children */
+ /* See if we need to spawn new children */
if (idle_children < pool_config->min_spare_children)
{
- int i;
- int spawned = 0;
- int new_spawn_no = pool_config->min_spare_children - idle_children;
+ int i;
+ int spawned = 0;
+ int new_spawn_no = pool_config->min_spare_children - idle_children;
+
/* Add 25% of max_spare_children */
new_spawn_no += pool_config->max_spare_children / 4;
if (new_spawn_no + current_child_process_count > pool_config->num_init_children)
{
ereport(DEBUG5,
- (errmsg("we have hit the ceiling, spawning %d child(ren)",
- pool_config->num_init_children - current_child_process_count)));
+ (errmsg("we have hit the ceiling, spawning %d child(ren)",
+ pool_config->num_init_children - current_child_process_count)));
new_spawn_no = pool_config->num_init_children - current_child_process_count;
}
if (new_spawn_no <= 0)
@@ -5149,47 +5168,51 @@ service_child_processes(void)
static int
select_victim_processes(int *process_info_idxs, int count)
{
- int i, ki;
- bool found_enough = false;
- int selected_count = 0;
+ int i,
+ ki;
+ bool found_enough = false;
+ int selected_count = 0;
- if (count <= 0)
- return 0;
+ if (count <= 0)
+ return 0;
- for (i = 0; i < pool_config->num_init_children; i++)
+ for (i = 0; i < pool_config->num_init_children; i++)
+ {
+ /* Only a child process waiting for a connection can be terminated */
+ if (process_info[i].pid && process_info[i].status == WAIT_FOR_CONNECT)
{
- /* Only the child process in waiting for connect can be terminated */
- if (process_info[i].pid && process_info[i].status == WAIT_FOR_CONNECT)
+ if (selected_count < count)
{
- if (selected_count < count)
- {
- process_info_idxs[selected_count++] = i;
- }
- else
+ process_info_idxs[selected_count++] = i;
+ }
+ else
+ {
+ found_enough = true;
+
+ /*
+ * We don't bother selecting the child with the fewest pooled
+ * connections under the aggressive strategy.
+ */
+ if (pool_config->process_management_strategy != PM_STRATEGY_AGGRESSIVE)
{
- found_enough = true;
- /* we don't bother selecting the child having least pooled connection with
- * aggressive strategy
- */
- if (pool_config->process_management_strategy != PM_STRATEGY_AGGRESSIVE)
+ for (ki = 0; ki < count; ki++)
{
- for (ki = 0; ki < count; ki++)
+ int old_index = process_info_idxs[ki];
+
+ if (old_index < 0 || process_info[old_index].pooled_connections > process_info[i].pooled_connections)
{
- int old_index = process_info_idxs[ki];
- if (old_index < 0 || process_info[old_index].pooled_connections > process_info[i].pooled_connections)
- {
- process_info_idxs[ki] = i;
- found_enough = false;
- break;
- }
- if (process_info[old_index].pooled_connections)
- found_enough = false;
+ process_info_idxs[ki] = i;
+ found_enough = false;
+ break;
}
+ if (process_info[old_index].pooled_connections)
+ found_enough = false;
}
}
}
- if (found_enough)
- break;
}
+ if (found_enough)
+ break;
+ }
return selected_count;
}
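The victim selection reindented above boils down to: consider only idle children, take the first "count" of them, and then (except under the aggressive strategy) swap in candidates that hold fewer pooled connections, so scale-down discards as little cached state as possible. A simplified standalone sketch of that idea, with hypothetical types and names rather than the real select_victim_processes() code:

#include <stdio.h>

typedef struct
{
	int			pid;				/* 0 means unused slot */
	int			idle;				/* 1 if waiting for a new connection */
	int			pooled_connections; /* cached backend connections held */
} Child;

/* pick up to "count" idle children, preferring those with fewer pooled connections */
static int
pick_victims(Child *children, int n_children, int *victims, int count)
{
	int			selected = 0;

	for (int i = 0; i < n_children; i++)
	{
		if (!children[i].pid || !children[i].idle)
			continue;

		if (selected < count)
		{
			victims[selected++] = i;
			continue;
		}

		/* find the already selected child holding the most pooled connections */
		int			worst = 0;

		for (int k = 1; k < count; k++)
			if (children[victims[k]].pooled_connections >
				children[victims[worst]].pooled_connections)
				worst = k;

		/* replace it if the current candidate would throw away less state */
		if (children[victims[worst]].pooled_connections >
			children[i].pooled_connections)
			victims[worst] = i;
	}
	return selected;
}

int
main(void)
{
	Child		c[4] = {{10, 1, 3}, {11, 0, 0}, {12, 1, 0}, {13, 1, 1}};
	int			victims[2];
	int			n = pick_victims(c, 4, victims, 2);

	for (int i = 0; i < n; i++)
		printf("victim index %d (pid %d)\n", victims[i], c[victims[i]].pid);
	return 0;
}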
diff --git a/src/main/pool_globals.c b/src/main/pool_globals.c
index 06f775ad9..84680fc8d 100644
--- a/src/main/pool_globals.c
+++ b/src/main/pool_globals.c
@@ -20,38 +20,39 @@
*
* Global variables. Should be eventually removed.
*/
-#include <unistd.h> /*For getpid*/
+#include <unistd.h> /* For getpid */
#include "pool.h"
#include "utils/elog.h"
pid_t mypid; /* pgpool parent process id */
-pid_t myProcPid; /* process pid */
+pid_t myProcPid; /* process pid */
ProcessType processType;
ProcessState processState;
-bool reset_query_error; /* true if error returned from backend while processing reset queries */
+bool reset_query_error; /* true if error returned from backend while
+ * processing reset queries */
/*
* Application name
*/
-static char *process_application_name = "main";
+static char *process_application_name = "main";
/*
* Fixed application names. ordered by ProcessType.
*/
-char *application_names[] = {"main",
- "child",
- "sr_check_worker",
- "heart_beat_sender",
- "heart_beat_receiver",
- "watchdog",
- "life_check",
- "follow_child",
- "watchdog_utility",
- "pcp_main",
- "pcp_child",
- "health_check",
- "logger"
+char *application_names[] = {"main",
+ "child",
+ "sr_check_worker",
+ "heart_beat_sender",
+ "heart_beat_receiver",
+ "watchdog",
+ "life_check",
+ "follow_child",
+ "watchdog_utility",
+ "pcp_main",
+ "pcp_child",
+ "health_check",
+ "logger"
};
char *
@@ -90,7 +91,8 @@ set_application_name_with_string(char *string)
void
set_application_name_with_suffix(ProcessType ptype, int suffix)
{
- static char appname_buf[POOLCONFIG_MAXNAMELEN +1];
+ static char appname_buf[POOLCONFIG_MAXNAMELEN + 1];
+
snprintf(appname_buf, POOLCONFIG_MAXNAMELEN, "%s%d", get_application_name_for_process(ptype), suffix);
set_application_name_with_string(appname_buf);
}
@@ -104,7 +106,8 @@ get_application_name(void)
return process_application_name;
}
-void SetProcessGlobalVariables(ProcessType pType)
+void
+SetProcessGlobalVariables(ProcessType pType)
{
processType = pType;
myProcPid = getpid();
diff --git a/src/main/pool_internal_comms.c b/src/main/pool_internal_comms.c
index 9aa653c60..35efd0465 100644
--- a/src/main/pool_internal_comms.c
+++ b/src/main/pool_internal_comms.c
@@ -42,7 +42,8 @@
* sends the signal to pgpool-II main process to terminate Pgpool-II
* process.
*/
-bool terminate_pgpool(char mode, bool error)
+bool
+terminate_pgpool(char mode, bool error)
{
pid_t ppid = getppid();
@@ -71,7 +72,7 @@ bool terminate_pgpool(char mode, bool error)
}
else
{
- ereport(error?ERROR:WARNING,
+ ereport(error ? ERROR : WARNING,
(errmsg("error while processing shutdown request"),
errdetail("invalid shutdown mode \"%c\"", mode)));
return false;
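For context, terminate_pgpool() asks the pgpool-II main process (its parent) to shut down by signalling getppid(). A minimal sketch of that shape is below; the smart/fast/immediate mode letters and their signal mapping are assumptions for illustration, not a copy of the real table.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static int
request_parent_shutdown(char mode)
{
	pid_t		ppid = getppid();
	int			sig;

	switch (mode)
	{
		case 's':				/* assumed: smart shutdown */
			sig = SIGTERM;
			break;
		case 'f':				/* assumed: fast shutdown */
			sig = SIGINT;
			break;
		case 'i':				/* assumed: immediate shutdown */
			sig = SIGQUIT;
			break;
		default:
			fprintf(stderr, "invalid shutdown mode \"%c\"\n", mode);
			return 0;
	}
	return kill(ppid, sig) == 0;
}

int
main(void)
{
	/* exercise only the error path; a real caller would pass 's', 'f' or 'i' */
	return request_parent_shutdown('x') ? 0 : 1;
}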
diff --git a/src/parser/copyfuncs.c b/src/parser/copyfuncs.c
index b9262aec7..4571d4b17 100644
--- a/src/parser/copyfuncs.c
+++ b/src/parser/copyfuncs.c
@@ -84,7 +84,7 @@
* _copyPlannedStmt
*/
static PlannedStmt *
-_copyPlannedStmt(const PlannedStmt *from)
+_copyPlannedStmt(const PlannedStmt * from)
{
PlannedStmt *newnode = makeNode(PlannedStmt);
@@ -121,7 +121,7 @@ _copyPlannedStmt(const PlannedStmt *from)
* all the copy functions for classes which inherit from Plan.
*/
static void
-CopyPlanFields(const Plan *from, Plan *newnode)
+CopyPlanFields(const Plan * from, Plan * newnode)
{
COPY_SCALAR_FIELD(startup_cost);
COPY_SCALAR_FIELD(total_cost);
@@ -144,7 +144,7 @@ CopyPlanFields(const Plan *from, Plan *newnode)
* _copyPlan
*/
static Plan *
-_copyPlan(const Plan *from)
+_copyPlan(const Plan * from)
{
Plan *newnode = makeNode(Plan);
@@ -161,7 +161,7 @@ _copyPlan(const Plan *from)
* _copyResult
*/
static Result *
-_copyResult(const Result *from)
+_copyResult(const Result * from)
{
Result *newnode = makeNode(Result);
@@ -182,7 +182,7 @@ _copyResult(const Result *from)
* _copyProjectSet
*/
static ProjectSet *
-_copyProjectSet(const ProjectSet *from)
+_copyProjectSet(const ProjectSet * from)
{
ProjectSet *newnode = makeNode(ProjectSet);
@@ -198,7 +198,7 @@ _copyProjectSet(const ProjectSet *from)
* _copyModifyTable
*/
static ModifyTable *
-_copyModifyTable(const ModifyTable *from)
+_copyModifyTable(const ModifyTable * from)
{
ModifyTable *newnode = makeNode(ModifyTable);
@@ -240,7 +240,7 @@ _copyModifyTable(const ModifyTable *from)
* _copyAppend
*/
static Append *
-_copyAppend(const Append *from)
+_copyAppend(const Append * from)
{
Append *newnode = makeNode(Append);
@@ -265,7 +265,7 @@ _copyAppend(const Append *from)
* _copyMergeAppend
*/
static MergeAppend *
-_copyMergeAppend(const MergeAppend *from)
+_copyMergeAppend(const MergeAppend * from)
{
MergeAppend *newnode = makeNode(MergeAppend);
@@ -293,7 +293,7 @@ _copyMergeAppend(const MergeAppend *from)
* _copyRecursiveUnion
*/
static RecursiveUnion *
-_copyRecursiveUnion(const RecursiveUnion *from)
+_copyRecursiveUnion(const RecursiveUnion * from)
{
RecursiveUnion *newnode = makeNode(RecursiveUnion);
@@ -319,7 +319,7 @@ _copyRecursiveUnion(const RecursiveUnion *from)
* _copyBitmapAnd
*/
static BitmapAnd *
-_copyBitmapAnd(const BitmapAnd *from)
+_copyBitmapAnd(const BitmapAnd * from)
{
BitmapAnd *newnode = makeNode(BitmapAnd);
@@ -340,7 +340,7 @@ _copyBitmapAnd(const BitmapAnd *from)
* _copyBitmapOr
*/
static BitmapOr *
-_copyBitmapOr(const BitmapOr *from)
+_copyBitmapOr(const BitmapOr * from)
{
BitmapOr *newnode = makeNode(BitmapOr);
@@ -362,7 +362,7 @@ _copyBitmapOr(const BitmapOr *from)
* _copyGather
*/
static Gather *
-_copyGather(const Gather *from)
+_copyGather(const Gather * from)
{
Gather *newnode = makeNode(Gather);
@@ -390,7 +390,7 @@ _copyGather(const Gather *from)
* all the copy functions for classes which inherit from Scan.
*/
static void
-CopyScanFields(const Scan *from, Scan *newnode)
+CopyScanFields(const Scan * from, Scan * newnode)
{
CopyPlanFields((const Plan *) from, (Plan *) newnode);
@@ -401,7 +401,7 @@ CopyScanFields(const Scan *from, Scan *newnode)
* _copyScan
*/
static Scan *
-_copyScan(const Scan *from)
+_copyScan(const Scan * from)
{
Scan *newnode = makeNode(Scan);
@@ -417,7 +417,7 @@ _copyScan(const Scan *from)
* _copySeqScan
*/
static SeqScan *
-_copySeqScan(const SeqScan *from)
+_copySeqScan(const SeqScan * from)
{
SeqScan *newnode = makeNode(SeqScan);
@@ -433,7 +433,7 @@ _copySeqScan(const SeqScan *from)
* _copySampleScan
*/
static SampleScan *
-_copySampleScan(const SampleScan *from)
+_copySampleScan(const SampleScan * from)
{
SampleScan *newnode = makeNode(SampleScan);
@@ -454,7 +454,7 @@ _copySampleScan(const SampleScan *from)
* _copyIndexScan
*/
static IndexScan *
-_copyIndexScan(const IndexScan *from)
+_copyIndexScan(const IndexScan * from)
{
IndexScan *newnode = makeNode(IndexScan);
@@ -481,7 +481,7 @@ _copyIndexScan(const IndexScan *from)
* _copyIndexOnlyScan
*/
static IndexOnlyScan *
-_copyIndexOnlyScan(const IndexOnlyScan *from)
+_copyIndexOnlyScan(const IndexOnlyScan * from)
{
IndexOnlyScan *newnode = makeNode(IndexOnlyScan);
@@ -507,7 +507,7 @@ _copyIndexOnlyScan(const IndexOnlyScan *from)
* _copyBitmapIndexScan
*/
static BitmapIndexScan *
-_copyBitmapIndexScan(const BitmapIndexScan *from)
+_copyBitmapIndexScan(const BitmapIndexScan * from)
{
BitmapIndexScan *newnode = makeNode(BitmapIndexScan);
@@ -530,7 +530,7 @@ _copyBitmapIndexScan(const BitmapIndexScan *from)
* _copyBitmapHeapScan
*/
static BitmapHeapScan *
-_copyBitmapHeapScan(const BitmapHeapScan *from)
+_copyBitmapHeapScan(const BitmapHeapScan * from)
{
BitmapHeapScan *newnode = makeNode(BitmapHeapScan);
@@ -551,7 +551,7 @@ _copyBitmapHeapScan(const BitmapHeapScan *from)
* _copyTidScan
*/
static TidScan *
-_copyTidScan(const TidScan *from)
+_copyTidScan(const TidScan * from)
{
TidScan *newnode = makeNode(TidScan);
@@ -572,7 +572,7 @@ _copyTidScan(const TidScan *from)
* _copyTidRangeScan
*/
static TidRangeScan *
-_copyTidRangeScan(const TidRangeScan *from)
+_copyTidRangeScan(const TidRangeScan * from)
{
TidRangeScan *newnode = makeNode(TidRangeScan);
@@ -593,7 +593,7 @@ _copyTidRangeScan(const TidRangeScan *from)
* _copySubqueryScan
*/
static SubqueryScan *
-_copySubqueryScan(const SubqueryScan *from)
+_copySubqueryScan(const SubqueryScan * from)
{
SubqueryScan *newnode = makeNode(SubqueryScan);
@@ -615,7 +615,7 @@ _copySubqueryScan(const SubqueryScan *from)
* _copyFunctionScan
*/
static FunctionScan *
-_copyFunctionScan(const FunctionScan *from)
+_copyFunctionScan(const FunctionScan * from)
{
FunctionScan *newnode = makeNode(FunctionScan);
@@ -637,7 +637,7 @@ _copyFunctionScan(const FunctionScan *from)
* _copyTableFuncScan
*/
static TableFuncScan *
-_copyTableFuncScan(const TableFuncScan *from)
+_copyTableFuncScan(const TableFuncScan * from)
{
TableFuncScan *newnode = makeNode(TableFuncScan);
@@ -658,7 +658,7 @@ _copyTableFuncScan(const TableFuncScan *from)
* _copyValuesScan
*/
static ValuesScan *
-_copyValuesScan(const ValuesScan *from)
+_copyValuesScan(const ValuesScan * from)
{
ValuesScan *newnode = makeNode(ValuesScan);
@@ -679,7 +679,7 @@ _copyValuesScan(const ValuesScan *from)
* _copyCteScan
*/
static CteScan *
-_copyCteScan(const CteScan *from)
+_copyCteScan(const CteScan * from)
{
CteScan *newnode = makeNode(CteScan);
@@ -701,7 +701,7 @@ _copyCteScan(const CteScan *from)
* _copyNamedTuplestoreScan
*/
static NamedTuplestoreScan *
-_copyNamedTuplestoreScan(const NamedTuplestoreScan *from)
+_copyNamedTuplestoreScan(const NamedTuplestoreScan * from)
{
NamedTuplestoreScan *newnode = makeNode(NamedTuplestoreScan);
@@ -722,7 +722,7 @@ _copyNamedTuplestoreScan(const NamedTuplestoreScan *from)
* _copyWorkTableScan
*/
static WorkTableScan *
-_copyWorkTableScan(const WorkTableScan *from)
+_copyWorkTableScan(const WorkTableScan * from)
{
WorkTableScan *newnode = makeNode(WorkTableScan);
@@ -743,7 +743,7 @@ _copyWorkTableScan(const WorkTableScan *from)
* _copyForeignScan
*/
static ForeignScan *
-_copyForeignScan(const ForeignScan *from)
+_copyForeignScan(const ForeignScan * from)
{
ForeignScan *newnode = makeNode(ForeignScan);
@@ -772,7 +772,7 @@ _copyForeignScan(const ForeignScan *from)
* _copyCustomScan
*/
static CustomScan *
-_copyCustomScan(const CustomScan *from)
+_copyCustomScan(const CustomScan * from)
{
CustomScan *newnode = makeNode(CustomScan);
@@ -808,7 +808,7 @@ _copyCustomScan(const CustomScan *from)
* all the copy functions for classes which inherit from Join.
*/
static void
-CopyJoinFields(const Join *from, Join *newnode)
+CopyJoinFields(const Join * from, Join * newnode)
{
CopyPlanFields((const Plan *) from, (Plan *) newnode);
@@ -821,7 +821,7 @@ CopyJoinFields(const Join *from, Join *newnode)
* _copyGatherMerge
*/
static GatherMerge *
-_copyGatherMerge(const GatherMerge *from)
+_copyGatherMerge(const GatherMerge * from)
{
GatherMerge *newnode = makeNode(GatherMerge);
@@ -840,7 +840,7 @@ _copyGatherMerge(const GatherMerge *from)
COPY_POINTER_FIELD(sortOperators, from->numCols * sizeof(Oid));
COPY_POINTER_FIELD(collations, from->numCols * sizeof(Oid));
COPY_POINTER_FIELD(nullsFirst, from->numCols * sizeof(bool));
- COPY_BITMAPSET_FIELD(initParam);
+ COPY_BITMAPSET_FIELD(initParam);
return newnode;
}
@@ -849,7 +849,7 @@ _copyGatherMerge(const GatherMerge *from)
* _copyJoin
*/
static Join *
-_copyJoin(const Join *from)
+_copyJoin(const Join * from)
{
Join *newnode = makeNode(Join);
@@ -866,7 +866,7 @@ _copyJoin(const Join *from)
* _copyNestLoop
*/
static NestLoop *
-_copyNestLoop(const NestLoop *from)
+_copyNestLoop(const NestLoop * from)
{
NestLoop *newnode = makeNode(NestLoop);
@@ -888,7 +888,7 @@ _copyNestLoop(const NestLoop *from)
* _copyMergeJoin
*/
static MergeJoin *
-_copyMergeJoin(const MergeJoin *from)
+_copyMergeJoin(const MergeJoin * from)
{
MergeJoin *newnode = makeNode(MergeJoin);
int numCols;
@@ -916,7 +916,7 @@ _copyMergeJoin(const MergeJoin *from)
* _copyHashJoin
*/
static HashJoin *
-_copyHashJoin(const HashJoin *from)
+_copyHashJoin(const HashJoin * from)
{
HashJoin *newnode = makeNode(HashJoin);
@@ -941,7 +941,7 @@ _copyHashJoin(const HashJoin *from)
* _copyMaterial
*/
static Material *
-_copyMaterial(const Material *from)
+_copyMaterial(const Material * from)
{
Material *newnode = makeNode(Material);
@@ -958,7 +958,7 @@ _copyMaterial(const Material *from)
* _copyMemoize
*/
static Memoize *
-_copyMemoize(const Memoize *from)
+_copyMemoize(const Memoize * from)
{
Memoize *newnode = makeNode(Memoize);
@@ -990,7 +990,7 @@ _copyMemoize(const Memoize *from)
* all the copy functions for classes which inherit from Sort.
*/
static void
-CopySortFields(const Sort *from, Sort *newnode)
+CopySortFields(const Sort * from, Sort * newnode)
{
CopyPlanFields((const Plan *) from, (Plan *) newnode);
@@ -1005,7 +1005,7 @@ CopySortFields(const Sort *from, Sort *newnode)
* _copySort
*/
static Sort *
-_copySort(const Sort *from)
+_copySort(const Sort * from)
{
Sort *newnode = makeNode(Sort);
@@ -1022,7 +1022,7 @@ _copySort(const Sort *from)
* _copyIncrementalSort
*/
static IncrementalSort *
-_copyIncrementalSort(const IncrementalSort *from)
+_copyIncrementalSort(const IncrementalSort * from)
{
IncrementalSort *newnode = makeNode(IncrementalSort);
@@ -1044,7 +1044,7 @@ _copyIncrementalSort(const IncrementalSort *from)
* _copyGroup
*/
static Group *
-_copyGroup(const Group *from)
+_copyGroup(const Group * from)
{
Group *newnode = makeNode(Group);
@@ -1062,7 +1062,7 @@ _copyGroup(const Group *from)
* _copyAgg
*/
static Agg *
-_copyAgg(const Agg *from)
+_copyAgg(const Agg * from)
{
Agg *newnode = makeNode(Agg);
@@ -1087,7 +1087,7 @@ _copyAgg(const Agg *from)
* _copyWindowAgg
*/
static WindowAgg *
-_copyWindowAgg(const WindowAgg *from)
+_copyWindowAgg(const WindowAgg * from)
{
WindowAgg *newnode = makeNode(WindowAgg);
@@ -1121,7 +1121,7 @@ _copyWindowAgg(const WindowAgg *from)
* _copyUnique
*/
static Unique *
-_copyUnique(const Unique *from)
+_copyUnique(const Unique * from)
{
Unique *newnode = makeNode(Unique);
@@ -1145,7 +1145,7 @@ _copyUnique(const Unique *from)
* _copyHash
*/
static Hash *
-_copyHash(const Hash *from)
+_copyHash(const Hash * from)
{
Hash *newnode = makeNode(Hash);
@@ -1170,7 +1170,7 @@ _copyHash(const Hash *from)
* _copySetOp
*/
static SetOp *
-_copySetOp(const SetOp *from)
+_copySetOp(const SetOp * from)
{
SetOp *newnode = makeNode(SetOp);
@@ -1199,7 +1199,7 @@ _copySetOp(const SetOp *from)
* _copyLockRows
*/
static LockRows *
-_copyLockRows(const LockRows *from)
+_copyLockRows(const LockRows * from)
{
LockRows *newnode = makeNode(LockRows);
@@ -1221,7 +1221,7 @@ _copyLockRows(const LockRows *from)
* _copyLimit
*/
static Limit *
-_copyLimit(const Limit *from)
+_copyLimit(const Limit * from)
{
Limit *newnode = makeNode(Limit);
@@ -1248,7 +1248,7 @@ _copyLimit(const Limit *from)
* _copyNestLoopParam
*/
static NestLoopParam *
-_copyNestLoopParam(const NestLoopParam *from)
+_copyNestLoopParam(const NestLoopParam * from)
{
NestLoopParam *newnode = makeNode(NestLoopParam);
@@ -1262,7 +1262,7 @@ _copyNestLoopParam(const NestLoopParam *from)
* _copyPlanRowMark
*/
static PlanRowMark *
-_copyPlanRowMark(const PlanRowMark *from)
+_copyPlanRowMark(const PlanRowMark * from)
{
PlanRowMark *newnode = makeNode(PlanRowMark);
@@ -1279,7 +1279,7 @@ _copyPlanRowMark(const PlanRowMark *from)
}
static PartitionPruneInfo *
-_copyPartitionPruneInfo(const PartitionPruneInfo *from)
+_copyPartitionPruneInfo(const PartitionPruneInfo * from)
{
PartitionPruneInfo *newnode = makeNode(PartitionPruneInfo);
@@ -1290,7 +1290,7 @@ _copyPartitionPruneInfo(const PartitionPruneInfo *from)
}
static PartitionedRelPruneInfo *
-_copyPartitionedRelPruneInfo(const PartitionedRelPruneInfo *from)
+_copyPartitionedRelPruneInfo(const PartitionedRelPruneInfo * from)
{
PartitionedRelPruneInfo *newnode = makeNode(PartitionedRelPruneInfo);
@@ -1311,7 +1311,7 @@ _copyPartitionedRelPruneInfo(const PartitionedRelPruneInfo *from)
* _copyPartitionPruneStepOp
*/
static PartitionPruneStepOp *
-_copyPartitionPruneStepOp(const PartitionPruneStepOp *from)
+_copyPartitionPruneStepOp(const PartitionPruneStepOp * from)
{
PartitionPruneStepOp *newnode = makeNode(PartitionPruneStepOp);
@@ -1328,7 +1328,7 @@ _copyPartitionPruneStepOp(const PartitionPruneStepOp *from)
* _copyPartitionPruneStepCombine
*/
static PartitionPruneStepCombine *
-_copyPartitionPruneStepCombine(const PartitionPruneStepCombine *from)
+_copyPartitionPruneStepCombine(const PartitionPruneStepCombine * from)
{
PartitionPruneStepCombine *newnode = makeNode(PartitionPruneStepCombine);
@@ -1343,7 +1343,7 @@ _copyPartitionPruneStepCombine(const PartitionPruneStepCombine *from)
* _copyPlanInvalItem
*/
static PlanInvalItem *
-_copyPlanInvalItem(const PlanInvalItem *from)
+_copyPlanInvalItem(const PlanInvalItem * from)
{
PlanInvalItem *newnode = makeNode(PlanInvalItem);
@@ -2208,7 +2208,7 @@ _copyJsonBehavior(const JsonBehavior *from)
static JsonExpr *
_copyJsonExpr(const JsonExpr *from)
{
- JsonExpr *newnode = makeNode(JsonExpr);
+ JsonExpr *newnode = makeNode(JsonExpr);
COPY_SCALAR_FIELD(op);
COPY_STRING_FIELD(column_name);
@@ -2506,7 +2506,7 @@ _copyOnConflictExpr(const OnConflictExpr *from)
* _copyPathKey
*/
static PathKey *
-_copyPathKey(const PathKey *from)
+_copyPathKey(const PathKey * from)
{
PathKey *newnode = makeNode(PathKey);
@@ -2520,7 +2520,7 @@ _copyPathKey(const PathKey *from)
}
static GroupByOrdering *
-_copyGroupByOrdering(const GroupByOrdering *from)
+_copyGroupByOrdering(const GroupByOrdering * from)
{
GroupByOrdering *newnode = makeNode(GroupByOrdering);
@@ -2534,7 +2534,7 @@ _copyGroupByOrdering(const GroupByOrdering *from)
* _copyRestrictInfo
*/
static RestrictInfo *
-_copyRestrictInfo(const RestrictInfo *from)
+_copyRestrictInfo(const RestrictInfo * from)
{
RestrictInfo *newnode = makeNode(RestrictInfo);
@@ -2582,7 +2582,7 @@ _copyRestrictInfo(const RestrictInfo *from)
* _copyPlaceHolderVar
*/
static PlaceHolderVar *
-_copyPlaceHolderVar(const PlaceHolderVar *from)
+_copyPlaceHolderVar(const PlaceHolderVar * from)
{
PlaceHolderVar *newnode = makeNode(PlaceHolderVar);
@@ -2598,7 +2598,7 @@ _copyPlaceHolderVar(const PlaceHolderVar *from)
* _copySpecialJoinInfo
*/
static SpecialJoinInfo *
-_copySpecialJoinInfo(const SpecialJoinInfo *from)
+_copySpecialJoinInfo(const SpecialJoinInfo * from)
{
SpecialJoinInfo *newnode = makeNode(SpecialJoinInfo);
@@ -2621,7 +2621,7 @@ _copySpecialJoinInfo(const SpecialJoinInfo *from)
* _copyAppendRelInfo
*/
static AppendRelInfo *
-_copyAppendRelInfo(const AppendRelInfo *from)
+_copyAppendRelInfo(const AppendRelInfo * from)
{
AppendRelInfo *newnode = makeNode(AppendRelInfo);
@@ -2641,7 +2641,7 @@ _copyAppendRelInfo(const AppendRelInfo *from)
* _copyPlaceHolderInfo
*/
static PlaceHolderInfo *
-_copyPlaceHolderInfo(const PlaceHolderInfo *from)
+_copyPlaceHolderInfo(const PlaceHolderInfo * from)
{
PlaceHolderInfo *newnode = makeNode(PlaceHolderInfo);
@@ -3436,7 +3436,7 @@ _copyJsonTablePathSpec(const JsonTablePathSpec *from)
static JsonTable *
_copyJsonTable(const JsonTable *from)
{
- JsonTable *newnode = makeNode(JsonTable);
+ JsonTable *newnode = makeNode(JsonTable);
COPY_NODE_FIELD(context_item);
COPY_NODE_FIELD(pathspec);
@@ -5397,10 +5397,10 @@ _copyDropSubscriptionStmt(const DropSubscriptionStmt *from)
* ****************************************************************
*/
static ExtensibleNode *
-_copyExtensibleNode(const ExtensibleNode *from)
+_copyExtensibleNode(const ExtensibleNode * from)
{
ExtensibleNode *newnode;
- const ExtensibleNodeMethods *methods;
+ const ExtensibleNodeMethods *methods;
methods = GetExtensibleNodeMethods(from->extnodename, false);
newnode = (ExtensibleNode *) newNode(methods->node_size,
@@ -5471,7 +5471,7 @@ _copyBitString(const BitString *from)
#ifdef NOT_USED_IN_PGPOOL
static ForeignKeyCacheInfo *
-_copyForeignKeyCacheInfo(const ForeignKeyCacheInfo *from)
+_copyForeignKeyCacheInfo(const ForeignKeyCacheInfo * from)
{
ForeignKeyCacheInfo *newnode = makeNode(ForeignKeyCacheInfo);
@@ -6459,54 +6459,54 @@ copyObjectImpl(const void *from)
case T_TriggerTransition:
retval = _copyTriggerTransition(from);
break;
- case T_JsonOutput:
- retval = _copyJsonOutput(from);
- break;
- case T_JsonArgument:
- retval = _copyJsonArgument(from);
- break;
- case T_JsonFuncExpr:
- retval = _copyJsonFuncExpr(from);
- break;
- case T_JsonTablePathSpec:
- retval = _copyJsonTablePathSpec(from);
- break;
- case T_JsonTable:
- retval = _copyJsonTable(from);
- break;
- case T_JsonTableColumn:
- retval = _copyJsonTableColumn(from);
- break;
- case T_JsonKeyValue:
- retval = _copyJsonKeyValue(from);
- break;
- case T_JsonParseExpr:
- retval = _copyJsonParseExpr(from);
- break;
- case T_JsonScalarExpr:
- retval = _copyJsonScalarExpr(from);
- break;
- case T_JsonSerializeExpr:
- retval = _copyJsonSerializeExpr(from);
- break;
- case T_JsonObjectConstructor:
- retval = _copyJsonObjectConstructor(from);
- break;
- case T_JsonArrayConstructor:
- retval = _copyJsonArrayConstructor(from);
- break;
- case T_JsonArrayQueryConstructor:
- retval = _copyJsonArrayQueryConstructor(from);
- break;
- case T_JsonAggConstructor:
- retval = _copyJsonAggConstructor(from);
- break;
- case T_JsonObjectAgg:
- retval = _copyJsonObjectAgg(from);
- break;
- case T_JsonArrayAgg:
- retval = _copyJsonArrayAgg(from);
- break;
+ case T_JsonOutput:
+ retval = _copyJsonOutput(from);
+ break;
+ case T_JsonArgument:
+ retval = _copyJsonArgument(from);
+ break;
+ case T_JsonFuncExpr:
+ retval = _copyJsonFuncExpr(from);
+ break;
+ case T_JsonTablePathSpec:
+ retval = _copyJsonTablePathSpec(from);
+ break;
+ case T_JsonTable:
+ retval = _copyJsonTable(from);
+ break;
+ case T_JsonTableColumn:
+ retval = _copyJsonTableColumn(from);
+ break;
+ case T_JsonKeyValue:
+ retval = _copyJsonKeyValue(from);
+ break;
+ case T_JsonParseExpr:
+ retval = _copyJsonParseExpr(from);
+ break;
+ case T_JsonScalarExpr:
+ retval = _copyJsonScalarExpr(from);
+ break;
+ case T_JsonSerializeExpr:
+ retval = _copyJsonSerializeExpr(from);
+ break;
+ case T_JsonObjectConstructor:
+ retval = _copyJsonObjectConstructor(from);
+ break;
+ case T_JsonArrayConstructor:
+ retval = _copyJsonArrayConstructor(from);
+ break;
+ case T_JsonArrayQueryConstructor:
+ retval = _copyJsonArrayQueryConstructor(from);
+ break;
+ case T_JsonAggConstructor:
+ retval = _copyJsonAggConstructor(from);
+ break;
+ case T_JsonObjectAgg:
+ retval = _copyJsonObjectAgg(from);
+ break;
+ case T_JsonArrayAgg:
+ retval = _copyJsonArrayAgg(from);
+ break;
case T_PartitionElem:
retval = _copyPartitionElem(from);
break;
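Every _copyX() routine re-indented above follows the same shape: allocate a node of the same tag, then copy each field with a macro matching the field kind, while copyObjectImpl() dispatches on the node tag. A simplified stand-in for that pattern; Demo, copy_demo and the two macros are illustrative, not the real makeNode()/COPY_*_FIELD machinery.

    #include <stdlib.h>
    #include <string.h>

    typedef struct Demo
    {
        int     tag;
        int     ival;
        char   *sval;
    } Demo;

    #define COPY_SCALAR_FIELD(f)   (newnode->f = from->f)
    #define COPY_STRING_FIELD(f)   (newnode->f = from->f ? strdup(from->f) : NULL)

    static Demo *
    copy_demo(const Demo *from)
    {
        Demo   *newnode = calloc(1, sizeof(Demo));

        if (newnode == NULL)
            return NULL;
        newnode->tag = from->tag;       /* same node tag as the source */
        COPY_SCALAR_FIELD(ival);
        COPY_STRING_FIELD(sval);
        return newnode;
    }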
diff --git a/src/parser/keywords.c b/src/parser/keywords.c
index 161282158..83feb9f47 100644
--- a/src/parser/keywords.c
+++ b/src/parser/keywords.c
@@ -38,7 +38,7 @@ const uint8 ScanKeywordCategories[SCANKEYWORDS_NUM_KEYWORDS] = {
#define BARE_LABEL true
#define AS_LABEL false
-const bool ScanKeywordBareLabel[SCANKEYWORDS_NUM_KEYWORDS] = {
+const bool ScanKeywordBareLabel[SCANKEYWORDS_NUM_KEYWORDS] = {
#include "parser/kwlist.h"
};
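The ScanKeywordBareLabel table above is produced by including parser/kwlist.h after defining the per-keyword macro, so one keyword list generates several parallel arrays. A reduced version of that preprocessor trick with a hypothetical three-entry list:

    #include <stdbool.h>

    #define DEMO_KEYWORDS             \
        DEMO_KEYWORD("abort", true)   \
        DEMO_KEYWORD("as", false)     \
        DEMO_KEYWORD("between", true)

    #define DEMO_KEYWORD(name, bare) bare,
    static const bool demo_bare_label[] = {DEMO_KEYWORDS};
    #undef DEMO_KEYWORD

    #define DEMO_KEYWORD(name, bare) name,
    static const char *demo_keyword_names[] = {DEMO_KEYWORDS};
    #undef DEMO_KEYWORD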
diff --git a/src/parser/list.c b/src/parser/list.c
index 9b16e1d1c..909517632 100644
--- a/src/parser/list.c
+++ b/src/parser/list.c
@@ -1709,23 +1709,23 @@ list_oid_cmp(const ListCell *p1, const ListCell *p2)
static inline MemoryContext
GetMemoryChunkContext(void *pointer)
{
- MemoryContext context;
+ MemoryContext context;
- /*
- * Try to detect bogus pointers handed to us, poorly though we can.
- * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
- * allocated chunk.
- */
- Assert(pointer != NULL);
- Assert(pointer == (void *) MAXALIGN(pointer));
+ /*
+ * Try to detect bogus pointers handed to us, poorly though we can.
+ * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
+ * allocated chunk.
+ */
+ Assert(pointer != NULL);
+ Assert(pointer == (void *) MAXALIGN(pointer));
- /*
- * OK, it's probably safe to look at the context.
- */
- context = *(MemoryContext *) (((char *) pointer) - sizeof(void *));
+ /*
+ * OK, it's probably safe to look at the context.
+ */
+ context = *(MemoryContext *) (((char *) pointer) - sizeof(void *));
- AssertArg(MemoryContextIsValid(context));
+ AssertArg(MemoryContextIsValid(context));
- return context;
+ return context;
}
#endif
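GetMemoryChunkContext() above relies on the allocator storing the owning context pointer in the word immediately before every chunk it hands out. A toy allocation that reproduces just that layout; DemoContext, demo_alloc and demo_chunk_context are illustrative names.

    #include <stdlib.h>

    typedef struct DemoContext
    {
        const char *name;
    } DemoContext;

    static void *
    demo_alloc(DemoContext *ctx, size_t size)
    {
        /* one pointer-sized header word, then the caller-visible chunk */
        void  **raw = malloc(sizeof(void *) + size);

        if (raw == NULL)
            return NULL;
        raw[0] = ctx;
        return (void *) (raw + 1);
    }

    static DemoContext *
    demo_chunk_context(void *pointer)
    {
        /* same pointer arithmetic as the function above, minus the Asserts */
        return *(DemoContext **) (((char *) pointer) - sizeof(void *));
    }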
diff --git a/src/parser/makefuncs.c b/src/parser/makefuncs.c
index cd1be5c83..3777c66af 100644
--- a/src/parser/makefuncs.c
+++ b/src/parser/makefuncs.c
@@ -656,13 +656,13 @@ makeVacuumRelation(RangeVar *relation, Oid oid, List *va_cols)
JsonFormat *
makeJsonFormat(JsonFormatType type, JsonEncoding encoding, int location)
{
- JsonFormat *jf = makeNode(JsonFormat);
+ JsonFormat *jf = makeNode(JsonFormat);
- jf->format_type = type;
- jf->encoding = encoding;
- jf->location = location;
+ jf->format_type = type;
+ jf->encoding = encoding;
+ jf->location = location;
- return jf;
+ return jf;
}
/*
@@ -673,13 +673,13 @@ JsonValueExpr *
makeJsonValueExpr(Expr *raw_expr, Expr *formatted_expr,
JsonFormat *format)
{
- JsonValueExpr *jve = makeNode(JsonValueExpr);
+ JsonValueExpr *jve = makeNode(JsonValueExpr);
- jve->raw_expr = raw_expr;
- jve->formatted_expr = formatted_expr;
- jve->format = format;
+ jve->raw_expr = raw_expr;
+ jve->formatted_expr = formatted_expr;
+ jve->format = format;
- return jve;
+ return jve;
}
/*
@@ -705,12 +705,12 @@ makeJsonBehavior(JsonBehaviorType btype, Node *expr, int location)
Node *
makeJsonKeyValue(Node *key, Node *value)
{
- JsonKeyValue *n = makeNode(JsonKeyValue);
+ JsonKeyValue *n = makeNode(JsonKeyValue);
- n->key = (Expr *) key;
- n->value = castNode(JsonValueExpr, value);
+ n->key = (Expr *) key;
+ n->value = castNode(JsonValueExpr, value);
- return (Node *) n;
+ return (Node *) n;
}
/*
@@ -719,17 +719,17 @@ makeJsonKeyValue(Node *key, Node *value)
* */
Node *
makeJsonIsPredicate(Node *expr, JsonFormat *format, JsonValueType item_type,
- bool unique_keys, int location)
+ bool unique_keys, int location)
{
- JsonIsPredicate *n = makeNode(JsonIsPredicate);
+ JsonIsPredicate *n = makeNode(JsonIsPredicate);
- n->expr = expr;
- n->format = format;
- n->item_type = item_type;
- n->unique_keys = unique_keys;
- n->location = location;
+ n->expr = expr;
+ n->format = format;
+ n->item_type = item_type;
+ n->unique_keys = unique_keys;
+ n->location = location;
- return (Node *) n;
+ return (Node *) n;
}
/*
@@ -738,7 +738,7 @@ makeJsonIsPredicate(Node *expr, JsonFormat *format, JsonValueType item_type,
*/
JsonTablePathSpec *
makeJsonTablePathSpec(char *string, char *name, int string_location,
- int name_location)
+ int name_location)
{
JsonTablePathSpec *pathspec = makeNode(JsonTablePathSpec);
diff --git a/src/parser/outfuncs.c b/src/parser/outfuncs.c
index 637dfbb96..f98af2b6e 100644
--- a/src/parser/outfuncs.c
+++ b/src/parser/outfuncs.c
@@ -899,9 +899,9 @@ _outOnConflictExpr(StringInfo str, const OnConflictExpr *node)
*****************************************************************************/
static void
-_outExtensibleNode(StringInfo str, const ExtensibleNode *node)
+_outExtensibleNode(StringInfo str, const ExtensibleNode * node)
{
- const ExtensibleNodeMethods *methods;
+ const ExtensibleNodeMethods *methods;
methods = GetExtensibleNodeMethods(node->extnodename, false);
@@ -1732,7 +1732,7 @@ static void
_outMergeWhenClauses(StringInfo str, List *node)
{
ListCell *temp;
- char comma;
+ char comma;
foreach(temp, node)
{
@@ -1755,14 +1755,14 @@ _outMergeWhenClauses(StringInfo str, List *node)
switch (m->commandType)
{
- ListCell *s;
+ ListCell *s;
case CMD_UPDATE:
comma = 0;
appendStringInfo(str, "UPDATE SET ");
foreach(s, m->targetList)
{
- ResTarget *r = (ResTarget *) lfirst(s);
+ ResTarget *r = (ResTarget *) lfirst(s);
if (comma == 0)
comma = 1;
@@ -1781,7 +1781,7 @@ _outMergeWhenClauses(StringInfo str, List *node)
appendStringInfoString(str, "(");
foreach(s, m->targetList)
{
- ResTarget *r = (ResTarget *) lfirst(s);
+ ResTarget *r = (ResTarget *) lfirst(s);
if (comma == 0)
comma = 1;
@@ -2025,23 +2025,23 @@ _outA_Expr(StringInfo str, A_Expr *node)
static void
_outInteger(StringInfo str, const Integer *node)
{
- appendStringInfo(str, "%d", node->ival);
+ appendStringInfo(str, "%d", node->ival);
}
static void
_outFloat(StringInfo str, const Float *node)
{
- /*
- * * We assume the value is a valid numeric literal and so does not need
- * * quoting.
- * */
- appendStringInfoString(str, node->fval);
+ /*
+ * * We assume the value is a valid numeric literal and so does not need *
+ * quoting.
+ */
+ appendStringInfoString(str, node->fval);
}
static void
_outBoolean(StringInfo str, const Boolean *node)
{
- appendStringInfoString(str, node->boolval ? "true" : "false");
+ appendStringInfoString(str, node->boolval ? "true" : "false");
}
static void
@@ -2051,10 +2051,10 @@ _outString(StringInfo str, const String *node)
* We use outToken to provide escaping of the string's content, but we
* don't want it to do anything with an empty string.
*/
- appendStringInfoChar(str, '"');
- if (node->sval[0] != '\0')
- outToken(str, node->sval);
- appendStringInfoChar(str, '"');
+ appendStringInfoChar(str, '"');
+ if (node->sval[0] != '\0')
+ outToken(str, node->sval);
+ appendStringInfoChar(str, '"');
}
static void
@@ -2131,7 +2131,7 @@ _outParamRef(StringInfo str, ParamRef *node)
static void
_outA_Const(StringInfo str, A_Const *node)
{
- char *p;
+ char *p;
if (node->isnull)
{
@@ -2854,6 +2854,7 @@ _outVacuumStmt(StringInfo str, VacuumStmt *node)
{
VacuumParams params;
+
params.options = node->is_vacuumcmd ? VACOPT_VACUUM : VACOPT_ANALYZE;
if (params.options & VACOPT_VACUUM)
@@ -2880,6 +2881,7 @@ _outVacuumStmt(StringInfo str, VacuumStmt *node)
appendStringInfoString(str, "SKIP_LOCKED ");
ListCell *lc;
+
foreach(lc, node->rels)
{
VacuumRelation *vrel = lfirst_node(VacuumRelation, lc);
@@ -3118,19 +3120,20 @@ _outCopyStmt(StringInfo str, CopyStmt *node)
|| strcmp(e->defname, "log_verbosity") == 0)
_outNode(str, e->arg);
else if (strcmp(e->defname, "delimiter") == 0
- || strcmp(e->defname, "null") == 0
- || strcmp(e->defname, "default") == 0
- || strcmp(e->defname, "quote") == 0
- || strcmp(e->defname, "escape") == 0
- || strcmp(e->defname, "encoding") == 0)
+ || strcmp(e->defname, "null") == 0
+ || strcmp(e->defname, "default") == 0
+ || strcmp(e->defname, "quote") == 0
+ || strcmp(e->defname, "escape") == 0
+ || strcmp(e->defname, "encoding") == 0)
{
- String *value = (String *) e->arg;
+ String *value = (String *) e->arg;
+
appendStringInfoString(str, "'");
_outSingleQuote(str, value->sval);
appendStringInfoString(str, "'");
}
else if (strcmp(e->defname, "force_not_null") == 0
- || strcmp(e->defname, "force_null") == 0)
+ || strcmp(e->defname, "force_null") == 0)
{
if (IsA(e->arg, A_Star))
appendStringInfoString(str, "*");
@@ -3328,7 +3331,7 @@ _outOptRoleList(StringInfo str, List *options)
foreach(lc, options)
{
DefElem *elem = lfirst(lc);
- A_Const *value = (A_Const *) elem->arg;
+ A_Const *value = (A_Const *) elem->arg;
if (strcmp(elem->defname, "password") == 0)
{
@@ -3860,7 +3863,7 @@ _outOptSeqList(StringInfo str, List *options)
foreach(lc, options)
{
DefElem *e = lfirst(lc);
- A_Const *v = (A_Const *) e->arg;
+ A_Const *v = (A_Const *) e->arg;
char buf[16];
if (strcmp(e->defname, "cycle") == 0)
@@ -4017,7 +4020,7 @@ _outFuncName(StringInfo str, List *func_name)
foreach(lc, func_name)
{
- A_Const *v = (A_Const *) lfirst(lc);
+ A_Const *v = (A_Const *) lfirst(lc);
if (dot == 0)
dot = 1;
@@ -4160,7 +4163,7 @@ _outDefineStmt(StringInfo str, DefineStmt *node)
foreach(lc, node->defnames)
{
- A_Const *v = (A_Const *) lfirst(lc);
+ A_Const *v = (A_Const *) lfirst(lc);
if (dot == 0)
dot = 1;
@@ -4218,7 +4221,7 @@ _outOperatorName(StringInfo str, List *list)
foreach(lc, list)
{
- A_Const *v = (A_Const *) lfirst(lc);
+ A_Const *v = (A_Const *) lfirst(lc);
if (dot == 0)
dot = 1;
@@ -4251,7 +4254,8 @@ _outCreateOpClassItem(StringInfo str, CreateOpClassItem *node)
}
/*
- * if (node->recheck == TRUE) appendStringInfoString(str, " RECHECK");
+ * if (node->recheck == TRUE) appendStringInfoString(str, "
+ * RECHECK");
*/
break;
@@ -4330,9 +4334,9 @@ static void
_outDropStmt(StringInfo str, DropStmt *node)
{
List *objname;
- char *p;
- char *p1;
- List *l;
+ char *p;
+ char *p1;
+ List *l;
appendStringInfoString(str, "DROP ");
switch (node->removeType)
@@ -4564,7 +4568,7 @@ _outPrivilegeList(StringInfo str, List *list)
{
foreach(lc, list)
{
- A_Const *v = (A_Const *) lfirst(lc);
+ A_Const *v = (A_Const *) lfirst(lc);
if (comma == 0)
comma = 1;
@@ -4769,7 +4773,7 @@ _outFuncOptList(StringInfo str, List *list)
foreach(lc, list)
{
DefElem *e = lfirst(lc);
- A_Const *v = (A_Const *) e->arg;
+ A_Const *v = (A_Const *) e->arg;
if (strcmp(e->defname, "strict") == 0)
{
@@ -5231,7 +5235,7 @@ _outCreatedbOptList(StringInfo str, List *options)
foreach(lc, options)
{
DefElem *e = lfirst(lc);
- A_Const *v = (A_Const *) e->arg;
+ A_Const *v = (A_Const *) e->arg;
/* keyword */
if (strcmp(e->defname, "template") == 0)
@@ -5491,7 +5495,7 @@ static void
_outCommentStmt(StringInfo str, CommentStmt *node)
{
TypeName *t;
- A_Const *v;
+ A_Const *v;
char buf[16];
appendStringInfoString(str, "COMMENT ON ");
@@ -5650,8 +5654,8 @@ static void
_outRangeFunction(StringInfo str, RangeFunction *node)
{
_outNode(str, node->functions);
- //TODO
- if (node->alias)
+ /* TODO */
+ if (node->alias)
{
_outNode(str, node->alias);
}
@@ -5983,7 +5987,7 @@ _outWithDefinition(StringInfo str, List *def_list)
elem = linitial(def_list);
if (strcmp(elem->defname, "oids") == 0)
{
- Integer *v = (Integer *) elem->arg;
+ Integer *v = (Integer *) elem->arg;
if (v->ival == 1)
appendStringInfoString(str, " WITH OIDS ");
@@ -6091,7 +6095,7 @@ _outNode(StringInfo str, void *obj)
if (obj == NULL)
return;
- else if (IsA(obj, List) ||IsA(obj, IntList) || IsA(obj, OidList) ||
+ else if (IsA(obj, List) || IsA(obj, IntList) || IsA(obj, OidList) ||
IsA(obj, XidList))
_outList(str, obj);
/* nodeRead does not want to see { } around these! */
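The _out*() routines reflowed above deparse parse-tree nodes back into SQL text by appending fragments to a string buffer. A toy appender showing that pattern; DemoStringInfo stands in for the real StringInfo type.

    #include <string.h>

    typedef struct
    {
        char    buf[256];
        size_t  len;
    } DemoStringInfo;

    static void
    demo_append(DemoStringInfo *str, const char *s)
    {
        size_t  n = strlen(s);

        if (str->len + n < sizeof(str->buf))
        {
            memcpy(str->buf + str->len, s, n + 1);  /* keep the terminator */
            str->len += n;
        }
    }

    static void
    demo_out_boolean(DemoStringInfo *str, int boolval)
    {
        demo_append(str, boolval ? "true" : "false");
    }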
diff --git a/src/parser/parser.c b/src/parser/parser.c
index 0cd5b95ee..878220be9 100644
--- a/src/parser/parser.c
+++ b/src/parser/parser.c
@@ -90,25 +90,25 @@ raw_parser(const char *str, RawParseMode mode, int len, bool *error, bool use_mi
}
/* initialize the bison parser */
- if (use_minimal)
- {
- ereport(DEBUG2,
- (errmsg("invoking the minimal parser")));
- minimal_parser_init(&yyextra);
- }
- else
- {
- ereport(DEBUG2,
- (errmsg("invoking the standard parser")));
- parser_init(&yyextra);
- }
+ if (use_minimal)
+ {
+ ereport(DEBUG2,
+ (errmsg("invoking the minimal parser")));
+ minimal_parser_init(&yyextra);
+ }
+ else
+ {
+ ereport(DEBUG2,
+ (errmsg("invoking the standard parser")));
+ parser_init(&yyextra);
+ }
PG_TRY();
{
/* Parse! */
- if (use_minimal)
- yyresult = minimal_base_yyparse(yyscanner);
- else
- yyresult = base_yyparse(yyscanner);
+ if (use_minimal)
+ yyresult = minimal_base_yyparse(yyscanner);
+ else
+ yyresult = base_yyparse(yyscanner);
/* Clean up (release memory) */
scanner_finish(yyscanner);
}
@@ -145,16 +145,17 @@ raw_parser2(List *parse_tree_list)
return node;
}
-//"INSERT INTO foo VALUES(1)"
+/* "INSERT INTO foo VALUES(1)" */
Node *
get_dummy_insert_query_node(void)
{
InsertStmt *insert = makeNode(InsertStmt);
SelectStmt *select = makeNode(SelectStmt);
+
select->valuesLists = list_make1(makeInteger(1));
insert->relation = makeRangeVar("pgpool", "foo", 0);
- insert->selectStmt = (Node*)select;
- return (Node *)insert;
+ insert->selectStmt = (Node *) select;
+ return (Node *) insert;
}
List *
@@ -162,20 +163,23 @@ get_dummy_read_query_tree(void)
{
RawStmt *rs;
SelectStmt *n = makeNode(SelectStmt);
+
n->targetList = list_make1(makeString("pgpool: unable to parse the query"));
rs = makeNode(RawStmt);
- rs->stmt = (Node *)n;
+ rs->stmt = (Node *) n;
rs->stmt_location = 0;
rs->stmt_len = 0; /* might get changed later */
- return list_make1((Node *)rs);
+ return list_make1((Node *) rs);
}
List *
get_dummy_write_query_tree(void)
{
- ColumnRef *c1,*c2;
+ ColumnRef *c1,
+ *c2;
RawStmt *rs;
DeleteStmt *n = makeNode(DeleteStmt);
+
n->relation = makeRangeVar("pgpool", "foo", 0);
c1 = makeNode(ColumnRef);
@@ -184,18 +188,20 @@ get_dummy_write_query_tree(void)
c2 = makeNode(ColumnRef);
c2->fields = list_make1(makeString("pgpool: unable to parse the query"));
- n->whereClause = (Node*)makeSimpleA_Expr(AEXPR_OP, "=", (Node*)c1, (Node*)c2, 0);
+ n->whereClause = (Node *) makeSimpleA_Expr(AEXPR_OP, "=", (Node *) c1, (Node *) c2, 0);
+
/*
- * Assign the node directly to the parsetree and exit the scanner
- * we don't want to keep parsing for information we don't need
+ * Assign the node directly to the parsetree and exit the scanner we don't
+ * want to keep parsing for information we don't need
*/
rs = makeNode(RawStmt);
- rs->stmt = (Node *)n;
+ rs->stmt = (Node *) n;
rs->stmt_location = 0;
rs->stmt_len = 0; /* might get changed later */
- return list_make1((Node *)rs);
+ return list_make1((Node *) rs);
}
+
/*
* from src/backend/commands/define.c
* Extract an int32 value from a DefElem.
@@ -243,7 +249,7 @@ defGetInt32(DefElem *def)
* same thing anyway, but notationally they're different).
*/
int
-base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp, core_yyscan_t yyscanner)
+base_yylex(YYSTYPE *lvalp, YYLTYPE * llocp, core_yyscan_t yyscanner)
{
base_yy_extra_type *yyextra = pg_yyget_extra(yyscanner);
int cur_token;
@@ -661,9 +667,9 @@ invalid_pair:
}
int
-minimal_base_yylex(YYSTYPE *lvalp, YYLTYPE *llocp, core_yyscan_t yyscanner)
+minimal_base_yylex(YYSTYPE *lvalp, YYLTYPE * llocp, core_yyscan_t yyscanner)
{
- return base_yylex(lvalp, llocp, yyscanner);
+ return base_yylex(lvalp, llocp, yyscanner);
}
static int
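raw_parser() above chooses between the minimal and the standard bison parser, and the get_dummy_*_query_tree() helpers supply a replacement statement when a query cannot be parsed, so later stages still receive a tree to walk. A minimal restatement of that fallback flow; DemoTree and the demo_* names are hypothetical.

    #include <stddef.h>
    #include <stdbool.h>

    typedef struct
    {
        const char *text;
    } DemoTree;

    static DemoTree demo_dummy = {"pgpool: unable to parse the query"};

    /* stand-in parser: pretend every parse fails */
    static DemoTree *
    demo_parse(const char *query, bool use_minimal)
    {
        (void) query;
        (void) use_minimal;
        return NULL;
    }

    static DemoTree *
    demo_raw_parser(const char *query, bool use_minimal)
    {
        DemoTree   *tree = demo_parse(query, use_minimal);

        if (tree == NULL)
            tree = &demo_dummy;     /* keep downstream code working */
        return tree;
    }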
diff --git a/src/parser/snprintf.c b/src/parser/snprintf.c
index 19e666b7c..c3564935e 100644
--- a/src/parser/snprintf.c
+++ b/src/parser/snprintf.c
@@ -188,7 +188,7 @@ typedef struct
FILE *stream; /* eventual output destination, or NULL */
int nchars; /* # chars sent to stream, or dropped */
bool failed; /* call is a failure; errno is set */
-} PrintfTarget;
+} PrintfTarget;
/*
* Info about the type and value of a formatting parameter. Note that we
@@ -204,7 +204,7 @@ typedef enum
ATYPE_LONGLONG,
ATYPE_DOUBLE,
ATYPE_CHARPTR
-} PrintfArgType;
+} PrintfArgType;
typedef union
{
@@ -213,11 +213,11 @@ typedef union
long long ll;
double d;
char *cptr;
-} PrintfArgValue;
+} PrintfArgValue;
-static void flushbuffer(PrintfTarget *target);
-static void dopr(PrintfTarget *target, const char *format, va_list args);
+static void flushbuffer(PrintfTarget * target);
+static void dopr(PrintfTarget * target, const char *format, va_list args);
/*
@@ -354,7 +354,7 @@ pg_printf(const char *fmt,...)
* buffer in any case. Call this only when target->stream is defined.
*/
static void
-flushbuffer(PrintfTarget *target)
+flushbuffer(PrintfTarget * target)
{
size_t nc = target->bufptr - target->bufstart;
@@ -376,25 +376,25 @@ flushbuffer(PrintfTarget *target)
static bool find_arguments(const char *format, va_list args,
- PrintfArgValue *argvalues);
+ PrintfArgValue * argvalues);
static void fmtstr(const char *value, int leftjust, int minlen, int maxwidth,
- int pointflag, PrintfTarget *target);
-static void fmtptr(const void *value, PrintfTarget *target);
+ int pointflag, PrintfTarget * target);
+static void fmtptr(const void *value, PrintfTarget * target);
static void fmtint(long long value, char type, int forcesign,
int leftjust, int minlen, int zpad, int precision, int pointflag,
- PrintfTarget *target);
-static void fmtchar(int value, int leftjust, int minlen, PrintfTarget *target);
+ PrintfTarget * target);
+static void fmtchar(int value, int leftjust, int minlen, PrintfTarget * target);
static void fmtfloat(double value, char type, int forcesign,
int leftjust, int minlen, int zpad, int precision, int pointflag,
- PrintfTarget *target);
-static void dostr(const char *str, int slen, PrintfTarget *target);
-static void dopr_outch(int c, PrintfTarget *target);
-static void dopr_outchmulti(int c, int slen, PrintfTarget *target);
+ PrintfTarget * target);
+static void dostr(const char *str, int slen, PrintfTarget * target);
+static void dopr_outch(int c, PrintfTarget * target);
+static void dopr_outchmulti(int c, int slen, PrintfTarget * target);
static int adjust_sign(int is_negative, int forcesign, int *signvalue);
static int compute_padlen(int minlen, int vallen, int leftjust);
static void leading_pad(int zpad, int signvalue, int *padlen,
- PrintfTarget *target);
-static void trailing_pad(int padlen, PrintfTarget *target);
+ PrintfTarget * target);
+static void trailing_pad(int padlen, PrintfTarget * target);
/*
* If strchrnul exists (it's a glibc-ism), it's a good bit faster than the
@@ -432,7 +432,7 @@ extern char *strchrnul(const char *s, int c);
* dopr(): the guts of *printf for all cases.
*/
static void
-dopr(PrintfTarget *target, const char *format, va_list args)
+dopr(PrintfTarget * target, const char *format, va_list args)
{
int save_errno = errno;
const char *first_pct = NULL;
@@ -806,7 +806,7 @@ bad_format:
*/
static bool
find_arguments(const char *format, va_list args,
- PrintfArgValue *argvalues)
+ PrintfArgValue * argvalues)
{
int ch;
bool afterstar;
@@ -1024,7 +1024,7 @@ nextch1:
static void
fmtstr(const char *value, int leftjust, int minlen, int maxwidth,
- int pointflag, PrintfTarget *target)
+ int pointflag, PrintfTarget * target)
{
int padlen,
vallen; /* amount to pad */
@@ -1052,7 +1052,7 @@ fmtstr(const char *value, int leftjust, int minlen, int maxwidth,
}
static void
-fmtptr(const void *value, PrintfTarget *target)
+fmtptr(const void *value, PrintfTarget * target)
{
int vallen;
char convert[64];
@@ -1068,7 +1068,7 @@ fmtptr(const void *value, PrintfTarget *target)
static void
fmtint(long long value, char type, int forcesign, int leftjust,
int minlen, int zpad, int precision, int pointflag,
- PrintfTarget *target)
+ PrintfTarget * target)
{
unsigned long long uvalue;
int base;
@@ -1177,7 +1177,7 @@ fmtint(long long value, char type, int forcesign, int leftjust,
}
static void
-fmtchar(int value, int leftjust, int minlen, PrintfTarget *target)
+fmtchar(int value, int leftjust, int minlen, PrintfTarget * target)
{
int padlen; /* amount to pad */
@@ -1197,7 +1197,7 @@ fmtchar(int value, int leftjust, int minlen, PrintfTarget *target)
static void
fmtfloat(double value, char type, int forcesign, int leftjust,
int minlen, int zpad, int precision, int pointflag,
- PrintfTarget *target)
+ PrintfTarget * target)
{
int signvalue = 0;
int prec;
@@ -1433,7 +1433,7 @@ fail:
static void
-dostr(const char *str, int slen, PrintfTarget *target)
+dostr(const char *str, int slen, PrintfTarget * target)
{
/* fast path for common case of slen == 1 */
if (slen == 1)
@@ -1470,7 +1470,7 @@ dostr(const char *str, int slen, PrintfTarget *target)
}
static void
-dopr_outch(int c, PrintfTarget *target)
+dopr_outch(int c, PrintfTarget * target)
{
if (target->bufend != NULL && target->bufptr >= target->bufend)
{
@@ -1486,7 +1486,7 @@ dopr_outch(int c, PrintfTarget *target)
}
static void
-dopr_outchmulti(int c, int slen, PrintfTarget *target)
+dopr_outchmulti(int c, int slen, PrintfTarget * target)
{
/* fast path for common case of slen == 1 */
if (slen == 1)
@@ -1551,7 +1551,7 @@ compute_padlen(int minlen, int vallen, int leftjust)
static void
-leading_pad(int zpad, int signvalue, int *padlen, PrintfTarget *target)
+leading_pad(int zpad, int signvalue, int *padlen, PrintfTarget * target)
{
int maxpad;
@@ -1587,7 +1587,7 @@ leading_pad(int zpad, int signvalue, int *padlen, PrintfTarget *target)
static void
-trailing_pad(int padlen, PrintfTarget *target)
+trailing_pad(int padlen, PrintfTarget * target)
{
if (padlen < 0)
dopr_outchmulti(' ', -padlen, target);
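leading_pad() and trailing_pad() above emit the blanks or zeroes around a formatted value; the amount is simply field width minus value length, negated for left-justified fields. The arithmetic restated below; compute_padlen() itself is not shown in this hunk, so treat this as an assumption about its behavior.

    static int
    demo_padlen(int minlen, int vallen, int leftjust)
    {
        int     padlen = minlen - vallen;

        if (padlen < 0)
            padlen = 0;
        if (leftjust)
            padlen = -padlen;       /* negative means pad on the right */
        return padlen;
    }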
diff --git a/src/parser/wchar.c b/src/parser/wchar.c
index 9dbae197c..1e3a1b0ae 100644
--- a/src/parser/wchar.c
+++ b/src/parser/wchar.c
@@ -708,328 +708,328 @@ ucs_wcwidth(pg_wchar ucs)
{
/* sorted list of non-overlapping intervals of non-spacing characters */
static const struct mbinterval nonspacing[] = {
- {0x00AD, 0x00AD},
- {0x0300, 0x036F},
- {0x0483, 0x0489},
- {0x0591, 0x05BD},
- {0x05BF, 0x05BF},
- {0x05C1, 0x05C2},
- {0x05C4, 0x05C5},
- {0x05C7, 0x05C7},
- {0x0600, 0x0605},
- {0x0610, 0x061A},
- {0x061C, 0x061C},
- {0x064B, 0x065F},
- {0x0670, 0x0670},
- {0x06D6, 0x06DD},
- {0x06DF, 0x06E4},
- {0x06E7, 0x06E8},
- {0x06EA, 0x06ED},
- {0x070F, 0x070F},
- {0x0711, 0x0711},
- {0x0730, 0x074A},
- {0x07A6, 0x07B0},
- {0x07EB, 0x07F3},
- {0x07FD, 0x07FD},
- {0x0816, 0x0819},
- {0x081B, 0x0823},
- {0x0825, 0x0827},
- {0x0829, 0x082D},
- {0x0859, 0x085B},
- {0x0890, 0x089F},
- {0x08CA, 0x0902},
- {0x093A, 0x093A},
- {0x093C, 0x093C},
- {0x0941, 0x0948},
- {0x094D, 0x094D},
- {0x0951, 0x0957},
- {0x0962, 0x0963},
- {0x0981, 0x0981},
- {0x09BC, 0x09BC},
- {0x09C1, 0x09C4},
- {0x09CD, 0x09CD},
- {0x09E2, 0x09E3},
- {0x09FE, 0x0A02},
- {0x0A3C, 0x0A3C},
- {0x0A41, 0x0A51},
- {0x0A70, 0x0A71},
- {0x0A75, 0x0A75},
- {0x0A81, 0x0A82},
- {0x0ABC, 0x0ABC},
- {0x0AC1, 0x0AC8},
- {0x0ACD, 0x0ACD},
- {0x0AE2, 0x0AE3},
- {0x0AFA, 0x0B01},
- {0x0B3C, 0x0B3C},
- {0x0B3F, 0x0B3F},
- {0x0B41, 0x0B44},
- {0x0B4D, 0x0B56},
- {0x0B62, 0x0B63},
- {0x0B82, 0x0B82},
- {0x0BC0, 0x0BC0},
- {0x0BCD, 0x0BCD},
- {0x0C00, 0x0C00},
- {0x0C04, 0x0C04},
- {0x0C3C, 0x0C3C},
- {0x0C3E, 0x0C40},
- {0x0C46, 0x0C56},
- {0x0C62, 0x0C63},
- {0x0C81, 0x0C81},
- {0x0CBC, 0x0CBC},
- {0x0CBF, 0x0CBF},
- {0x0CC6, 0x0CC6},
- {0x0CCC, 0x0CCD},
- {0x0CE2, 0x0CE3},
- {0x0D00, 0x0D01},
- {0x0D3B, 0x0D3C},
- {0x0D41, 0x0D44},
- {0x0D4D, 0x0D4D},
- {0x0D62, 0x0D63},
- {0x0D81, 0x0D81},
- {0x0DCA, 0x0DCA},
- {0x0DD2, 0x0DD6},
- {0x0E31, 0x0E31},
- {0x0E34, 0x0E3A},
- {0x0E47, 0x0E4E},
- {0x0EB1, 0x0EB1},
- {0x0EB4, 0x0EBC},
- {0x0EC8, 0x0ECE},
- {0x0F18, 0x0F19},
- {0x0F35, 0x0F35},
- {0x0F37, 0x0F37},
- {0x0F39, 0x0F39},
- {0x0F71, 0x0F7E},
- {0x0F80, 0x0F84},
- {0x0F86, 0x0F87},
- {0x0F8D, 0x0FBC},
- {0x0FC6, 0x0FC6},
- {0x102D, 0x1030},
- {0x1032, 0x1037},
- {0x1039, 0x103A},
- {0x103D, 0x103E},
- {0x1058, 0x1059},
- {0x105E, 0x1060},
- {0x1071, 0x1074},
- {0x1082, 0x1082},
- {0x1085, 0x1086},
- {0x108D, 0x108D},
- {0x109D, 0x109D},
- {0x135D, 0x135F},
- {0x1712, 0x1714},
- {0x1732, 0x1733},
- {0x1752, 0x1753},
- {0x1772, 0x1773},
- {0x17B4, 0x17B5},
- {0x17B7, 0x17BD},
- {0x17C6, 0x17C6},
- {0x17C9, 0x17D3},
- {0x17DD, 0x17DD},
- {0x180B, 0x180F},
- {0x1885, 0x1886},
- {0x18A9, 0x18A9},
- {0x1920, 0x1922},
- {0x1927, 0x1928},
- {0x1932, 0x1932},
- {0x1939, 0x193B},
- {0x1A17, 0x1A18},
- {0x1A1B, 0x1A1B},
- {0x1A56, 0x1A56},
- {0x1A58, 0x1A60},
- {0x1A62, 0x1A62},
- {0x1A65, 0x1A6C},
- {0x1A73, 0x1A7F},
- {0x1AB0, 0x1B03},
- {0x1B34, 0x1B34},
- {0x1B36, 0x1B3A},
- {0x1B3C, 0x1B3C},
- {0x1B42, 0x1B42},
- {0x1B6B, 0x1B73},
- {0x1B80, 0x1B81},
- {0x1BA2, 0x1BA5},
- {0x1BA8, 0x1BA9},
- {0x1BAB, 0x1BAD},
- {0x1BE6, 0x1BE6},
- {0x1BE8, 0x1BE9},
- {0x1BED, 0x1BED},
- {0x1BEF, 0x1BF1},
- {0x1C2C, 0x1C33},
- {0x1C36, 0x1C37},
- {0x1CD0, 0x1CD2},
- {0x1CD4, 0x1CE0},
- {0x1CE2, 0x1CE8},
- {0x1CED, 0x1CED},
- {0x1CF4, 0x1CF4},
- {0x1CF8, 0x1CF9},
- {0x1DC0, 0x1DFF},
- {0x200B, 0x200F},
- {0x202A, 0x202E},
- {0x2060, 0x206F},
- {0x20D0, 0x20F0},
- {0x2CEF, 0x2CF1},
- {0x2D7F, 0x2D7F},
- {0x2DE0, 0x2DFF},
- {0x302A, 0x302D},
- {0x3099, 0x309A},
- {0xA66F, 0xA672},
- {0xA674, 0xA67D},
- {0xA69E, 0xA69F},
- {0xA6F0, 0xA6F1},
- {0xA802, 0xA802},
- {0xA806, 0xA806},
- {0xA80B, 0xA80B},
- {0xA825, 0xA826},
- {0xA82C, 0xA82C},
- {0xA8C4, 0xA8C5},
- {0xA8E0, 0xA8F1},
- {0xA8FF, 0xA8FF},
- {0xA926, 0xA92D},
- {0xA947, 0xA951},
- {0xA980, 0xA982},
- {0xA9B3, 0xA9B3},
- {0xA9B6, 0xA9B9},
- {0xA9BC, 0xA9BD},
- {0xA9E5, 0xA9E5},
- {0xAA29, 0xAA2E},
- {0xAA31, 0xAA32},
- {0xAA35, 0xAA36},
- {0xAA43, 0xAA43},
- {0xAA4C, 0xAA4C},
- {0xAA7C, 0xAA7C},
- {0xAAB0, 0xAAB0},
- {0xAAB2, 0xAAB4},
- {0xAAB7, 0xAAB8},
- {0xAABE, 0xAABF},
- {0xAAC1, 0xAAC1},
- {0xAAEC, 0xAAED},
- {0xAAF6, 0xAAF6},
- {0xABE5, 0xABE5},
- {0xABE8, 0xABE8},
- {0xABED, 0xABED},
- {0xFB1E, 0xFB1E},
- {0xFE00, 0xFE0F},
- {0xFE20, 0xFE2F},
- {0xFEFF, 0xFEFF},
- {0xFFF9, 0xFFFB},
- {0x101FD, 0x101FD},
- {0x102E0, 0x102E0},
- {0x10376, 0x1037A},
- {0x10A01, 0x10A0F},
- {0x10A38, 0x10A3F},
- {0x10AE5, 0x10AE6},
- {0x10D24, 0x10D27},
- {0x10EAB, 0x10EAC},
- {0x10EFD, 0x10EFF},
- {0x10F46, 0x10F50},
- {0x10F82, 0x10F85},
- {0x11001, 0x11001},
- {0x11038, 0x11046},
- {0x11070, 0x11070},
- {0x11073, 0x11074},
- {0x1107F, 0x11081},
- {0x110B3, 0x110B6},
- {0x110B9, 0x110BA},
- {0x110BD, 0x110BD},
- {0x110C2, 0x110CD},
- {0x11100, 0x11102},
- {0x11127, 0x1112B},
- {0x1112D, 0x11134},
- {0x11173, 0x11173},
- {0x11180, 0x11181},
- {0x111B6, 0x111BE},
- {0x111C9, 0x111CC},
- {0x111CF, 0x111CF},
- {0x1122F, 0x11231},
- {0x11234, 0x11234},
- {0x11236, 0x11237},
- {0x1123E, 0x1123E},
- {0x11241, 0x11241},
- {0x112DF, 0x112DF},
- {0x112E3, 0x112EA},
- {0x11300, 0x11301},
- {0x1133B, 0x1133C},
- {0x11340, 0x11340},
- {0x11366, 0x11374},
- {0x11438, 0x1143F},
- {0x11442, 0x11444},
- {0x11446, 0x11446},
- {0x1145E, 0x1145E},
- {0x114B3, 0x114B8},
- {0x114BA, 0x114BA},
- {0x114BF, 0x114C0},
- {0x114C2, 0x114C3},
- {0x115B2, 0x115B5},
- {0x115BC, 0x115BD},
- {0x115BF, 0x115C0},
- {0x115DC, 0x115DD},
- {0x11633, 0x1163A},
- {0x1163D, 0x1163D},
- {0x1163F, 0x11640},
- {0x116AB, 0x116AB},
- {0x116AD, 0x116AD},
- {0x116B0, 0x116B5},
- {0x116B7, 0x116B7},
- {0x1171D, 0x1171F},
- {0x11722, 0x11725},
- {0x11727, 0x1172B},
- {0x1182F, 0x11837},
- {0x11839, 0x1183A},
- {0x1193B, 0x1193C},
- {0x1193E, 0x1193E},
- {0x11943, 0x11943},
- {0x119D4, 0x119DB},
- {0x119E0, 0x119E0},
- {0x11A01, 0x11A0A},
- {0x11A33, 0x11A38},
- {0x11A3B, 0x11A3E},
- {0x11A47, 0x11A47},
- {0x11A51, 0x11A56},
- {0x11A59, 0x11A5B},
- {0x11A8A, 0x11A96},
- {0x11A98, 0x11A99},
- {0x11C30, 0x11C3D},
- {0x11C3F, 0x11C3F},
- {0x11C92, 0x11CA7},
- {0x11CAA, 0x11CB0},
- {0x11CB2, 0x11CB3},
- {0x11CB5, 0x11CB6},
- {0x11D31, 0x11D45},
- {0x11D47, 0x11D47},
- {0x11D90, 0x11D91},
- {0x11D95, 0x11D95},
- {0x11D97, 0x11D97},
- {0x11EF3, 0x11EF4},
- {0x11F00, 0x11F01},
- {0x11F36, 0x11F3A},
- {0x11F40, 0x11F40},
- {0x11F42, 0x11F42},
- {0x13430, 0x13440},
- {0x13447, 0x13455},
- {0x16AF0, 0x16AF4},
- {0x16B30, 0x16B36},
- {0x16F4F, 0x16F4F},
- {0x16F8F, 0x16F92},
- {0x16FE4, 0x16FE4},
- {0x1BC9D, 0x1BC9E},
- {0x1BCA0, 0x1CF46},
- {0x1D167, 0x1D169},
- {0x1D173, 0x1D182},
- {0x1D185, 0x1D18B},
- {0x1D1AA, 0x1D1AD},
- {0x1D242, 0x1D244},
- {0x1DA00, 0x1DA36},
- {0x1DA3B, 0x1DA6C},
- {0x1DA75, 0x1DA75},
- {0x1DA84, 0x1DA84},
- {0x1DA9B, 0x1DAAF},
- {0x1E000, 0x1E02A},
- {0x1E08F, 0x1E08F},
- {0x1E130, 0x1E136},
- {0x1E2AE, 0x1E2AE},
- {0x1E2EC, 0x1E2EF},
- {0x1E4EC, 0x1E4EF},
- {0x1E8D0, 0x1E8D6},
- {0x1E944, 0x1E94A},
- {0xE0001, 0xE01EF},
+ {0x00AD, 0x00AD},
+ {0x0300, 0x036F},
+ {0x0483, 0x0489},
+ {0x0591, 0x05BD},
+ {0x05BF, 0x05BF},
+ {0x05C1, 0x05C2},
+ {0x05C4, 0x05C5},
+ {0x05C7, 0x05C7},
+ {0x0600, 0x0605},
+ {0x0610, 0x061A},
+ {0x061C, 0x061C},
+ {0x064B, 0x065F},
+ {0x0670, 0x0670},
+ {0x06D6, 0x06DD},
+ {0x06DF, 0x06E4},
+ {0x06E7, 0x06E8},
+ {0x06EA, 0x06ED},
+ {0x070F, 0x070F},
+ {0x0711, 0x0711},
+ {0x0730, 0x074A},
+ {0x07A6, 0x07B0},
+ {0x07EB, 0x07F3},
+ {0x07FD, 0x07FD},
+ {0x0816, 0x0819},
+ {0x081B, 0x0823},
+ {0x0825, 0x0827},
+ {0x0829, 0x082D},
+ {0x0859, 0x085B},
+ {0x0890, 0x089F},
+ {0x08CA, 0x0902},
+ {0x093A, 0x093A},
+ {0x093C, 0x093C},
+ {0x0941, 0x0948},
+ {0x094D, 0x094D},
+ {0x0951, 0x0957},
+ {0x0962, 0x0963},
+ {0x0981, 0x0981},
+ {0x09BC, 0x09BC},
+ {0x09C1, 0x09C4},
+ {0x09CD, 0x09CD},
+ {0x09E2, 0x09E3},
+ {0x09FE, 0x0A02},
+ {0x0A3C, 0x0A3C},
+ {0x0A41, 0x0A51},
+ {0x0A70, 0x0A71},
+ {0x0A75, 0x0A75},
+ {0x0A81, 0x0A82},
+ {0x0ABC, 0x0ABC},
+ {0x0AC1, 0x0AC8},
+ {0x0ACD, 0x0ACD},
+ {0x0AE2, 0x0AE3},
+ {0x0AFA, 0x0B01},
+ {0x0B3C, 0x0B3C},
+ {0x0B3F, 0x0B3F},
+ {0x0B41, 0x0B44},
+ {0x0B4D, 0x0B56},
+ {0x0B62, 0x0B63},
+ {0x0B82, 0x0B82},
+ {0x0BC0, 0x0BC0},
+ {0x0BCD, 0x0BCD},
+ {0x0C00, 0x0C00},
+ {0x0C04, 0x0C04},
+ {0x0C3C, 0x0C3C},
+ {0x0C3E, 0x0C40},
+ {0x0C46, 0x0C56},
+ {0x0C62, 0x0C63},
+ {0x0C81, 0x0C81},
+ {0x0CBC, 0x0CBC},
+ {0x0CBF, 0x0CBF},
+ {0x0CC6, 0x0CC6},
+ {0x0CCC, 0x0CCD},
+ {0x0CE2, 0x0CE3},
+ {0x0D00, 0x0D01},
+ {0x0D3B, 0x0D3C},
+ {0x0D41, 0x0D44},
+ {0x0D4D, 0x0D4D},
+ {0x0D62, 0x0D63},
+ {0x0D81, 0x0D81},
+ {0x0DCA, 0x0DCA},
+ {0x0DD2, 0x0DD6},
+ {0x0E31, 0x0E31},
+ {0x0E34, 0x0E3A},
+ {0x0E47, 0x0E4E},
+ {0x0EB1, 0x0EB1},
+ {0x0EB4, 0x0EBC},
+ {0x0EC8, 0x0ECE},
+ {0x0F18, 0x0F19},
+ {0x0F35, 0x0F35},
+ {0x0F37, 0x0F37},
+ {0x0F39, 0x0F39},
+ {0x0F71, 0x0F7E},
+ {0x0F80, 0x0F84},
+ {0x0F86, 0x0F87},
+ {0x0F8D, 0x0FBC},
+ {0x0FC6, 0x0FC6},
+ {0x102D, 0x1030},
+ {0x1032, 0x1037},
+ {0x1039, 0x103A},
+ {0x103D, 0x103E},
+ {0x1058, 0x1059},
+ {0x105E, 0x1060},
+ {0x1071, 0x1074},
+ {0x1082, 0x1082},
+ {0x1085, 0x1086},
+ {0x108D, 0x108D},
+ {0x109D, 0x109D},
+ {0x135D, 0x135F},
+ {0x1712, 0x1714},
+ {0x1732, 0x1733},
+ {0x1752, 0x1753},
+ {0x1772, 0x1773},
+ {0x17B4, 0x17B5},
+ {0x17B7, 0x17BD},
+ {0x17C6, 0x17C6},
+ {0x17C9, 0x17D3},
+ {0x17DD, 0x17DD},
+ {0x180B, 0x180F},
+ {0x1885, 0x1886},
+ {0x18A9, 0x18A9},
+ {0x1920, 0x1922},
+ {0x1927, 0x1928},
+ {0x1932, 0x1932},
+ {0x1939, 0x193B},
+ {0x1A17, 0x1A18},
+ {0x1A1B, 0x1A1B},
+ {0x1A56, 0x1A56},
+ {0x1A58, 0x1A60},
+ {0x1A62, 0x1A62},
+ {0x1A65, 0x1A6C},
+ {0x1A73, 0x1A7F},
+ {0x1AB0, 0x1B03},
+ {0x1B34, 0x1B34},
+ {0x1B36, 0x1B3A},
+ {0x1B3C, 0x1B3C},
+ {0x1B42, 0x1B42},
+ {0x1B6B, 0x1B73},
+ {0x1B80, 0x1B81},
+ {0x1BA2, 0x1BA5},
+ {0x1BA8, 0x1BA9},
+ {0x1BAB, 0x1BAD},
+ {0x1BE6, 0x1BE6},
+ {0x1BE8, 0x1BE9},
+ {0x1BED, 0x1BED},
+ {0x1BEF, 0x1BF1},
+ {0x1C2C, 0x1C33},
+ {0x1C36, 0x1C37},
+ {0x1CD0, 0x1CD2},
+ {0x1CD4, 0x1CE0},
+ {0x1CE2, 0x1CE8},
+ {0x1CED, 0x1CED},
+ {0x1CF4, 0x1CF4},
+ {0x1CF8, 0x1CF9},
+ {0x1DC0, 0x1DFF},
+ {0x200B, 0x200F},
+ {0x202A, 0x202E},
+ {0x2060, 0x206F},
+ {0x20D0, 0x20F0},
+ {0x2CEF, 0x2CF1},
+ {0x2D7F, 0x2D7F},
+ {0x2DE0, 0x2DFF},
+ {0x302A, 0x302D},
+ {0x3099, 0x309A},
+ {0xA66F, 0xA672},
+ {0xA674, 0xA67D},
+ {0xA69E, 0xA69F},
+ {0xA6F0, 0xA6F1},
+ {0xA802, 0xA802},
+ {0xA806, 0xA806},
+ {0xA80B, 0xA80B},
+ {0xA825, 0xA826},
+ {0xA82C, 0xA82C},
+ {0xA8C4, 0xA8C5},
+ {0xA8E0, 0xA8F1},
+ {0xA8FF, 0xA8FF},
+ {0xA926, 0xA92D},
+ {0xA947, 0xA951},
+ {0xA980, 0xA982},
+ {0xA9B3, 0xA9B3},
+ {0xA9B6, 0xA9B9},
+ {0xA9BC, 0xA9BD},
+ {0xA9E5, 0xA9E5},
+ {0xAA29, 0xAA2E},
+ {0xAA31, 0xAA32},
+ {0xAA35, 0xAA36},
+ {0xAA43, 0xAA43},
+ {0xAA4C, 0xAA4C},
+ {0xAA7C, 0xAA7C},
+ {0xAAB0, 0xAAB0},
+ {0xAAB2, 0xAAB4},
+ {0xAAB7, 0xAAB8},
+ {0xAABE, 0xAABF},
+ {0xAAC1, 0xAAC1},
+ {0xAAEC, 0xAAED},
+ {0xAAF6, 0xAAF6},
+ {0xABE5, 0xABE5},
+ {0xABE8, 0xABE8},
+ {0xABED, 0xABED},
+ {0xFB1E, 0xFB1E},
+ {0xFE00, 0xFE0F},
+ {0xFE20, 0xFE2F},
+ {0xFEFF, 0xFEFF},
+ {0xFFF9, 0xFFFB},
+ {0x101FD, 0x101FD},
+ {0x102E0, 0x102E0},
+ {0x10376, 0x1037A},
+ {0x10A01, 0x10A0F},
+ {0x10A38, 0x10A3F},
+ {0x10AE5, 0x10AE6},
+ {0x10D24, 0x10D27},
+ {0x10EAB, 0x10EAC},
+ {0x10EFD, 0x10EFF},
+ {0x10F46, 0x10F50},
+ {0x10F82, 0x10F85},
+ {0x11001, 0x11001},
+ {0x11038, 0x11046},
+ {0x11070, 0x11070},
+ {0x11073, 0x11074},
+ {0x1107F, 0x11081},
+ {0x110B3, 0x110B6},
+ {0x110B9, 0x110BA},
+ {0x110BD, 0x110BD},
+ {0x110C2, 0x110CD},
+ {0x11100, 0x11102},
+ {0x11127, 0x1112B},
+ {0x1112D, 0x11134},
+ {0x11173, 0x11173},
+ {0x11180, 0x11181},
+ {0x111B6, 0x111BE},
+ {0x111C9, 0x111CC},
+ {0x111CF, 0x111CF},
+ {0x1122F, 0x11231},
+ {0x11234, 0x11234},
+ {0x11236, 0x11237},
+ {0x1123E, 0x1123E},
+ {0x11241, 0x11241},
+ {0x112DF, 0x112DF},
+ {0x112E3, 0x112EA},
+ {0x11300, 0x11301},
+ {0x1133B, 0x1133C},
+ {0x11340, 0x11340},
+ {0x11366, 0x11374},
+ {0x11438, 0x1143F},
+ {0x11442, 0x11444},
+ {0x11446, 0x11446},
+ {0x1145E, 0x1145E},
+ {0x114B3, 0x114B8},
+ {0x114BA, 0x114BA},
+ {0x114BF, 0x114C0},
+ {0x114C2, 0x114C3},
+ {0x115B2, 0x115B5},
+ {0x115BC, 0x115BD},
+ {0x115BF, 0x115C0},
+ {0x115DC, 0x115DD},
+ {0x11633, 0x1163A},
+ {0x1163D, 0x1163D},
+ {0x1163F, 0x11640},
+ {0x116AB, 0x116AB},
+ {0x116AD, 0x116AD},
+ {0x116B0, 0x116B5},
+ {0x116B7, 0x116B7},
+ {0x1171D, 0x1171F},
+ {0x11722, 0x11725},
+ {0x11727, 0x1172B},
+ {0x1182F, 0x11837},
+ {0x11839, 0x1183A},
+ {0x1193B, 0x1193C},
+ {0x1193E, 0x1193E},
+ {0x11943, 0x11943},
+ {0x119D4, 0x119DB},
+ {0x119E0, 0x119E0},
+ {0x11A01, 0x11A0A},
+ {0x11A33, 0x11A38},
+ {0x11A3B, 0x11A3E},
+ {0x11A47, 0x11A47},
+ {0x11A51, 0x11A56},
+ {0x11A59, 0x11A5B},
+ {0x11A8A, 0x11A96},
+ {0x11A98, 0x11A99},
+ {0x11C30, 0x11C3D},
+ {0x11C3F, 0x11C3F},
+ {0x11C92, 0x11CA7},
+ {0x11CAA, 0x11CB0},
+ {0x11CB2, 0x11CB3},
+ {0x11CB5, 0x11CB6},
+ {0x11D31, 0x11D45},
+ {0x11D47, 0x11D47},
+ {0x11D90, 0x11D91},
+ {0x11D95, 0x11D95},
+ {0x11D97, 0x11D97},
+ {0x11EF3, 0x11EF4},
+ {0x11F00, 0x11F01},
+ {0x11F36, 0x11F3A},
+ {0x11F40, 0x11F40},
+ {0x11F42, 0x11F42},
+ {0x13430, 0x13440},
+ {0x13447, 0x13455},
+ {0x16AF0, 0x16AF4},
+ {0x16B30, 0x16B36},
+ {0x16F4F, 0x16F4F},
+ {0x16F8F, 0x16F92},
+ {0x16FE4, 0x16FE4},
+ {0x1BC9D, 0x1BC9E},
+ {0x1BCA0, 0x1CF46},
+ {0x1D167, 0x1D169},
+ {0x1D173, 0x1D182},
+ {0x1D185, 0x1D18B},
+ {0x1D1AA, 0x1D1AD},
+ {0x1D242, 0x1D244},
+ {0x1DA00, 0x1DA36},
+ {0x1DA3B, 0x1DA6C},
+ {0x1DA75, 0x1DA75},
+ {0x1DA84, 0x1DA84},
+ {0x1DA9B, 0x1DAAF},
+ {0x1E000, 0x1E02A},
+ {0x1E08F, 0x1E08F},
+ {0x1E130, 0x1E136},
+ {0x1E2AE, 0x1E2AE},
+ {0x1E2EC, 0x1E2EF},
+ {0x1E4EC, 0x1E4EF},
+ {0x1E8D0, 0x1E8D6},
+ {0x1E944, 0x1E94A},
+ {0xE0001, 0xE01EF},
};
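ucs_wcwidth() treats a code point as zero-width when it falls inside one of the sorted, non-overlapping intervals listed above; such tables are normally probed with a binary search. A minimal version of that lookup; the interval type and function names are illustrative, and whether pgpool-II uses exactly this helper is an assumption.

    typedef struct
    {
        unsigned int first;
        unsigned int last;
    } demo_interval;

    /* return 1 if ucs is covered by table[0..max], else 0 */
    static int
    demo_in_table(unsigned int ucs, const demo_interval *table, int max)
    {
        int     min = 0;

        if (ucs < table[0].first || ucs > table[max].last)
            return 0;
        while (max >= min)
        {
            int     mid = (min + max) / 2;

            if (ucs > table[mid].last)
                min = mid + 1;
            else if (ucs < table[mid].first)
                max = mid - 1;
            else
                return 1;
        }
        return 0;
    }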
static const struct mbinterval east_asian_fw[] = {
@@ -2437,7 +2437,7 @@ static bool
pg_generic_charinc(unsigned char *charptr, int len)
{
unsigned char *lastbyte = charptr + len - 1;
- mbchar_verifier mbverify;
+ mbchar_verifier mbverify;
/* We can just invoke the character verifier directly. */
mbverify = pg_wchar_table[GetDatabaseEncoding()].mbverifychar;
@@ -2892,7 +2892,7 @@ pg_unicode_to_server(pg_wchar c, unsigned char *s)
int
pg_verify_mbstr_len(int encoding, const char *mbstr, int len, bool noError)
{
- mbchar_verifier mbverify;
+ mbchar_verifier mbverify;
int mb_len;
Assert(PG_VALID_ENCODING(encoding));
diff --git a/src/pcp_con/pcp_child.c b/src/pcp_con/pcp_child.c
index f06c9ed1e..e07c8897e 100644
--- a/src/pcp_con/pcp_child.c
+++ b/src/pcp_con/pcp_child.c
@@ -86,7 +86,7 @@ static void start_pcp_command_processor_process(int port, int *fds);
static void pcp_child_will_die(int code, Datum arg);
static void pcp_kill_all_children(int sig);
static void reaper(void);
-static bool pcp_unix_fds_not_isset(int *fds, int num_pcp_fds, fd_set* opt);
+static bool pcp_unix_fds_not_isset(int *fds, int num_pcp_fds, fd_set *opt);
#define CHECK_RESTART_REQUEST \
@@ -191,7 +191,7 @@ pcp_do_accept(int *fds)
int rfds;
int fd = 0;
int afd;
- int *walk;
+ int *walk;
int nsocks = 0;
SockAddr saddr;
@@ -256,9 +256,11 @@ pcp_do_accept(int *fds)
* Set no delay if AF_INET socket. Not sure if this is really necessary
* but PostgreSQL does this.
*/
- if (pcp_unix_fds_not_isset(fds, pool_config->num_pcp_socket_directories, &rmask)) /* fds are UNIX domain socket for pcp process */
+ if (pcp_unix_fds_not_isset(fds, pool_config->num_pcp_socket_directories, &rmask)) /* fds are UNIX domain
+ * socket for pcp
+ * process */
{
- int on;
+ int on;
on = 1;
if (setsockopt(afd, IPPROTO_TCP, TCP_NODELAY,
@@ -276,17 +278,18 @@ pcp_do_accept(int *fds)
}
static bool
-pcp_unix_fds_not_isset(int* fds, int num_pcp_fds, fd_set* opt)
+pcp_unix_fds_not_isset(int *fds, int num_pcp_fds, fd_set *opt)
{
- int i;
- for (i = 0; i < num_pcp_fds; i++)
- {
- if (!FD_ISSET(fds[i], opt))
- continue;
-
- return false;
- }
- return true;
+ int i;
+
+ for (i = 0; i < num_pcp_fds; i++)
+ {
+ if (!FD_ISSET(fds[i], opt))
+ continue;
+
+ return false;
+ }
+ return true;
}
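pcp_unix_fds_not_isset() above answers whether the readiness reported by select() came from one of the UNIX-domain pcp sockets; when it did not, the caller treats the accepted socket as AF_INET and sets TCP_NODELAY on it, as in the earlier hunk. The caller-side shape, reduced to a sketch:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static void
    demo_tune_accepted_socket(int afd, int came_from_unix_socket)
    {
        int     on = 1;

        if (!came_from_unix_socket) /* AF_INET connection */
            (void) setsockopt(afd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
    }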
/*
@@ -296,7 +299,7 @@ static void
start_pcp_command_processor_process(int port, int *fds)
{
pid_t pid = fork();
- int *walk;
+ int *walk;
if (pid == 0) /* child */
{
@@ -428,7 +431,7 @@ pcp_exit_handler(int sig)
foreach(lc, pcp_worker_children)
{
- int pid;
+ int pid;
do
{
diff --git a/src/pcp_con/pcp_worker.c b/src/pcp_con/pcp_worker.c
index a5b88a898..a9d02e011 100644
--- a/src/pcp_con/pcp_worker.c
+++ b/src/pcp_con/pcp_worker.c
@@ -71,34 +71,34 @@ static RETSIGTYPE wakeup_handler_child(int sig);
static void unset_nonblock(int fd);
static int user_authenticate(char *buf, char *passwd_file, char *salt, int salt_len);
-static void process_authentication(PCP_CONNECTION * frontend, char *buf, char *salt, int *random_salt);
-static void send_md5salt(PCP_CONNECTION * frontend, char *salt);
+static void process_authentication(PCP_CONNECTION *frontend, char *buf, char *salt, int *random_salt);
+static void send_md5salt(PCP_CONNECTION *frontend, char *salt);
static void pcp_process_command(char tos, char *buf, int buf_len);
static int pool_detach_node(int node_id, bool gracefully, bool switchover);
static int pool_promote_node(int node_id, bool gracefully);
-static void inform_process_count(PCP_CONNECTION * frontend);
-static void inform_process_info(PCP_CONNECTION * frontend, char *buf);
-static void inform_watchdog_info(PCP_CONNECTION * frontend, char *buf);
-static void inform_node_info(PCP_CONNECTION * frontend, char *buf);
-static void inform_node_count(PCP_CONNECTION * frontend);
-static void process_reload_config(PCP_CONNECTION * frontend,char scope);
-static void process_log_rotate(PCP_CONNECTION * frontend,char scope);
+static void inform_process_count(PCP_CONNECTION *frontend);
+static void inform_process_info(PCP_CONNECTION *frontend, char *buf);
+static void inform_watchdog_info(PCP_CONNECTION *frontend, char *buf);
+static void inform_node_info(PCP_CONNECTION *frontend, char *buf);
+static void inform_node_count(PCP_CONNECTION *frontend);
+static void process_reload_config(PCP_CONNECTION *frontend, char scope);
+static void process_log_rotate(PCP_CONNECTION *frontend, char scope);
static void inform_health_check_stats(PCP_CONNECTION *frontend, char *buf);
-static void process_detach_node(PCP_CONNECTION * frontend, char *buf, char tos);
-static void process_attach_node(PCP_CONNECTION * frontend, char *buf);
-static void process_recovery_request(PCP_CONNECTION * frontend, char *buf);
-static void process_status_request(PCP_CONNECTION * frontend);
-static void process_promote_node(PCP_CONNECTION * frontend, char *buf, char tos);
-static void process_shutdown_request(PCP_CONNECTION * frontend, char mode, char tos);
-static void process_set_configuration_parameter(PCP_CONNECTION * frontend, char *buf, int len);
-static void process_invalidate_query_cache(PCP_CONNECTION * frontend);
+static void process_detach_node(PCP_CONNECTION *frontend, char *buf, char tos);
+static void process_attach_node(PCP_CONNECTION *frontend, char *buf);
+static void process_recovery_request(PCP_CONNECTION *frontend, char *buf);
+static void process_status_request(PCP_CONNECTION *frontend);
+static void process_promote_node(PCP_CONNECTION *frontend, char *buf, char tos);
+static void process_shutdown_request(PCP_CONNECTION *frontend, char mode, char tos);
+static void process_set_configuration_parameter(PCP_CONNECTION *frontend, char *buf, int len);
+static void process_invalidate_query_cache(PCP_CONNECTION *frontend);
static void pcp_worker_will_go_down(int code, Datum arg);
-static void do_pcp_flush(PCP_CONNECTION * frontend);
-static void do_pcp_read(PCP_CONNECTION * pc, void *buf, int len);
+static void do_pcp_flush(PCP_CONNECTION *frontend);
+static void do_pcp_read(PCP_CONNECTION *pc, void *buf, int len);
/*
* main entry point of pcp worker child process
@@ -167,7 +167,7 @@ pcp_worker_main(int port)
for (;;)
{
- char *buf = NULL;
+ char *buf = NULL;
MemoryContextSwitchTo(PCPMemoryContext);
MemoryContextResetAndDeleteChildren(PCPMemoryContext);
@@ -229,7 +229,10 @@ pcp_process_command(char tos, char *buf, int buf_len)
/* The request is recovery or pcp shutdown request? */
if (tos == 'O' || tos == 'T')
{
- /* Prevent those pcp requests while processing failover/failback request */
+ /*
+ * Prevent those pcp requests while processing failover/failback
+ * request
+ */
if (Req_info->switching)
{
if (Req_info->request_queue_tail != Req_info->request_queue_head)
@@ -343,7 +346,7 @@ pcp_process_command(char tos, char *buf, int buf_len)
inform_watchdog_info(pcp_frontend, buf);
break;
- case 'Z': /*reload config file */
+ case 'Z': /* reload config file */
set_ps_display("PCP: processing reload config request", false);
process_reload_config(pcp_frontend, buf[0]);
break;
@@ -534,7 +537,7 @@ user_authenticate(char *buf, char *passwd_file, char *salt, int salt_len)
static int
pool_detach_node(int node_id, bool gracefully, bool switchover)
{
- int flag = 0;
+ int flag = 0;
if (switchover)
flag = REQ_DETAIL_PROMOTE;
@@ -641,7 +644,7 @@ pool_promote_node(int node_id, bool gracefully)
}
static void
-inform_process_count(PCP_CONNECTION * frontend)
+inform_process_count(PCP_CONNECTION *frontend)
{
int wsize;
int process_count;
@@ -654,7 +657,8 @@ inform_process_count(PCP_CONNECTION * frontend)
process_list = pool_get_process_list(&process_count);
- mesg = (char *) palloc(8 * process_count); /* PID is at most 7 characters long */
+ mesg = (char *) palloc(8 * process_count); /* PID is at most 7 characters
+ * long */
snprintf(process_count_str, sizeof(process_count_str), "%d", process_count);
@@ -690,13 +694,13 @@ inform_process_count(PCP_CONNECTION * frontend)
* pcp_process_info
*/
static void
-inform_process_info(PCP_CONNECTION * frontend, char *buf)
+inform_process_info(PCP_CONNECTION *frontend, char *buf)
{
int proc_id;
int wsize;
int num_proc = pool_config->num_init_children;
int i;
- int *offsets;
+ int *offsets;
int n;
POOL_REPORT_POOLS *pools;
@@ -756,7 +760,7 @@ inform_process_info(PCP_CONNECTION * frontend, char *buf)
wsize = 0;
for (j = 0; j < n; j++)
{
- wsize += strlen((char *)&pools[i] + offsets[j]) + 1;
+ wsize += strlen((char *) &pools[i] + offsets[j]) + 1;
}
wsize += sizeof(code) + sizeof(int);
wsize = htonl(wsize);
@@ -769,7 +773,7 @@ inform_process_info(PCP_CONNECTION * frontend, char *buf)
/* send each process info data to frontend */
for (j = 0; j < n; j++)
{
- pcp_write(frontend, (char *)&pools[i] + offsets[j], strlen((char *)&pools[i] + offsets[j]) + 1);
+ pcp_write(frontend, (char *) &pools[i] + offsets[j], strlen((char *) &pools[i] + offsets[j]) + 1);
}
do_pcp_flush(frontend);
@@ -790,7 +794,7 @@ inform_process_info(PCP_CONNECTION * frontend, char *buf)
}
static void
-inform_watchdog_info(PCP_CONNECTION * frontend, char *buf)
+inform_watchdog_info(PCP_CONNECTION *frontend, char *buf)
{
int wd_index;
int json_data_len;
@@ -835,7 +839,7 @@ inform_watchdog_info(PCP_CONNECTION * frontend, char *buf)
}
static void
-inform_node_info(PCP_CONNECTION * frontend, char *buf)
+inform_node_info(PCP_CONNECTION *frontend, char *buf)
{
POOL_REPORT_NODES *nodes;
int nrows;
@@ -882,7 +886,7 @@ inform_node_info(PCP_CONNECTION * frontend, char *buf)
do_pcp_flush(frontend);
/* Second, send process information for all connection_info */
- for (i = 0; i < NUM_BACKENDS ; i++)
+ for (i = 0; i < NUM_BACKENDS; i++)
{
char port_str[6];
char status[2];
@@ -904,7 +908,7 @@ inform_node_info(PCP_CONNECTION * frontend, char *buf)
if (bi == NULL)
ereport(ERROR,
(errmsg("informing node info failed"),
- errdetail("invalid node ID")));
+ errdetail("invalid node ID")));
snprintf(port_str, sizeof(port_str), "%d", bi->backend_port);
snprintf(status, sizeof(status), "%d", bi->backend_status);
@@ -995,14 +999,14 @@ inform_health_check_stats(PCP_CONNECTION *frontend, char *buf)
{
POOL_HEALTH_CHECK_STATS *stats;
POOL_HEALTH_CHECK_STATS *s;
- int *offsets;
- int n;
- int nrows;
- int i;
- int node_id;
- bool node_id_ok = false;
- int wsize;
- char code[] = "CommandComplete";
+ int *offsets;
+ int n;
+ int nrows;
+ int i;
+ int node_id;
+ bool node_id_ok = false;
+ int wsize;
+ char code[] = "CommandComplete";
node_id = atoi(buf);
@@ -1012,7 +1016,7 @@ inform_health_check_stats(PCP_CONNECTION *frontend, char *buf)
(errmsg("informing health check stats info failed"),
errdetail("invalid node ID %d", node_id)));
}
-
+
stats = get_health_check_stats(&nrows);
for (i = 0; i < nrows; i++)
@@ -1032,7 +1036,8 @@ inform_health_check_stats(PCP_CONNECTION *frontend, char *buf)
errdetail("stats data for node ID %d does not exist", node_id)));
}
- pcp_write(frontend, "h", 1); /* indicate that this is a reply to health check stats request */
+ pcp_write(frontend, "h", 1); /* indicate that this is a reply to health
+ * check stats request */
wsize = sizeof(code) + sizeof(int);
@@ -1041,9 +1046,9 @@ inform_health_check_stats(PCP_CONNECTION *frontend, char *buf)
for (i = 0; i < n; i++)
{
- wsize += strlen((char *)s + offsets[i]) + 1;
+ wsize += strlen((char *) s + offsets[i]) + 1;
}
- wsize = htonl(wsize); /* convert to network byte order */
+ wsize = htonl(wsize); /* convert to network byte order */
/* send packet length to frontend */
pcp_write(frontend, &wsize, sizeof(int));
@@ -1053,14 +1058,14 @@ inform_health_check_stats(PCP_CONNECTION *frontend, char *buf)
/* send each health check stats data to frontend */
for (i = 0; i < n; i++)
{
- pcp_write(frontend, (char *)s + offsets[i], strlen((char *)s + offsets[i]) + 1);
+ pcp_write(frontend, (char *) s + offsets[i], strlen((char *) s + offsets[i]) + 1);
}
pfree(stats);
do_pcp_flush(frontend);
}
static void
-inform_node_count(PCP_CONNECTION * frontend)
+inform_node_count(PCP_CONNECTION *frontend)
{
int wsize;
char mesg[16];
@@ -1084,10 +1089,10 @@ inform_node_count(PCP_CONNECTION * frontend)
}
static void
-process_reload_config(PCP_CONNECTION * frontend, char scope)
+process_reload_config(PCP_CONNECTION *frontend, char scope)
{
- char code[] = "CommandComplete";
- int wsize;
+ char code[] = "CommandComplete";
+ int wsize;
if (scope == 'c' && pool_config->use_watchdog)
{
@@ -1100,11 +1105,11 @@ process_reload_config(PCP_CONNECTION * frontend, char scope)
errdetail("failed to propagate reload config command through watchdog")));
}
- if(pool_signal_parent(SIGHUP) == -1)
+ if (pool_signal_parent(SIGHUP) == -1)
{
- ereport(ERROR,
- (errmsg("process reload config request failed"),
- errdetail("failed to signal pgpool parent process")));
+ ereport(ERROR,
+ (errmsg("process reload config request failed"),
+ errdetail("failed to signal pgpool parent process")));
}
pcp_write(frontend, "z", 1);
@@ -1115,10 +1120,10 @@ process_reload_config(PCP_CONNECTION * frontend, char scope)
}
static void
-process_log_rotate(PCP_CONNECTION * frontend, char scope)
+process_log_rotate(PCP_CONNECTION *frontend, char scope)
{
- char code[] = "CommandComplete";
- int wsize;
+ char code[] = "CommandComplete";
+ int wsize;
if (scope == 'c' && pool_config->use_watchdog)
{
@@ -1141,7 +1146,7 @@ process_log_rotate(PCP_CONNECTION * frontend, char scope)
}
static void
-process_detach_node(PCP_CONNECTION * frontend, char *buf, char tos)
+process_detach_node(PCP_CONNECTION *frontend, char *buf, char tos)
{
int node_id;
int wsize;
@@ -1168,7 +1173,7 @@ process_detach_node(PCP_CONNECTION * frontend, char *buf, char tos)
}
static void
-process_attach_node(PCP_CONNECTION * frontend, char *buf)
+process_attach_node(PCP_CONNECTION *frontend, char *buf)
{
int node_id;
int wsize;
@@ -1190,7 +1195,7 @@ process_attach_node(PCP_CONNECTION * frontend, char *buf)
static void
-process_recovery_request(PCP_CONNECTION * frontend, char *buf)
+process_recovery_request(PCP_CONNECTION *frontend, char *buf)
{
int wsize;
char code[] = "CommandComplete";
@@ -1249,7 +1254,7 @@ process_recovery_request(PCP_CONNECTION * frontend, char *buf)
}
static void
-process_status_request(PCP_CONNECTION * frontend)
+process_status_request(PCP_CONNECTION *frontend)
{
int nrows = 0;
int i;
@@ -1307,14 +1312,14 @@ process_status_request(PCP_CONNECTION * frontend)
* actually promote the specified node and detach current primary.
*/
static void
-process_promote_node(PCP_CONNECTION * frontend, char *buf, char tos)
+process_promote_node(PCP_CONNECTION *frontend, char *buf, char tos)
{
int node_id;
int wsize;
char code[] = "CommandComplete";
bool gracefully;
char node_id_buf[64];
- char *p;
+ char *p;
char promote_option;
if (tos == 'J')
@@ -1358,8 +1363,8 @@ process_promote_node(PCP_CONNECTION * frontend, char *buf, char tos)
if (promote_option == 's')
{
ereport(DEBUG1,
- (errmsg("PCP: processing promote node"),
- errdetail("promoting Node ID %d and shutdown primary node %d", node_id, REAL_PRIMARY_NODE_ID)));
+ (errmsg("PCP: processing promote node"),
+ errdetail("promoting Node ID %d and shutdown primary node %d", node_id, REAL_PRIMARY_NODE_ID)));
pool_detach_node(node_id, gracefully, true);
}
else
@@ -1381,7 +1386,7 @@ process_promote_node(PCP_CONNECTION * frontend, char *buf, char tos)
* Process pcp_invalidate_query_cache
*/
static void
-process_invalidate_query_cache(PCP_CONNECTION * frontend)
+process_invalidate_query_cache(PCP_CONNECTION *frontend)
{
int wsize;
char code[] = "CommandComplete";
@@ -1404,7 +1409,7 @@ process_invalidate_query_cache(PCP_CONNECTION * frontend)
}
static void
-process_authentication(PCP_CONNECTION * frontend, char *buf, char *salt, int *random_salt)
+process_authentication(PCP_CONNECTION *frontend, char *buf, char *salt, int *random_salt)
{
int wsize;
int authenticated;
@@ -1439,7 +1444,7 @@ process_authentication(PCP_CONNECTION * frontend, char *buf, char *salt, int *ra
}
static void
-send_md5salt(PCP_CONNECTION * frontend, char *salt)
+send_md5salt(PCP_CONNECTION *frontend, char *salt)
{
int wsize;
@@ -1456,7 +1461,7 @@ send_md5salt(PCP_CONNECTION * frontend, char *salt)
}
static void
-process_shutdown_request(PCP_CONNECTION * frontend, char mode, char tos)
+process_shutdown_request(PCP_CONNECTION *frontend, char mode, char tos)
{
char code[] = "CommandComplete";
int len;
@@ -1465,10 +1470,11 @@ process_shutdown_request(PCP_CONNECTION * frontend, char mode, char tos)
(errmsg("PCP: processing shutdown request"),
errdetail("shutdown mode \"%c\"", mode)));
- /* quickly bail out if invalid mode is specified
- * because we do not want to propagate the command
- * with invalid mode over the watchdog network */
- if (mode != 's' && mode != 'i' && mode != 'f' )
+ /*
+ * quickly bail out if invalid mode is specified because we do not want to
+ * propagate the command with invalid mode over the watchdog network
+ */
+ if (mode != 's' && mode != 'i' && mode != 'f')
{
ereport(ERROR,
(errmsg("PCP: error while processing shutdown request"),
@@ -1478,11 +1484,11 @@ process_shutdown_request(PCP_CONNECTION * frontend, char mode, char tos)
if (tos == 't' && pool_config->use_watchdog)
{
WDExecCommandArg wdExecCommandArg;
- List *args_list = NULL;
+ List *args_list = NULL;
strncpy(wdExecCommandArg.arg_name, "mode", sizeof(wdExecCommandArg.arg_name) - 1);
- snprintf(wdExecCommandArg.arg_value, sizeof(wdExecCommandArg.arg_value) - 1, "%c",mode);
- args_list = lappend(args_list,&wdExecCommandArg);
+ snprintf(wdExecCommandArg.arg_value, sizeof(wdExecCommandArg.arg_value) - 1, "%c", mode);
+ args_list = lappend(args_list, &wdExecCommandArg);
ereport(LOG,
(errmsg("PCP: sending command to watchdog to shutdown cluster")));
@@ -1504,7 +1510,7 @@ process_shutdown_request(PCP_CONNECTION * frontend, char mode, char tos)
}
static void
-process_set_configuration_parameter(PCP_CONNECTION * frontend, char *buf, int len)
+process_set_configuration_parameter(PCP_CONNECTION *frontend, char *buf, int len)
{
char *param_name;
char *param_value;
@@ -1570,7 +1576,7 @@ process_set_configuration_parameter(PCP_CONNECTION * frontend, char *buf, int le
* Wrapper around pcp_flush which throws FATAL error when pcp_flush fails
*/
static void
-do_pcp_flush(PCP_CONNECTION * frontend)
+do_pcp_flush(PCP_CONNECTION *frontend)
{
if (pcp_flush(frontend) < 0)
ereport(FATAL,
@@ -1582,7 +1588,7 @@ do_pcp_flush(PCP_CONNECTION * frontend)
* Wrapper around pcp_read which throws FATAL error when read fails
*/
static void
-do_pcp_read(PCP_CONNECTION * pc, void *buf, int len)
+do_pcp_read(PCP_CONNECTION *pc, void *buf, int len)
{
if (pcp_read(pc, buf, len))
ereport(FATAL,
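(Editor's aside, not part of the diff: the inform_* functions above all reply to the PCP frontend in the same shape, one tag byte, a 4-byte length in network byte order that counts the length field plus everything after it, then NUL-terminated payload strings such as "CommandComplete". The sketch below lays out such a frame in a caller-supplied buffer; build_pcp_frame is a hypothetical helper, not a pgpool function.)

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Write a PCP-style reply frame into "out": tag byte, network-order length
 * covering the length field and the payload, then the NUL-terminated payload.
 * Returns the total number of bytes written.
 */
static size_t
build_pcp_frame(char *out, char tag, const char *payload)
{
	uint32_t	len = sizeof(uint32_t) + strlen(payload) + 1;
	uint32_t	netlen = htonl(len);

	out[0] = tag;
	memcpy(out + 1, &netlen, sizeof(netlen));
	memcpy(out + 1 + sizeof(netlen), payload, strlen(payload) + 1);
	return 1 + len;
}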
diff --git a/src/pcp_con/recovery.c b/src/pcp_con/recovery.c
index cc4dd5e14..8ad4a2fd5 100644
--- a/src/pcp_con/recovery.c
+++ b/src/pcp_con/recovery.c
@@ -45,10 +45,10 @@
#define SECOND_STAGE 1
static void exec_checkpoint(PGconn *conn);
-static void exec_recovery(PGconn *conn, BackendInfo * main_backend, BackendInfo * recovery_backend, char stage, int recovery_node);
-static void exec_remote_start(PGconn *conn, BackendInfo * backend);
-static PGconn *connect_backend_libpq(BackendInfo * backend);
-static void check_postmaster_started(BackendInfo * backend);
+static void exec_recovery(PGconn *conn, BackendInfo *main_backend, BackendInfo *recovery_backend, char stage, int recovery_node);
+static void exec_remote_start(PGconn *conn, BackendInfo *backend);
+static PGconn *connect_backend_libpq(BackendInfo *backend);
+static void check_postmaster_started(BackendInfo *backend);
static char recovery_command[1024];
@@ -95,12 +95,12 @@ start_recovery(int recovery_node)
conn = connect_backend_libpq(backend);
if (conn == NULL)
{
- if(check_password_type_is_not_md5(pool_config->recovery_user, pool_config->recovery_password) == -1)
+ if (check_password_type_is_not_md5(pool_config->recovery_user, pool_config->recovery_password) == -1)
{
ereport(ERROR,
(errmsg("the password of recovery_user %s is invalid format",
pool_config->recovery_user),
- errdetail("recovery_password is not allowed to be md5 hashed format")));
+ errdetail("recovery_password is not allowed to be md5 hashed format")));
}
ereport(ERROR,
(errmsg("node recovery failed, unable to connect to main node: %d ", node_id)));
@@ -247,7 +247,7 @@ exec_checkpoint(PGconn *conn)
* mode) or main backend node (in other mode).
*/
static void
-exec_recovery(PGconn *conn, BackendInfo * main_backend, BackendInfo * recovery_backend, char stage, int recovery_node)
+exec_recovery(PGconn *conn, BackendInfo *main_backend, BackendInfo *recovery_backend, char stage, int recovery_node)
{
PGresult *result;
char *hostname;
@@ -324,7 +324,7 @@ exec_recovery(PGconn *conn, BackendInfo * main_backend, BackendInfo * recovery_b
* Call pgpool_remote_start() function.
*/
static void
-exec_remote_start(PGconn *conn, BackendInfo * backend)
+exec_remote_start(PGconn *conn, BackendInfo *backend)
{
PGresult *result;
char *hostname;
@@ -359,7 +359,7 @@ exec_remote_start(PGconn *conn, BackendInfo * backend)
* Check postmaster is started.
*/
static void
-check_postmaster_started(BackendInfo * backend)
+check_postmaster_started(BackendInfo *backend)
{
int i = 0;
char port_str[16];
@@ -459,16 +459,17 @@ check_postmaster_started(BackendInfo * backend)
}
static PGconn *
-connect_backend_libpq(BackendInfo * backend)
+connect_backend_libpq(BackendInfo *backend)
{
char port_str[16];
PGconn *conn;
- char *dbname;
+ char *dbname;
char *password = get_pgpool_config_user_password(pool_config->recovery_user,
pool_config->recovery_password);
snprintf(port_str, sizeof(port_str),
"%d", backend->backend_port);
+
/*
* If database is not specified, "postgres" database is assumed.
*/
@@ -519,7 +520,8 @@ wait_connection_closed(void)
return ensure_conn_counter_validity();
}
-int ensure_conn_counter_validity(void)
+int
+ensure_conn_counter_validity(void)
{
/*
* recovery_timeout was expired. Before returning with failure status,
diff --git a/src/protocol/CommandComplete.c b/src/protocol/CommandComplete.c
index b71678155..ef144ca31 100644
--- a/src/protocol/CommandComplete.c
+++ b/src/protocol/CommandComplete.c
@@ -40,15 +40,15 @@
#include "utils/pool_stream.h"
static int extract_ntuples(char *message);
-static POOL_STATUS handle_mismatch_tuples(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *packet, int packetlen, bool command_complete);
-static int forward_command_complete(POOL_CONNECTION * frontend, char *packet, int packetlen);
-static int forward_empty_query(POOL_CONNECTION * frontend, char *packet, int packetlen);
-static int forward_packet_to_frontend(POOL_CONNECTION * frontend, char kind, char *packet, int packetlen);
-static void process_clear_cache(POOL_CONNECTION_POOL * backend);
+static POOL_STATUS handle_mismatch_tuples(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *packet, int packetlen, bool command_complete);
+static int forward_command_complete(POOL_CONNECTION *frontend, char *packet, int packetlen);
+static int forward_empty_query(POOL_CONNECTION *frontend, char *packet, int packetlen);
+static int forward_packet_to_frontend(POOL_CONNECTION *frontend, char kind, char *packet, int packetlen);
+static void process_clear_cache(POOL_CONNECTION_POOL *backend);
static bool check_alter_role_statement(AlterRoleStmt *stmt);
POOL_STATUS
-CommandComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, bool command_complete)
+CommandComplete(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, bool command_complete)
{
int len,
len1;
@@ -230,9 +230,9 @@ CommandComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, bool
state = TSTATE(backend, MAIN_NODE_ID);
/*
- * If some rows have been fetched by an execute with non 0 row option,
- * we do not create cache.
- */
+ * If some rows have been fetched by an execute with non 0 row
+ * option, we do not create cache.
+ */
pool_handle_query_cache(backend, query, node, state,
session_context->query_context->partial_fetch);
@@ -276,7 +276,7 @@ CommandComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, bool
if (can_query_context_destroy(session_context->query_context))
{
- POOL_SENT_MESSAGE * msg = pool_get_sent_message_by_query_context(session_context->query_context);
+ POOL_SENT_MESSAGE *msg = pool_get_sent_message_by_query_context(session_context->query_context);
if (!msg || (msg && *msg->name == '\0'))
{
@@ -294,7 +294,7 @@ CommandComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, bool
* Handle misc process which is necessary when query context exists.
*/
void
-handle_query_context(POOL_CONNECTION_POOL * backend)
+handle_query_context(POOL_CONNECTION_POOL *backend)
{
POOL_SESSION_CONTEXT *session_context;
Node *node;
@@ -344,13 +344,13 @@ handle_query_context(POOL_CONNECTION_POOL * backend)
}
/*
- * JDBC driver sends "BEGIN" query internally if setAutoCommit(false).
- * But it does not send Sync message after "BEGIN" query. In extended
- * query protocol, PostgreSQL returns ReadyForQuery when a client sends
- * Sync message. Problem is, pgpool can't know the transaction state
- * without receiving ReadyForQuery. So we remember that we need to send
- * Sync message internally afterward, whenever we receive BEGIN in
- * extended protocol.
+ * JDBC driver sends "BEGIN" query internally if setAutoCommit(false). But
+ * it does not send Sync message after "BEGIN" query. In extended query
+ * protocol, PostgreSQL returns ReadyForQuery when a client sends Sync
+ * message. Problem is, pgpool can't know the transaction state without
+ * receiving ReadyForQuery. So we remember that we need to send Sync
+ * message internally afterward, whenever we receive BEGIN in extended
+ * protocol.
*/
else if (IsA(node, TransactionStmt))
{
@@ -374,10 +374,10 @@ handle_query_context(POOL_CONNECTION_POOL * backend)
pool_unset_failed_transaction();
pool_unset_transaction_isolation();
}
- else if (stmt->kind == TRANS_STMT_COMMIT)
+ else if (stmt->kind == TRANS_STMT_COMMIT)
{
/* Commit ongoing CREATE/DROP temp table status */
- pool_temp_tables_commit_pending();
+ pool_temp_tables_commit_pending();
/* Forget a transaction was started by multi statement query */
unset_tx_started_by_multi_statement_query();
@@ -412,12 +412,13 @@ handle_query_context(POOL_CONNECTION_POOL * backend)
else if (IsA(node, CreateStmt))
{
CreateStmt *stmt = (CreateStmt *) node;
- POOL_TEMP_TABLE_STATE state;
+ POOL_TEMP_TABLE_STATE state;
/* Is this a temporary table? */
if (stmt->relation->relpersistence == 't')
{
- if (TSTATE(backend, MAIN_NODE_ID ) == 'T') /* Are we inside a transaction? */
+ if (TSTATE(backend, MAIN_NODE_ID) == 'T') /* Are we inside a
+ * transaction? */
{
state = TEMP_TABLE_CREATING;
}
@@ -433,8 +434,8 @@ handle_query_context(POOL_CONNECTION_POOL * backend)
}
else if (IsA(node, DropStmt))
{
- DropStmt *stmt = (DropStmt *) node;
- POOL_TEMP_TABLE_STATE state;
+ DropStmt *stmt = (DropStmt *) node;
+ POOL_TEMP_TABLE_STATE state;
if (stmt->removeType == OBJECT_TABLE)
{
@@ -442,7 +443,8 @@ handle_query_context(POOL_CONNECTION_POOL * backend)
ListCell *cell;
ListCell *next;
- if (TSTATE(backend, MAIN_NODE_ID ) == 'T') /* Are we inside a transaction? */
+ if (TSTATE(backend, MAIN_NODE_ID) == 'T') /* Are we inside a
+ * transaction? */
{
state = TEMP_TABLE_DROPPING;
}
@@ -453,7 +455,8 @@ handle_query_context(POOL_CONNECTION_POOL * backend)
for (cell = list_head(session_context->temp_tables); cell; cell = next)
{
- char *tablename = (char *)lfirst(cell);
+ char *tablename = (char *) lfirst(cell);
+
ereport(DEBUG1,
(errmsg("Dropping temp table: %s", tablename)));
pool_temp_tables_delete(tablename, state);
@@ -478,7 +481,7 @@ handle_query_context(POOL_CONNECTION_POOL * backend)
}
else if (IsA(node, GrantStmt))
{
- GrantStmt *stmt = (GrantStmt *) node;
+ GrantStmt *stmt = (GrantStmt *) node;
/* REVOKE? */
if (stmt->is_grant)
@@ -510,15 +513,15 @@ handle_query_context(POOL_CONNECTION_POOL * backend)
static bool
check_alter_role_statement(AlterRoleStmt *stmt)
{
- ListCell *l;
+ ListCell *l;
foreach(l, stmt->options)
{
- DefElem *elm = (DefElem *) lfirst(l);
+ DefElem *elm = (DefElem *) lfirst(l);
/*
- * We want to detect other than ALTER ROLE foo WITH PASSWORD or
- * WITH CONNECTION LIMIT case. It does not change any privilege of the
+ * We want to detect other than ALTER ROLE foo WITH PASSWORD or WITH
+ * CONNECTION LIMIT case. It does not change any privilege of the
* role.
*/
if (strcmp(elm->defname, "password") &&
@@ -553,7 +556,8 @@ extract_ntuples(char *message)
/*
* Handle mismatch tuples
*/
-static POOL_STATUS handle_mismatch_tuples(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *packet, int packetlen, bool command_complete)
+static POOL_STATUS
+handle_mismatch_tuples(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *packet, int packetlen, bool command_complete)
{
POOL_SESSION_CONTEXT *session_context;
@@ -620,7 +624,7 @@ static POOL_STATUS handle_mismatch_tuples(POOL_CONNECTION * frontend, POOL_CONNE
if (session_context->mismatch_ntuples)
{
- StringInfoData msg;
+ StringInfoData msg;
initStringInfo(&msg);
appendStringInfoString(&msg, "pgpool detected difference of the number of inserted, updated or deleted tuples. Possible last query was: \"");
@@ -667,7 +671,7 @@ static POOL_STATUS handle_mismatch_tuples(POOL_CONNECTION * frontend, POOL_CONNE
* Forward Command complete packet to frontend
*/
static int
-forward_command_complete(POOL_CONNECTION * frontend, char *packet, int packetlen)
+forward_command_complete(POOL_CONNECTION *frontend, char *packet, int packetlen)
{
return forward_packet_to_frontend(frontend, 'C', packet, packetlen);
}
@@ -676,7 +680,7 @@ forward_command_complete(POOL_CONNECTION * frontend, char *packet, int packetlen
* Forward Empty query response to frontend
*/
static int
-forward_empty_query(POOL_CONNECTION * frontend, char *packet, int packetlen)
+forward_empty_query(POOL_CONNECTION *frontend, char *packet, int packetlen)
{
return forward_packet_to_frontend(frontend, 'I', packet, packetlen);
}
@@ -685,7 +689,7 @@ forward_empty_query(POOL_CONNECTION * frontend, char *packet, int packetlen)
* Forward packet to frontend
*/
static int
-forward_packet_to_frontend(POOL_CONNECTION * frontend, char kind, char *packet, int packetlen)
+forward_packet_to_frontend(POOL_CONNECTION *frontend, char kind, char *packet, int packetlen)
{
int sendlen;
@@ -705,7 +709,7 @@ forward_packet_to_frontend(POOL_CONNECTION * frontend, char kind, char *packet,
* Process statements that need clearing query cache
*/
static void
-process_clear_cache(POOL_CONNECTION_POOL * backend)
+process_clear_cache(POOL_CONNECTION_POOL *backend)
{
/* Query cache enabled? */
if (!pool_config->memory_cache_enabled)
@@ -717,15 +721,15 @@ process_clear_cache(POOL_CONNECTION_POOL * backend)
/*
* Are we inside a transaction?
*/
- if (TSTATE(backend, MAIN_NODE_ID ) == 'T')
+ if (TSTATE(backend, MAIN_NODE_ID) == 'T')
{
/*
- * Disable query cache in this transaction.
- * All query cache will be cleared at commit.
+ * Disable query cache in this transaction. All query cache will
+ * be cleared at commit.
*/
set_query_cache_disabled_tx();
}
- else if (TSTATE(backend, MAIN_NODE_ID ) == 'I') /* outside transaction */
+ else if (TSTATE(backend, MAIN_NODE_ID) == 'I') /* outside transaction */
{
/*
* Clear all the query cache.
@@ -738,14 +742,14 @@ process_clear_cache(POOL_CONNECTION_POOL * backend)
/*
* Are we inside a transaction?
*/
- if (TSTATE(backend, MAIN_NODE_ID ) == 'T')
+ if (TSTATE(backend, MAIN_NODE_ID) == 'T')
{
/* Inside user started transaction? */
if (!INTERNAL_TRANSACTION_STARTED(backend, MAIN_NODE_ID))
{
/*
- * Disable query cache in this transaction.
- * All query cache will be cleared at commit.
+ * Disable query cache in this transaction. All query cache
+ * will be cleared at commit.
*/
set_query_cache_disabled_tx();
}
@@ -757,7 +761,7 @@ process_clear_cache(POOL_CONNECTION_POOL * backend)
clear_query_cache();
}
}
- else if (TSTATE(backend, MAIN_NODE_ID ) == 'I') /* outside transaction */
+ else if (TSTATE(backend, MAIN_NODE_ID) == 'I') /* outside transaction */
{
/*
* Clear all the query cache.
diff --git a/src/protocol/child.c b/src/protocol/child.c
index 1ef88910d..cf2161806 100644
--- a/src/protocol/child.c
+++ b/src/protocol/child.c
@@ -68,19 +68,19 @@
#include "auth/pool_passwd.h"
#include "auth/pool_hba.h"
-static StartupPacket *read_startup_packet(POOL_CONNECTION * cp);
-static POOL_CONNECTION_POOL * connect_backend(StartupPacket *sp, POOL_CONNECTION * frontend);
+static StartupPacket *read_startup_packet(POOL_CONNECTION *cp);
+static POOL_CONNECTION_POOL *connect_backend(StartupPacket *sp, POOL_CONNECTION *frontend);
static RETSIGTYPE die(int sig);
static RETSIGTYPE close_idle_connection(int sig);
static RETSIGTYPE wakeup_handler(int sig);
static RETSIGTYPE reload_config_handler(int sig);
static RETSIGTYPE authentication_timeout(int sig);
-static void send_params(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+static void send_params(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
static int connection_count_up(void);
static void connection_count_down(void);
-static bool connect_using_existing_connection(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
- StartupPacket *sp);
+static bool connect_using_existing_connection(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ StartupPacket *sp);
static void check_restart_request(void);
static void check_exit_request(void);
static void enable_authentication_timeout(void);
@@ -89,17 +89,17 @@ static int wait_for_new_connections(int *fds, SockAddr *saddr);
static void check_config_reload(void);
static void get_backends_status(unsigned int *valid_backends, unsigned int *down_backends);
static void validate_backend_connectivity(int front_end_fd);
-static POOL_CONNECTION * get_connection(int front_end_fd, SockAddr *saddr);
-static POOL_CONNECTION_POOL * get_backend_connection(POOL_CONNECTION * frontend);
+static POOL_CONNECTION *get_connection(int front_end_fd, SockAddr *saddr);
+static POOL_CONNECTION_POOL *get_backend_connection(POOL_CONNECTION *frontend);
static StartupPacket *StartupPacketCopy(StartupPacket *sp);
static void log_disconnections(char *database, char *username);
static void print_process_status(char *remote_host, char *remote_port);
-static bool backend_cleanup(POOL_CONNECTION * volatile *frontend, POOL_CONNECTION_POOL * volatile backend, bool frontend_invalid);
+static bool backend_cleanup(POOL_CONNECTION *volatile *frontend, POOL_CONNECTION_POOL *volatile backend, bool frontend_invalid);
static void child_will_go_down(int code, Datum arg);
-static int opt_sort(const void *a, const void *b);
+static int opt_sort(const void *a, const void *b);
-static bool unix_fds_not_isset(int* fds, int num_unix_fds, fd_set* opt);
+static bool unix_fds_not_isset(int *fds, int num_unix_fds, fd_set *opt);
/*
* Non 0 means SIGTERM (smart shutdown) or SIGINT (fast shutdown) has arrived
@@ -154,11 +154,12 @@ do_child(int *fds)
sigjmp_buf local_sigjmp_buf;
POOL_CONNECTION_POOL *volatile backend = NULL;
- /* counter for child_max_connections. "volatile" declaration is necessary
+ /*
+ * counter for child_max_connections. "volatile" declaration is necessary
* so that this is counted up even if long jump is issued due to
* ereport(ERROR).
*/
- volatile int connections_count = 0;
+ volatile int connections_count = 0;
char psbuf[NI_MAXHOST + 128];
@@ -355,7 +356,8 @@ do_child(int *fds)
*/
if (con_count > (pool_config->num_init_children - pool_config->reserved_connections))
{
- POOL_CONNECTION * cp;
+ POOL_CONNECTION *cp;
+
cp = pool_open(front_end_fd, false);
if (cp == NULL)
{
@@ -405,8 +407,8 @@ do_child(int *fds)
pool_initialize_private_backend_status();
/*
- * Connect to backend. Also do authentication between
- * frontend <--> pgpool and pgpool <--> backend.
+ * Connect to backend. Also do authentication between frontend <-->
+ * pgpool and pgpool <--> backend.
*/
backend = get_backend_connection(child_frontend);
if (!backend)
@@ -513,7 +515,7 @@ do_child(int *fds)
* return true if backend connection is cached
*/
static bool
-backend_cleanup(POOL_CONNECTION * volatile *frontend, POOL_CONNECTION_POOL * volatile backend, bool frontend_invalid)
+backend_cleanup(POOL_CONNECTION *volatile *frontend, POOL_CONNECTION_POOL *volatile backend, bool frontend_invalid)
{
StartupPacket *sp;
bool cache_connection = false;
@@ -600,18 +602,18 @@ backend_cleanup(POOL_CONNECTION * volatile *frontend, POOL_CONNECTION_POOL * vol
* Read the startup packet and parse the contents.
*/
static StartupPacket *
-read_startup_packet(POOL_CONNECTION * cp)
+read_startup_packet(POOL_CONNECTION *cp)
{
StartupPacket *sp;
StartupPacket_v2 *sp2;
int protov;
int len;
char *p;
- char **guc_options;
- int opt_num = 0;
- char *sp_sort;
- char *tmpopt;
- int i;
+ char **guc_options;
+ int opt_num = 0;
+ char *sp_sort;
+ char *tmpopt;
+ int i;
sp = (StartupPacket *) palloc0(sizeof(*sp));
@@ -659,37 +661,38 @@ read_startup_packet(POOL_CONNECTION * cp)
case PROTO_MAJOR_V3: /* V3 */
/* copy startup_packet */
sp_sort = palloc0(len);
- memcpy(sp_sort,sp->startup_packet,len);
+ memcpy(sp_sort, sp->startup_packet, len);
p = sp_sort;
- p += sizeof(int); /* skip protocol version info */
+ p += sizeof(int); /* skip protocol version info */
/* count the number of options */
while (*p)
{
- p += (strlen(p) + 1); /* skip option name */
- p += (strlen(p) + 1); /* skip option value */
- opt_num ++;
+ p += (strlen(p) + 1); /* skip option name */
+ p += (strlen(p) + 1); /* skip option value */
+ opt_num++;
}
- guc_options = (char **)palloc0(opt_num * sizeof(char *));
+ guc_options = (char **) palloc0(opt_num * sizeof(char *));
/* get guc_option name list */
p = sp_sort + sizeof(int);
for (i = 0; i < opt_num; i++)
{
guc_options[i] = p;
- p += (strlen(p) + 1); /* skip option name */
- p += (strlen(p) + 1); /* skip option value */
+ p += (strlen(p) + 1); /* skip option name */
+ p += (strlen(p) + 1); /* skip option value */
}
/* sort option name using quick sort */
- qsort( (void *)guc_options, opt_num, sizeof(char *), opt_sort );
+ qsort((void *) guc_options, opt_num, sizeof(char *), opt_sort);
- p = sp->startup_packet + sizeof(int); /* skip protocol version info */
+ p = sp->startup_packet + sizeof(int); /* skip protocol version
+ * info */
for (i = 0; i < opt_num; i++)
{
tmpopt = guc_options[i];
- memcpy(p, tmpopt ,strlen(tmpopt) + 1); /* memcpy option name */
+ memcpy(p, tmpopt, strlen(tmpopt) + 1); /* memcpy option name */
p += (strlen(tmpopt) + 1);
tmpopt += (strlen(tmpopt) + 1);
- memcpy(p, tmpopt ,strlen(tmpopt) + 1); /* memcpy option value */
+ memcpy(p, tmpopt, strlen(tmpopt) + 1); /* memcpy option value */
p += (strlen(tmpopt) + 1);
}
@@ -733,7 +736,7 @@ read_startup_packet(POOL_CONNECTION * cp)
{
ereport(DEBUG1,
(errmsg("reading startup packet"),
- errdetail("guc name: %s value: %s", p, p+strlen(p)+1)));
+ errdetail("guc name: %s value: %s", p, p + strlen(p) + 1)));
p += (strlen(p) + 1);
}
@@ -789,8 +792,8 @@ read_startup_packet(POOL_CONNECTION * cp)
* Reuse existing connection
*/
static bool
-connect_using_existing_connection(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+connect_using_existing_connection(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
StartupPacket *sp)
{
int i,
@@ -824,8 +827,8 @@ connect_using_existing_connection(POOL_CONNECTION * frontend,
/* Reuse existing connection to backend */
frontend_auth_cxt = AllocSetContextCreate(CurrentMemoryContext,
- "frontend_auth",
- ALLOCSET_DEFAULT_SIZES);
+ "frontend_auth",
+ ALLOCSET_DEFAULT_SIZES);
oldContext = MemoryContextSwitchTo(frontend_auth_cxt);
pool_do_reauth(frontend, backend);
@@ -905,7 +908,7 @@ connect_using_existing_connection(POOL_CONNECTION * frontend,
* process cancel request
*/
void
-cancel_request(CancelPacket * sp, int32 splen)
+cancel_request(CancelPacket *sp, int32 splen)
{
int len;
int fd;
@@ -915,7 +918,7 @@ cancel_request(CancelPacket * sp, int32 splen)
k;
ConnectionInfo *c = NULL;
bool found = false;
- int32 keylen; /* cancel key length */
+ int32 keylen; /* cancel key length */
if (pool_config->log_client_messages)
ereport(LOG,
@@ -958,7 +961,8 @@ cancel_request(CancelPacket * sp, int32 splen)
errdetail("found pid:%d keylen:%d i:%d", ntohl(c->pid), c->keylen, i)));
/*
- * "c" is a pointer to i th child, j th pool, and 0 th backend.
+ * "c" is a pointer to i th child, j th pool, and 0 th
+ * backend.
*/
c = pool_coninfo(i, j, 0);
found = true;
@@ -978,12 +982,12 @@ found:
/*
* We are sending cancel request message to all backend groups. So some
- * of query cancel requests may not work but it should not be a
- * problem. They are just ignored by the backend.
+ * of query cancel requests may not work but it should not be a problem.
+ * They are just ignored by the backend.
*/
for (i = 0; i < NUM_BACKENDS; i++, c++)
{
- int32 cancel_request_code;
+ int32 cancel_request_code;
if (!VALID_BACKEND(i))
continue;
@@ -1006,9 +1010,10 @@ found:
pool_set_db_node_id(con, i);
- len = htonl(splen + sizeof(int32)); /* splen does not include packet length field */
- pool_write(con, &len, sizeof(len)); /* send cancel messages length */
- cancel_request_code = htonl(PG_PROTOCOL(1234,5678)); /* cancel request code */
+ len = htonl(splen + sizeof(int32)); /* splen does not include packet
+ * length field */
+ pool_write(con, &len, sizeof(len)); /* send cancel messages length */
+ cancel_request_code = htonl(PG_PROTOCOL(1234, 5678)); /* cancel request code */
pool_write(con, &cancel_request_code, sizeof(int32));
pool_write(con, &c->pid, sizeof(int32)); /* send pid */
pool_write(con, c->key, keylen); /* send cancel key */
@@ -1068,11 +1073,12 @@ StartupPacketCopy(StartupPacket *sp)
* Create a new connection to backend.
* Authentication is performed if requested by backend.
*/
-static POOL_CONNECTION_POOL * connect_backend(StartupPacket *sp, POOL_CONNECTION * frontend)
+static POOL_CONNECTION_POOL *
+connect_backend(StartupPacket *sp, POOL_CONNECTION *frontend)
{
POOL_CONNECTION_POOL *backend;
StartupPacket *volatile topmem_sp = NULL;
- volatile bool topmem_sp_set = false;
+ volatile bool topmem_sp_set = false;
int i;
/* connect to the backend */
@@ -1122,8 +1128,8 @@ static POOL_CONNECTION_POOL * connect_backend(StartupPacket *sp, POOL_CONNECTION
* do authentication stuff
*/
frontend_auth_cxt = AllocSetContextCreate(CurrentMemoryContext,
- "frontend_auth",
- ALLOCSET_DEFAULT_SIZES);
+ "frontend_auth",
+ ALLOCSET_DEFAULT_SIZES);
oldContext = MemoryContextSwitchTo(frontend_auth_cxt);
/* do authentication against backend */
@@ -1141,7 +1147,8 @@ static POOL_CONNECTION_POOL * connect_backend(StartupPacket *sp, POOL_CONNECTION
}
PG_END_TRY();
- /* At this point, we need to free previously allocated memory for the
+ /*
+ * At this point, we need to free previously allocated memory for the
* startup packet if no backend is up.
*/
if (!topmem_sp_set && topmem_sp != NULL)
@@ -1244,7 +1251,7 @@ static RETSIGTYPE close_idle_connection(int sig)
if (CONNECTION_SLOT(p, main_node_id)->closetime > 0) /* idle connection? */
{
- bool freed = false;
+ bool freed = false;
pool_send_frontend_exits(p);
@@ -1313,7 +1320,7 @@ disable_authentication_timeout(void)
* Send parameter status message to frontend.
*/
static void
-send_params(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+send_params(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
int index;
char *name,
@@ -1363,7 +1370,7 @@ child_will_go_down(int code, Datum arg)
if (child_frontend)
log_disconnections(child_frontend->database, child_frontend->username);
else
- log_disconnections("","");
+ log_disconnections("", "");
}
}
@@ -1594,7 +1601,7 @@ wait_for_new_connections(int *fds, SockAddr *saddr)
}
else
{
- int sts;
+ int sts;
for (;;)
{
@@ -1609,10 +1616,10 @@ wait_for_new_connections(int *fds, SockAddr *saddr)
if (backend_timer_expired)
{
/*
- * We add 10 seconds to connection_life_time so that there's
- * enough margin.
+ * We add 10 seconds to connection_life_time so that
+ * there's enough margin.
*/
- int seconds = pool_config->connection_life_time + 10;
+ int seconds = pool_config->connection_life_time + 10;
while (seconds-- > 0)
{
@@ -1627,7 +1634,7 @@ wait_for_new_connections(int *fds, SockAddr *saddr)
}
}
}
- else /* success or other error */
+ else /* success or other error */
break;
}
}
@@ -1661,7 +1668,7 @@ wait_for_new_connections(int *fds, SockAddr *saddr)
numfds = select(nsocks, &rmask, NULL, NULL, timeout);
- /* not timeout*/
+ /* not timeout */
if (numfds != 0)
break;
@@ -1796,9 +1803,10 @@ retry_accept:
}
static bool
-unix_fds_not_isset(int* fds, int num_unix_fds, fd_set* opt)
+unix_fds_not_isset(int *fds, int num_unix_fds, fd_set *opt)
{
- int i;
+ int i;
+
for (i = 0; i < num_unix_fds; i++)
{
if (!FD_ISSET(fds[i], opt))
@@ -1900,7 +1908,7 @@ validate_backend_connectivity(int front_end_fd)
error_hint,
__FILE__,
__LINE__);
-
+
}
PG_CATCH();
{
@@ -1926,7 +1934,7 @@ static POOL_CONNECTION *
get_connection(int front_end_fd, SockAddr *saddr)
{
POOL_CONNECTION *cp;
- ProcessInfo *pi;
+ ProcessInfo *pi;
ereport(DEBUG1,
(errmsg("I am %d accept fd %d", getpid(), front_end_fd)));
@@ -1993,7 +2001,7 @@ get_connection(int front_end_fd, SockAddr *saddr)
* pgpool <--> backend.
*/
static POOL_CONNECTION_POOL *
-get_backend_connection(POOL_CONNECTION * frontend)
+get_backend_connection(POOL_CONNECTION *frontend)
{
int found = 0;
StartupPacket *sp;
@@ -2049,8 +2057,8 @@ retry_startup:
* return if frontend was rejected; it simply terminates this process.
*/
MemoryContext frontend_auth_cxt = AllocSetContextCreate(CurrentMemoryContext,
- "frontend_auth",
- ALLOCSET_DEFAULT_SIZES);
+ "frontend_auth",
+ ALLOCSET_DEFAULT_SIZES);
MemoryContext oldContext = MemoryContextSwitchTo(frontend_auth_cxt);
/*
@@ -2133,8 +2141,8 @@ retry_startup:
if (backend == NULL)
{
/*
- * Create a new connection to backend.
- * Authentication is performed if requested by backend.
+ * Create a new connection to backend. Authentication is performed if
+ * requested by backend.
*/
backend = connect_backend(sp, frontend);
}
@@ -2153,7 +2161,7 @@ static void
log_disconnections(char *database, char *username)
{
struct timeval endTime;
- long diff;
+ long diff;
long secs;
int msecs,
hours,
@@ -2161,7 +2169,7 @@ log_disconnections(char *database, char *username)
seconds;
gettimeofday(&endTime, NULL);
- diff = (long) ((endTime.tv_sec - startTime.tv_sec) * 1000000 + (endTime.tv_usec - startTime.tv_usec));
+ diff = (long) ((endTime.tv_sec - startTime.tv_sec) * 1000000 + (endTime.tv_usec - startTime.tv_usec));
msecs = (int) (diff % 1000000) / 1000;
secs = (long) (diff / 1000000);
@@ -2233,9 +2241,10 @@ pg_frontend_exists(void)
return 0;
}
-static int opt_sort(const void *a, const void *b)
+static int
+opt_sort(const void *a, const void *b)
{
- return strcmp( *(char **)a, *(char **)b);
+ return strcmp(*(char **) a, *(char **) b);
}
void
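(Editor's aside, not part of the diff: read_startup_packet() above collects the startup packet's GUC option names into an array of C strings and sorts them with qsort() and a strcmp-based comparator. A self-contained sketch of that pattern follows; the option names and the main() driver are made up for illustration.)

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Comparator over an array of C strings, same shape as opt_sort() above. */
static int
opt_name_cmp(const void *a, const void *b)
{
	return strcmp(*(char **) a, *(char **) b);
}

int
main(void)
{
	char	   *opts[] = {"user", "database", "application_name", "client_encoding"};
	size_t		n = sizeof(opts) / sizeof(opts[0]);
	size_t		i;

	qsort(opts, n, sizeof(opts[0]), opt_name_cmp);
	for (i = 0; i < n; i++)
		printf("%s\n", opts[i]);	/* prints the names in sorted order */
	return 0;
}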
diff --git a/src/protocol/pool_connection_pool.c b/src/protocol/pool_connection_pool.c
index 666187216..c3b369dc2 100644
--- a/src/protocol/pool_connection_pool.c
+++ b/src/protocol/pool_connection_pool.c
@@ -63,8 +63,8 @@ volatile sig_atomic_t backend_timer_expired = 0; /* flag for connection
* closed timer is expired */
volatile sig_atomic_t health_check_timer_expired; /* non 0 if health check
* timer expired */
-static POOL_CONNECTION_POOL_SLOT * create_cp(POOL_CONNECTION_POOL_SLOT * cp, int slot);
-static POOL_CONNECTION_POOL * new_connection(POOL_CONNECTION_POOL * p);
+static POOL_CONNECTION_POOL_SLOT *create_cp(POOL_CONNECTION_POOL_SLOT *cp, int slot);
+static POOL_CONNECTION_POOL *new_connection(POOL_CONNECTION_POOL *p);
static int check_socket_status(int fd);
static bool connect_with_timeout(int fd, struct addrinfo *walk, char *host, int port, bool retry);
@@ -161,6 +161,7 @@ pool_get_cp(char *user, char *database, int protoMajor, int check_socket)
ereport(LOG,
(errmsg("connection closed."),
errdetail("retry to create new connection pool")));
+
/*
* It is possible that one of the backends just broke. Sleep 1
* second to wait for failover to occur, then wait for the
@@ -259,7 +260,7 @@ pool_create_cp(void)
POOL_CONNECTION_POOL *oldestp;
POOL_CONNECTION_POOL *ret;
ConnectionInfo *info;
- int main_node_id;
+ int main_node_id;
POOL_CONNECTION_POOL *p = pool_connection_pool;
@@ -297,7 +298,7 @@ pool_create_cp(void)
{
main_node_id = in_use_backend_id(p);
if (main_node_id < 0)
- elog(ERROR, "no in use backend found"); /* this should not happen */
+ elog(ERROR, "no in use backend found"); /* this should not happen */
ereport(DEBUG1,
(errmsg("creating connection pool"),
@@ -318,7 +319,7 @@ pool_create_cp(void)
p = oldestp;
main_node_id = in_use_backend_id(p);
if (main_node_id < 0)
- elog(ERROR, "no in use backend found"); /* this should not happen */
+ elog(ERROR, "no in use backend found"); /* this should not happen */
pool_send_frontend_exits(p);
ereport(DEBUG1,
@@ -358,7 +359,7 @@ pool_create_cp(void)
* set backend connection close timer
*/
void
-pool_connection_pool_timer(POOL_CONNECTION_POOL * backend)
+pool_connection_pool_timer(POOL_CONNECTION_POOL *backend)
{
POOL_CONNECTION_POOL *p = pool_connection_pool;
int i;
@@ -782,7 +783,7 @@ connect_inet_domain_socket_by_port(char *host, int port, bool retry)
struct addrinfo *res;
struct addrinfo *walk;
struct addrinfo hints;
- int retry_cnt = 5; /* getaddrinfo() retry count in case EAI_AGAIN */
+ int retry_cnt = 5; /* getaddrinfo() retry count in case EAI_AGAIN */
/*
* getaddrinfo() requires a string because it also accepts service names,
@@ -875,7 +876,8 @@ connect_inet_domain_socket_by_port(char *host, int port, bool retry)
/*
* create connection pool
*/
-static POOL_CONNECTION_POOL_SLOT * create_cp(POOL_CONNECTION_POOL_SLOT * cp, int slot)
+static POOL_CONNECTION_POOL_SLOT *
+create_cp(POOL_CONNECTION_POOL_SLOT *cp, int slot)
{
BackendInfo *b = &pool_config->backend_desc->backend_info[slot];
int fd;
@@ -902,13 +904,14 @@ static POOL_CONNECTION_POOL_SLOT * create_cp(POOL_CONNECTION_POOL_SLOT * cp, int
* Create actual connections to backends.
* New connection resides in TopMemoryContext.
*/
-static POOL_CONNECTION_POOL * new_connection(POOL_CONNECTION_POOL * p)
+static POOL_CONNECTION_POOL *
+new_connection(POOL_CONNECTION_POOL *p)
{
POOL_CONNECTION_POOL_SLOT *s;
int active_backend_count = 0;
int i;
bool status_changed = false;
- volatile BACKEND_STATUS status;
+ volatile BACKEND_STATUS status;
MemoryContext oldContext = MemoryContextSwitchTo(TopMemoryContext);
@@ -1097,7 +1100,7 @@ close_all_backend_connections(void)
for (i = 0; i < pool_config->max_pool; i++, p++)
{
- int backend_id = in_use_backend_id(p);
+ int backend_id = in_use_backend_id(p);
if (backend_id < 0)
continue;
@@ -1120,9 +1123,10 @@ close_all_backend_connections(void)
void
update_pooled_connection_count(void)
{
- int i;
- int count = 0;
+ int i;
+ int count = 0;
POOL_CONNECTION_POOL *p = pool_connection_pool;
+
for (i = 0; i < pool_config->max_pool; i++, p++)
{
if (MAIN_CONNECTION(p))
@@ -1138,7 +1142,7 @@ update_pooled_connection_count(void)
int
in_use_backend_id(POOL_CONNECTION_POOL *pool)
{
- int i;
+ int i;
for (i = 0; i < NUM_BACKENDS; i++)
{
diff --git a/src/protocol/pool_pg_utils.c b/src/protocol/pool_pg_utils.c
index ccbe2b03c..2eebbbb3c 100644
--- a/src/protocol/pool_pg_utils.c
+++ b/src/protocol/pool_pg_utils.c
@@ -41,7 +41,7 @@
#include "pool_config_variables.h"
static int choose_db_node_id(char *str);
-static void free_persistent_db_connection_memory(POOL_CONNECTION_POOL_SLOT * cp);
+static void free_persistent_db_connection_memory(POOL_CONNECTION_POOL_SLOT *cp);
static void si_enter_critical_region(void);
static void si_leave_critical_region(void);
@@ -64,9 +64,9 @@ make_persistent_db_connection(
{
int protoVersion;
char data[MAX_USER_AND_DATABASE];
- } StartupPacket_v3;
+ } StartupPacket_v3;
- static StartupPacket_v3 * startup_packet;
+ static StartupPacket_v3 *startup_packet;
int len,
len1;
@@ -200,8 +200,8 @@ make_persistent_db_connection_noerror(
* receives an ERROR, it stops processing and terminates, which is not
* good. This is problematic especially with pcp_node_info, since it
* calls db_node_role(), and db_node_role() calls this function. So if
- * the target PostgreSQL is down, EmitErrorReport() sends ERROR message
- * to pcp frontend and it stops (see process_pcp_response() in
+ * the target PostgreSQL is down, EmitErrorReport() sends ERROR
+ * message to pcp frontend and it stops (see process_pcp_response() in
* src/libs/pcp/pcp.c. To fix this, just eliminate calling
* EmitErrorReport(). This will suppress ERROR message but as you can
* see the comment in this function "does not ereports in case of an
@@ -221,7 +221,7 @@ make_persistent_db_connection_noerror(
* make_persistent_db_connection and discard_persistent_db_connection.
*/
static void
-free_persistent_db_connection_memory(POOL_CONNECTION_POOL_SLOT * cp)
+free_persistent_db_connection_memory(POOL_CONNECTION_POOL_SLOT *cp)
{
if (!cp)
return;
@@ -245,7 +245,7 @@ free_persistent_db_connection_memory(POOL_CONNECTION_POOL_SLOT * cp)
* make_persistent_db_connection().
*/
void
-discard_persistent_db_connection(POOL_CONNECTION_POOL_SLOT * cp)
+discard_persistent_db_connection(POOL_CONNECTION_POOL_SLOT *cp)
{
int len;
@@ -274,7 +274,7 @@ discard_persistent_db_connection(POOL_CONNECTION_POOL_SLOT * cp)
* send startup packet
*/
void
-send_startup_packet(POOL_CONNECTION_POOL_SLOT * cp)
+send_startup_packet(POOL_CONNECTION_POOL_SLOT *cp)
{
int len;
@@ -319,10 +319,10 @@ select_load_balancing_node(void)
int tmp;
int no_load_balance_node_id = -2;
uint64 lowest_delay;
- int lowest_delay_nodes[NUM_BACKENDS];
+ int lowest_delay_nodes[NUM_BACKENDS];
/* prng state data for load balancing */
- static pg_prng_state backsel_state;
+ static pg_prng_state backsel_state;
/*
* -2 indicates there's no database_redirect_preference_list. -1 indicates
@@ -443,24 +443,27 @@ select_load_balancing_node(void)
if (suggested_node_id >= 0)
{
/*
- * If pgpool is running in Streaming Replication mode and delay_threshold
- * and prefer_lower_delay_standby are true, we choose the least delayed
- * node if suggested_node is standby and delayed over delay_threshold.
+ * If pgpool is running in Streaming Replication mode and
+ * delay_threshold and prefer_lower_delay_standby are true, we choose
+ * the least delayed node if suggested_node is standby and delayed
+ * over delay_threshold.
*/
if (STREAM && pool_config->prefer_lower_delay_standby &&
suggested_node_id != PRIMARY_NODE_ID &&
check_replication_delay(suggested_node_id) < 0)
{
ereport(DEBUG1,
- (errmsg("selecting load balance node"),
- errdetail("suggested backend %d is streaming delayed over delay_threshold", suggested_node_id)));
+ (errmsg("selecting load balance node"),
+ errdetail("suggested backend %d is streaming delayed over delay_threshold", suggested_node_id)));
/*
- * The new load balancing node is selected from the
- * nodes which have the lowest delay.
+ * The new load balancing node is selected from the nodes which
+ * have the lowest delay.
*/
if (pool_config->delay_threshold_by_time > 0)
- lowest_delay = pool_config->delay_threshold_by_time * 1000; /* convert from milli seconds to micro seconds */
+ lowest_delay = pool_config->delay_threshold_by_time * 1000; /* convert from milli
+ * seconds to micro
+ * seconds */
else
lowest_delay = pool_config->delay_threshold;
@@ -484,7 +487,8 @@ select_load_balancing_node(void)
}
else if (lowest_delay > BACKEND_INFO(i).standby_delay)
{
- int ii;
+ int ii;
+
lowest_delay = BACKEND_INFO(i).standby_delay;
for (ii = 0; ii < NUM_BACKENDS; ii++)
{
@@ -628,7 +632,8 @@ select_load_balancing_node(void)
}
else if (lowest_delay > BACKEND_INFO(i).standby_delay)
{
- int ii;
+ int ii;
+
lowest_delay = BACKEND_INFO(i).standby_delay;
for (ii = 0; ii < NUM_BACKENDS; ii++)
{
@@ -679,13 +684,13 @@ static void
initialize_prng(pg_prng_state *state)
{
static bool prng_seed_set = false;
- uint64 seed;
+ uint64 seed;
if (unlikely(!prng_seed_set))
{
/* initialize prng */
if (!pg_strong_random(&seed, sizeof(seed)))
- seed = UINT64CONST(1); /* Pick a value, as long as it spreads */
+ seed = UINT64CONST(1); /* Pick a value, as long as it spreads */
pg_prng_seed(state, seed);
prng_seed_set = true;
}
@@ -702,17 +707,17 @@ initialize_prng(pg_prng_state *state)
*
*/
PGVersion *
-Pgversion(POOL_CONNECTION_POOL * backend)
+Pgversion(POOL_CONNECTION_POOL *backend)
{
#define VERSION_BUF_SIZE 10
- static PGVersion pgversion;
- static POOL_RELCACHE *relcache;
- char *result;
- char *p;
- char buf[VERSION_BUF_SIZE];
- int i;
- int major;
- int minor;
+ static PGVersion pgversion;
+ static POOL_RELCACHE *relcache;
+ char *result;
+ char *p;
+ char buf[VERSION_BUF_SIZE];
+ int i;
+ int major;
+ int minor;
/*
* First, check local cache. If cache is set, just return it.
@@ -743,7 +748,7 @@ Pgversion(POOL_CONNECTION_POOL * backend)
/*
* Search relcache.
*/
- result = (char *)pool_search_relcache(relcache, backend, "version");
+ result = (char *) pool_search_relcache(relcache, backend, "version");
if (result == 0)
{
ereport(FATAL,
@@ -801,7 +806,7 @@ Pgversion(POOL_CONNECTION_POOL * backend)
{
p++;
i = 0;
- while (i < VERSION_BUF_SIZE -1 && p && *p != '.' && *p != ' ')
+ while (i < VERSION_BUF_SIZE - 1 && p && *p != '.' && *p != ' ')
{
buf[i++] = *p++;
}
@@ -817,7 +822,7 @@ Pgversion(POOL_CONNECTION_POOL * backend)
*/
p++;
i = 0;
- while (i < VERSION_BUF_SIZE -1 && p && *p != '.' && *p != ' ')
+ while (i < VERSION_BUF_SIZE - 1 && p && *p != '.' && *p != ' ')
{
buf[i++] = *p++;
}
@@ -879,6 +884,7 @@ choose_db_node_id(char *str)
}
return node_id;
}
+
/*
*---------------------------------------------------------------------------------
* Snapshot Isolation modules
@@ -1003,7 +1009,7 @@ void
si_snapshot_acquired(void)
{
POOL_SESSION_CONTEXT *session;
- int i;
+ int i;
session = pool_get_session_context(true);
@@ -1018,9 +1024,10 @@ si_snapshot_acquired(void)
if (si_manage_info->snapshot_counter == 0)
{
/* wakeup all waiting children */
- for (i = 0; i < pool_config->num_init_children ; i++)
+ for (i = 0; i < pool_config->num_init_children; i++)
{
- pid_t pid = si_manage_info->snapshot_waiting_children[i];
+ pid_t pid = si_manage_info->snapshot_waiting_children[i];
+
if (pid > 0)
{
elog(SI_DEBUG_LOG_LEVEL, "si_snapshot_acquired: send SIGUSR2 to %d", pid);
@@ -1076,7 +1083,7 @@ void
si_commit_done(void)
{
POOL_SESSION_CONTEXT *session;
- int i;
+ int i;
session = pool_get_session_context(true);
@@ -1092,9 +1099,10 @@ si_commit_done(void)
if (si_manage_info->commit_counter == 0)
{
/* wakeup all waiting children */
- for (i = 0; i < pool_config->num_init_children ; i++)
+ for (i = 0; i < pool_config->num_init_children; i++)
{
- pid_t pid = si_manage_info->commit_waiting_children[i];
+ pid_t pid = si_manage_info->commit_waiting_children[i];
+
if (pid > 0)
{
elog(SI_DEBUG_LOG_LEVEL, "si_commit_done: send SIGUSR2 to %d", pid);
@@ -1117,7 +1125,8 @@ si_commit_done(void)
* -1: delay exceeds delay_threshold_by_time
* -2: delay exceeds delay_threshold
*/
-int check_replication_delay(int node_id)
+int
+check_replication_delay(int node_id)
{
BackendInfo *bkinfo;
@@ -1132,7 +1141,7 @@ int check_replication_delay(int node_id)
* to multiply delay_threshold_by_time by 1000 to normalize.
*/
if (pool_config->delay_threshold_by_time > 0 &&
- bkinfo->standby_delay > pool_config->delay_threshold_by_time*1000)
+ bkinfo->standby_delay > pool_config->delay_threshold_by_time * 1000)
return -1;
/*
@@ -1144,4 +1153,3 @@ int check_replication_delay(int node_id)
return 0;
}
-
diff --git a/src/protocol/pool_process_query.c b/src/protocol/pool_process_query.c
index 5a6b97ba1..b69cb3d52 100644
--- a/src/protocol/pool_process_query.c
+++ b/src/protocol/pool_process_query.c
@@ -80,28 +80,28 @@
#define IDLE_IN_TRANSACTION_SESSION_TIMEOUT_ERROR_CODE "25P03"
#define IDLE_SESSION_TIMEOUT_ERROR_CODE "57P05"
-static int reset_backend(POOL_CONNECTION_POOL * backend, int qcnt);
+static int reset_backend(POOL_CONNECTION_POOL *backend, int qcnt);
static char *get_insert_command_table_name(InsertStmt *node);
-static bool is_cache_empty(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+static bool is_cache_empty(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
static bool is_panic_or_fatal_error(char *message, int major);
-static int extract_message(POOL_CONNECTION * backend, char *error_code, int major, char class, bool unread);
-static int detect_postmaster_down_error(POOL_CONNECTION * backend, int major);
+static int extract_message(POOL_CONNECTION *backend, char *error_code, int major, char class, bool unread);
+static int detect_postmaster_down_error(POOL_CONNECTION *backend, int major);
static bool is_internal_transaction_needed(Node *node);
static bool pool_has_insert_lock(void);
-static POOL_STATUS add_lock_target(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table);
-static bool has_lock_target(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table, bool for_update);
-static POOL_STATUS insert_oid_into_insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table);
-static POOL_STATUS read_packets_and_process(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int reset_request, int *state, short *num_fields, bool *cont);
+static POOL_STATUS add_lock_target(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *table);
+static bool has_lock_target(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *table, bool for_update);
+static POOL_STATUS insert_oid_into_insert_lock(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *table);
+static POOL_STATUS read_packets_and_process(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, int reset_request, int *state, short *num_fields, bool *cont);
static bool is_all_standbys_command_complete(unsigned char *kind_list, int num_backends, int main_node);
-static bool pool_process_notice_message_from_one_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int backend_idx, char kind);
+static bool pool_process_notice_message_from_one_backend(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, int backend_idx, char kind);
/*
* Main module for query processing
* reset_request: if non 0, call reset_backend to execute reset queries
*/
POOL_STATUS
-pool_process_query(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+pool_process_query(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int reset_request)
{
short num_fields = 0; /* the number of fields in a row (V2 protocol) */
@@ -135,8 +135,8 @@ pool_process_query(POOL_CONNECTION * frontend,
}
/*
- * Reset error flag while processing reset queries.
- * The flag is set to on inside pool_send_and_wait().
+ * Reset error flag while processing reset queries. The flag is set to on
+ * inside pool_send_and_wait().
*/
reset_query_error = false;
@@ -306,8 +306,7 @@ pool_process_query(POOL_CONNECTION * frontend,
else
{
/*
- * If we have pending data in main, we need to process
- * it
+ * If we have pending data in main, we need to process it
*/
if (pool_ssl_pending(MAIN(backend)) ||
!pool_read_buffer_is_empty(MAIN(backend)))
@@ -327,8 +326,8 @@ pool_process_query(POOL_CONNECTION * frontend,
!pool_read_buffer_is_empty(CONNECTION(backend, i)))
{
/*
- * If we have pending data in main, we need
- * to process it
+ * If we have pending data in main, we need to
+ * process it
*/
if (IS_MAIN_NODE_ID(i))
{
@@ -344,9 +343,8 @@ pool_process_query(POOL_CONNECTION * frontend,
char *string;
/*
- * If main does not have pending data,
- * we discard one packet from other
- * backend
+ * If main does not have pending data, we
+ * discard one packet from other backend
*/
pool_read_with_error(CONNECTION(backend, i), &kind, sizeof(kind),
"reading message kind from backend");
@@ -358,22 +356,22 @@ pool_process_query(POOL_CONNECTION * frontend,
int sendlen;
/*
- * In native replication mode we may
- * send the query to the standby
- * node and the NOTIFY comes back
- * only from primary node. But
- * since we have sent the query to
- * the standby, so the current
- * MAIN_NODE_ID will be pointing
- * to the standby node. And we
- * will get stuck if we keep
- * waiting for the current main
- * node (standby) in this case to
- * send us the NOTIFY message. see
- * "0000116: LISTEN Notifications
- * Not Reliably Delivered Using
- * JDBC4 Demonstrator" for the
- * scenario
+ * In native replication mode we
+ * may send the query to the
+ * standby node and the NOTIFY
+ * comes back only from primary
+ * node. But since we have sent
+ * the query to the standby, so
+ * the current MAIN_NODE_ID will
+ * be pointing to the standby
+ * node. And we will get stuck if
+ * we keep waiting for the current
+ * main node (standby) in this
+ * case to send us the NOTIFY
+ * message. see "0000116: LISTEN
+ * Notifications Not Reliably
+ * Delivered Using JDBC4
+ * Demonstrator" for the scenario
*/
pool_read_with_error(CONNECTION(backend, i), &len, sizeof(len),
"reading message length from backend");
@@ -396,11 +394,11 @@ pool_process_query(POOL_CONNECTION * frontend,
* sent to all backends. However
* the order of arrival of
* 'Notification response' is not
- * necessarily the main first
- * and then standbys. So if it
- * arrives standby first, we should
- * try to read from main, rather
- * than just discard it.
+ * necessarily the main first and
+ * then standbys. So if it arrives
+ * standby first, we should try to
+ * read from main, rather than
+ * just discard it.
*/
pool_unread(CONNECTION(backend, i), &kind, sizeof(kind));
ereport(LOG,
@@ -473,6 +471,7 @@ pool_process_query(POOL_CONNECTION * frontend,
if (pool_config->memory_cache_enabled)
{
volatile bool invalidate_request = Req_info->query_cache_invalidate_request;
+
if (invalidate_request)
{
/*
@@ -489,7 +488,7 @@ pool_process_query(POOL_CONNECTION * frontend,
* send simple query message to a node.
*/
void
-send_simplequery_message(POOL_CONNECTION * backend, int len, char *string, int major)
+send_simplequery_message(POOL_CONNECTION *backend, int len, char *string, int major)
{
/* forward the query to the backend */
pool_write(backend, "Q", 1);
@@ -512,7 +511,7 @@ send_simplequery_message(POOL_CONNECTION * backend, int len, char *string, int m
*/
void
-wait_for_query_response_with_trans_cleanup(POOL_CONNECTION * frontend, POOL_CONNECTION * backend,
+wait_for_query_response_with_trans_cleanup(POOL_CONNECTION *frontend, POOL_CONNECTION *backend,
int protoVersion, int pid, char *key, int keylen)
{
PG_TRY();
@@ -545,7 +544,7 @@ wait_for_query_response_with_trans_cleanup(POOL_CONNECTION * frontend, POOL_CONN
* response.
*/
POOL_STATUS
-wait_for_query_response(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, int protoVersion)
+wait_for_query_response(POOL_CONNECTION *frontend, POOL_CONNECTION *backend, int protoVersion)
{
#define DUMMY_PARAMETER "pgpool_dummy_param"
#define DUMMY_VALUE "pgpool_dummy_value"
@@ -632,7 +631,7 @@ wait_for_query_response(POOL_CONNECTION * frontend, POOL_CONNECTION * backend, i
* Extended query protocol has to send Flush message.
*/
POOL_STATUS
-send_extended_protocol_message(POOL_CONNECTION_POOL * backend,
+send_extended_protocol_message(POOL_CONNECTION_POOL *backend,
int node_id, char *kind,
int len, char *string)
{
@@ -665,7 +664,7 @@ send_extended_protocol_message(POOL_CONNECTION_POOL * backend,
* wait until read data is ready
*/
int
-synchronize(POOL_CONNECTION * cp)
+synchronize(POOL_CONNECTION *cp)
{
return pool_check_fd(cp);
}
@@ -678,7 +677,7 @@ synchronize(POOL_CONNECTION * cp)
* valid backends might be changed by failover/failback.
*/
void
-pool_send_frontend_exits(POOL_CONNECTION_POOL * backend)
+pool_send_frontend_exits(POOL_CONNECTION_POOL *backend)
{
int len;
int i;
@@ -720,8 +719,8 @@ pool_send_frontend_exits(POOL_CONNECTION_POOL * backend)
*/
POOL_STATUS
-SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+SimpleForwardToFrontend(char kind, POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
int len,
len1 = 0;
@@ -786,13 +785,13 @@ SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend,
/*
* Optimization for other than "Command Complete", "Ready For query",
- * "Error response" ,"Notice message", "Notification response",
- * "Row description", "No data" and "Close Complete".
- * messages. Especially, since it is too often to receive and forward
- * "Data Row" message, we do not flush the message to frontend now. We
- * expect that "Command Complete" message (or "Error response" or "Notice
- * response" message) follows the stream of data row message anyway, so
- * flushing will be done at that time.
+ * "Error response" ,"Notice message", "Notification response", "Row
+ * description", "No data" and "Close Complete". messages. Especially,
+ * since it is too often to receive and forward "Data Row" message, we do
+ * not flush the message to frontend now. We expect that "Command
+ * Complete" message (or "Error response" or "Notice response" message)
+ * follows the stream of data row message anyway, so flushing will be done
+ * at that time.
*
* Same thing can be said to CopyData message. Tremendous number of
* CopyData messages are sent to frontend (typical use case is pg_dump).
@@ -851,8 +850,8 @@ SimpleForwardToFrontend(char kind, POOL_CONNECTION * frontend,
}
POOL_STATUS
-SimpleForwardToBackend(char kind, POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+SimpleForwardToBackend(char kind, POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int len, char *contents)
{
int sendlen;
@@ -912,7 +911,7 @@ SimpleForwardToBackend(char kind, POOL_CONNECTION * frontend,
* Handle parameter status message
*/
POOL_STATUS
-ParameterStatus(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+ParameterStatus(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
int len,
len1 = 0;
@@ -922,7 +921,8 @@ ParameterStatus(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
char *name;
char *value;
POOL_STATUS status;
- char *parambuf = NULL; /* pointer to parameter + value string buffer */
+ char *parambuf = NULL; /* pointer to parameter + value string
+ * buffer */
int i;
pool_write(frontend, "S", 1);
@@ -959,15 +959,16 @@ ParameterStatus(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
if (IS_MAIN_NODE_ID(i))
{
- int pos;
+ int pos;
len1 = len;
+
/*
* To suppress Coverity false positive warning. Actually
* being IS_MAIN_NODE_ID(i)) true only happens in a loop. So
* we don't need to worry about to leak memory previously
- * allocated in parambuf. But Coverity is not smart enough
- * to realize it.
+ * allocated in parambuf. But Coverity is not smart enough to
+ * realize it.
*/
if (parambuf)
pfree(parambuf);
@@ -984,7 +985,8 @@ ParameterStatus(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
else
{
/*
- * Except "in_hot_standby" parameter, complain the message length difference.
+ * Except "in_hot_standby" parameter, complain the message
+ * length difference.
*/
if (strcmp(name, "in_hot_standby"))
{
@@ -1037,7 +1039,7 @@ reset_connection(void)
* 0: no query was issued 1: a query was issued 2: no more queries remain -1: error
*/
static int
-reset_backend(POOL_CONNECTION_POOL * backend, int qcnt)
+reset_backend(POOL_CONNECTION_POOL *backend, int qcnt)
{
char *query;
int qn;
@@ -1129,7 +1131,7 @@ reset_backend(POOL_CONNECTION_POOL * backend, int qcnt)
bool
is_select_query(Node *node, char *sql)
{
- bool prepare = false;
+ bool prepare = false;
if (node == NULL)
return false;
@@ -1154,6 +1156,7 @@ is_select_query(Node *node, char *sql)
if (IsA(node, PrepareStmt))
{
PrepareStmt *prepare_statement = (PrepareStmt *) node;
+
prepare = true;
node = (Node *) (prepare_statement->query);
}
@@ -1183,15 +1186,15 @@ is_select_query(Node *node, char *sql)
}
/*
- * If SQL comment is not allowed, the query must start with certain characters.
- * However if it's PREPARE, we should skip the check.
+ * If SQL comment is not allowed, the query must start with certain
+ * characters. However if it's PREPARE, we should skip the check.
*/
if (!pool_config->allow_sql_comments)
/* '\0' and ';' signify empty query */
return (*sql == 's' || *sql == 'S' || *sql == '(' ||
*sql == 'w' || *sql == 'W' || *sql == 't' || *sql == 'T' ||
*sql == '\0' || *sql == ';' ||
- prepare);
+ prepare);
else
return true;
}
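The reflowed comment above spells out the shortcut taken when allow_sql_comments is off: the statement is classified by its first character only, with PREPARE bypassing the check. A self-contained sketch of that predicate, using an illustrative name rather than pgpool's:

#include <stdbool.h>

/*
 * Leading-character test as shown in is_select_query() above:
 * 's'/'S' (SELECT), '(' (parenthesized query), 'w'/'W' (WITH),
 * 't'/'T' (TABLE), and '\0'/';' for an empty query.
 */
static bool
starts_like_select(const char *sql, bool prepare)
{
    return (*sql == 's' || *sql == 'S' || *sql == '(' ||
            *sql == 'w' || *sql == 'W' || *sql == 't' || *sql == 'T' ||
            *sql == '\0' || *sql == ';' ||
            prepare);
}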
@@ -1325,7 +1328,7 @@ is_rollback_to_query(Node *node)
* send error message to frontend
*/
void
-pool_send_error_message(POOL_CONNECTION * frontend, int protoMajor,
+pool_send_error_message(POOL_CONNECTION *frontend, int protoMajor,
char *code,
char *message,
char *detail,
@@ -1340,7 +1343,7 @@ pool_send_error_message(POOL_CONNECTION * frontend, int protoMajor,
* send fatal message to frontend
*/
void
-pool_send_fatal_message(POOL_CONNECTION * frontend, int protoMajor,
+pool_send_fatal_message(POOL_CONNECTION *frontend, int protoMajor,
char *code,
char *message,
char *detail,
@@ -1355,7 +1358,7 @@ pool_send_fatal_message(POOL_CONNECTION * frontend, int protoMajor,
* send severity message to frontend
*/
void
-pool_send_severity_message(POOL_CONNECTION * frontend, int protoMajor,
+pool_send_severity_message(POOL_CONNECTION *frontend, int protoMajor,
char *code,
char *message,
char *detail,
@@ -1460,7 +1463,7 @@ pool_send_severity_message(POOL_CONNECTION * frontend, int protoMajor,
}
void
-pool_send_readyforquery(POOL_CONNECTION * frontend)
+pool_send_readyforquery(POOL_CONNECTION *frontend)
{
int len;
@@ -1481,7 +1484,7 @@ pool_send_readyforquery(POOL_CONNECTION * frontend)
* length for ReadyForQuery. This mode is necessary when called from ReadyForQuery().
*/
POOL_STATUS
-do_command(POOL_CONNECTION * frontend, POOL_CONNECTION * backend,
+do_command(POOL_CONNECTION *frontend, POOL_CONNECTION *backend,
char *query, int protoMajor, int pid, char *key, int keylen, int no_ready_for_query)
{
int len;
@@ -1496,8 +1499,8 @@ do_command(POOL_CONNECTION * frontend, POOL_CONNECTION * backend,
send_simplequery_message(backend, strlen(query) + 1, query, protoMajor);
/*
- * Wait for response from backend while polling frontend connection is
- * ok. If not, cancel the transaction.
+ * Wait for response from backend while polling frontend connection is ok.
+ * If not, cancel the transaction.
*/
wait_for_query_response_with_trans_cleanup(frontend,
backend,
@@ -1696,7 +1699,7 @@ retry_read_packet:
* If SELECT is error, we must abort transaction on other nodes.
*/
void
-do_error_command(POOL_CONNECTION * backend, int major)
+do_error_command(POOL_CONNECTION *backend, int major)
{
char *error_query = POOL_ERROR_QUERY;
int len;
@@ -1753,7 +1756,7 @@ do_error_command(POOL_CONNECTION * backend, int major)
* than main node to ket them go into abort status.
*/
void
-do_error_execute_command(POOL_CONNECTION_POOL * backend, int node_id, int major)
+do_error_execute_command(POOL_CONNECTION_POOL *backend, int node_id, int major)
{
char kind;
char *string;
@@ -1852,7 +1855,7 @@ do_error_execute_command(POOL_CONNECTION_POOL * backend, int node_id, int major)
* Free POOL_SELECT_RESULT object
*/
void
-free_select_result(POOL_SELECT_RESULT * result)
+free_select_result(POOL_SELECT_RESULT *result)
{
int i,
j;
@@ -1905,7 +1908,7 @@ free_select_result(POOL_SELECT_RESULT * result)
* to void. and now ereport is thrown in case of error occurred within the function
*/
void
-do_query(POOL_CONNECTION * backend, char *query, POOL_SELECT_RESULT * *result, int major)
+do_query(POOL_CONNECTION *backend, char *query, POOL_SELECT_RESULT **result, int major)
{
#define DO_QUERY_ALLOC_NUM 1024 /* memory allocation unit for
* POOL_SELECT_RESULT */
@@ -1970,10 +1973,9 @@ do_query(POOL_CONNECTION * backend, char *query, POOL_SELECT_RESULT * *result, i
/*
* Send a query to the backend. We use extended query protocol with named
- * statement/portal if we are processing extended query since simple
- * query breaks unnamed statements/portals. The name of named
- * statement/unnamed statement are "pgpool_PID" where PID is the process id
- * of itself.
+ * statement/portal if we are processing extended query since simple query
+ * breaks unnamed statements/portals. The name of named statement/unnamed
+ * statement are "pgpool_PID" where PID is the process id of itself.
*/
if (pool_get_session_context(true) && pool_is_doing_extended_query_message())
{
@@ -2140,7 +2142,8 @@ do_query(POOL_CONNECTION * backend, char *query, POOL_SELECT_RESULT * *result, i
if (pool_extract_error_message(false, backend, major, true, &message) == 1)
{
- int etype;
+ int etype;
+
/*
* This is fatal. Because: If we operate extended query,
* backend would not accept subsequent commands until "sync"
@@ -2150,9 +2153,8 @@ do_query(POOL_CONNECTION * backend, char *query, POOL_SELECT_RESULT * *result, i
* transaction is aborted, and subsequent query would not
* accepted. In summary there's no transparent way for
* frontend to handle error case. The only way is closing this
- * session.
- * However if the process type is main process, we should not
- * exit the process.
+ * session. However if the process type is main process, we
+ * should not exit the process.
*/
if (processType == PT_WORKER)
{
@@ -2499,7 +2501,7 @@ do_query(POOL_CONNECTION * backend, char *query, POOL_SELECT_RESULT * *result, i
* 3: row lock against insert_lock table is required
*/
int
-need_insert_lock(POOL_CONNECTION_POOL * backend, char *query, Node *node)
+need_insert_lock(POOL_CONNECTION_POOL *backend, char *query, Node *node)
{
/*
* Query to know if the target table has SERIAL column or not.
@@ -2519,7 +2521,7 @@ need_insert_lock(POOL_CONNECTION_POOL * backend, char *query, Node *node)
char *table;
int result;
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
/* INSERT statement? */
if (!IsA(node, InsertStmt))
@@ -2619,7 +2621,7 @@ need_insert_lock(POOL_CONNECTION_POOL * backend, char *query, Node *node)
* [ADMIN] 'SGT DETAIL: Could not open file "pg_clog/05DC": ...
*/
POOL_STATUS
-insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *query, InsertStmt *node, int lock_kind)
+insert_lock(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *query, InsertStmt *node, int lock_kind)
{
char *table;
int len = 0;
@@ -2631,7 +2633,7 @@ insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *qu
regex_t preg;
size_t nmatch = 2;
regmatch_t pmatch[nmatch];
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
POOL_SELECT_RESULT *result;
POOL_STATUS status = POOL_CONTINUE;
@@ -2827,7 +2829,7 @@ insert_lock(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *qu
else
{
status = do_command(frontend, MAIN(backend), qbuf, MAJOR(backend), MAIN_CONNECTION(backend)->pid,
- MAIN_CONNECTION(backend)->key, MAIN_CONNECTION(backend)->keylen ,0);
+ MAIN_CONNECTION(backend)->key, MAIN_CONNECTION(backend)->keylen, 0);
}
}
}
@@ -2895,7 +2897,7 @@ pool_has_insert_lock(void)
*/
bool result;
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
POOL_CONNECTION_POOL *backend;
backend = pool_get_session_context(false)->backend;
@@ -2923,7 +2925,8 @@ pool_has_insert_lock(void)
* Return POOL_CONTINUE if the row is inserted successfully
* or the row already exists, the others return POOL_ERROR.
*/
-static POOL_STATUS add_lock_target(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *table)
+static POOL_STATUS
+add_lock_target(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *table)
{
/*
* lock the row where reloid is 0 to avoid "duplicate key violates..."
@@ -2984,8 +2987,8 @@ static POOL_STATUS add_lock_target(POOL_CONNECTION * frontend, POOL_CONNECTION_P
* If lock is true, this function locks the row of the table oid.
*/
static bool
-has_lock_target(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+has_lock_target(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
char *table, bool lock)
{
char *suffix;
@@ -3025,9 +3028,10 @@ has_lock_target(POOL_CONNECTION * frontend,
/*
* Insert the oid of the specified table into insert_lock table.
*/
-static POOL_STATUS insert_oid_into_insert_lock(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
- char *table)
+static POOL_STATUS
+insert_oid_into_insert_lock(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ char *table)
{
char qbuf[QUERY_STRING_BUFFER_LEN];
POOL_STATUS status;
@@ -3093,7 +3097,7 @@ is_drop_database(Node *node)
* check if any pending data remains in backend.
*/
bool
-is_backend_cache_empty(POOL_CONNECTION_POOL * backend)
+is_backend_cache_empty(POOL_CONNECTION_POOL *backend)
{
int i;
@@ -3120,14 +3124,14 @@ is_backend_cache_empty(POOL_CONNECTION_POOL * backend)
* check if any pending data remains.
*/
static bool
-is_cache_empty(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+is_cache_empty(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
/* Are we suspending reading from frontend? */
if (!pool_is_suspend_reading_from_frontend())
{
/*
- * If SSL is enabled, we need to check SSL internal buffer is empty or not
- * first.
+ * If SSL is enabled, we need to check SSL internal buffer is empty or
+ * not first.
*/
if (pool_ssl_pending(frontend))
return false;
@@ -3228,7 +3232,7 @@ check_copy_from_stdin(Node *node)
* read kind from one backend
*/
void
-read_kind_from_one_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *kind, int node)
+read_kind_from_one_backend(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *kind, int node)
{
if (VALID_BACKEND(node))
{
@@ -3275,7 +3279,7 @@ is_all_standbys_command_complete(unsigned char *kind_list, int num_backends, int
* this function uses "decide by majority" method if kinds from all backends do not agree.
*/
void
-read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, char *decided_kind)
+read_kind_from_backend(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, char *decided_kind)
{
int i;
unsigned char kind_list[MAX_NUM_BACKENDS]; /* records each backend's kind */
@@ -3378,9 +3382,9 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
read_kind_from_one_backend(frontend, backend, (char *) &kind, MAIN_NODE_ID);
/*
- * If we received a notification message in native replication mode, other
- * backends will not receive the message. So we should skip other
- * nodes otherwise we will hang in pool_read.
+ * If we received a notification message in native replication mode,
+ * other backends will not receive the message. So we should skip
+ * other nodes otherwise we will hang in pool_read.
*/
if (kind == 'A')
{
@@ -3454,7 +3458,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
*/
else if (kind == 'S')
{
- int len2;
+ int len2;
pool_read(CONNECTION(backend, i), &len, sizeof(len));
len2 = len;
@@ -3469,7 +3473,8 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
if (IS_MAIN_NODE_ID(i))
{
- int pos;
+ int pos;
+
pool_add_param(&CONNECTION(backend, i)->params, p, value);
if (!strcmp("application_name", p))
@@ -3525,16 +3530,16 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
*/
for (i = 0; i < NUM_BACKENDS; i++)
{
- int unread_len;
- char *unread_p;
- char *p;
- int len;
+ int unread_len;
+ char *unread_p;
+ char *p;
+ int len;
if (VALID_BACKEND(i))
{
if (kind_list[i] == 'E')
{
- int major = MAJOR(CONNECTION(backend, i));
+ int major = MAJOR(CONNECTION(backend, i));
if (major == PROTO_MAJOR_V3)
{
@@ -3618,8 +3623,8 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
* cases it is possible that similar issue could happen since returned
* messages do not follow the sequence recorded in the pending
* messages because the backend ignores requests till sync message is
- * received. In this case we need to re-sync either primary or standby.
- * So we check not only the standby but primary node.
+ * received. In this case we need to re-sync either primary or
+ * standby. So we check not only the standby but primary node.
*/
if (session_context->load_balance_node_id != MAIN_NODE_ID &&
(kind_list[MAIN_NODE_ID] == 'Z' ||
@@ -3697,8 +3702,8 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
/*
* In main/replica mode, if primary gets an error at commit, while
- * other standbys are normal at commit, we don't need to degenerate any
- * backend because it is likely that the error was caused by a
+ * other standbys are normal at commit, we don't need to degenerate
+ * any backend because it is likely that the error was caused by a
* deferred trigger.
*/
else if (MAIN_REPLICA && query_context->parse_tree &&
@@ -3712,7 +3717,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
errdetail("do not degenerate because it is likely caused by a delayed commit")));
if (SL_MODE && pool_is_doing_extended_query_message() && msg)
- pool_pending_message_free_pending_message(msg);
+ pool_pending_message_free_pending_message(msg);
return;
}
else if (max_count <= NUM_BACKENDS / 2.0)
@@ -3755,7 +3760,7 @@ read_kind_from_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backen
if (degenerate_node_num)
{
int retcode = 2;
- StringInfoData msg;
+ StringInfoData msg;
initStringInfo(&msg);
appendStringInfoString(&msg, "kind mismatch among backends. ");
@@ -3948,7 +3953,7 @@ parse_copy_data(char *buf, int len, char delimiter, int col_id)
}
void
-query_ps_status(char *query, POOL_CONNECTION_POOL * backend)
+query_ps_status(char *query, POOL_CONNECTION_POOL *backend)
{
StartupPacket *sp;
char psbuf[1024];
@@ -4128,7 +4133,7 @@ is_internal_transaction_needed(Node *node)
* Start an internal transaction if necessary.
*/
POOL_STATUS
-start_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node *node)
+start_internal_transaction(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, Node *node)
{
int i;
@@ -4172,7 +4177,7 @@ start_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * ba
* that satisfy VALID_BACKEND macro.
*/
POOL_STATUS
-end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+end_internal_transaction(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
int i;
int len;
@@ -4253,10 +4258,9 @@ end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * back
if (MAJOR(backend) == PROTO_MAJOR_V3 && VALID_BACKEND(MAIN_NODE_ID))
{
/*
- * Skip rest of Ready for Query packet for backends
- * satisfying VALID_BACKEND macro because they should have
- * been already received the data, which is not good for
- * do_command().
+ * Skip rest of Ready for Query packet for backends satisfying
+ * VALID_BACKEND macro because they should have been already
+ * received the data, which is not good for do_command().
*/
pool_read(CONNECTION(backend, MAIN_NODE_ID), &len, sizeof(len));
pool_read(CONNECTION(backend, MAIN_NODE_ID), &tstate, sizeof(tstate));
@@ -4291,8 +4295,8 @@ end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * back
if (MAJOR(backend) == PROTO_MAJOR_V3 && !VALID_BACKEND(MAIN_NODE_ID))
{
/*
- * Skip rest of Ready for Query packet for the backend
- * that does not satisfy VALID_BACKEND.
+ * Skip rest of Ready for Query packet for the backend that
+ * does not satisfy VALID_BACKEND.
*/
pool_read(CONNECTION(backend, MAIN_NODE_ID), &len, sizeof(len));
pool_read(CONNECTION(backend, MAIN_NODE_ID), &tstate, sizeof(tstate));
@@ -4317,7 +4321,7 @@ end_internal_transaction(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * back
static bool
is_panic_or_fatal_error(char *message, int major)
{
- char *str;
+ char *str;
str = extract_error_kind(message, major);
@@ -4335,7 +4339,7 @@ is_panic_or_fatal_error(char *message, int major)
char *
extract_error_kind(char *message, int major)
{
- char *ret = "unknown";
+ char *ret = "unknown";
if (major == PROTO_MAJOR_V3)
{
@@ -4370,7 +4374,7 @@ extract_error_kind(char *message, int major)
}
static int
-detect_postmaster_down_error(POOL_CONNECTION * backend, int major)
+detect_postmaster_down_error(POOL_CONNECTION *backend, int major)
{
int r = extract_message(backend, ADMIN_SHUTDOWN_ERROR_CODE, major, 'E', false);
@@ -4410,7 +4414,7 @@ detect_postmaster_down_error(POOL_CONNECTION * backend, int major)
}
int
-detect_active_sql_transaction_error(POOL_CONNECTION * backend, int major)
+detect_active_sql_transaction_error(POOL_CONNECTION *backend, int major)
{
int r = extract_message(backend, ACTIVE_SQL_TRANSACTION_ERROR_CODE, major, 'E', true);
@@ -4424,7 +4428,7 @@ detect_active_sql_transaction_error(POOL_CONNECTION * backend, int major)
}
int
-detect_deadlock_error(POOL_CONNECTION * backend, int major)
+detect_deadlock_error(POOL_CONNECTION *backend, int major)
{
int r = extract_message(backend, DEADLOCK_ERROR_CODE, major, 'E', true);
@@ -4436,7 +4440,7 @@ detect_deadlock_error(POOL_CONNECTION * backend, int major)
}
int
-detect_serialization_error(POOL_CONNECTION * backend, int major, bool unread)
+detect_serialization_error(POOL_CONNECTION *backend, int major, bool unread)
{
int r = extract_message(backend, SERIALIZATION_FAIL_ERROR_CODE, major, 'E', unread);
@@ -4448,7 +4452,7 @@ detect_serialization_error(POOL_CONNECTION * backend, int major, bool unread)
}
int
-detect_query_cancel_error(POOL_CONNECTION * backend, int major)
+detect_query_cancel_error(POOL_CONNECTION *backend, int major)
{
int r = extract_message(backend, QUERY_CANCEL_ERROR_CODE, major, 'E', true);
@@ -4461,7 +4465,7 @@ detect_query_cancel_error(POOL_CONNECTION * backend, int major)
int
-detect_idle_in_transaction_session_timeout_error(POOL_CONNECTION * backend, int major)
+detect_idle_in_transaction_session_timeout_error(POOL_CONNECTION *backend, int major)
{
int r = extract_message(backend, IDLE_IN_TRANSACTION_SESSION_TIMEOUT_ERROR_CODE, major, 'E', true);
@@ -4473,7 +4477,7 @@ detect_idle_in_transaction_session_timeout_error(POOL_CONNECTION * backend, int
}
int
-detect_idle_session_timeout_error(POOL_CONNECTION * backend, int major)
+detect_idle_session_timeout_error(POOL_CONNECTION *backend, int major)
{
int r = extract_message(backend, IDLE_SESSION_TIMEOUT_ERROR_CODE, major, 'E', true);
@@ -4490,13 +4494,13 @@ detect_idle_session_timeout_error(POOL_CONNECTION * backend, int major)
* throw an ereport for all other errors.
*/
static int
-extract_message(POOL_CONNECTION * backend, char *error_code, int major, char class, bool unread)
+extract_message(POOL_CONNECTION *backend, char *error_code, int major, char class, bool unread)
{
int is_error = 0;
char kind;
int len;
int nlen = 0;
- char *str = NULL;
+ char *str = NULL;
if (pool_read(backend, &kind, sizeof(kind)))
return -1;
@@ -4567,7 +4571,7 @@ extract_message(POOL_CONNECTION * backend, char *error_code, int major, char cla
*/
static bool
-pool_process_notice_message_from_one_backend(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int backend_idx, char kind)
+pool_process_notice_message_from_one_backend(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, int backend_idx, char kind)
{
int major = MAJOR(backend);
POOL_CONNECTION *backend_conn = CONNECTION(backend, backend_idx);
@@ -4659,7 +4663,7 @@ pool_process_notice_message_from_one_backend(POOL_CONNECTION * frontend, POOL_CO
* -1: error
*/
int
-pool_extract_error_message(bool read_kind, POOL_CONNECTION * backend, int major, bool unread, char **message)
+pool_extract_error_message(bool read_kind, POOL_CONNECTION *backend, int major, bool unread, char **message)
{
char kind;
int ret = 1;
@@ -4764,7 +4768,7 @@ pool_extract_error_message(bool read_kind, POOL_CONNECTION * backend, int major,
* read message kind and rest of the packet then discard it
*/
POOL_STATUS
-pool_discard_packet(POOL_CONNECTION_POOL * cp)
+pool_discard_packet(POOL_CONNECTION_POOL *cp)
{
int i;
char kind;
@@ -4792,7 +4796,7 @@ pool_discard_packet(POOL_CONNECTION_POOL * cp)
* read message length and rest of the packet then discard it
*/
POOL_STATUS
-pool_discard_packet_contents(POOL_CONNECTION_POOL * cp)
+pool_discard_packet_contents(POOL_CONNECTION_POOL *cp)
{
int len,
i;
@@ -4834,7 +4838,8 @@ pool_discard_packet_contents(POOL_CONNECTION_POOL * cp)
/*
* Read packet from either frontend or backend and process it.
*/
-static POOL_STATUS read_packets_and_process(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int reset_request, int *state, short *num_fields, bool *cont)
+static POOL_STATUS
+read_packets_and_process(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, int reset_request, int *state, short *num_fields, bool *cont)
{
fd_set readmask;
fd_set writemask;
@@ -4982,7 +4987,7 @@ SELECT_RETRY:
ereport(LOG,
(errmsg("error occurred while reading and processing packets"),
errdetail("FATAL ERROR: VALID_BACKEND returns non 0 but connection slot is empty. backend id:%d RAW_MODE:%d LOAD_BALANCE_STATUS:%d status:%d",
- i, RAW_MODE, LOAD_BALANCE_STATUS(i), BACKEND_INFO(i).backend_status)));
+ i, RAW_MODE, LOAD_BALANCE_STATUS (i), BACKEND_INFO(i).backend_status)));
was_error = 1;
break;
}
@@ -5006,7 +5011,8 @@ SELECT_RETRY:
}
/*
- * connection was terminated due to idle_in_transaction_session_timeout expired
+ * connection was terminated due to
+ * idle_in_transaction_session_timeout expired
*/
r = detect_idle_in_transaction_session_timeout_error(CONNECTION(backend, i), MAJOR(backend));
if (r == SPECIFIED_ERROR)
@@ -5017,7 +5023,8 @@ SELECT_RETRY:
}
/*
- * connection was terminated due to idle_session_timeout expired
+ * connection was terminated due to idle_session_timeout
+ * expired
*/
r = detect_idle_session_timeout_error(CONNECTION(backend, i), MAJOR(backend));
if (r == SPECIFIED_ERROR)
@@ -5167,7 +5174,7 @@ pool_dump_valid_backend(int backend_id)
* Returns true if data was actually pushed.
*/
bool
-pool_push_pending_data(POOL_CONNECTION * backend)
+pool_push_pending_data(POOL_CONNECTION *backend)
{
POOL_SESSION_CONTEXT *session_context;
int len;
@@ -5175,8 +5182,8 @@ pool_push_pending_data(POOL_CONNECTION * backend)
bool pending_data_existed = false;
static char random_statement[] = "pgpool_non_existent";
- int num_pending_messages;
- int num_pushed_messages;
+ int num_pending_messages;
+ int num_pushed_messages;
if (!pool_get_session_context(true) || !pool_is_doing_extended_query_message())
return false;
@@ -5194,7 +5201,8 @@ pool_push_pending_data(POOL_CONNECTION * backend)
* In streaming replication mode, send a Close message for none existing
* prepared statement and flush message before going any further to
* retrieve and save any pending response packet from backend. This
- * ensures that at least "close complete" message is returned from backend.
+ * ensures that at least "close complete" message is returned from
+ * backend.
*
* The saved packets will be popped up before returning to caller. This
* preserves the user's expectation of packet sequence.
@@ -5254,7 +5262,7 @@ pool_push_pending_data(POOL_CONNECTION * backend)
len = ntohl(len);
len -= sizeof(len);
buf = NULL;
- if (len > 0)
+ if (len > 0)
{
buf = palloc(len);
pool_read(backend, buf, len);
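Most of the changes to this file are declaration respacing, but one behavior the touched comments describe is worth a worked example: read_kind_from_backend() resolves disagreeing message kinds by majority among the backends. A standalone sketch of that counting step, leaving out the tie handling and the many special cases of the real function (the name is illustrative):

#include <string.h>

/*
 * "Decide by majority" sketch: count how many backends reported each
 * message kind and return the most common one.
 */
static unsigned char
majority_kind(const unsigned char *kind_list, int num_backends)
{
    int     counts[256];
    int     best = 0;
    int     i;

    memset(counts, 0, sizeof(counts));
    for (i = 0; i < num_backends; i++)
        counts[kind_list[i]]++;

    for (i = 1; i < 256; i++)
        if (counts[i] > counts[best])
            best = i;

    return (unsigned char) best;
}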
diff --git a/src/protocol/pool_proto2.c b/src/protocol/pool_proto2.c
index ffca996c3..bd09a4494 100644
--- a/src/protocol/pool_proto2.c
+++ b/src/protocol/pool_proto2.c
@@ -34,8 +34,8 @@
#include "utils/elog.h"
POOL_STATUS
-AsciiRow(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+AsciiRow(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
short num_fields)
{
static char nullmap[8192],
@@ -161,8 +161,8 @@ AsciiRow(POOL_CONNECTION * frontend,
}
POOL_STATUS
-BinaryRow(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+BinaryRow(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
short num_fields)
{
static char nullmap[8192],
@@ -273,8 +273,8 @@ BinaryRow(POOL_CONNECTION * frontend,
}
POOL_STATUS
-CompletedResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+CompletedResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
int i;
char *string = NULL;
@@ -339,8 +339,8 @@ CompletedResponse(POOL_CONNECTION * frontend,
}
POOL_STATUS
-CursorResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+CursorResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
char *string = NULL;
char *string1 = NULL;
@@ -387,8 +387,8 @@ CursorResponse(POOL_CONNECTION * frontend,
}
void
-EmptyQueryResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+EmptyQueryResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
char c;
int i;
@@ -406,8 +406,8 @@ EmptyQueryResponse(POOL_CONNECTION * frontend,
}
POOL_STATUS
-ErrorResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+ErrorResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
char *string = "";
int len = 0;
@@ -448,8 +448,8 @@ ErrorResponse(POOL_CONNECTION * frontend,
}
POOL_STATUS
-FunctionResultResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+FunctionResultResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
char dummy;
int len;
@@ -517,8 +517,8 @@ FunctionResultResponse(POOL_CONNECTION * frontend,
}
void
-NoticeResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+NoticeResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
char *string = NULL;
int len = 0;
@@ -550,8 +550,8 @@ NoticeResponse(POOL_CONNECTION * frontend,
}
POOL_STATUS
-NotificationResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+NotificationResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
int pid,
pid1;
@@ -593,8 +593,8 @@ NotificationResponse(POOL_CONNECTION * frontend,
}
int
-RowDescription(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+RowDescription(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
short *result)
{
short num_fields,
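These V2 protocol handlers only receive pointer-declarator respacing, but both AsciiRow() and BinaryRow() read a per-row null bitmap into the static nullmap buffer. A small sketch of the size calculation that bitmap implies, stated as an assumption about the V2 wire format rather than taken from this diff:

/*
 * Assumed V2 DataRow layout: one bit per field in the null bitmap, so
 * its byte length is the field count rounded up to a whole byte.
 */
static int
nullmap_length(short num_fields)
{
    return (num_fields + 7) / 8;
}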
diff --git a/src/protocol/pool_proto_modules.c b/src/protocol/pool_proto_modules.c
index c2802998a..6fad3353c 100644
--- a/src/protocol/pool_proto_modules.c
+++ b/src/protocol/pool_proto_modules.c
@@ -86,27 +86,27 @@ int is_select_for_update = 0; /* 1 if SELECT INTO or SELECT FOR
*/
char query_string_buffer[QUERY_STRING_BUFFER_LEN];
-static int check_errors(POOL_CONNECTION_POOL * backend, int backend_id);
+static int check_errors(POOL_CONNECTION_POOL *backend, int backend_id);
static void generate_error_message(char *prefix, int specific_error, char *query);
-static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
- POOL_SENT_MESSAGE * message,
- POOL_SENT_MESSAGE * bind_message);
-static POOL_STATUS send_prepare(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
- POOL_SENT_MESSAGE * message);
+static POOL_STATUS parse_before_bind(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ POOL_SENT_MESSAGE *message,
+ POOL_SENT_MESSAGE *bind_message);
+static POOL_STATUS send_prepare(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ POOL_SENT_MESSAGE *message);
static int *find_victim_nodes(int *ntuples, int nmembers, int main_node, int *number_of_nodes);
-static POOL_STATUS close_standby_transactions(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
+static POOL_STATUS close_standby_transactions(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
static char *flatten_set_variable_args(const char *name, List *args);
static bool
- process_pg_terminate_backend_func(POOL_QUERY_CONTEXT * query_context);
-static void pool_discard_except_sync_and_ready_for_query(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend);
-static void si_get_snapshot(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node *node, bool tstate_check);
+ process_pg_terminate_backend_func(POOL_QUERY_CONTEXT *query_context);
+static void pool_discard_except_sync_and_ready_for_query(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend);
+static void si_get_snapshot(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, Node *node, bool tstate_check);
-static bool check_transaction_state_and_abort(char *query, Node *node, POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend);
+static bool check_transaction_state_and_abort(char *query, Node *node, POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend);
static bool multi_statement_query(char *buf);
@@ -137,7 +137,7 @@ static POOL_QUERY_CONTEXT *create_dummy_query_context(void);
*
*/
static bool
-process_pg_terminate_backend_func(POOL_QUERY_CONTEXT * query_context)
+process_pg_terminate_backend_func(POOL_QUERY_CONTEXT *query_context)
{
/*
* locate pg_terminate_backend and get the pid argument, if
@@ -184,8 +184,8 @@ process_pg_terminate_backend_func(POOL_QUERY_CONTEXT * query_context)
* If frontend == NULL, we are called in case of reset queries.
*/
POOL_STATUS
-SimpleQuery(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend, int len, char *contents)
+SimpleQuery(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend, int len, char *contents)
{
static char *sq_config = "pool_status";
static char *sq_pools = "pool_pools";
@@ -249,7 +249,7 @@ SimpleQuery(POOL_CONNECTION * frontend,
* Fetch memory cache if possible
*/
if (pool_config->memory_cache_enabled)
- is_likely_select = pool_is_likely_select(contents);
+ is_likely_select = pool_is_likely_select(contents);
/*
* If memory query cache enabled and the query seems to be a SELECT use
@@ -261,8 +261,8 @@ SimpleQuery(POOL_CONNECTION * frontend,
* transaction, but it will need parsing query and accessing to system
* catalog, which will add significant overhead. Moreover if we are in
* aborted transaction, commands should be ignored, so we should not use
+ * query cache. Also query cache is disabled, we should not fetch from
* query cache.
- * Also query cache is disabled, we should not fetch from query cache.
*/
if (pool_config->memory_cache_enabled && is_likely_select &&
!pool_is_writing_transaction() &&
@@ -308,6 +308,7 @@ SimpleQuery(POOL_CONNECTION * frontend,
else
{
query_context->is_multi_statement = false;
+
/*
* Do not use minimal parser if we are in native replication or
* snapshot isolation mode.
@@ -355,8 +356,8 @@ SimpleQuery(POOL_CONNECTION * frontend,
* were an DELETE command. Note that the DELETE command does not
* execute, instead the original query will be sent to backends,
* which may or may not cause an actual syntax errors. The command
- * will be sent to all backends in replication mode or
- * primary in native replication mode.
+ * will be sent to all backends in replication mode or primary in
+ * native replication mode.
*/
if (!strcmp(remote_host, "[local]"))
{
@@ -438,7 +439,7 @@ SimpleQuery(POOL_CONNECTION * frontend,
(errmsg("DB's oid to discard its cache directory: dboid = %d", query_context->dboid)));
}
}
-
+
/*
* check COPY FROM STDIN if true, set copy_* variable
*/
@@ -669,13 +670,13 @@ SimpleQuery(POOL_CONNECTION * frontend,
struct timeval stime;
stime.tv_usec = 0;
- stime.tv_sec = 5; /* XXX give arbitrary time to allow
- * closing idle connections */
+ stime.tv_sec = 5; /* XXX give arbitrary time to allow closing
+ * idle connections */
ereport(DEBUG1,
(errmsg("Query: sending SIGUSR1 signal to parent")));
- ignore_sigusr1 = 1; /* disable SIGUSR1 handler */
+ ignore_sigusr1 = 1; /* disable SIGUSR1 handler */
close_idle_connections();
/*
@@ -684,7 +685,7 @@ SimpleQuery(POOL_CONNECTION * frontend,
*/
for (;;)
{
- int sts;
+ int sts;
errno = 0;
sts = select(0, NULL, NULL, NULL, &stime);
@@ -824,8 +825,9 @@ SimpleQuery(POOL_CONNECTION * frontend,
/*
* If the query is BEGIN READ WRITE or BEGIN ... SERIALIZABLE
* in streaming replication mode, we send BEGIN to standbys
- * instead. The original_query which is BEGIN READ WRITE is sent
- * to primary. The rewritten_query BEGIN is sent to standbys.
+ * instead. The original_query which is BEGIN READ WRITE is
+ * sent to primary. The rewritten_query BEGIN is sent to
+ * standbys.
*/
if (pool_need_to_treat_as_if_default_transaction(query_context))
{
@@ -898,8 +900,8 @@ SimpleQuery(POOL_CONNECTION * frontend,
}
/*
- * Send "COMMIT" or "ROLLBACK" to only main node if query is
- * "COMMIT" or "ROLLBACK"
+ * Send "COMMIT" or "ROLLBACK" to only main node if query is "COMMIT"
+ * or "ROLLBACK"
*/
if (commit)
{
@@ -930,7 +932,7 @@ SimpleQuery(POOL_CONNECTION * frontend,
* process EXECUTE (V3 only)
*/
POOL_STATUS
-Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
+Execute(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
int len, char *contents)
{
int commit = 0;
@@ -942,7 +944,7 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
POOL_SENT_MESSAGE *bind_msg;
bool foundp = false;
int num_rows;
- char *p;
+ char *p;
/* Get session context */
session_context = pool_get_session_context(false);
@@ -1002,7 +1004,7 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
elog(DEBUG1, "set partial_fetch in execute");
}
elog(DEBUG1, "execute: partial_fetch: %d", query_context->partial_fetch);
-
+
strlcpy(query_string_buffer, query, sizeof(query_string_buffer));
ereport(DEBUG2, (errmsg("Execute: query string = <%s>", query)));
@@ -1017,8 +1019,8 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
/*
* Fetch memory cache if possible. Also if atEnd is false or the execute
- * message has 0 row argument, we maybe able to use cache.
- * If partial_fetch is true, cannot use cache.
+ * message has 0 row argument, we maybe able to use cache. If
+ * partial_fetch is true, cannot use cache.
*/
if (pool_config->memory_cache_enabled && !pool_is_writing_transaction() &&
(TSTATE(backend, MAIN_REPLICA ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID) != 'E')
@@ -1071,10 +1073,10 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
}
/*
- * If bind message is sent again to an existing prepared statement,
- * it is possible that query_w_hex remains. Before setting newly
- * allocated query_w_hex's pointer to the query context, free the
- * previously allocated memory.
+ * If bind message is sent again to an existing prepared
+ * statement, it is possible that query_w_hex remains. Before
+ * setting newly allocated query_w_hex's pointer to the query
+ * context, free the previously allocated memory.
*/
if (query_context->query_w_hex)
{
@@ -1203,8 +1205,8 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
}
/*
- * send "COMMIT" or "ROLLBACK" to only main node if query is
- * "COMMIT" or "ROLLBACK"
+ * send "COMMIT" or "ROLLBACK" to only main node if query is "COMMIT"
+ * or "ROLLBACK"
*/
if (commit)
{
@@ -1245,8 +1247,8 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
* Take care of "writing transaction" flag.
*/
if ((!is_select_query(node, query) || pool_has_function_call(node)) &&
- !is_start_transaction_query(node) &&
- !is_commit_or_rollback_query(node))
+ !is_start_transaction_query(node) &&
+ !is_commit_or_rollback_query(node))
{
ereport(DEBUG1,
(errmsg("Execute: TSTATE:%c",
@@ -1280,7 +1282,7 @@ Execute(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
* process Parse (V3 only)
*/
POOL_STATUS
-Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
+Parse(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
int len, char *contents)
{
int deadlock_detected = 0;
@@ -1316,7 +1318,7 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
/* parse SQL string */
MemoryContext old_context = MemoryContextSwitchTo(query_context->memory_context);
- parse_tree_list = raw_parser(stmt, RAW_PARSE_DEFAULT, strlen(stmt),&error,!REPLICATION);
+ parse_tree_list = raw_parser(stmt, RAW_PARSE_DEFAULT, strlen(stmt), &error, !REPLICATION);
if (parse_tree_list == NIL)
{
@@ -1338,8 +1340,8 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
* were an DELETE command. Note that the DELETE command does not
* execute, instead the original query will be sent to backends,
* which may or may not cause an actual syntax errors. The command
- * will be sent to all backends in replication mode or
- * primary in native replication mode.
+ * will be sent to all backends in replication mode or primary in
+ * native replication mode.
*/
if (!strcmp(remote_host, "[local]"))
{
@@ -1505,9 +1507,9 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
/*
* If the query is BEGIN READ WRITE in main replica mode, we send
- * BEGIN instead of it to standbys. original_query which is
- * BEGIN READ WRITE is sent to primary. rewritten_query which is BEGIN
- * is sent to standbys.
+ * BEGIN instead of it to standbys. original_query which is BEGIN READ
+ * WRITE is sent to primary. rewritten_query which is BEGIN is sent to
+ * standbys.
*/
if (is_start_transaction_query(query_context->parse_tree) &&
is_read_write((TransactionStmt *) query_context->parse_tree) &&
@@ -1518,7 +1520,8 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
}
/*
- * If not in streaming or logical replication mode, send "SYNC" message if not in a transaction.
+ * If not in streaming or logical replication mode, send "SYNC" message if
+ * not in a transaction.
*/
if (!SL_MODE)
{
@@ -1565,10 +1568,10 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
kind)));
}
else
- ereport(ERROR,
- (errmsg("unable to parse the query"),
- errdetail("invalid read kind \"%c\" returned from backend after Sync message sent",
- kind)));
+ ereport(ERROR,
+ (errmsg("unable to parse the query"),
+ errdetail("invalid read kind \"%c\" returned from backend after Sync message sent",
+ kind)));
}
/*
@@ -1687,7 +1690,7 @@ Parse(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
}
POOL_STATUS
-Bind(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
+Bind(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
int len, char *contents)
{
char *pstmt_name;
@@ -1786,7 +1789,7 @@ Bind(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
TSTATE(backend, MAIN_REPLICA ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID) == 'T')
{
pool_where_to_send(query_context, query_context->original_query,
- query_context->parse_tree);
+ query_context->parse_tree);
}
/*
@@ -1812,7 +1815,7 @@ Bind(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
&oids);
/* Save to oid buffer */
for (i = 0; i < num_oids; i++)
- pool_add_dml_table_oid(oids[i]);
+ pool_add_dml_table_oid(oids[i]);
}
}
@@ -1883,7 +1886,7 @@ Bind(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
}
POOL_STATUS
-Describe(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
+Describe(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
int len, char *contents)
{
POOL_SENT_MESSAGE *msg;
@@ -1976,7 +1979,7 @@ Describe(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
POOL_STATUS
-Close(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
+Close(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
int len, char *contents)
{
POOL_SENT_MESSAGE *msg;
@@ -2012,10 +2015,9 @@ Close(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
errmsg("unable to execute close, invalid message")));
/*
- * For PostgreSQL, calling close on non existing portals or
- * statements is not an error. So on the same footings we will ignore all
- * such calls and return the close complete message to clients with out
- * going to backend
+ * For PostgreSQL, calling close on non existing portals or statements is
+ * not an error. So on the same footings we will ignore all such calls and
+ * return the close complete message to clients with out going to backend
*/
if (!msg)
{
@@ -2107,7 +2109,7 @@ Close(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
POOL_STATUS
-FunctionCall3(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
+FunctionCall3(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
int len, char *contents)
{
/*
@@ -2138,8 +2140,8 @@ FunctionCall3(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
* - internal transaction is closed
*/
POOL_STATUS
-ReadyForQuery(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend, bool send_ready, bool cache_commit)
+ReadyForQuery(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend, bool send_ready, bool cache_commit)
{
int i;
int len;
@@ -2189,7 +2191,7 @@ ReadyForQuery(POOL_CONNECTION * frontend,
if (victim_nodes)
{
int i;
- StringInfoData msg;
+ StringInfoData msg;
initStringInfo(&msg);
appendStringInfoString(&msg, "ReadyForQuery: Degenerate backends:");
@@ -2280,7 +2282,7 @@ ReadyForQuery(POOL_CONNECTION * frontend,
/* if (pool_is_query_in_progress() && allow_close_transaction) */
if (REPLICATION && allow_close_transaction)
{
- bool internal_transaction_started = INTERNAL_TRANSACTION_STARTED(backend, MAIN_NODE_ID);
+ bool internal_transaction_started = INTERNAL_TRANSACTION_STARTED(backend, MAIN_NODE_ID);
/*
* If we are running in snapshot isolation mode and started an
@@ -2332,10 +2334,11 @@ ReadyForQuery(POOL_CONNECTION * frontend,
TSTATE(backend, i) = kind;
ereport(DEBUG5,
(errmsg("processing ReadyForQuery"),
- errdetail("transaction state of node %d '%c'(%02x)", i, kind , kind)));
+ errdetail("transaction state of node %d '%c'(%02x)", i, kind, kind)));
/*
- * The transaction state to be returned to frontend is main node's.
+ * The transaction state to be returned to frontend is main
+ * node's.
*/
if (i == (MAIN_REPLICA ? PRIMARY_NODE_ID : REAL_MAIN_NODE_ID))
{
@@ -2475,8 +2478,9 @@ ReadyForQuery(POOL_CONNECTION * frontend,
/*
* Close running transactions on standbys.
*/
-static POOL_STATUS close_standby_transactions(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+static POOL_STATUS
+close_standby_transactions(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
int i;
@@ -2500,7 +2504,7 @@ static POOL_STATUS close_standby_transactions(POOL_CONNECTION * frontend,
}
POOL_STATUS
-ParseComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+ParseComplete(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
POOL_SESSION_CONTEXT *session_context;
@@ -2524,7 +2528,7 @@ ParseComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
}
POOL_STATUS
-BindComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+BindComplete(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
POOL_SESSION_CONTEXT *session_context;
@@ -2548,7 +2552,7 @@ BindComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
}
POOL_STATUS
-CloseComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+CloseComplete(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
POOL_SESSION_CONTEXT *session_context;
POOL_STATUS status;
@@ -2622,8 +2626,8 @@ CloseComplete(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
}
POOL_STATUS
-ParameterDescription(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+ParameterDescription(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
int len,
len1 = 0;
@@ -2706,8 +2710,8 @@ ParameterDescription(POOL_CONNECTION * frontend,
}
POOL_STATUS
-ErrorResponse3(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+ErrorResponse3(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
POOL_STATUS ret;
@@ -2722,8 +2726,8 @@ ErrorResponse3(POOL_CONNECTION * frontend,
}
POOL_STATUS
-FunctionCall(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+FunctionCall(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
char dummy[2];
int oid;
@@ -2818,8 +2822,8 @@ FunctionCall(POOL_CONNECTION * frontend,
}
POOL_STATUS
-ProcessFrontendResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+ProcessFrontendResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
char fkind;
char *bufp = NULL;
@@ -3077,8 +3081,8 @@ ProcessFrontendResponse(POOL_CONNECTION * frontend,
}
POOL_STATUS
-ProcessBackendResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+ProcessBackendResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
int *state, short *num_fields)
{
int status = POOL_CONTINUE;
@@ -3226,10 +3230,12 @@ ProcessBackendResponse(POOL_CONNECTION * frontend,
case 'E': /* ErrorResponse */
if (pool_is_doing_extended_query_message())
{
- char *message;
+ char *message;
- /* Log the error message which was possibly missed till
- * a sync message was sent */
+ /*
+ * Log the error message which was possibly missed till a
+ * sync message was sent
+ */
if (pool_extract_error_message(false, MAIN(backend), PROTO_MAJOR_V3,
true, &message) == 1)
{
@@ -3396,8 +3402,8 @@ ProcessBackendResponse(POOL_CONNECTION * frontend,
}
POOL_STATUS
-CopyInResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+CopyInResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
POOL_STATUS status;
@@ -3415,8 +3421,8 @@ CopyInResponse(POOL_CONNECTION * frontend,
}
POOL_STATUS
-CopyOutResponse(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+CopyOutResponse(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
POOL_STATUS status;
@@ -3434,8 +3440,8 @@ CopyOutResponse(POOL_CONNECTION * frontend,
}
POOL_STATUS
-CopyDataRows(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend, int copyin)
+CopyDataRows(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend, int copyin)
{
char *string = NULL;
int len;
@@ -3476,6 +3482,7 @@ CopyDataRows(POOL_CONNECTION * frontend,
copy_count++;
continue;
}
+
/*
* Flush (H) or Sync (S) messages should be ignored while in
* the COPY IN mode.
@@ -3607,7 +3614,7 @@ CopyDataRows(POOL_CONNECTION * frontend,
* transaction state.
*/
void
-raise_intentional_error_if_need(POOL_CONNECTION_POOL * backend)
+raise_intentional_error_if_need(POOL_CONNECTION_POOL *backend)
{
int i;
POOL_SESSION_CONTEXT *session_context;
@@ -3688,7 +3695,7 @@ raise_intentional_error_if_need(POOL_CONNECTION_POOL * backend)
*---------------------------------------------------
*/
static int
-check_errors(POOL_CONNECTION_POOL * backend, int backend_id)
+check_errors(POOL_CONNECTION_POOL *backend, int backend_id)
{
/*
@@ -3760,7 +3767,7 @@ generate_error_message(char *prefix, int specific_error, char *query)
"received query cancel error message from main node. query: %s"
};
- StringInfoData msg;
+ StringInfoData msg;
session_context = pool_get_session_context(true);
if (!session_context)
@@ -3788,15 +3795,15 @@ generate_error_message(char *prefix, int specific_error, char *query)
* Make per DB node statement log
*/
void
-per_node_statement_log(POOL_CONNECTION_POOL * backend, int node_id, char *query)
+per_node_statement_log(POOL_CONNECTION_POOL *backend, int node_id, char *query)
{
- ProcessInfo *pi = pool_get_my_process_info();
+ ProcessInfo *pi = pool_get_my_process_info();
POOL_CONNECTION_POOL_SLOT *slot = backend->slots[node_id];
if (pool_config->log_per_node_statement)
ereport(LOG,
(errmsg("DB node id: %d backend pid: %d statement: %s", node_id, ntohl(slot->pid), query)));
-
+
pi_set(node_id);
StrNCpy(pi->statement, query, MAXSTMTLEN);
}
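
For reference, the per-node bookkeeping driven by pi_set() above (and tested by is_pi_set() below) amounts to a small bitmap over backend node ids. A minimal standalone sketch of that idea, assuming a two-word uint64 array like ProcessInfo.node_ids (enough for 128 node ids); the names here are illustrative, not pgpool's:

    #include <stdint.h>
    #include <stdbool.h>

    #define BITS_PER_WORD (8 * sizeof(uint64_t))

    /* two words cover node ids 0..127, mirroring ProcessInfo.node_ids */
    static uint64_t node_ids[2];

    /* remember that node_id was touched by the current statement */
    static void
    node_bitmap_set(int node_id)
    {
        if (node_id < BITS_PER_WORD)
            node_ids[0] |= ((uint64_t) 1 << node_id);
        else
            node_ids[1] |= ((uint64_t) 1 << (node_id - BITS_PER_WORD));
    }

    /* was node_id touched by the current statement? */
    static bool
    node_bitmap_is_set(int node_id)
    {
        if (node_id < BITS_PER_WORD)
            return (node_ids[0] & ((uint64_t) 1 << node_id)) != 0;
        return (node_ids[1] & ((uint64_t) 1 << (node_id - BITS_PER_WORD))) != 0;
    }

Clearing the map is just a memset over the array, which is what init_pi_set() does in the hunk below.
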
@@ -3807,7 +3814,7 @@ per_node_statement_log(POOL_CONNECTION_POOL * backend, int node_id, char *query)
void
init_pi_set(void)
{
- ProcessInfo *pi = pool_get_my_process_info();
+ ProcessInfo *pi = pool_get_my_process_info();
memset(pi->node_ids, 0, sizeof(pi->node_ids));
pi->statement[0] = '\0';
@@ -3819,7 +3826,7 @@ init_pi_set(void)
void
pi_set(int node_id)
{
- ProcessInfo *pi = pool_get_my_process_info();
+ ProcessInfo *pi = pool_get_my_process_info();
if (node_id < BITS_PER_TYPE(uint64))
pi->node_ids[0] |= (1 << node_id);
@@ -3833,7 +3840,7 @@ pi_set(int node_id)
bool
is_pi_set(uint64 *node_ids, int node_id)
{
- int set;
+ int set;
if (node_id < BITS_PER_TYPE(uint64))
set = node_ids[0] & (1 << node_id);
@@ -3846,7 +3853,7 @@ is_pi_set(uint64 *node_ids, int node_id)
* Make per DB node statement notice message
*/
void
-per_node_statement_notice(POOL_CONNECTION_POOL * backend, int node_id, char *query)
+per_node_statement_notice(POOL_CONNECTION_POOL *backend, int node_id, char *query)
{
if (pool_config->notice_per_node_statement)
ereport(NOTICE,
@@ -3856,56 +3863,57 @@ per_node_statement_notice(POOL_CONNECTION_POOL * backend, int node_id, char *que
/*
* Make backend message log when log_backend_messages is on.
*/
-void log_backend_messages(unsigned char kind, int backend_id)
+void
+log_backend_messages(unsigned char kind, int backend_id)
{
/*
* Map table for message kind and message label
*/
typedef struct
{
- unsigned char kind; /* message kind */
- char *label; /* message label */
- } BackendMessage;
-
+ unsigned char kind; /* message kind */
+ char *label; /* message label */
+ } BackendMessage;
+
static BackendMessage message_label[] =
- {
- {'1', "ParseComplete"},
- {'2', "BindComplete"},
- {'3', "CloseComplete"},
- {'A', "NotificationResponse"},
- {'C', "CommandComplete"},
- {'D', "DataRow"},
- {'E', "ErrorResponse"},
- {'G', "CopyInResponse"},
- {'H', "CopyOutResponse"},
- {'I', "EmptyQueryResponse"},
- {'K', "BackendKeyData"},
- {'N', "NoticeResponse"},
- {'R', "AuthenticationRequest"},
- {'S', "ParameterStatus"},
- {'T', "RowDescription"},
- {'V', "FunctionCallResponse"},
- {'W', "CopyBothResponse"},
- {'Z', "ReadyForQuery"},
- {'n', "NoData"},
- {'s', "PortalSuspended"},
- {'t', "ParameterDescription"},
- {'v', "NegotiateProtocolVersion"},
- {'c', "CopyDone"},
- {'d', "CopyData"},
- };
-
+ {
+ {'1', "ParseComplete"},
+ {'2', "BindComplete"},
+ {'3', "CloseComplete"},
+ {'A', "NotificationResponse"},
+ {'C', "CommandComplete"},
+ {'D', "DataRow"},
+ {'E', "ErrorResponse"},
+ {'G', "CopyInResponse"},
+ {'H', "CopyOutResponse"},
+ {'I', "EmptyQueryResponse"},
+ {'K', "BackendKeyData"},
+ {'N', "NoticeResponse"},
+ {'R', "AuthenticationRequest"},
+ {'S', "ParameterStatus"},
+ {'T', "RowDescription"},
+ {'V', "FunctionCallResponse"},
+ {'W', "CopyBothResponse"},
+ {'Z', "ReadyForQuery"},
+ {'n', "NoData"},
+ {'s', "PortalSuspended"},
+ {'t', "ParameterDescription"},
+ {'v', "NegotiateProtocolVersion"},
+ {'c', "CopyDone"},
+ {'d', "CopyData"},
+ };
+
/* store last kind for each backend */
static unsigned char kind_cache[MAX_NUM_BACKENDS];
/* number of repetitions of each kind */
- static int kind_count[MAX_NUM_BACKENDS];
+ static int kind_count[MAX_NUM_BACKENDS];
- int kind_num = sizeof(message_label)/sizeof(BackendMessage);
- char *label;
- static char *last_label;
- int i;
+ int kind_num = sizeof(message_label) / sizeof(BackendMessage);
+ char *label;
+ static char *last_label;
+ int i;
/* do nothing if log_backend_messages is disabled */
if (pool_config->log_backend_messages == BGMSG_NONE)
@@ -3944,7 +3952,7 @@ void log_backend_messages(unsigned char kind, int backend_id)
(errmsg("%s message from backend %d", label, backend_id)));
return;
}
-
+
/* just to make sure the setting is terse */
if (pool_config->log_backend_messages != BGMSG_TERSE)
{
@@ -3991,11 +3999,11 @@ void log_backend_messages(unsigned char kind, int backend_id)
* All data read in this function is returned to stream.
*/
char
-per_node_error_log(POOL_CONNECTION_POOL * backend, int node_id, char *query, char *prefix, bool unread)
+per_node_error_log(POOL_CONNECTION_POOL *backend, int node_id, char *query, char *prefix, bool unread)
{
POOL_CONNECTION_POOL_SLOT *slot = backend->slots[node_id];
char *message;
- char kind;
+ char kind;
pool_read(CONNECTION(backend, node_id), &kind, sizeof(kind));
pool_unread(CONNECTION(backend, node_id), &kind, sizeof(kind));
@@ -4020,10 +4028,11 @@ per_node_error_log(POOL_CONNECTION_POOL * backend, int node_id, char *query, cha
* message is not yet parsed on the primary/main node but parsed on other
* node. Caller must provide the parse message data as "message".
*/
-static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
- POOL_SENT_MESSAGE * message,
- POOL_SENT_MESSAGE * bind_message)
+static POOL_STATUS
+parse_before_bind(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ POOL_SENT_MESSAGE *message,
+ POOL_SENT_MESSAGE *bind_message)
{
int i;
int len = message->len;
@@ -4061,8 +4070,8 @@ static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend,
* Before sending the parse message to the primary, we need to
* close the named statement. Otherwise we will get an error from
* backend if the named statement already exists. This could
- * happen if parse_before_bind is called with a bind message
- * using the same named statement. If the named statement does not
+ * happen if parse_before_bind is called with a bind message using
+ * the same named statement. If the named statement does not
* exist, it's fine. PostgreSQL just ignores a request trying to
* close a non-existing statement. If the statement is unnamed
* one, we do not need it because unnamed statement can be
@@ -4105,8 +4114,10 @@ static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend,
bind_message->query_context = new_qc;
#ifdef NOT_USED
+
/*
- * XXX pool_remove_sent_message() will pfree memory allocated by "contents".
+ * XXX pool_remove_sent_message() will pfree memory allocated by
+ * "contents".
*/
/* Remove old sent message */
@@ -4187,14 +4198,16 @@ static POOL_STATUS parse_before_bind(POOL_CONNECTION * frontend,
* node. Caller must provide the PREPARED message information as "message"
* argument.
*/
-static POOL_STATUS send_prepare(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
- POOL_SENT_MESSAGE * message)
+static POOL_STATUS
+send_prepare(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
+ POOL_SENT_MESSAGE *message)
{
int node_id;
bool backup[MAX_NUM_BACKENDS];
- POOL_QUERY_CONTEXT *qc, *new_qc;
- char qbuf[1024];
+ POOL_QUERY_CONTEXT *qc,
+ *new_qc;
+ char qbuf[1024];
POOL_SELECT_RESULT *res;
elog(DEBUG1, "send_prepare called");
@@ -4222,14 +4235,14 @@ static POOL_STATUS send_prepare(POOL_CONNECTION * frontend,
}
/*
- * we are in streaming replication mode and the PREPARE message has
- * not been sent to primary yet.
+ * we are in streaming replication mode and the PREPARE message has not
+ * been sent to primary yet.
*/
/*
- * Prepare modified query context This is a copy of original PREPARE
- * query context except the query sending destination is changed to
- * primary node.
+ * Prepare modified query context This is a copy of original PREPARE query
+ * context except the query sending destination is changed to primary
+ * node.
*/
new_qc = pool_query_context_shallow_copy(qc);
memset(new_qc->where_to_send, 0, sizeof(new_qc->where_to_send));
@@ -4242,8 +4255,8 @@ static POOL_STATUS send_prepare(POOL_CONNECTION * frontend,
{
/*
* Before sending the PREPARE message to the primary, we need to
- * DEALLOCATE the named statement. Otherwise we will get an error
- * from backend if an identical named statement already exists.
+ * DEALLOCATE the named statement. Otherwise we will get an error from
+ * backend if an identical named statement already exists.
*/
/* check to see if the named statement exists on primary node */
@@ -4256,6 +4269,7 @@ static POOL_STATUS send_prepare(POOL_CONNECTION * frontend,
if (res && res->data[0] && strcmp(res->data[0], "0"))
{
free_select_result(res);
+
/*
* The same named statement exists, We need to send DEALLOCATE
* message
@@ -4483,7 +4497,7 @@ flatten_set_variable_args(const char *name, List *args)
* Wait till ready for query received.
*/
static void
-pool_wait_till_ready_for_query(POOL_CONNECTION_POOL * backend)
+pool_wait_till_ready_for_query(POOL_CONNECTION_POOL *backend)
{
char kind;
int len;
@@ -4531,8 +4545,8 @@ pool_wait_till_ready_for_query(POOL_CONNECTION_POOL * backend)
* is read.
*/
static void
-pool_discard_except_sync_and_ready_for_query(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+pool_discard_except_sync_and_ready_for_query(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
POOL_PENDING_MESSAGE *pmsg;
int i;
@@ -4641,7 +4655,7 @@ pool_discard_except_sync_and_ready_for_query(POOL_CONNECTION * frontend,
* Preconditions: query is in progress. The command is succeeded.
*/
void
-pool_at_command_success(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+pool_at_command_success(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
Node *node;
char *query;
@@ -4769,7 +4783,7 @@ pool_at_command_success(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backe
* read message length (V3 only)
*/
int
-pool_read_message_length(POOL_CONNECTION_POOL * cp)
+pool_read_message_length(POOL_CONNECTION_POOL *cp)
{
int length,
length0;
@@ -4821,7 +4835,7 @@ pool_read_message_length(POOL_CONNECTION_POOL * cp)
* The array is in the static storage, thus it will be destroyed by subsequent calls.
*/
int *
-pool_read_message_length2(POOL_CONNECTION_POOL * cp)
+pool_read_message_length2(POOL_CONNECTION_POOL *cp)
{
int length,
length0;
@@ -4877,7 +4891,7 @@ pool_read_message_length2(POOL_CONNECTION_POOL * cp)
void
pool_emit_log_for_message_length_diff(int *length_array, char *name)
{
- int length0, /* message length of main node id */
+ int length0, /* message length of main node id */
length;
int i;
@@ -4908,7 +4922,7 @@ pool_emit_log_for_message_length_diff(int *length_array, char *name)
* Read kind from all valid backend
*/
signed char
-pool_read_kind(POOL_CONNECTION_POOL * cp)
+pool_read_kind(POOL_CONNECTION_POOL *cp)
{
char kind0,
kind;
@@ -4966,7 +4980,7 @@ pool_read_kind(POOL_CONNECTION_POOL * cp)
}
int
-pool_read_int(POOL_CONNECTION_POOL * cp)
+pool_read_int(POOL_CONNECTION_POOL *cp)
{
int data0,
data;
@@ -5006,7 +5020,7 @@ pool_read_int(POOL_CONNECTION_POOL * cp)
* In case of starting an internal transaction, this should be false.
*/
static void
-si_get_snapshot(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node * node, bool tstate_check)
+si_get_snapshot(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, Node *node, bool tstate_check)
{
POOL_SESSION_CONTEXT *session_context;
@@ -5015,13 +5029,12 @@ si_get_snapshot(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node
return;
/*
- * From now on it is possible that query is actually sent to backend.
- * So we need to acquire snapshot while there's no committing backend
- * in snapshot isolation mode except while processing reset queries.
- * For this purpose, we send a query to know whether the transaction
- * is READ ONLY or not. Sending actual user's query is not possible
- * because it might cause rw-conflict, which in turn causes a
- * deadlock.
+ * From now on it is possible that query is actually sent to backend. So
+ * we need to acquire snapshot while there's no committing backend in
+ * snapshot isolation mode except while processing reset queries. For this
+ * purpose, we send a query to know whether the transaction is READ ONLY
+ * or not. Sending actual user's query is not possible because it might
+ * cause rw-conflict, which in turn causes a deadlock.
*/
if (pool_config->backend_clustering_mode == CM_SNAPSHOT_ISOLATION &&
(!tstate_check || (tstate_check && TSTATE(backend, MAIN_NODE_ID) == 'T')) &&
@@ -5029,16 +5042,17 @@ si_get_snapshot(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node
!si_snapshot_prepared() &&
frontend && frontend->no_forward == 0)
{
- int i;
+ int i;
si_acquire_snapshot();
for (i = 0; i < NUM_BACKENDS; i++)
{
- static char *si_query = "SELECT current_setting('transaction_read_only')";
+ static char *si_query = "SELECT current_setting('transaction_read_only')";
POOL_SELECT_RESULT *res;
- /* We cannot use VALID_BACKEND macro here because load balance
+ /*
+ * We cannot use VALID_BACKEND macro here because load balance
* node has not been decided yet.
*/
if (!VALID_BACKEND_RAW(i))
@@ -5066,10 +5080,10 @@ si_get_snapshot(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, Node
* false to caller.
*/
static bool
-check_transaction_state_and_abort(char *query, Node *node, POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend)
+check_transaction_state_and_abort(char *query, Node *node, POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend)
{
- int len;
+ int len;
if (TSTATE(backend, MAIN_NODE_ID) != 'E')
return true;
@@ -5087,14 +5101,14 @@ check_transaction_state_and_abort(char *query, Node *node, POOL_CONNECTION * fro
/* send an error message to frontend */
pool_send_error_message(
- frontend,
- MAJOR(backend),
- "25P02",
- "current transaction is aborted, commands ignored until end of transaction block",
- buf.data,
- "",
- __FILE__,
- __LINE__);
+ frontend,
+ MAJOR(backend),
+ "25P02",
+ "current transaction is aborted, commands ignored until end of transaction block",
+ buf.data,
+ "",
+ __FILE__,
+ __LINE__);
pfree(buf.data);
@@ -5117,14 +5131,15 @@ check_transaction_state_and_abort(char *query, Node *node, POOL_CONNECTION * fro
* As far as I know this is the most accurate and cheap way.
*/
static
-bool multi_statement_query(char *queries)
+bool
+multi_statement_query(char *queries)
{
PsqlScanState sstate;
promptStatus_t prompt;
PsqlScanResult sr;
PQExpBufferData lbuf;
- int num_semicolons = 0;
- bool done = false;
+ int num_semicolons = 0;
+ bool done = false;
/*
* callback functions for our flex lexer. need this to prevent crash when
@@ -5134,9 +5149,9 @@ bool multi_statement_query(char *queries)
NULL
};
- initPQExpBuffer(&lbuf); /* initialize line buffer */
+ initPQExpBuffer(&lbuf); /* initialize line buffer */
- sstate = psql_scan_create(&psqlscan_callbacks); /* create scan state */
+ sstate = psql_scan_create(&psqlscan_callbacks); /* create scan state */
/* add the query string to the scan state */
psql_scan_setup(sstate, queries, strlen(queries), 0, true);
@@ -5144,9 +5159,9 @@ bool multi_statement_query(char *queries)
for (;;)
{
resetPQExpBuffer(&lbuf);
- sr = psql_scan(sstate, &lbuf, &prompt); /* run scanner */
+ sr = psql_scan(sstate, &lbuf, &prompt); /* run scanner */
- switch(sr)
+ switch (sr)
{
case PSCAN_SEMICOLON: /* found command-ending semicolon */
num_semicolons++;
@@ -5154,7 +5169,8 @@ bool multi_statement_query(char *queries)
case PSCAN_BACKSLASH: /* found backslash command */
break;
case PSCAN_INCOMPLETE: /* end of line, SQL statement incomplete */
- case PSCAN_EOL: /* end of line, SQL possibly complete */
+ case PSCAN_EOL: /* end of line, SQL possibly complete */
+
/*
* If we have already seen ";" and this time something is
* transferred into buffer, we assume that the last query is
@@ -5193,17 +5209,17 @@ bool multi_statement_query(char *queries)
static void
check_prepare(List *parse_tree_list, int len, char *contents)
{
- Node *node;
- RawStmt *rstmt;
- POOL_QUERY_CONTEXT *query_context;
- ListCell *l;
- POOL_SENT_MESSAGE *message;
+ Node *node;
+ RawStmt *rstmt;
+ POOL_QUERY_CONTEXT *query_context;
+ ListCell *l;
+ POOL_SENT_MESSAGE *message;
/* sanity check */
if (list_length(parse_tree_list) <= 1)
return;
- foreach (l, parse_tree_list)
+ foreach(l, parse_tree_list)
{
if (l == list_head(parse_tree_list)) /* skip the first parse tree */
continue;
@@ -5214,14 +5230,16 @@ check_prepare(List *parse_tree_list, int len, char *contents)
if (!IsA(node, PrepareStmt)) /* PREPARE? */
continue;
- query_context = pool_init_query_context(); /* initialize query context */
- query_context->is_multi_statement = true; /* this is a multi statement query */
+ query_context = pool_init_query_context(); /* initialize query
+ * context */
+ query_context->is_multi_statement = true; /* this is a multi
+ * statement query */
pool_start_query(query_context, contents, len, node); /* start query context */
pool_where_to_send(query_context, query_context->original_query, /* set query destination */
query_context->parse_tree);
message = pool_create_sent_message('Q', len, contents, 0, /* create sent message */
((PrepareStmt *) node)->name, query_context);
- pool_add_sent_message(message); /* add it to the sent message list */
+ pool_add_sent_message(message); /* add it to the sent message list */
}
}
@@ -5231,12 +5249,13 @@ check_prepare(List *parse_tree_list, int len, char *contents)
* set.
*/
static
-POOL_QUERY_CONTEXT *create_dummy_query_context(void)
+POOL_QUERY_CONTEXT *
+create_dummy_query_context(void)
{
POOL_QUERY_CONTEXT *query_context;
- Node *node;
+ Node *node;
MemoryContext old_context;
- char *query = "UNKNOWN QUERY";
+ char *query = "UNKNOWN QUERY";
query_context = pool_init_query_context();
old_context = MemoryContextSwitchTo(query_context->memory_context);
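
The message_label[] table reindented in log_backend_messages() above drives a linear lookup from a backend protocol message byte to a readable name. A reduced, self-contained sketch of that lookup (only a few of the table entries from the diff are repeated here, and the per-backend kind_cache/kind_count repetition counting is left out):

    #include <stddef.h>

    typedef struct
    {
        unsigned char kind;     /* protocol message byte */
        const char *label;      /* readable name */
    } BackendMessageLabel;

    static const BackendMessageLabel message_label[] = {
        {'1', "ParseComplete"},
        {'2', "BindComplete"},
        {'3', "CloseComplete"},
        {'C', "CommandComplete"},
        {'D', "DataRow"},
        {'E', "ErrorResponse"},
        {'T', "RowDescription"},
        {'Z', "ReadyForQuery"},
    };

    /* return the label for kind, or NULL when the byte is not in the table */
    static const char *
    kind_to_label(unsigned char kind)
    {
        size_t i;
        size_t n = sizeof(message_label) / sizeof(message_label[0]);

        for (i = 0; i < n; i++)
            if (message_label[i].kind == kind)
                return message_label[i].label;
        return NULL;
    }
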
diff --git a/src/query_cache/pool_memqcache.c b/src/query_cache/pool_memqcache.c
index 8b52b782e..40ad93e1a 100644
--- a/src/query_cache/pool_memqcache.c
+++ b/src/query_cache/pool_memqcache.c
@@ -63,13 +63,13 @@
memcached_st *memc;
#endif
-static char *encode_key(const char *s, char *buf, POOL_CONNECTION_POOL * backend);
+static char *encode_key(const char *s, char *buf, POOL_CONNECTION_POOL *backend);
#ifdef DEBUG
static void dump_cache_data(const char *data, size_t len);
#endif
-static int pool_commit_cache(POOL_CONNECTION_POOL * backend, char *query, char *data, size_t datalen, int num_oids, int *oids);
-static int send_cached_messages(POOL_CONNECTION * frontend, const char *qcache, int qcachelen);
-static void send_message(POOL_CONNECTION * conn, char kind, int len, const char *data);
+static int pool_commit_cache(POOL_CONNECTION_POOL *backend, char *query, char *data, size_t datalen, int num_oids, int *oids);
+static int send_cached_messages(POOL_CONNECTION *frontend, const char *qcache, int qcachelen);
+static void send_message(POOL_CONNECTION *conn, char kind, int len, const char *data);
#ifdef USE_MEMCACHED
static int delete_cache_on_memcached(const char *key);
#endif
@@ -78,23 +78,23 @@ static int pool_get_dropdb_table_oids(int **oids, int dboid);
static void pool_discard_dml_table_oid(void);
static void pool_invalidate_query_cache(int num_table_oids, int *table_oid, bool unlink, int dboid);
static int pool_get_database_oid(void);
-static void pool_add_table_oid_map(POOL_CACHEKEY * cachkey, int num_table_oids, int *table_oids);
+static void pool_add_table_oid_map(POOL_CACHEKEY *cachkey, int num_table_oids, int *table_oids);
static void pool_reset_memqcache_buffer(bool reset_dml_oids);
-static POOL_CACHEID * pool_add_item_shmem_cache(POOL_QUERY_HASH * query_hash, char *data, int size, time_t expire);
-static POOL_CACHEID * pool_find_item_on_shmem_cache(POOL_QUERY_HASH * query_hash);
-static char *pool_get_item_shmem_cache(POOL_QUERY_HASH * query_hash, int *size, int *sts);
-static POOL_QUERY_CACHE_ARRAY * pool_add_query_cache_array(POOL_QUERY_CACHE_ARRAY * cache_array, POOL_TEMP_QUERY_CACHE * cache);
-static void pool_add_temp_query_cache(POOL_TEMP_QUERY_CACHE * temp_cache, char kind, char *data, int data_len);
-static void pool_add_oids_temp_query_cache(POOL_TEMP_QUERY_CACHE * temp_cache, int num_oids, int *oids);
-static POOL_INTERNAL_BUFFER * pool_create_buffer(void);
-static void pool_discard_buffer(POOL_INTERNAL_BUFFER * buffer);
-static void pool_add_buffer(POOL_INTERNAL_BUFFER * buffer, void *data, size_t len);
-static void *pool_get_buffer(POOL_INTERNAL_BUFFER * buffer, size_t *len);
+static POOL_CACHEID *pool_add_item_shmem_cache(POOL_QUERY_HASH *query_hash, char *data, int size, time_t expire);
+static POOL_CACHEID *pool_find_item_on_shmem_cache(POOL_QUERY_HASH *query_hash);
+static char *pool_get_item_shmem_cache(POOL_QUERY_HASH *query_hash, int *size, int *sts);
+static POOL_QUERY_CACHE_ARRAY *pool_add_query_cache_array(POOL_QUERY_CACHE_ARRAY *cache_array, POOL_TEMP_QUERY_CACHE *cache);
+static void pool_add_temp_query_cache(POOL_TEMP_QUERY_CACHE *temp_cache, char kind, char *data, int data_len);
+static void pool_add_oids_temp_query_cache(POOL_TEMP_QUERY_CACHE *temp_cache, int num_oids, int *oids);
+static POOL_INTERNAL_BUFFER *pool_create_buffer(void);
+static void pool_discard_buffer(POOL_INTERNAL_BUFFER *buffer);
+static void pool_add_buffer(POOL_INTERNAL_BUFFER *buffer, void *data, size_t len);
+static void *pool_get_buffer(POOL_INTERNAL_BUFFER *buffer, size_t *len);
#ifdef NOT_USED
-static char *pool_get_buffer_pointer(POOL_INTERNAL_BUFFER * buffer);
+static char *pool_get_buffer_pointer(POOL_INTERNAL_BUFFER *buffer);
#endif
static char *pool_get_current_cache_buffer(size_t *len);
-static size_t pool_get_buffer_length(POOL_INTERNAL_BUFFER * buffer);
+static size_t pool_get_buffer_length(POOL_INTERNAL_BUFFER *buffer);
static void pool_check_and_discard_cache_buffer(int num_oids, int *oids);
static void pool_set_memqcache_blocks(int num_blocks);
@@ -104,36 +104,36 @@ static void pool_reset_fsmm(size_t size);
static void *pool_fsmm_address(void);
static void pool_update_fsmm(POOL_CACHE_BLOCKID blockid, size_t free_space);
static POOL_CACHE_BLOCKID pool_get_block(size_t free_space);
-static POOL_CACHE_ITEM_HEADER * pool_cache_item_header(POOL_CACHEID * cacheid);
+static POOL_CACHE_ITEM_HEADER *pool_cache_item_header(POOL_CACHEID *cacheid);
static int pool_init_cache_block(POOL_CACHE_BLOCKID blockid);
#if NOT_USED
static void pool_wipe_out_cache_block(POOL_CACHE_BLOCKID blockid);
#endif
-static int pool_delete_item_shmem_cache(POOL_CACHEID * cacheid);
+static int pool_delete_item_shmem_cache(POOL_CACHEID *cacheid);
static char *block_address(int blockid);
-static POOL_CACHE_ITEM_POINTER * item_pointer(char *block, int i);
-static POOL_CACHE_ITEM_HEADER * item_header(char *block, int i);
+static POOL_CACHE_ITEM_POINTER *item_pointer(char *block, int i);
+static POOL_CACHE_ITEM_HEADER *item_header(char *block, int i);
static POOL_CACHE_BLOCKID pool_reuse_block(void);
#ifdef SHMEMCACHE_DEBUG
static void dump_shmem_cache(POOL_CACHE_BLOCKID blockid);
#endif
static int pool_hash_reset(int nelements);
-static int pool_hash_insert(POOL_QUERY_HASH * key, POOL_CACHEID * cacheid, bool update);
-static uint32 create_hash_key(POOL_QUERY_HASH * key);
+static int pool_hash_insert(POOL_QUERY_HASH *key, POOL_CACHEID *cacheid, bool update);
+static uint32 create_hash_key(POOL_QUERY_HASH *key);
static volatile POOL_HASH_ELEMENT *get_new_hash_element(void);
-static void put_back_hash_element(volatile POOL_HASH_ELEMENT * element);
+static void put_back_hash_element(volatile POOL_HASH_ELEMENT *element);
static bool is_free_hash_element(void);
-static void inject_cached_message(POOL_CONNECTION * backend, char *qcache, int qcachelen);
+static void inject_cached_message(POOL_CONNECTION *backend, char *qcache, int qcachelen);
#ifdef USE_MEMCACHED
-static int delete_all_cache_on_memcached(void);
+static int delete_all_cache_on_memcached(void);
#endif
static char *create_fake_cache(size_t *len);
/*
* if true, shared memory is locked in this process now.
*/
-static int is_shmem_locked;
+static int is_shmem_locked;
/*
* Connect to Memcached
@@ -172,7 +172,7 @@ memcached_connect(void)
{
ereport(WARNING,
(errmsg("failed to connect to memcached, server push error:\"%s\"\n", memcached_strerror(memc, rc))));
- memc = (memcached_st *) - 1;
+ memc = (memcached_st *) -1;
return -1;
}
memcached_server_list_free(servers);
@@ -207,7 +207,7 @@ memcached_disconnect(void)
*/
void
memqcache_register(char kind,
- POOL_CONNECTION * frontend,
+ POOL_CONNECTION *frontend,
char *data,
int data_len)
{
@@ -239,7 +239,7 @@ memqcache_register(char kind,
* Commit SELECT results to cache storage.
*/
static int
-pool_commit_cache(POOL_CONNECTION_POOL * backend, char *query, char *data, size_t datalen, int num_oids, int *oids)
+pool_commit_cache(POOL_CONNECTION_POOL *backend, char *query, char *data, size_t datalen, int num_oids, int *oids)
{
#ifdef USE_MEMCACHED
memcached_return rc;
@@ -301,7 +301,7 @@ pool_commit_cache(POOL_CONNECTION_POOL * backend, char *query, char *data, size_
}
else
{
- cacheid = pool_add_item_shmem_cache(&query_hash, data, datalen,memqcache_expire);
+ cacheid = pool_add_item_shmem_cache(&query_hash, data, datalen, memqcache_expire);
if (cacheid == NULL)
{
ereport(LOG,
@@ -349,7 +349,7 @@ pool_commit_cache(POOL_CONNECTION_POOL * backend, char *query, char *data, size_
* Commit SELECT system catalog results to cache storage.
*/
int
-pool_catalog_commit_cache(POOL_CONNECTION_POOL * backend, char *query, char *data, size_t datalen)
+pool_catalog_commit_cache(POOL_CONNECTION_POOL *backend, char *query, char *data, size_t datalen)
{
#ifdef USE_MEMCACHED
memcached_return rc;
@@ -457,7 +457,7 @@ pool_catalog_commit_cache(POOL_CONNECTION_POOL * backend, char *query, char *dat
* 1: not found
*/
int
-pool_fetch_cache(POOL_CONNECTION_POOL * backend, const char *query, char **buf, size_t *len)
+pool_fetch_cache(POOL_CONNECTION_POOL *backend, const char *query, char **buf, size_t *len)
{
char *ptr;
char tmpkey[MAX_KEY];
@@ -559,7 +559,7 @@ pool_fetch_cache(POOL_CONNECTION_POOL * backend, const char *query, char **buf,
* create cache key as md5(username + query string + database name)
*/
static char *
-encode_key(const char *s, char *buf, POOL_CONNECTION_POOL * backend)
+encode_key(const char *s, char *buf, POOL_CONNECTION_POOL *backend)
{
char *strkey;
int u_length;
@@ -632,7 +632,7 @@ dump_cache_data(const char *data, size_t len)
* send cached messages
*/
static int
-send_cached_messages(POOL_CONNECTION * frontend, const char *qcache, int qcachelen)
+send_cached_messages(POOL_CONNECTION *frontend, const char *qcache, int qcachelen)
{
int msg = 0;
int i = 0;
@@ -685,7 +685,7 @@ send_cached_messages(POOL_CONNECTION * frontend, const char *qcache, int qcachel
* send message to frontend
*/
static void
-send_message(POOL_CONNECTION * conn, char kind, int len, const char *data)
+send_message(POOL_CONNECTION *conn, char kind, int len, const char *data)
{
ereport(DEBUG2,
(errmsg("memcache: sending messages: kind '%c', len=%d, data=%p", kind, len, data)));
@@ -734,13 +734,13 @@ delete_cache_on_memcached(const char *key)
* If use_fake_cache is true, make up "CommandComplete 0" result and use it.
*/
POOL_STATUS
-pool_fetch_from_memory_cache(POOL_CONNECTION * frontend,
- POOL_CONNECTION_POOL * backend,
+pool_fetch_from_memory_cache(POOL_CONNECTION *frontend,
+ POOL_CONNECTION_POOL *backend,
char *contents, bool use_fake_cache, bool *foundp)
{
char *qcache;
size_t qcachelen;
- volatile int sts;
+ volatile int sts;
pool_sigset_t oldmask;
ereport(DEBUG1,
@@ -849,14 +849,17 @@ pool_fetch_from_memory_cache(POOL_CONNECTION * frontend,
static char *
create_fake_cache(size_t *len)
{
- char *qcache, *p;
- int32 mlen; /* message length including self */
- static char* msg = "SELECT 0";
-
- *len = sizeof(char) + /* message kind */
- sizeof(int32) + /* packet length including self */
- sizeof(msg); /* Command Complete message with 0 row returned */
- mlen = *len - 1; /* message length does not include message kind */
+ char *qcache,
+ *p;
+ int32 mlen; /* message length including self */
+ static char *msg = "SELECT 0";
+
+ *len = sizeof(char) + /* message kind */
+ sizeof(int32) + /* packet length including self */
+ sizeof(msg); /* Command Complete message with 0 row
+ * returned */
+ mlen = *len - 1; /* message length does not include message
+ * kind */
mlen = htonl(mlen);
p = qcache = palloc(*len);
*p++ = 'C';
@@ -1026,7 +1029,7 @@ pool_is_allow_to_cache(Node *node, char *query)
/*
* TABLESAMPLE is not allowed to cache.
*/
- if (IsA(node, SelectStmt) &&((SelectStmt *) node)->fromClause)
+ if (IsA(node, SelectStmt) && ((SelectStmt *) node)->fromClause)
{
List *tbl_list = ((SelectStmt *) node)->fromClause;
ListCell *tbl;
@@ -1091,17 +1094,18 @@ pool_is_allow_to_cache(Node *node, char *query)
/*
* If Data-modifying statements in WITH clause, it's not allowed to cache.
*/
- if(IsA(node, SelectStmt) && ((SelectStmt *) node)->withClause)
+ if (IsA(node, SelectStmt) && ((SelectStmt *) node)->withClause)
{
- ListCell *lc;
- WithClause *withClause = ((SelectStmt *) node)->withClause;
+ ListCell *lc;
+ WithClause *withClause = ((SelectStmt *) node)->withClause;
foreach(lc, withClause->ctes)
{
- CommonTableExpr *cte = (CommonTableExpr *)lfirst(lc);
- if(IsA(cte->ctequery, InsertStmt) ||
- IsA(cte->ctequery, DeleteStmt) ||
- IsA(cte->ctequery, UpdateStmt))
+ CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
+
+ if (IsA(cte->ctequery, InsertStmt) ||
+ IsA(cte->ctequery, DeleteStmt) ||
+ IsA(cte->ctequery, UpdateStmt))
{
return false;
}
@@ -1192,9 +1196,10 @@ pool_extract_table_oids(Node *node, int **oidsp)
num_oids = pool_extract_withclause_oids((Node *) stmt->withClause, *oidsp);
table = make_table_name_from_rangevar(stmt->relation);
}
- else if(IsA(node, SelectStmt))
+ else if (IsA(node, SelectStmt))
{
SelectStmt *stmt = (SelectStmt *) node;
+
num_oids = pool_extract_withclause_oids((Node *) stmt->withClause, *oidsp);
table = NULL;
}
@@ -1300,8 +1305,8 @@ pool_extract_table_oids(Node *node, int **oidsp)
}
else if (IsA(node, ExplainStmt))
{
- ListCell *cell;
- DefElem *def;
+ ListCell *cell;
+ DefElem *def;
ExplainStmt *stmt = (ExplainStmt *) node;
foreach(cell, stmt->options)
@@ -1354,12 +1359,12 @@ pool_extract_withclause_oids(Node *node, int *oidsp)
ListCell *lc;
WithClause *with;
- if(oidsp == NULL)
+ if (oidsp == NULL)
{
return 0;
}
- if(!node || !IsA(node, WithClause))
+ if (!node || !IsA(node, WithClause))
{
return 0;
}
@@ -1367,20 +1372,24 @@ pool_extract_withclause_oids(Node *node, int *oidsp)
with = (WithClause *) node;
foreach(lc, with->ctes)
{
- CommonTableExpr *cte = (CommonTableExpr *)lfirst(lc);
- if(IsA(cte->ctequery, InsertStmt))
+ CommonTableExpr *cte = (CommonTableExpr *) lfirst(lc);
+
+ if (IsA(cte->ctequery, InsertStmt))
{
InsertStmt *stmt = (InsertStmt *) cte->ctequery;
+
table = make_table_name_from_rangevar(stmt->relation);
}
- else if(IsA(cte->ctequery, DeleteStmt))
+ else if (IsA(cte->ctequery, DeleteStmt))
{
DeleteStmt *stmt = (DeleteStmt *) cte->ctequery;
+
table = make_table_name_from_rangevar(stmt->relation);
}
- else if(IsA(cte->ctequery, UpdateStmt))
+ else if (IsA(cte->ctequery, UpdateStmt))
{
UpdateStmt *stmt = (UpdateStmt *) cte->ctequery;
+
table = make_table_name_from_rangevar(stmt->relation);
}
else
@@ -1475,7 +1484,7 @@ pool_get_dropdb_table_oids(int **oids, int dboid)
int num_oids = 0;
DIR *dir;
struct dirent *dp;
- char *path;
+ char *path;
path = psprintf("%s/%d", pool_config->memqcache_oiddir, dboid);
if ((dir = opendir(path)) == NULL)
@@ -1554,7 +1563,7 @@ pool_get_database_oid(void)
* Query to convert table name to oid
*/
int oid = 0;
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
POOL_CONNECTION_POOL *backend;
backend = pool_get_session_context(false)->backend;
@@ -1592,7 +1601,7 @@ pool_get_database_oid_from_dbname(char *dbname)
{
int dboid = 0;
POOL_SELECT_RESULT *res;
- char *query;
+ char *query;
POOL_CONNECTION_POOL *backend;
@@ -1626,11 +1635,11 @@ pool_get_database_oid_from_dbname(char *dbname)
* (pool_handle_query_cache -> pool_commit_cache -> pool_add_table_oid_map)
*/
static void
-pool_add_table_oid_map(POOL_CACHEKEY * cachekey, int num_table_oids, int *table_oids)
+pool_add_table_oid_map(POOL_CACHEKEY *cachekey, int num_table_oids, int *table_oids)
{
char *dir;
int dboid;
- char *path;
+ char *path;
int i;
int len;
@@ -1803,7 +1812,7 @@ pool_add_table_oid_map(POOL_CACHEKEY * cachekey, int num_table_oids, int *table_
void
pool_discard_oid_maps(void)
{
- char *command;
+ char *command;
command = psprintf("/bin/rm -fr %s/[0-9]*",
pool_config->memqcache_oiddir);
@@ -1821,7 +1830,7 @@ pool_discard_oid_maps(void)
void
pool_discard_oid_maps_by_db(int dboid)
{
- char *command;
+ char *command;
if (pool_is_shmem_cache())
{
@@ -1851,7 +1860,7 @@ static void
pool_invalidate_query_cache(int num_table_oids, int *table_oid, bool unlinkp, int dboid)
{
char *dir;
- char *path;
+ char *path;
int i;
int len;
POOL_CACHEKEY buf;
@@ -2214,7 +2223,8 @@ delete_all_cache_on_memcached(void)
/*
* Clear query cache on shmem or memcached
*/
-void clear_query_cache(void)
+void
+clear_query_cache(void)
{
/*
* Clear all the shared memory cache and oid maps.
@@ -2259,8 +2269,8 @@ pool_memory_cache_address(void)
void
pool_init_whole_cache_blocks(void)
{
- int blocks = pool_get_memqcache_blocks();
- int i;
+ int blocks = pool_get_memqcache_blocks();
+ int i;
for (i = 0; i < blocks; i++)
{
@@ -2370,7 +2380,8 @@ pool_reset_fsmm(size_t size)
* See https://en.wikipedia.org/wiki/Page_replacement_algorithm#Clock for more details.
* It would be nice if we could use true clock algorithm someday.
*/
-static POOL_CACHE_BLOCKID pool_reuse_block(void)
+static POOL_CACHE_BLOCKID
+pool_reuse_block(void)
{
int maxblock = pool_get_memqcache_blocks();
char *block = block_address(*pool_fsmm_clock_hand);
@@ -2413,7 +2424,8 @@ static POOL_CACHE_BLOCKID pool_reuse_block(void)
/*
* Get block id which has enough space
*/
-static POOL_CACHE_BLOCKID pool_get_block(size_t free_space)
+static POOL_CACHE_BLOCKID
+pool_get_block(size_t free_space)
{
int encode_value;
unsigned char *p = pool_fsmm_address();
@@ -2505,7 +2517,8 @@ pool_update_fsmm(POOL_CACHE_BLOCKID blockid, size_t free_space)
* The cache id is overwritten by the subsequent call to this function.
* On error returns NULL.
*/
-static POOL_CACHEID * pool_add_item_shmem_cache(POOL_QUERY_HASH * query_hash, char *data, int size, time_t expire)
+static POOL_CACHEID *
+pool_add_item_shmem_cache(POOL_QUERY_HASH *query_hash, char *data, int size, time_t expire)
{
static POOL_CACHEID cacheid;
POOL_CACHE_BLOCKID blockid;
@@ -2788,7 +2801,7 @@ static POOL_CACHEID * pool_add_item_shmem_cache(POOL_QUERY_HASH * query_hash, ch
* Detail is set to *sts. (0: success, 1: not found, -1: error)
*/
static char *
-pool_get_item_shmem_cache(POOL_QUERY_HASH * query_hash, int *size, int *sts)
+pool_get_item_shmem_cache(POOL_QUERY_HASH *query_hash, int *size, int *sts)
{
POOL_CACHEID *cacheid;
POOL_CACHE_ITEM_HEADER *cih;
@@ -2838,7 +2851,8 @@ pool_get_item_shmem_cache(POOL_QUERY_HASH * query_hash, int *size, int *sts)
* On success returns cache id.
* The cache id is overwritten by the subsequent call to this function.
*/
-static POOL_CACHEID * pool_find_item_on_shmem_cache(POOL_QUERY_HASH * query_hash)
+static POOL_CACHEID *
+pool_find_item_on_shmem_cache(POOL_QUERY_HASH *query_hash)
{
static POOL_CACHEID cacheid;
POOL_CACHEID *c;
@@ -2867,6 +2881,7 @@ static POOL_CACHEID * pool_find_item_on_shmem_cache(POOL_QUERY_HASH * query_hash
*/
pool_shmem_unlock();
pool_shmem_lock(POOL_MEMQ_EXCLUSIVE_LOCK);
+
/*
* There is a window between pool_shmem_unlock() and
* pool_shmem_lock(). We need to get POOL_CACHEID and
@@ -2905,7 +2920,7 @@ static POOL_CACHEID * pool_find_item_on_shmem_cache(POOL_QUERY_HASH * query_hash
* FSMM is also updated.
*/
static int
-pool_delete_item_shmem_cache(POOL_CACHEID * cacheid)
+pool_delete_item_shmem_cache(POOL_CACHEID *cacheid)
{
POOL_CACHE_BLOCK_HEADER *bh;
POOL_CACHE_ITEM_POINTER *cip;
@@ -3018,7 +3033,8 @@ pool_delete_item_shmem_cache(POOL_CACHEID * cacheid)
/*
* Returns item header specified by cache id.
*/
-static POOL_CACHE_ITEM_HEADER * pool_cache_item_header(POOL_CACHEID * cacheid)
+static POOL_CACHE_ITEM_HEADER *
+pool_cache_item_header(POOL_CACHEID *cacheid)
{
POOL_CACHE_BLOCK_HEADER *bh;
@@ -3107,7 +3123,7 @@ pool_wipe_out_cache_block(POOL_CACHE_BLOCKID blockid)
#undef LOCK_TRACE
-static int memq_lock_fd = 0;
+static int memq_lock_fd = 0;
/*
* Acquire lock: XXX giant lock
@@ -3117,7 +3133,7 @@ pool_shmem_lock(POOL_MEMQ_LOCK_TYPE type)
{
if (memq_lock_fd == 0)
{
- char *path;
+ char *path;
path = psprintf("%s/%s", pool_config->logdir, QUERY_CACHE_LOCK_FILE);
memq_lock_fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, S_IRUSR | S_IWUSR);
@@ -3131,11 +3147,11 @@ pool_shmem_lock(POOL_MEMQ_LOCK_TYPE type)
}
#ifdef LOCK_TRACE
- elog(LOG, "LOCK TRACE: try to acquire lock %s", type == POOL_MEMQ_EXCLUSIVE_LOCK? "LOCK_EX" : "LOCK_SH");
+ elog(LOG, "LOCK TRACE: try to acquire lock %s", type == POOL_MEMQ_EXCLUSIVE_LOCK ? "LOCK_EX" : "LOCK_SH");
#endif
if (pool_is_shmem_cache() && !is_shmem_locked)
{
- if (flock(memq_lock_fd, type == POOL_MEMQ_EXCLUSIVE_LOCK? LOCK_EX : LOCK_SH))
+ if (flock(memq_lock_fd, type == POOL_MEMQ_EXCLUSIVE_LOCK ? LOCK_EX : LOCK_SH))
{
ereport(FATAL,
(errmsg("Failed to lock file for query cache"),
@@ -3144,7 +3160,7 @@ pool_shmem_lock(POOL_MEMQ_LOCK_TYPE type)
}
#ifdef LOCK_TRACE
- elog(LOG, "LOCK TRACE: acquire lock %s", type == POOL_MEMQ_EXCLUSIVE_LOCK? "LOCK_EX" : "LOCK_SH");
+ elog(LOG, "LOCK TRACE: acquire lock %s", type == POOL_MEMQ_EXCLUSIVE_LOCK ? "LOCK_EX" : "LOCK_SH");
#endif
is_shmem_locked = true;
}
@@ -3189,14 +3205,15 @@ block_address(int blockid)
char *p;
p = pool_memory_cache_address() +
- (uint64)blockid * pool_config->memqcache_cache_block_size;
+ (uint64) blockid * pool_config->memqcache_cache_block_size;
return p;
}
/*
* Returns i th item pointer in block address block
*/
-static POOL_CACHE_ITEM_POINTER * item_pointer(char *block, int i)
+static POOL_CACHE_ITEM_POINTER *
+item_pointer(char *block, int i)
{
return (POOL_CACHE_ITEM_POINTER *) (block + sizeof(POOL_CACHE_BLOCK_HEADER) +
sizeof(POOL_CACHE_ITEM_POINTER) * i);
@@ -3205,7 +3222,8 @@ static POOL_CACHE_ITEM_POINTER * item_pointer(char *block, int i)
/*
* Returns i th item header in block address block
*/
-static POOL_CACHE_ITEM_HEADER * item_header(char *block, int i)
+static POOL_CACHE_ITEM_HEADER *
+item_header(char *block, int i)
{
POOL_CACHE_ITEM_POINTER *cip;
@@ -3267,7 +3285,7 @@ pool_create_query_cache_array(void)
* Discard query cache array
*/
void
-pool_discard_query_cache_array(POOL_QUERY_CACHE_ARRAY * cache_array)
+pool_discard_query_cache_array(POOL_QUERY_CACHE_ARRAY *cache_array)
{
int i;
@@ -3291,7 +3309,8 @@ pool_discard_query_cache_array(POOL_QUERY_CACHE_ARRAY * cache_array)
/*
* Add query cache array
*/
-static POOL_QUERY_CACHE_ARRAY * pool_add_query_cache_array(POOL_QUERY_CACHE_ARRAY * cache_array, POOL_TEMP_QUERY_CACHE * cache)
+static POOL_QUERY_CACHE_ARRAY *
+pool_add_query_cache_array(POOL_QUERY_CACHE_ARRAY *cache_array, POOL_TEMP_QUERY_CACHE *cache)
{
size_t size;
POOL_QUERY_CACHE_ARRAY *cp = cache_array;
@@ -3348,7 +3367,7 @@ pool_create_temp_query_cache(char *query)
* Discard temp query cache
*/
void
-pool_discard_temp_query_cache(POOL_TEMP_QUERY_CACHE * temp_cache)
+pool_discard_temp_query_cache(POOL_TEMP_QUERY_CACHE *temp_cache)
{
if (!temp_cache)
return;
@@ -3374,7 +3393,7 @@ pool_discard_current_temp_query_cache(void)
{
POOL_SESSION_CONTEXT *session_context;
POOL_QUERY_CONTEXT *query_context;
- POOL_TEMP_QUERY_CACHE * temp_cache;
+ POOL_TEMP_QUERY_CACHE *temp_cache;
session_context = pool_get_session_context(true);
query_context = session_context->query_context;
@@ -3394,7 +3413,7 @@ pool_discard_current_temp_query_cache(void)
* Data must be FE/BE protocol packet.
*/
static void
-pool_add_temp_query_cache(POOL_TEMP_QUERY_CACHE * temp_cache, char kind, char *data, int data_len)
+pool_add_temp_query_cache(POOL_TEMP_QUERY_CACHE *temp_cache, char kind, char *data, int data_len)
{
POOL_INTERNAL_BUFFER *buffer;
size_t buflen;
@@ -3455,7 +3474,7 @@ pool_add_temp_query_cache(POOL_TEMP_QUERY_CACHE * temp_cache, char kind, char *d
* Add table oids used by SELECT to temp query cache.
*/
static void
-pool_add_oids_temp_query_cache(POOL_TEMP_QUERY_CACHE * temp_cache, int num_oids, int *oids)
+pool_add_oids_temp_query_cache(POOL_TEMP_QUERY_CACHE *temp_cache, int num_oids, int *oids)
{
POOL_INTERNAL_BUFFER *buffer;
@@ -3482,7 +3501,8 @@ pool_add_oids_temp_query_cache(POOL_TEMP_QUERY_CACHE * temp_cache, int num_oids,
/*
* Create and return internal buffer
*/
-static POOL_INTERNAL_BUFFER * pool_create_buffer(void)
+static POOL_INTERNAL_BUFFER *
+pool_create_buffer(void)
{
POOL_INTERNAL_BUFFER *p;
@@ -3494,7 +3514,7 @@ static POOL_INTERNAL_BUFFER * pool_create_buffer(void)
* Discard internal buffer
*/
static void
-pool_discard_buffer(POOL_INTERNAL_BUFFER * buffer)
+pool_discard_buffer(POOL_INTERNAL_BUFFER *buffer)
{
if (buffer)
{
@@ -3508,7 +3528,7 @@ pool_discard_buffer(POOL_INTERNAL_BUFFER * buffer)
* Add data to internal buffer
*/
static void
-pool_add_buffer(POOL_INTERNAL_BUFFER * buffer, void *data, size_t len)
+pool_add_buffer(POOL_INTERNAL_BUFFER *buffer, void *data, size_t len)
{
#define POOL_ALLOCATE_UNIT 8192
@@ -3547,7 +3567,7 @@ pool_add_buffer(POOL_INTERNAL_BUFFER * buffer, void *data, size_t len)
* Data length is returned to len.
*/
static void *
-pool_get_buffer(POOL_INTERNAL_BUFFER * buffer, size_t *len)
+pool_get_buffer(POOL_INTERNAL_BUFFER *buffer, size_t *len)
{
void *p;
@@ -3568,7 +3588,7 @@ pool_get_buffer(POOL_INTERNAL_BUFFER * buffer, size_t *len)
* Get internal buffer length.
*/
static size_t
-pool_get_buffer_length(POOL_INTERNAL_BUFFER * buffer)
+pool_get_buffer_length(POOL_INTERNAL_BUFFER *buffer)
{
if (buffer == NULL)
return 0;
@@ -3581,7 +3601,7 @@ pool_get_buffer_length(POOL_INTERNAL_BUFFER * buffer)
* Get internal buffer pointer.
*/
static char *
-pool_get_buffer_pointer(POOL_INTERNAL_BUFFER * buffer)
+pool_get_buffer_pointer(POOL_INTERNAL_BUFFER *buffer)
{
if (buffer == NULL)
return NULL;
@@ -3688,7 +3708,7 @@ pool_check_and_discard_cache_buffer(int num_oids, int *oids)
* For other case At Ready for Query handle query cache.
*/
void
-pool_handle_query_cache(POOL_CONNECTION_POOL * backend, char *query, Node *node, char state,
+pool_handle_query_cache(POOL_CONNECTION_POOL *backend, char *query, Node *node, char state,
bool partial_fetch)
{
POOL_SESSION_CONTEXT *session_context;
@@ -3819,7 +3839,8 @@ pool_handle_query_cache(POOL_CONNECTION_POOL * backend, char *query, Node *node,
/* Discard buffered data */
pool_reset_memqcache_buffer(true);
}
- else if (partial_fetch) /* cannot create cache because of partial fetch */
+ else if (partial_fetch) /* cannot create cache because of partial
+ * fetch */
{
/* Discard buffered data */
pool_reset_memqcache_buffer(true);
@@ -3907,9 +3928,9 @@ pool_handle_query_cache(POOL_CONNECTION_POOL * backend, char *query, Node *node,
else
{
/*
- * If we are inside a transaction, we cannot invalidate
- * query cache yet. However we can clear cache buffer, if
- * DML/DDL modifies the TABLE which SELECT uses.
+ * If we are inside a transaction, we cannot invalidate query
+ * cache yet. However we can clear cache buffer, if DML/DDL
+ * modifies the TABLE which SELECT uses.
*/
if (num_oids > 0 && pool_config->memqcache_auto_cache_invalidation)
{
@@ -3995,7 +4016,7 @@ pool_handle_query_cache(POOL_CONNECTION_POOL * backend, char *query, Node *node,
/*
* Create and initialize query cache stats
*/
-static POOL_QUERY_CACHE_STATS * stats;
+static POOL_QUERY_CACHE_STATS *stats;
int
pool_init_memqcache_stats(void)
{
@@ -4178,7 +4199,7 @@ pool_hash_init(int nelements)
for (i = 0; i < nelements2 - 1; i++)
{
- hash_elements[i].next = (POOL_HASH_ELEMENT *) & hash_elements[i + 1];
+ hash_elements[i].next = (POOL_HASH_ELEMENT *) &hash_elements[i + 1];
}
hash_elements[nelements2 - 1].next = NULL;
hash_free = hash_elements;
@@ -4212,6 +4233,7 @@ pool_hash_size(int nelements)
return size;
}
+
/*
* Reset hash table on shared memory "nelements" is max number of
* hash keys. The actual number of hash key is rounded up to power of
@@ -4254,7 +4276,7 @@ pool_hash_reset(int nelements)
for (i = 0; i < nelements2 - 1; i++)
{
- hash_elements[i].next = (POOL_HASH_ELEMENT *) & hash_elements[i + 1];
+ hash_elements[i].next = (POOL_HASH_ELEMENT *) &hash_elements[i + 1];
}
hash_elements[nelements2 - 1].next = NULL;
hash_free = hash_elements;
@@ -4267,9 +4289,9 @@ pool_hash_reset(int nelements)
* If found, returns cache id, otherwise NULL.
*/
POOL_CACHEID *
-pool_hash_search(POOL_QUERY_HASH * key)
+pool_hash_search(POOL_QUERY_HASH *key)
{
- volatile POOL_HASH_ELEMENT *element;
+ volatile POOL_HASH_ELEMENT *element;
uint32 hash_key = create_hash_key(key);
@@ -4312,7 +4334,7 @@ pool_hash_search(POOL_QUERY_HASH * key)
if (memcmp((const void *) element->hashkey.query_hash,
(const void *) key->query_hash, sizeof(key->query_hash)) == 0)
{
- return (POOL_CACHEID *) & element->cacheid;
+ return (POOL_CACHEID *) &element->cacheid;
}
element = element->next;
}
@@ -4325,7 +4347,7 @@ pool_hash_search(POOL_QUERY_HASH * key)
* rather than throw an error.
*/
static int
-pool_hash_insert(POOL_QUERY_HASH * key, POOL_CACHEID * cacheid, bool update)
+pool_hash_insert(POOL_QUERY_HASH *key, POOL_CACHEID *cacheid, bool update)
{
POOL_HASH_ELEMENT *element;
POOL_HASH_ELEMENT *new_element;
@@ -4410,7 +4432,7 @@ pool_hash_insert(POOL_QUERY_HASH * key, POOL_CACHEID * cacheid, bool update)
* Delete MD5 key and associated cache id from shmem hash table.
*/
int
-pool_hash_delete(POOL_QUERY_HASH * key)
+pool_hash_delete(POOL_QUERY_HASH *key)
{
POOL_HASH_ELEMENT *element;
POOL_HASH_ELEMENT **delete_point;
@@ -4431,7 +4453,7 @@ pool_hash_delete(POOL_QUERY_HASH * key)
* Look for delete location
*/
found = false;
- delete_point = (POOL_HASH_ELEMENT * *) & (hash_header->elements[hash_key].element);
+ delete_point = (POOL_HASH_ELEMENT **) &(hash_header->elements[hash_key].element);
element = hash_header->elements[hash_key].element;
while (element)
@@ -4470,7 +4492,7 @@ pool_hash_delete(POOL_QUERY_HASH * key)
* string. We use top most 8 characters of MD5 string for calculation.
*/
static uint32
-create_hash_key(POOL_QUERY_HASH * key)
+create_hash_key(POOL_QUERY_HASH *key)
{
#define POOL_HASH_NCHARS 8
@@ -4490,7 +4512,7 @@ create_hash_key(POOL_QUERY_HASH * key)
static volatile POOL_HASH_ELEMENT *
get_new_hash_element(void)
{
- volatile POOL_HASH_ELEMENT *elm;
+ volatile POOL_HASH_ELEMENT *elm;
if (!hash_free->next)
{
@@ -4515,7 +4537,7 @@ get_new_hash_element(void)
* Put back hash element to free list.
*/
static void
-put_back_hash_element(volatile POOL_HASH_ELEMENT * element)
+put_back_hash_element(volatile POOL_HASH_ELEMENT *element)
{
POOL_HASH_ELEMENT *elm;
@@ -4634,7 +4656,7 @@ pool_get_shmem_storage_stats(void)
* actually replies with Data row and Command Complete message.
*/
static void
-inject_cached_message(POOL_CONNECTION * backend, char *qcache, int qcachelen)
+inject_cached_message(POOL_CONNECTION *backend, char *qcache, int qcachelen)
{
char kind;
int len;
@@ -4698,18 +4720,19 @@ inject_cached_message(POOL_CONNECTION * backend, char *qcache, int qcachelen)
* Count up number of received messages to compare with the number of
* pending messages
*/
- switch(kind)
+ switch (kind)
{
- case '1': /* parse complete */
- case '2': /* bind complete */
- case '3': /* close complete */
- case 'C': /* command complete */
- case 's': /* portal suspended */
- case 'T': /* row description */
- msg_cnt++; /* count up number of messages */
+ case '1': /* parse complete */
+ case '2': /* bind complete */
+ case '3': /* close complete */
+ case 'C': /* command complete */
+ case 's': /* portal suspended */
+ case 'T': /* row description */
+ msg_cnt++; /* count up number of messages */
elog(DEBUG1, "count up message %c msg_cnt: %d", kind, msg_cnt);
break;
- case 'E': /* ErrorResponse */
+ case 'E': /* ErrorResponse */
+
/*
* If we receive ErrorResponse, it is likely that the last
* Execute caused an error and we can stop reading messsages
@@ -4822,7 +4845,8 @@ InvalidateQueryCache(int tableoid, int dboid)
* the entry since it's relatively expensive. It needs to rewrite the whole
* file in the worst case.
*/
-bool query_cache_delete_by_stmt(char *query, POOL_CONNECTION_POOL * backend)
+bool
+query_cache_delete_by_stmt(char *query, POOL_CONNECTION_POOL *backend)
{
bool rtn = true;
pool_sigset_t oldmask;
@@ -4839,7 +4863,7 @@ bool query_cache_delete_by_stmt(char *query, POOL_CONNECTION_POOL * backend)
if (pool_is_shmem_cache())
{
- POOL_QUERY_HASH hashkey;
+ POOL_QUERY_HASH hashkey;
memcpy(hashkey.query_hash, key, POOL_MD5_HASHKEYLEN);
cacheid = pool_hash_search(&hashkey);
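
pool_reuse_block() above notes that block eviction approximates the clock page-replacement algorithm. A compact illustration of that scan on its own, with hypothetical names and a fixed-size reference-bit array; the real code walks cache block headers in shared memory starting from *pool_fsmm_clock_hand:

    #include <stdbool.h>

    #define NBLOCKS 8

    static bool recently_used[NBLOCKS];  /* one reference bit per block */
    static int  clock_hand;              /* next block to inspect */

    /*
     * Sweep the hand until a block whose reference bit is clear is found;
     * a block that was used since the last visit gets a second chance
     * (its bit is cleared and the hand moves on).
     */
    static int
    clock_pick_block(void)
    {
        for (;;)
        {
            if (!recently_used[clock_hand])
            {
                int victim = clock_hand;

                clock_hand = (clock_hand + 1) % NBLOCKS;
                return victim;          /* reuse this block */
            }
            recently_used[clock_hand] = false;
            clock_hand = (clock_hand + 1) % NBLOCKS;
        }
    }

Even when every bit is set, the first sweep clears them all, so the second sweep always terminates with a victim.
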
diff --git a/src/rewrite/pool_lobj.c b/src/rewrite/pool_lobj.c
index 7601a9316..2e4fe6dea 100644
--- a/src/rewrite/pool_lobj.c
+++ b/src/rewrite/pool_lobj.c
@@ -53,7 +53,7 @@
*/
char *
pool_rewrite_lo_creat(char kind, char *packet, int packet_len,
- POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, int *len)
+ POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, int *len)
{
#define LO_CREAT_OID_QUERY "SELECT oid FROM pg_catalog.pg_proc WHERE proname = 'lo_creat' and pronamespace = (SELECT oid FROM pg_catalog.pg_namespace WHERE nspname = 'pg_catalog')"
@@ -67,8 +67,8 @@ pool_rewrite_lo_creat(char kind, char *packet, int packet_len,
static char rewritten_packet[LO_CREATE_PACKET_LENGTH];
- static POOL_RELCACHE * relcache_lo_creat;
- static POOL_RELCACHE * relcache_lo_create;
+ static POOL_RELCACHE *relcache_lo_creat;
+ static POOL_RELCACHE *relcache_lo_create;
int lo_creat_oid;
int lo_create_oid;
diff --git a/src/rewrite/pool_timestamp.c b/src/rewrite/pool_timestamp.c
index 4dca05e9f..01402bfa6 100644
--- a/src/rewrite/pool_timestamp.c
+++ b/src/rewrite/pool_timestamp.c
@@ -43,13 +43,13 @@ typedef struct
char *adsrc; /* default value expression */
int use_timestamp; /* not zero if timestamp is used in default
* value */
-} TSAttr;
+} TSAttr;
typedef struct
{
int relnatts; /* num of attributes */
TSAttr attr[1];
-} TSRel;
+} TSRel;
typedef struct
{
@@ -61,26 +61,26 @@ typedef struct
* instead of const */
bool rewrite; /* has rewritten? */
List *params; /* list of additional params */
-} TSRewriteContext;
+} TSRewriteContext;
-static void *ts_register_func(POOL_SELECT_RESULT * res);
+static void *ts_register_func(POOL_SELECT_RESULT *res);
static void *ts_unregister_func(void *data);
-static TSRel * relcache_lookup(TSRewriteContext * ctx);
+static TSRel *relcache_lookup(TSRewriteContext *ctx);
static bool isStringConst(Node *node, const char *str);
static bool rewrite_timestamp_walker(Node *node, void *context);
-static bool rewrite_timestamp_insert(InsertStmt *i_stmt, TSRewriteContext * ctx);
-static bool rewrite_timestamp_update(UpdateStmt *u_stmt, TSRewriteContext * ctx);
-static char *get_current_timestamp(POOL_CONNECTION_POOL * backend);
-static Node *makeTsExpr(TSRewriteContext * ctx);
+static bool rewrite_timestamp_insert(InsertStmt *i_stmt, TSRewriteContext *ctx);
+static bool rewrite_timestamp_update(UpdateStmt *u_stmt, TSRewriteContext *ctx);
+static char *get_current_timestamp(POOL_CONNECTION_POOL *backend);
+static Node *makeTsExpr(TSRewriteContext *ctx);
static TypeCast *makeTypeCastFromSvfOp(SQLValueFunctionOp op);
-static A_Const *makeStringConstFromQuery(POOL_CONNECTION_POOL * backend, char *expression);
+static A_Const *makeStringConstFromQuery(POOL_CONNECTION_POOL *backend, char *expression);
bool raw_expression_tree_walker(Node *node, bool (*walker) (), void *context);
POOL_RELCACHE *ts_relcache;
static void *
-ts_register_func(POOL_SELECT_RESULT * res)
+ts_register_func(POOL_SELECT_RESULT *res)
{
/* Number of result columns included in res */
#define NUM_COLS 3
@@ -136,7 +136,7 @@ ts_unregister_func(void *data)
static TSRel *
-relcache_lookup(TSRewriteContext * ctx)
+relcache_lookup(TSRewriteContext *ctx)
{
#define ATTRDEFQUERY (Pgversion(ctx->backend)->major >= 73 ? \
"SELECT attname, pg_catalog.pg_get_expr(d.adbin, d.adrelid), coalesce((pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%%now()%%' OR pg_catalog.pg_get_expr(d.adbin, d.adrelid) LIKE '%%''now''::text%%' OR" \
@@ -261,7 +261,7 @@ relcache_lookup(TSRewriteContext * ctx)
* and add it into params list in context.
*/
static Node *
-makeTsExpr(TSRewriteContext * ctx)
+makeTsExpr(TSRewriteContext *ctx)
{
ParamRef *param;
@@ -483,7 +483,7 @@ rewrite_timestamp_walker(Node *node, void *context)
* Get `now()' from MAIN node
*/
static char *
-get_current_timestamp(POOL_CONNECTION_POOL * backend)
+get_current_timestamp(POOL_CONNECTION_POOL *backend)
{
POOL_SELECT_RESULT *res;
static char timestamp[64];
@@ -507,7 +507,7 @@ get_current_timestamp(POOL_CONNECTION_POOL * backend)
* rewrite InsertStmt
*/
static bool
-rewrite_timestamp_insert(InsertStmt *i_stmt, TSRewriteContext * ctx)
+rewrite_timestamp_insert(InsertStmt *i_stmt, TSRewriteContext *ctx)
{
int i;
bool rewrite = false;
@@ -718,7 +718,7 @@ rewrite_timestamp_insert(InsertStmt *i_stmt, TSRewriteContext * ctx)
* rewrite UpdateStmt
*/
static bool
-rewrite_timestamp_update(UpdateStmt *u_stmt, TSRewriteContext * ctx)
+rewrite_timestamp_update(UpdateStmt *u_stmt, TSRewriteContext *ctx)
{
TSRel *relcache = NULL;
ListCell *lc;
@@ -786,8 +786,8 @@ rewrite_timestamp_update(UpdateStmt *u_stmt, TSRewriteContext * ctx)
* returns query string as palloced string, or NULL if not to need rewrite.
*/
char *
-rewrite_timestamp(POOL_CONNECTION_POOL * backend, Node *node,
- bool rewrite_to_params, POOL_SENT_MESSAGE * message)
+rewrite_timestamp(POOL_CONNECTION_POOL *backend, Node *node,
+ bool rewrite_to_params, POOL_SENT_MESSAGE *message)
{
TSRewriteContext ctx;
Node *stmt;
@@ -818,18 +818,20 @@ rewrite_timestamp(POOL_CONNECTION_POOL * backend, Node *node,
stmt = ((PrepareStmt *) node)->query;
ctx.rewrite_to_params = true;
}
+
/*
* CopyStmt
*/
- else if (IsA(node, CopyStmt) &&((CopyStmt *) node)->query != NULL)
+ else if (IsA(node, CopyStmt) && ((CopyStmt *) node)->query != NULL)
stmt = ((CopyStmt *) node)->query;
+
/*
* ExplainStmt
*/
else if (IsA(node, ExplainStmt))
{
ListCell *lc;
- bool analyze = false;
+ bool analyze = false;
/* Check to see if this is EXPLAIN ANALYZE */
foreach(lc, ((ExplainStmt *) node)->options)
@@ -889,7 +891,7 @@ rewrite_timestamp(POOL_CONNECTION_POOL * backend, Node *node,
}
else if (IsA(stmt, CopyStmt))
{
- CopyStmt *c_stmt = (CopyStmt *) stmt;
+ CopyStmt *c_stmt = (CopyStmt *) stmt;
raw_expression_tree_walker(
(Node *) c_stmt->attlist,
@@ -908,7 +910,7 @@ rewrite_timestamp(POOL_CONNECTION_POOL * backend, Node *node,
}
else if (IsA(stmt, MergeStmt))
{
- MergeStmt *m_stmt = (MergeStmt *) stmt;
+ MergeStmt *m_stmt = (MergeStmt *) stmt;
ListCell *temp;
/* USING data_source */
@@ -924,8 +926,8 @@ rewrite_timestamp(POOL_CONNECTION_POOL * backend, Node *node,
foreach(temp, m_stmt->mergeWhenClauses)
{
raw_expression_tree_walker(
- lfirst(temp),
- rewrite_timestamp_walker, (void *) &ctx);
+ lfirst(temp),
+ rewrite_timestamp_walker, (void *) &ctx);
}
raw_expression_tree_walker(
@@ -1068,8 +1070,8 @@ rewrite_timestamp(POOL_CONNECTION_POOL * backend, Node *node,
* rewrite Bind message to add parameter values
*/
char *
-bind_rewrite_timestamp(POOL_CONNECTION_POOL * backend,
- POOL_SENT_MESSAGE * message,
+bind_rewrite_timestamp(POOL_CONNECTION_POOL *backend,
+ POOL_SENT_MESSAGE *message,
const char *orig_msg, int *len)
{
int16 tmp2,
@@ -1134,15 +1136,19 @@ bind_rewrite_timestamp(POOL_CONNECTION_POOL * backend,
if (num_formats == 0)
{
/*
- * If num_formats is 0, the original message has no parameters or the parameter formats are all text,
- * so we don't need additional format codes since timestamp parameters use text as its format.
+ * If num_formats is 0, the original message has no parameters or the
+ * parameter formats are all text, so we don't need additional format
+ * codes since timestamp parameters use text as its format.
*/
num_formats_new = 0;
}
else
{
- /* If num formats is 1, this means the specified format code is applied for all original parameters,
- * so enlarge message length to specify format codes for each of original parameters. */
+ /*
+ * If num formats is 1, this means the specified format code is
+ * applied for all original parameters, so enlarge message length to
+ * specify format codes for each of original parameters.
+ */
if (num_formats == 1)
*len += (num_org_params - 1) * sizeof(int16);
@@ -1161,10 +1167,14 @@ bind_rewrite_timestamp(POOL_CONNECTION_POOL * backend,
/* 3.2. the format codes */
if (num_formats >= 1)
{
- /* If num_formats is 1, copy the specified format code as numbers of original parameters */
+ /*
+ * If num_formats is 1, copy the specified format code as numbers of
+ * original parameters
+ */
if (num_formats == 1)
{
- int16 org_format_code;
+ int16 org_format_code;
+
memcpy(&org_format_code, copy_from, sizeof(int16));
copy_from += copy_len;
@@ -1174,7 +1184,7 @@ bind_rewrite_timestamp(POOL_CONNECTION_POOL * backend,
copy_to += sizeof(int16);
}
}
- /* otherwise, copy the original format codes as they are*/
+ /* otherwise, copy the original format codes as they are */
else
{
copy_len = num_formats * sizeof(int16);
@@ -1238,7 +1248,7 @@ bind_rewrite_timestamp(POOL_CONNECTION_POOL * backend,
#ifdef TIMESTAMPDEBUG
fprintf(stderr, "message length:%d\n", *len);
- for(i = 0; i < *len; i++)
+ for (i = 0; i < *len; i++)
{
fprintf(stderr, "%02x ", new_msg[i]);
}
@@ -1250,7 +1260,7 @@ bind_rewrite_timestamp(POOL_CONNECTION_POOL * backend,
/* make A_Const of T_String from "SELECT <expression>"*/
static A_Const *
-makeStringConstFromQuery(POOL_CONNECTION_POOL * backend, char *expression)
+makeStringConstFromQuery(POOL_CONNECTION_POOL *backend, char *expression)
{
A_Const *con;
POOL_SELECT_RESULT *res;
@@ -1374,6 +1384,7 @@ raw_expression_tree_walker(Node *node,
/* Guard against stack overflow due to overly complex expressions */
+
/*
* check_stack_depth();
*/
diff --git a/src/sql/pgpool-recovery/pgpool-recovery.c b/src/sql/pgpool-recovery/pgpool-recovery.c
index 77f92753e..7607d13aa 100644
--- a/src/sql/pgpool-recovery/pgpool-recovery.c
+++ b/src/sql/pgpool-recovery/pgpool-recovery.c
@@ -83,23 +83,23 @@ pgpool_recovery(PG_FUNCTION_ARGS)
if (PG_NARGS() >= 7) /* Pgpool-II 4.3 or later */
{
char *primary_port = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(PG_GETARG_TEXT_P(3))));
+ PointerGetDatum(PG_GETARG_TEXT_P(3))));
int remote_node = PG_GETARG_INT32(4);
char *remote_port = DatumGetCString(DirectFunctionCall1(textout,
PointerGetDatum(PG_GETARG_TEXT_P(5))));
char *primary_host = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(PG_GETARG_TEXT_P(6))));
+ PointerGetDatum(PG_GETARG_TEXT_P(6))));
snprintf(recovery_script, sizeof(recovery_script), "\"%s/%s\" \"%s\" \"%s\" \"%s\" \"%s\" %d \"%s\" \"%s\"",
DataDir, script, DataDir, remote_host,
remote_data_directory, primary_port, remote_node, remote_port, primary_host);
}
- else if (PG_NARGS() >= 6) /* Pgpool-II 4.1 or 4.2 */
+ else if (PG_NARGS() >= 6) /* Pgpool-II 4.1 or 4.2 */
{
char *primary_port = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(PG_GETARG_TEXT_P(3))));
+ PointerGetDatum(PG_GETARG_TEXT_P(3))));
int remote_node = PG_GETARG_INT32(4);
char *remote_port = DatumGetCString(DirectFunctionCall1(textout,
@@ -109,10 +109,10 @@ pgpool_recovery(PG_FUNCTION_ARGS)
DataDir, script, DataDir, remote_host,
remote_data_directory, primary_port, remote_node, remote_port);
}
- else if (PG_NARGS() >= 5) /* Pgpool-II 4.0 */
+ else if (PG_NARGS() >= 5) /* Pgpool-II 4.0 */
{
char *primary_port = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(PG_GETARG_TEXT_P(3))));
+ PointerGetDatum(PG_GETARG_TEXT_P(3))));
int remote_node = PG_GETARG_INT32(4);
snprintf(recovery_script, sizeof(recovery_script), "\"%s/%s\" \"%s\" \"%s\" \"%s\" \"%s\" %d",
@@ -122,7 +122,7 @@ pgpool_recovery(PG_FUNCTION_ARGS)
else if (PG_NARGS() >= 4) /* Pgpool-II 3.4 - 3.7 */
{
char *primary_port = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(PG_GETARG_TEXT_P(3))));
+ PointerGetDatum(PG_GETARG_TEXT_P(3))));
snprintf(recovery_script, sizeof(recovery_script), "\"%s/%s\" \"%s\" \"%s\" \"%s\" \"%s\"",
DataDir, script, DataDir, remote_host,
@@ -356,6 +356,7 @@ get_function_oid(const char *funcname, const char *argtype, const char *nspname)
{
#if defined(PG_VERSION_NUM) && (PG_VERSION_NUM >= 120000)
Form_pg_proc proctup = (Form_pg_proc) GETSTRUCT(tup);
+
funcid = proctup->oid;
#else
funcid = HeapTupleGetOid(tup);
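
The reindented branches of pgpool_recovery() show the version dispatch: each newer pgpool-II release passes more SQL arguments, and PG_NARGS() decides how many of them end up on the recovery script command line. A compact sketch of that mapping, with the counts read off the snprintf() calls above:

    #include <stdio.h>

    /*
     * Number of positional arguments handed to the recovery script, keyed on
     * how many SQL arguments the caller supplied (illustrative summary only).
     */
    static int
    recovery_script_args(int nargs)
    {
        if (nargs >= 7)
            return 7;           /* 4.3 or later: adds primary_host */
        if (nargs >= 6)
            return 6;           /* 4.1 / 4.2: adds remote_port */
        if (nargs >= 5)
            return 5;           /* 4.0: adds remote_node */
        return 4;               /* 3.4 - 3.7 */
    }

    int
    main(void)
    {
        printf("a 7-argument call passes %d script arguments\n",
               recovery_script_args(7));
        return 0;
    }
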
diff --git a/src/sql/pgpool_adm/pgpool_adm.c b/src/sql/pgpool_adm/pgpool_adm.c
index b1e2f5822..dca5a1260 100644
--- a/src/sql/pgpool_adm/pgpool_adm.c
+++ b/src/sql/pgpool_adm/pgpool_adm.c
@@ -38,7 +38,7 @@
static PCPConnInfo * connect_to_server(char *host, int port, char *user, char *pass);
static PCPConnInfo * connect_to_server_from_foreign_server(char *name);
-static Timestamp str2timestamp(char *str);
+static Timestamp str2timestamp(char *str);
/**
* Wrapper around pcp_connect
@@ -612,7 +612,7 @@ _pcp_health_check_stats(PG_FUNCTION_ARGS)
POOL_HEALTH_CHECK_STATS *stats;
Datum values[20]; /* values to build the returned tuple from */
bool nulls[] = {false, false, false, false, false, false, false, false, false, false,
- false, false, false, false, false, false, false, false, false, false};
+ false, false, false, false, false, false, false, false, false, false};
TupleDesc tupledesc;
HeapTuple tuple;
AttrNumber an;
@@ -711,7 +711,7 @@ _pcp_health_check_stats(PG_FUNCTION_ARGS)
values[i++] = Int64GetDatum(atol(stats->min_health_check_duration));
values[i++] = Float4GetDatum(atof(stats->average_health_check_duration));
- if (*stats->last_health_check =='\0' )
+ if (*stats->last_health_check == '\0')
nulls[i++] = true;
else
values[i++] = str2timestamp(stats->last_health_check);
@@ -771,7 +771,7 @@ _pcp_proc_info(PG_FUNCTION_ARGS)
int an;
-#define NUM_COLS 20 /* number of columns */
+#define NUM_COLS 20 /* number of columns */
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
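
In _pcp_health_check_stats() the realigned nulls[] initializer and the '\0' test implement one rule: a health check timestamp that arrived as an empty string becomes a SQL NULL in the result tuple instead of being passed to str2timestamp(). A stripped-down sketch of that rule, with plain strings standing in for Datum values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-in for filling one column of the result tuple. */
    static void
    set_timestamp_column(const char *src, const char **values, bool *nulls, int *i)
    {
        if (*src == '\0')
            nulls[(*i)++] = true;   /* empty string: column becomes NULL */
        else
            values[(*i)++] = src;   /* otherwise keep the timestamp string */
    }

    int
    main(void)
    {
        const char *values[2] = {NULL, NULL};
        bool        nulls[2] = {false, false};
        int         i = 0;

        set_timestamp_column("", values, nulls, &i);
        set_timestamp_column("2024-01-01 00:00:00", values, nulls, &i);
        printf("col0 null=%d, col1=%s\n", nulls[0], values[1]);
        return 0;
    }
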
diff --git a/src/streaming_replication/pool_worker_child.c b/src/streaming_replication/pool_worker_child.c
index 746659e0f..4f8f823a3 100644
--- a/src/streaming_replication/pool_worker_child.c
+++ b/src/streaming_replication/pool_worker_child.c
@@ -69,7 +69,7 @@
#include "watchdog/wd_internal_commands.h"
#include "watchdog/watchdog.h"
-static POOL_CONNECTION_POOL_SLOT * slots[MAX_NUM_BACKENDS];
+static POOL_CONNECTION_POOL_SLOT *slots[MAX_NUM_BACKENDS];
static volatile sig_atomic_t reload_config_request = 0;
static volatile sig_atomic_t restart_request = 0;
@@ -100,7 +100,7 @@ static void sr_check_will_die(int code, Datum arg);
#define PG10_SERVER_VERSION 100000 /* PostgreSQL 10 server version num */
#define PG91_SERVER_VERSION 90100 /* PostgreSQL 9.1 server version num */
-static volatile bool follow_primary_lock_acquired;
+static volatile bool follow_primary_lock_acquired;
/*
* worker child main loop
@@ -170,13 +170,14 @@ do_worker_child(void)
{
MemoryContextSwitchTo(WorkerMemoryContext);
MemoryContextResetAndDeleteChildren(WorkerMemoryContext);
+
/*
* Since WorkerMemoryContext is used for "slots", we need to clear it
* so that new slots are allocated later on.
*/
memset(slots, 0, sizeof(slots));
-
- bool watchdog_leader; /* true if I am the watchdog leader */
+
+ bool watchdog_leader; /* true if I am the watchdog leader */
CHECK_REQUEST;
@@ -193,11 +194,12 @@ do_worker_child(void)
if (pool_config->use_watchdog)
{
WD_STATES wd_status;
- WDPGBackendStatus *backendStatus;
+ WDPGBackendStatus *backendStatus;
wd_status = wd_internal_get_watchdog_local_node_state();
ereport(DEBUG1,
(errmsg("watchdog status: %d", wd_status)));
+
/*
* Ask the watchdog to get all the backend states from the
* Leader/Coordinator Pgpool-II node.
@@ -206,14 +208,15 @@ do_worker_child(void)
backendStatus = get_pg_backend_status_from_leader_wd_node();
if (!backendStatus)
+
/*
* Couldn't get leader status.
*/
watchdog_leader = false;
else
{
- int quorum = wd_internal_get_watchdog_quorum_state();
- int node_count = backendStatus->node_count;
+ int quorum = wd_internal_get_watchdog_quorum_state();
+ int node_count = backendStatus->node_count;
ereport(DEBUG1,
(errmsg("quorum: %d node_count: %d",
@@ -221,8 +224,8 @@ do_worker_child(void)
if (quorum >= 0 && backendStatus->node_count <= 0)
{
/*
- * Quorum exists and node_count <= 0.
- * Definitely I am the leader.
+ * Quorum exists and node_count <= 0. Definitely I am the
+ * leader.
*/
watchdog_leader = true;
}
@@ -234,14 +237,15 @@ do_worker_child(void)
}
/*
- * If streaming replication mode, do time lag checking
- * Also skip if failover/failback is ongoing.
+ * If streaming replication mode, do time lag checking Also skip if
+ * failover/failback is ongoing.
*/
if (pool_config->sr_check_period > 0 && STREAM &&
Req_info->switching == false)
{
/*
- * Acquire follow primary lock. If fail to acquire lock, try again.
+ * Acquire follow primary lock. If fail to acquire lock, try
+ * again.
*/
follow_primary_lock_acquired = false;
@@ -273,6 +277,7 @@ do_worker_child(void)
ereport(LOG,
(errmsg("pgpool_worker_child: invalid node found %d", i)));
+
/*
* If detach_false_primary is enabled, send
* degenerate request to detach invalid node.
@@ -291,17 +296,17 @@ do_worker_child(void)
(pool_config->use_watchdog && watchdog_leader))
{
n = i;
+
/*
* In the case watchdog enabled, we need
* to add REQ_DETAIL_CONFIRMED, which
- * means no quorum consensus is
- * required. If we do not add this, the
- * target node will remain quarantine
- * state since other node does not request
- * failover.
+ * means no quorum consensus is required.
+ * If we do not add this, the target node
+ * will remain quarantine state since
+ * other node does not request failover.
*/
degenerate_backend_set(&n, 1,
- REQ_DETAIL_SWITCHOVER|REQ_DETAIL_CONFIRMED);
+ REQ_DETAIL_SWITCHOVER | REQ_DETAIL_CONFIRMED);
}
else if (pool_config->use_watchdog)
ereport(LOG,
@@ -396,15 +401,15 @@ check_replication_time_lag(void)
int i;
POOL_SELECT_RESULT *res;
POOL_SELECT_RESULT *res_rep; /* query results of pg_stat_replication */
- uint64 lsn[MAX_NUM_BACKENDS];
+ uint64 lsn[MAX_NUM_BACKENDS];
char *query;
char *stat_rep_query;
BackendInfo *bkinfo;
- uint64 lag;
- uint64 delay_threshold_by_time;
+ uint64 lag;
+ uint64 delay_threshold_by_time;
ErrorContextCallback callback;
- int active_standby_node;
- bool replication_delay_by_time;
+ int active_standby_node;
+ bool replication_delay_by_time;
/* clear replication state */
for (i = 0; i < NUM_BACKENDS; i++)
@@ -525,7 +530,7 @@ check_replication_time_lag(void)
*/
if (slots[PRIMARY_NODE_ID] && stat_rep_query != NULL)
{
- int status;
+ int status;
status = get_query_result(slots, PRIMARY_NODE_ID, stat_rep_query, &res_rep);
@@ -547,27 +552,27 @@ check_replication_time_lag(void)
if (status == 0)
{
- int j;
- char *s;
+ int j;
+ char *s;
#define NUM_COLS 4
for (j = 0; j < res_rep->numrows; j++)
{
- if (strcmp(res_rep->data[j*NUM_COLS], bkinfo->backend_application_name) == 0)
+ if (strcmp(res_rep->data[j * NUM_COLS], bkinfo->backend_application_name) == 0)
{
/*
- * If sr_check_user has enough privilege, it should return
- * some string. If not, NULL pointer will be returned for
- * res_rep->data[1] and [2]. So we need to prepare for the
- * latter case.
+ * If sr_check_user has enough privilege, it should
+ * return some string. If not, NULL pointer will be
+ * returned for res_rep->data[1] and [2]. So we need
+ * to prepare for the latter case.
*/
- s = res_rep->data[j*NUM_COLS+1]? res_rep->data[j*NUM_COLS+1] : "";
+ s = res_rep->data[j * NUM_COLS + 1] ? res_rep->data[j * NUM_COLS + 1] : "";
strlcpy(bkinfo->replication_state, s, NAMEDATALEN);
- s = res_rep->data[j*NUM_COLS+2]? res_rep->data[j*NUM_COLS+2] : "";
+ s = res_rep->data[j * NUM_COLS + 2] ? res_rep->data[j * NUM_COLS + 2] : "";
strlcpy(bkinfo->replication_sync_state, s, NAMEDATALEN);
- s = res_rep->data[j*NUM_COLS+3];
+ s = res_rep->data[j * NUM_COLS + 3];
if (s)
{
bkinfo->standby_delay = atol(s);
@@ -622,7 +627,9 @@ check_replication_time_lag(void)
{
lag = bkinfo->standby_delay;
delay_threshold_by_time = pool_config->delay_threshold_by_time;
- delay_threshold_by_time *= 1000; /* convert from milli seconds to micro seconds */
+ delay_threshold_by_time *= 1000; /* convert from milli
+ * seconds to micro
+ * seconds */
/* Log delay if necessary */
if ((pool_config->log_standby_delay == LSD_ALWAYS && lag > 0) ||
@@ -631,7 +638,7 @@ check_replication_time_lag(void)
{
ereport(LOG,
(errmsg("Replication of node: %d is behind %.6f second(s) from the primary server (node: %d)",
- i, ((float)lag)/1000000, PRIMARY_NODE_ID)));
+ i, ((float) lag) / 1000000, PRIMARY_NODE_ID)));
}
}
else
@@ -643,7 +650,7 @@ check_replication_time_lag(void)
{
ereport(LOG,
(errmsg("Replication of node: %d is behind " UINT64_FORMAT " bytes from the primary server (node: %d)",
- i, (uint64)(lsn[PRIMARY_NODE_ID] - lsn[i]), PRIMARY_NODE_ID)));
+ i, (uint64) (lsn[PRIMARY_NODE_ID] - lsn[i]), PRIMARY_NODE_ID)));
}
}
}
@@ -674,7 +681,7 @@ text_to_lsn(char *text)
unsigned int xrecoff;
unsigned long long int lsn;
- if (sscanf(text, "%X/%X", &xlogid, &xrecoff) !=2)
+ if (sscanf(text, "%X/%X", &xlogid, &xrecoff) != 2)
{
ereport(ERROR,
(errmsg("invalid LSN format"),
@@ -753,7 +760,7 @@ reload_config(void)
* is guaranteed that no exception occurs within this function.
*/
int
-get_query_result(POOL_CONNECTION_POOL_SLOT * *slots, int backend_id, char *query, POOL_SELECT_RESULT * *res)
+get_query_result(POOL_CONNECTION_POOL_SLOT **slots, int backend_id, char *query, POOL_SELECT_RESULT **res)
{
int sts = -1;
MemoryContext oldContext = CurrentMemoryContext;
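
The re-spaced subscripts in check_replication_time_lag() make the result addressing visible: pg_stat_replication rows come back flattened into one array of strings, NUM_COLS cells per row, and a cell can be a NULL pointer when sr_check_user lacks privilege. A small self-contained sketch of that addressing, with POOL_SELECT_RESULT replaced by a plain array:

    #include <stdio.h>

    #define NUM_COLS 4

    /* Return the cell at (row, col), treating a missing cell as "". */
    static const char *
    cell(char **data, int row, int col)
    {
        const char *s = data[row * NUM_COLS + col];

        return s ? s : "";
    }

    int
    main(void)
    {
        char *data[] = {"standby1", "streaming", "async", "12345",
                        "standby2", NULL, NULL, NULL};

        printf("row 1 state: \"%s\"\n", cell(data, 1, 1));  /* prints "" */
        return 0;
    }
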
diff --git a/src/test/regression/tests/010.rewrite_timestamp/timestamp/main.c b/src/test/regression/tests/010.rewrite_timestamp/timestamp/main.c
index 89a36f2c0..917ca8803 100644
--- a/src/test/regression/tests/010.rewrite_timestamp/timestamp/main.c
+++ b/src/test/regression/tests/010.rewrite_timestamp/timestamp/main.c
@@ -16,19 +16,19 @@ POOL_REQUEST_INFO *Req_info = &_req_info;
POOL_CONFIG _pool_config;
POOL_CONFIG *pool_config = &_pool_config;
-bool redirection_done = false;
+bool redirection_done = false;
typedef struct
{
char *attrname; /* attribute name */
char *adsrc; /* default value expression */
int use_timestamp;
-} TSAttr;
+} TSAttr;
typedef struct
{
int relnatts;
TSAttr attr[4];
-} TSRel;
+} TSRel;
TSRel rc[2] = {
@@ -78,10 +78,10 @@ pool_get_major_version(void)
PGVersion *
-Pgversion(POOL_CONNECTION_POOL * backend)
+Pgversion(POOL_CONNECTION_POOL *backend)
{
#define VERSION_BUF_SIZE 10
- static PGVersion pgversion;
+ static PGVersion pgversion;
pgversion.major = 12;
pgversion.minor = 0;
@@ -97,7 +97,7 @@ pool_create_relcache(int cachesize, char *sql, func_ptr register_func, func_ptr
/* dummy result of relcache (attrname, adsrc, usetimestamp)*/
void *
-pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, char *table)
+pool_search_relcache(POOL_RELCACHE *relcache, POOL_CONNECTION_POOL *backend, char *table)
{
if (strcmp(table, "\"rel1\"") == 0)
return (void *) &(rc[0]);
@@ -107,7 +107,7 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
/* dummy result of "SELECT now()" */
void
-do_query(POOL_CONNECTION * backend, char *query, POOL_SELECT_RESULT * *result, int major)
+do_query(POOL_CONNECTION *backend, char *query, POOL_SELECT_RESULT **result, int major)
{
static POOL_SELECT_RESULT res;
static char *data[1] = {
@@ -153,7 +153,7 @@ main(int argc, char **argv)
exit(1);
}
- tree = raw_parser(argv[1], RAW_PARSE_DEFAULT, strlen(argv[1]), &error, false);
+ tree = raw_parser(argv[1], RAW_PARSE_DEFAULT, strlen(argv[1]), &error, false);
if (tree == NULL)
{
printf("syntax error: %s\n", argv[1]);
@@ -178,7 +178,7 @@ main(int argc, char **argv)
}
void
-free_select_result(POOL_SELECT_RESULT * result)
+free_select_result(POOL_SELECT_RESULT *result)
{
}
POOL_SESSION_CONTEXT *
diff --git a/src/tools/fe_port.c b/src/tools/fe_port.c
index 3704c2730..1416a8f9d 100644
--- a/src/tools/fe_port.c
+++ b/src/tools/fe_port.c
@@ -162,7 +162,8 @@ nowsec(void)
return strbuf;
}
-bool errstart(int elevel, const char *filename, int lineno,
+bool
+errstart(int elevel, const char *filename, int lineno,
const char *funcname, const char *domain)
{
_fe_error_level = elevel;
diff --git a/src/tools/pcp/pcp_frontend_client.c b/src/tools/pcp/pcp_frontend_client.c
index ecc922b93..9f63a78f4 100644
--- a/src/tools/pcp/pcp_frontend_client.c
+++ b/src/tools/pcp/pcp_frontend_client.c
@@ -48,10 +48,10 @@ static void output_watchdog_info_result(PCPResultInfo * pcpResInfo, bool verbose
static void output_procinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose);
static void output_proccount_result(PCPResultInfo * pcpResInfo, bool verbose);
static void output_poolstatus_result(PCPResultInfo * pcpResInfo, bool verbose);
-static void output_nodeinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose);
+static void output_nodeinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose);
static void output_health_check_stats_result(PCPResultInfo * pcpResInfo, bool verbose);
static void output_nodecount_result(PCPResultInfo * pcpResInfo, bool verbose);
-static char *backend_status_to_string(BackendInfo * bi);
+static char *backend_status_to_string(BackendInfo *bi);
static char *format_titles(const char **titles, const char **types, int ntitles);
typedef enum
@@ -96,9 +96,9 @@ struct AppTypes AllAppTypes[] =
{"pcp_recovery_node", PCP_RECOVERY_NODE, "n:h:p:U:wWvd", "recover a node"},
{"pcp_stop_pgpool", PCP_STOP_PGPOOL, "m:h:p:U:s:wWvda", "terminate pgpool-II"},
{"pcp_watchdog_info", PCP_WATCHDOG_INFO, "n:h:p:U:wWvd", "display a pgpool-II watchdog's information"},
- {"pcp_reload_config",PCP_RELOAD_CONFIG,"h:p:U:s:wWvd", "reload a pgpool-II config file"},
- {"pcp_log_rotate",PCP_LOG_ROTATE,"h:p:U:s:wWvd", "rotate the Pgpool-II's log file"},
- {"pcp_invalidate_query_cache",PCP_INVALIDATE_QUERY_CACHE,"h:p:U:s:wWvd", "invalidate query cache"},
+ {"pcp_reload_config", PCP_RELOAD_CONFIG, "h:p:U:s:wWvd", "reload a pgpool-II config file"},
+ {"pcp_log_rotate", PCP_LOG_ROTATE, "h:p:U:s:wWvd", "rotate the Pgpool-II's log file"},
+ {"pcp_invalidate_query_cache", PCP_INVALIDATE_QUERY_CACHE, "h:p:U:s:wWvd", "invalidate query cache"},
{NULL, UNKNOWN, NULL, NULL},
};
struct AppTypes *current_app_type;
@@ -399,7 +399,7 @@ main(int argc, char **argv)
else if (current_app_type->app_type == PCP_LOG_ROTATE)
{
- pcpResInfo = pcp_log_rotate(pcpConn,command_scope);
+ pcpResInfo = pcp_log_rotate(pcpConn, command_scope);
}
else if (current_app_type->app_type == PCP_NODE_COUNT)
@@ -452,7 +452,7 @@ main(int argc, char **argv)
else if (current_app_type->app_type == PCP_RELOAD_CONFIG)
{
- pcpResInfo = pcp_reload_config(pcpConn,command_scope);
+ pcpResInfo = pcp_reload_config(pcpConn, command_scope);
}
else if (current_app_type->app_type == PCP_STOP_PGPOOL)
@@ -535,7 +535,7 @@ output_nodeinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose)
char last_status_change[20];
struct tm tm;
char *frmt;
- int array_size = pcp_result_slot_count(pcpResInfo);
+ int array_size = pcp_result_slot_count(pcpResInfo);
char standby_delay_str[64];
if (verbose)
@@ -543,7 +543,7 @@ output_nodeinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose)
const char *titles[] = {"Hostname", "Port", "Status", "Weight", "Status Name", "Backend Status Name", "Role", "Backend Role", "Replication Delay", "Replication State", "Replication Sync State", "Last Status Change"};
const char *types[] = {"s", "d", "d", "f", "s", "s", "s", "s", "s", "s", "s", "s"};
- frmt = format_titles(titles, types, sizeof(titles)/sizeof(char *));
+ frmt = format_titles(titles, types, sizeof(titles) / sizeof(char *));
}
else
{
@@ -566,10 +566,10 @@ output_nodeinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose)
if (backend_info->standby_delay_by_time)
{
- snprintf(standby_delay_str, sizeof(standby_delay_str), "%.6f", ((float)backend_info->standby_delay)/1000000);
+ snprintf(standby_delay_str, sizeof(standby_delay_str), "%.6f", ((float) backend_info->standby_delay) / 1000000);
if (verbose)
{
- if (backend_info->standby_delay >= 2*1000*1000)
+ if (backend_info->standby_delay >= 2 * 1000 * 1000)
strcat(standby_delay_str, " seconds");
else
strcat(standby_delay_str, " second");
@@ -605,21 +605,21 @@ output_nodeinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose)
static void
output_health_check_stats_result(PCPResultInfo * pcpResInfo, bool verbose)
{
- POOL_HEALTH_CHECK_STATS *stats = (POOL_HEALTH_CHECK_STATS *)pcp_get_binary_data(pcpResInfo, 0);
+ POOL_HEALTH_CHECK_STATS *stats = (POOL_HEALTH_CHECK_STATS *) pcp_get_binary_data(pcpResInfo, 0);
if (verbose)
{
const char *titles[] = {"Node Id", "Host Name", "Port", "Status", "Role", "Last Status Change",
- "Total Count", "Success Count", "Fail Count", "Skip Count", "Retry Count",
- "Average Retry Count", "Max Retry Count", "Max Health Check Duration",
- "Minimum Health Check Duration", "Average Health Check Duration",
- "Last Health Check", "Last Successful Health Check",
- "Last Skip Health Check", "Last Failed Health Check"};
+ "Total Count", "Success Count", "Fail Count", "Skip Count", "Retry Count",
+ "Average Retry Count", "Max Retry Count", "Max Health Check Duration",
+ "Minimum Health Check Duration", "Average Health Check Duration",
+ "Last Health Check", "Last Successful Health Check",
+ "Last Skip Health Check", "Last Failed Health Check"};
const char *types[] = {"s", "s", "s", "s", "s", "s", "s", "s", "s", "s",
- "s", "s", "s", "s", "s", "s", "s", "s", "s", "s"};
- char *format_string;
+ "s", "s", "s", "s", "s", "s", "s", "s", "s", "s"};
+ char *format_string;
- format_string = format_titles(titles, types, sizeof(titles)/sizeof(char *));
+ format_string = format_titles(titles, types, sizeof(titles) / sizeof(char *));
printf(format_string,
stats->node_id,
stats->hostname,
@@ -735,7 +735,7 @@ output_procinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose)
"Database", "Username", "Start time", "Client connection count",
"Major", "Minor", "Backend connection time", "Client connection time",
"Client idle duration", "Client disconnection time", "Pool Counter", "Backend PID",
- "Connected", "PID", "Backend ID", "Status", "Load balance node",
+ "Connected", "PID", "Backend ID", "Status", "Load balance node",
"client_host", "client_port", "statement"
};
const char *types[] = {
@@ -748,7 +748,7 @@ output_procinfo_result(PCPResultInfo * pcpResInfo, bool all, bool verbose)
if (verbose)
- format = format_titles(titles, types, sizeof(titles)/sizeof(char *));
+ format = format_titles(titles, types, sizeof(titles) / sizeof(char *));
else
{
format = "%s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s %s\n";
@@ -993,7 +993,7 @@ get_progname(const char *argv0)
* the function returns the constant string so should not be freed
*/
static char *
-backend_status_to_string(BackendInfo * bi)
+backend_status_to_string(BackendInfo *bi)
{
char *statusName;
@@ -1049,22 +1049,23 @@ role_to_str(SERVER_ROLE role)
static char *
format_titles(const char **titles, const char **types, int ntitles)
{
- int i;
- int maxlen = 0;
- static char formatbuf[8192];
+ int i;
+ int maxlen = 0;
+ static char formatbuf[8192];
- for(i = 0; i < ntitles; i++)
+ for (i = 0; i < ntitles; i++)
{
- int l = strlen(titles[i]);
- maxlen = (l > maxlen)? l : maxlen;
+ int l = strlen(titles[i]);
+
+ maxlen = (l > maxlen) ? l : maxlen;
}
*formatbuf = '\0';
- for(i = 0; i < ntitles; i++)
+ for (i = 0; i < ntitles; i++)
{
- char buf[64];
- char buf2[64];
+ char buf[64];
+ char buf2[64];
snprintf(buf, sizeof(buf), "%%-%ds : %%%%%s", maxlen, types[i]);
snprintf(buf2, sizeof(buf2), buf, titles[i], types[i]);
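
format_titles(), reindented above, builds its output in two stages: the first snprintf() produces a per-title format whose value conversion is escaped as "%%%s", the second fills in the title text, and the escaped conversion survives for the final printf() of the row values. A standalone demonstration of the same trick; the width and buffer sizes are arbitrary:

    #include <stdio.h>

    int
    main(void)
    {
        const char *title = "Port";
        const char *type = "d";     /* the row value will be printed with %d */
        int         maxlen = 12;    /* width of the longest title */
        char        buf[64];
        char        line[64];

        snprintf(buf, sizeof(buf), "%%-%ds : %%%%%s\n", maxlen, type);
        /* buf is now "%-12s : %%d\n" */
        snprintf(line, sizeof(line), buf, title);
        /* line is now "Port         : %d\n", ready for the row printf() */
        printf(line, 9999);
        return 0;
    }
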
diff --git a/src/tools/pgenc/pg_enc.c b/src/tools/pgenc/pg_enc.c
index d548fd6df..83c6d3b4f 100644
--- a/src/tools/pgenc/pg_enc.c
+++ b/src/tools/pgenc/pg_enc.c
@@ -295,42 +295,42 @@ main(int argc, char *argv[])
static void
print_encrypted_password(char *pg_pass, char *pool_key)
{
- unsigned char ciphertext[MAX_ENCODED_PASSWD_LEN];
- unsigned char b64_enc[MAX_ENCODED_PASSWD_LEN];
- int len;
- int cypher_len;
+ unsigned char ciphertext[MAX_ENCODED_PASSWD_LEN];
+ unsigned char b64_enc[MAX_ENCODED_PASSWD_LEN];
+ int len;
+ int cypher_len;
- cypher_len = aes_encrypt_with_password((unsigned char *) pg_pass,
- strlen(pg_pass), pool_key, ciphertext);
+ cypher_len = aes_encrypt_with_password((unsigned char *) pg_pass,
+ strlen(pg_pass), pool_key, ciphertext);
- /* generate the hash for the given username */
- len = pg_b64_encode((const char *) ciphertext, cypher_len, (char *) b64_enc);
- b64_enc[len] = 0;
- fprintf(stdout, "\n%s\n", b64_enc);
- fprintf(stdout, "pool_passwd string: AES%s\n", b64_enc);
+ /* generate the hash for the given username */
+ len = pg_b64_encode((const char *) ciphertext, cypher_len, (char *) b64_enc);
+ b64_enc[len] = 0;
+ fprintf(stdout, "\n%s\n", b64_enc);
+ fprintf(stdout, "pool_passwd string: AES%s\n", b64_enc);
#ifdef DEBUG_ENCODING
- unsigned char b64_dec[MAX_ENCODED_PASSWD_LEN];
- unsigned char plaintext[MAX_PGPASS_LEN];
+ unsigned char b64_dec[MAX_ENCODED_PASSWD_LEN];
+ unsigned char plaintext[MAX_PGPASS_LEN];
- len = pg_b64_decode(b64_enc, len, b64_dec);
- len = aes_decrypt_with_password(b64_dec, len,
- pool_key, plaintext);
- plaintext[len] = 0;
+ len = pg_b64_decode(b64_enc, len, b64_dec);
+ len = aes_decrypt_with_password(b64_dec, len,
+ pool_key, plaintext);
+ plaintext[len] = 0;
#endif
}
static void
process_input_file(char *conf_file, char *input_file, char *key, bool updatepasswd)
{
- FILE *input_file_fd;
- char *buf = NULL;
- char username[MAX_USER_NAME_LEN + 1];
- char password[MAX_PGPASS_LEN + 1];
- char *pch;
- size_t len=0;
- int nread = 0;
- int line_count;
+ FILE *input_file_fd;
+ char *buf = NULL;
+ char username[MAX_USER_NAME_LEN + 1];
+ char password[MAX_PGPASS_LEN + 1];
+ char *pch;
+ size_t len = 0;
+ int nread = 0;
+ int line_count;
fprintf(stdout, "trying to read username:password pairs from file %s\n", input_file);
input_file_fd = fopen(input_file, "r");
@@ -357,7 +357,7 @@ process_input_file(char *conf_file, char *input_file, char *key, bool updatepass
/* Split username and passwords */
pch = buf;
- while( pch && pch != buf + nread && *pch != ':')
+ while (pch && pch != buf + nread && *pch != ':')
pch++;
if (*pch == ':')
pch++;
@@ -367,19 +367,19 @@ process_input_file(char *conf_file, char *input_file, char *key, bool updatepass
goto clear_buffer;
}
- if( (pch-buf) > sizeof(username))
+ if ((pch - buf) > sizeof(username))
{
- fprintf(stderr, "LINE#%02d: input exceeds maximum username length %d\n",line_count, MAX_USER_NAME_LEN);
+ fprintf(stderr, "LINE#%02d: input exceeds maximum username length %d\n", line_count, MAX_USER_NAME_LEN);
goto clear_buffer;
}
strncpy(username, buf, pch - buf - 1);
if (strlen(pch) >= sizeof(password))
{
- fprintf(stderr, "LINE#%02d: input exceeds maximum password length %d\n",line_count, MAX_PGPASS_LEN);
+ fprintf(stderr, "LINE#%02d: input exceeds maximum password length %d\n", line_count, MAX_PGPASS_LEN);
goto clear_buffer;
}
- strncpy(password, pch, sizeof(password) -1);
+ strncpy(password, pch, sizeof(password) - 1);
if (updatepasswd)
update_pool_passwd(conf_file, username, password, key);
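
process_input_file(), reformatted above, splits each username:password line at the first colon and rejects halves that would overflow its fixed-size buffers. The same scan reduced to a standalone helper; the size limits here are placeholders, not the pgpool limits:

    #include <stdio.h>
    #include <string.h>

    #define MAX_USER 32             /* placeholder, not MAX_USER_NAME_LEN */
    #define MAX_PASS 32             /* placeholder, not MAX_PGPASS_LEN */

    /* Split "user:password" into two fixed-size buffers; -1 on bad input. */
    static int
    split_user_pass(const char *buf, size_t nread, char *user, char *pass)
    {
        const char *pch = buf;

        while (pch && pch != buf + nread && *pch != ':')
            pch++;
        if (*pch != ':')
            return -1;              /* no separator on this line */
        pch++;
        if ((size_t) (pch - buf) > MAX_USER || strlen(pch) >= MAX_PASS)
            return -1;              /* either half is too long */
        memcpy(user, buf, pch - buf - 1);
        user[pch - buf - 1] = '\0';
        strncpy(pass, pch, MAX_PASS);
        return 0;
    }

    int
    main(void)
    {
        char        user[MAX_USER + 1];
        char        pass[MAX_PASS + 1];

        if (split_user_pass("alice:secret", strlen("alice:secret"), user, pass) == 0)
            printf("user=%s pass=%s\n", user, pass);
        return 0;
    }
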
diff --git a/src/tools/pgmd5/pg_md5.c b/src/tools/pgmd5/pg_md5.c
index e7d002757..11cdd4f9f 100644
--- a/src/tools/pgmd5/pg_md5.c
+++ b/src/tools/pgmd5/pg_md5.c
@@ -213,15 +213,15 @@ main(int argc, char *argv[])
static void
update_pool_passwd_from_file(char *conf_file, char *input_file, bool md5auth)
{
- FILE *fp;
+ FILE *fp;
- char buf[MAX_BUFFER_SIZE + 1];
- char username[MAX_INPUT_SIZE + 1];
- char password[MAX_INPUT_SIZE + 1];
- char md5[MD5_PASSWD_LEN + 1];
- char *pch;
- int len;
- int line_count;
+ char buf[MAX_BUFFER_SIZE + 1];
+ char username[MAX_INPUT_SIZE + 1];
+ char password[MAX_INPUT_SIZE + 1];
+ char md5[MD5_PASSWD_LEN + 1];
+ char *pch;
+ int len;
+ int line_count;
fprintf(stdout, "trying to read username:password pairs from file %s\n", input_file);
@@ -257,7 +257,7 @@ update_pool_passwd_from_file(char *conf_file, char *input_file, bool md5auth)
/* Split username and passwords */
pch = buf;
- while( pch && pch != buf + len && *pch != ':')
+ while (pch && pch != buf + len && *pch != ':')
pch++;
if (*pch == ':')
pch++;
@@ -268,7 +268,7 @@ update_pool_passwd_from_file(char *conf_file, char *input_file, bool md5auth)
continue;
}
- if( (pch-buf) > sizeof(username))
+ if ((pch - buf) > sizeof(username))
{
fprintf(stdout, "input exceeds maximum username length %d\n\n", MAX_USER_NAME_LEN);
continue;
@@ -283,7 +283,7 @@ update_pool_passwd_from_file(char *conf_file, char *input_file, bool md5auth)
strncpy(password, pch, sizeof(password));
fprintf(stdout, "USER: <%s>\n", username);
- if(md5auth)
+ if (md5auth)
update_pool_passwd(conf_file, username, password);
else
{
@@ -319,11 +319,11 @@ update_pool_passwd(char *conf_file, char *username, char *password)
strlcpy(dirnamebuf, conf_file, sizeof(dirnamebuf));
dirp = dirname(dirnamebuf);
snprintf(pool_passwd, sizeof(pool_passwd), "%s/%s",
- dirp, pool_config->pool_passwd);
+ dirp, pool_config->pool_passwd);
}
else
strlcpy(pool_passwd, pool_config->pool_passwd,
- sizeof(pool_passwd));
+ sizeof(pool_passwd));
pool_init_pool_passwd(pool_passwd, POOL_PASSWD_RW);
diff --git a/src/tools/pgproto/extended_query.c b/src/tools/pgproto/extended_query.c
index c8f694750..5d5dc4f61 100644
--- a/src/tools/pgproto/extended_query.c
+++ b/src/tools/pgproto/extended_query.c
@@ -218,7 +218,7 @@ process_bind(char *buf, PGconn *conn)
fprintf(stderr, "\n");
send_char('B', conn);
- send_int(len, conn); /* message length */
+ send_int(len, conn); /* message length */
send_string(portal, conn); /* portal name */
free(portal);
send_string(stmt, conn); /* statement name */
@@ -226,17 +226,17 @@ process_bind(char *buf, PGconn *conn)
send_int16(ncodes, conn); /* number of format codes */
for (i = 0; i < ncodes; i++)
{
- send_int16(codes[i], conn); /* format code */
+ send_int16(codes[i], conn); /* format code */
}
send_int16(nparams, conn); /* number of params */
for (i = 0; i < nparams; i++)
{
- int paramlen = paramlens[i];
+ int paramlen = paramlens[i];
send_int(paramlen, conn);
- if (paramlen != -1) /* NULL? */
+ if (paramlen != -1) /* NULL? */
{
if (ncodes == 0 || codes[i] == 0)
{
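
The reindented paramlen handling in process_bind() reflects a frontend/backend protocol rule: every Bind parameter is sent as a 4-byte length followed by that many data bytes, and a length of -1 marks a SQL NULL that carries no data bytes at all. A tiny illustration of the convention, writing to stdout instead of a connection:

    #include <stdio.h>

    /* Stand-in for the send_int/send_byte pair used by process_bind(). */
    static void
    send_param(const char *value, int len)
    {
        printf("length: %d\n", len);
        if (len != -1)              /* NULL parameter: nothing follows */
            printf("data:   %.*s\n", len, value);
    }

    int
    main(void)
    {
        send_param("42", 2);        /* ordinary text parameter */
        send_param(NULL, -1);       /* SQL NULL */
        return 0;
    }
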
diff --git a/src/tools/pgproto/main.c b/src/tools/pgproto/main.c
index 8b58ead2c..18e7f59a9 100644
--- a/src/tools/pgproto/main.c
+++ b/src/tools/pgproto/main.c
@@ -201,7 +201,7 @@ connect_db(char *host, char *port, char *user, char *database)
char conninfo[1024];
PGconn *conn;
size_t n;
- char *app_name_str = " application_name=pgproto";
+ char *app_name_str = " application_name=pgproto";
conninfo[0] = '\0';
n = sizeof(conninfo);
diff --git a/src/tools/pgproto/read.c b/src/tools/pgproto/read.c
index fff8e53d9..7dc44aa36 100644
--- a/src/tools/pgproto/read.c
+++ b/src/tools/pgproto/read.c
@@ -97,8 +97,9 @@ read_until_ready_for_query(PGconn *conn, int timeout, int wait_for_ready_for_que
kind = read_char(conn);
switch (kind)
{
- char *channel, *payload;
- int pid;
+ char *channel,
+ *payload;
+ int pid;
case '1': /* Parse complete */
fprintf(stderr, "<= BE ParseComplete\n");
@@ -225,7 +226,7 @@ read_until_ready_for_query(PGconn *conn, int timeout, int wait_for_ready_for_que
case 'A': /* Notification response */
len = read_int32(conn);
- (void)len;
+ (void) len;
pid = read_int32(conn);
channel = read_string(conn);
@@ -248,7 +249,7 @@ read_until_ready_for_query(PGconn *conn, int timeout, int wait_for_ready_for_que
else
{
fprintf(stderr, "<= BE Notification response. pid: %d\n",
- pid);
+ pid);
}
break;
@@ -375,8 +376,8 @@ read_string(PGconn *conn)
#define PROTO_ALLOC_SIZE 512
int sts;
- char *buf;
- char *p;
+ char *buf;
+ char *p;
int alloc_factor = 1;
int len;
diff --git a/src/tools/pgproto/send.c b/src/tools/pgproto/send.c
index 1ada7ff14..2f359291b 100644
--- a/src/tools/pgproto/send.c
+++ b/src/tools/pgproto/send.c
@@ -29,7 +29,7 @@
#include "pgproto/read.h"
#include "pgproto/send.h"
-static void write_it(int fd, void *buf, int len);
+static void write_it(int fd, void *buf, int len);
/*
* Send a character to the connection.
@@ -84,9 +84,11 @@ send_byte(char *buf, int len, PGconn *conn)
* Wrapper for write(2).
*/
static
-void write_it(int fd, void *buf, int len)
+void
+write_it(int fd, void *buf, int len)
{
- int errsave = errno;
+ int errsave = errno;
+
errno = 0;
if (write(fd, buf, len) < 0)
{
@@ -94,4 +96,3 @@ void write_it(int fd, void *buf, int len)
}
errno = errsave;
}
-
diff --git a/src/tools/watchdog/wd_cli.c b/src/tools/watchdog/wd_cli.c
index 2dee8ce0a..33d919d38 100644
--- a/src/tools/watchdog/wd_cli.c
+++ b/src/tools/watchdog/wd_cli.c
@@ -61,31 +61,31 @@ LifeCheckCluster *gslifeCheckCluster = NULL;
static void usage(void);
-static bool validate_number(char* ptr);
+static bool validate_number(char *ptr);
const char *get_progname(const char *argv0);
static void print_lifecheck_cluster(bool include_nodes, bool verbose);
-static void print_node_info(LifeCheckNode* lifeCheckNode, bool verbose);
+static void print_node_info(LifeCheckNode *lifeCheckNode, bool verbose);
static bool fetch_watchdog_nodes_data(char *authkey, bool debug);
-static bool inform_node_is_alive(LifeCheckNode * node, char *message, char* authkey);
-static bool inform_node_is_dead(LifeCheckNode * node, char *message, char* authkey);
+static bool inform_node_is_alive(LifeCheckNode *node, char *message, char *authkey);
+static bool inform_node_is_dead(LifeCheckNode *node, char *message, char *authkey);
static void load_watchdog_nodes_from_json(char *json_data, int len);
static char *get_node_status_change_json(int nodeID, int nodeStatus, char *message, char *authKey);
-static void print_node_info(LifeCheckNode* lifeCheckNode, bool verbose);
-static LifeCheckNode* get_node_by_options(char *node_name, char* node_host, int node_port, int node_id);
+static void print_node_info(LifeCheckNode *lifeCheckNode, bool verbose);
+static LifeCheckNode *get_node_by_options(char *node_name, char *node_host, int node_port, int node_id);
int
main(int argc, char **argv)
{
- LifeCheckNode* lifeCheckNode;
- char* conf_file_path = NULL;
- char* node_host = NULL;
- char* node_name = NULL;
- char* wd_authkey = NULL;
- char* socket_dir = NULL;
- char* message = NULL;
+ LifeCheckNode *lifeCheckNode;
+ char *conf_file_path = NULL;
+ char *node_host = NULL;
+ char *node_name = NULL;
+ char *wd_authkey = NULL;
+ char *socket_dir = NULL;
+ char *message = NULL;
int node_wd_port = -1;
int node_id = -1;
int port = -1;
@@ -98,7 +98,7 @@ main(int argc, char **argv)
bool all_nodes = false;
bool status_ALIVE = false;
bool status_DEAD = false;
-
+
/* here we put all the allowed long options for all utilities */
static struct option long_options[] = {
{"help", no_argument, NULL, '?'},
@@ -150,15 +150,15 @@ main(int argc, char **argv)
all_nodes = true;
break;
- case 'i': /* Info Request */
- {
- info_req = true;
- if (inform_status)
+ case 'i': /* Info Request */
{
- fprintf(stderr, "ERROR: Invalid option, 'info' and 'inform' are mutually exclusive options\n");
- exit(EXIT_FAILURE);
+ info_req = true;
+ if (inform_status)
+ {
+ fprintf(stderr, "ERROR: Invalid option, 'info' and 'inform' are mutually exclusive options\n");
+ exit(EXIT_FAILURE);
+ }
}
- }
break;
case 'I':
@@ -173,13 +173,13 @@ main(int argc, char **argv)
fprintf(stderr, "ERROR: Invalid option, 'info' and 'inform' are mutually exclusive options\n");
exit(EXIT_FAILURE);
}
- if (strcasecmp("DEAD",optarg) == 0)
+ if (strcasecmp("DEAD", optarg) == 0)
status_DEAD = true;
- else if (strcasecmp("ALIVE",optarg) == 0)
+ else if (strcasecmp("ALIVE", optarg) == 0)
status_ALIVE = true;
else
{
- fprintf(stderr, "ERROR: Invalid node status \"%s\", Allowed options are DEAD or ALIVE''\n",optarg);
+ fprintf(stderr, "ERROR: Invalid node status \"%s\", Allowed options are DEAD or ALIVE''\n", optarg);
exit(EXIT_FAILURE);
}
break;
@@ -192,7 +192,7 @@ main(int argc, char **argv)
}
if (validate_number(optarg) == false)
{
- fprintf(stderr, "ERROR: Invalid value %s, node-id can only contain numeric values\n",optarg);
+ fprintf(stderr, "ERROR: Invalid value %s, node-id can only contain numeric values\n", optarg);
exit(EXIT_FAILURE);
}
node_id = atoi(optarg);
@@ -224,7 +224,7 @@ main(int argc, char **argv)
}
if (validate_number(optarg) == false)
{
- fprintf(stderr, "ERROR: Invalid value %s, node-port can only contain numeric values\n",optarg);
+ fprintf(stderr, "ERROR: Invalid value %s, node-port can only contain numeric values\n", optarg);
exit(EXIT_FAILURE);
}
node_wd_port = atoi(optarg);
@@ -278,7 +278,7 @@ main(int argc, char **argv)
case 'p':
if (validate_number(optarg) == false)
{
- fprintf(stderr, "ERROR: Invalid value %s, port can only contain numeric values\n",optarg);
+ fprintf(stderr, "ERROR: Invalid value %s, port can only contain numeric values\n", optarg);
exit(EXIT_FAILURE);
}
port = atoi(optarg);
@@ -291,7 +291,7 @@ main(int argc, char **argv)
case '?':
default:
-
+
/*
* getopt_long should already have emitted a complaint
*/
@@ -334,11 +334,11 @@ main(int argc, char **argv)
}
if (debug)
- printf("DEBUG: From config %s:%d\n",pool_config->wd_ipc_socket_dir,
- pool_config->wd_nodes.wd_node_info[pool_config->pgpool_node_id].wd_port);
+ printf("DEBUG: From config %s:%d\n", pool_config->wd_ipc_socket_dir,
+ pool_config->wd_nodes.wd_node_info[pool_config->pgpool_node_id].wd_port);
pfree(conf_file_path);
- /* only use values from pg_config that are not provided explicitly*/
+ /* only use values from pg_config that are not provided explicitly */
if (wd_authkey == NULL)
wd_authkey = pstrdup(pool_config->wd_authkey);
if (port < 0)
@@ -353,29 +353,31 @@ main(int argc, char **argv)
if (socket_dir == NULL)
socket_dir = pstrdup("/tmp");
- if(debug)
+ if (debug)
{
- fprintf(stderr, "DEBUG: setting IPC address to %s:%d\n",socket_dir,port);
+ fprintf(stderr, "DEBUG: setting IPC address to %s:%d\n", socket_dir, port);
}
- wd_set_ipc_address(socket_dir,port);
+ wd_set_ipc_address(socket_dir, port);
wd_ipc_conn_initialize();
- if(debug)
+ if (debug)
{
- char c_node_id[10],c_wd_port[10];
- snprintf(c_node_id, sizeof(c_node_id), "%d",node_id);
- snprintf(c_wd_port, sizeof(c_wd_port), "%d",node_wd_port);
+ char c_node_id[10],
+ c_wd_port[10];
+
+ snprintf(c_node_id, sizeof(c_node_id), "%d", node_id);
+ snprintf(c_wd_port, sizeof(c_wd_port), "%d", node_wd_port);
fprintf(stderr, "DEBUG: OPERATION:%s ALL NODE CRITERIA = %s\n",
- info_req?"\"INFO REQUEST\"":"\"INFORM NODE STATUS\"",
- all_nodes?"TRUE":"FALSE"
- );
+ info_req ? "\"INFO REQUEST\"" : "\"INFORM NODE STATUS\"",
+ all_nodes ? "TRUE" : "FALSE"
+ );
fprintf(stderr, "DEBUG: Search criteria:[ID=%s AND Name=%s AND Host=%s AND WDPort=%s]\n",
- (node_id < 0)?"ANY":c_node_id,
- node_name?node_name:"ANY",
- node_host?node_host:"ANY",
- (node_wd_port < 0)?"ANY":c_wd_port);
+ (node_id < 0) ? "ANY" : c_node_id,
+ node_name ? node_name : "ANY",
+ node_host ? node_host : "ANY",
+ (node_wd_port < 0) ? "ANY" : c_wd_port);
}
fetch_watchdog_nodes_data(wd_authkey, debug);
@@ -398,15 +400,17 @@ main(int argc, char **argv)
lifeCheckNode = get_node_by_options(node_name, node_host, node_wd_port, node_id);
if (!lifeCheckNode)
{
- char c_node_id[10],c_wd_port[10];
+ char c_node_id[10],
+ c_wd_port[10];
+
fprintf(stderr, "ERROR: unable to find the node with the requested criteria\n");
- snprintf(c_node_id, sizeof(c_node_id), "%d",node_id);
- snprintf(c_wd_port, sizeof(c_wd_port), "%d",node_wd_port);
+ snprintf(c_node_id, sizeof(c_node_id), "%d", node_id);
+ snprintf(c_wd_port, sizeof(c_wd_port), "%d", node_wd_port);
fprintf(stderr, "Criteria:[ID=%s AND Name=%s AND Host=%s AND WDPort=%s]\n",
- (node_id < 0)?"ANY":c_node_id,
- node_name?node_name:"ANY",
- node_host?node_host:"ANY",
- (node_wd_port < 0)?"ANY":c_wd_port);
+ (node_id < 0) ? "ANY" : c_node_id,
+ node_name ? node_name : "ANY",
+ node_host ? node_host : "ANY",
+ (node_wd_port < 0) ? "ANY" : c_wd_port);
exit(EXIT_FAILURE);
}
@@ -414,27 +418,27 @@ main(int argc, char **argv)
{
print_lifecheck_cluster(false, verbose);
print_node_info(lifeCheckNode, verbose);
- exit (0);
+ exit(0);
}
if (status_DEAD)
{
if (inform_node_is_dead(lifeCheckNode, message, wd_authkey))
{
- fprintf(stderr,"INFO: informed watchdog about node id %d is dead\n",node_id);
+ fprintf(stderr, "INFO: informed watchdog about node id %d is dead\n", node_id);
exit(0);
}
- fprintf(stderr,"ERROR: failed to inform watchdog about node id %d is dead\n",node_id);
+ fprintf(stderr, "ERROR: failed to inform watchdog about node id %d is dead\n", node_id);
exit(EXIT_FAILURE);
}
else if (status_ALIVE)
{
if (inform_node_is_alive(lifeCheckNode, message, wd_authkey))
{
- fprintf(stderr,"INFO: informed watchdog about node id %d is alive\n",node_id);
+ fprintf(stderr, "INFO: informed watchdog about node id %d is alive\n", node_id);
exit(0);
}
- fprintf(stderr,"ERROR: failed to inform watchdog about node id %d is alive\n",node_id);
+ fprintf(stderr, "ERROR: failed to inform watchdog about node id %d is alive\n", node_id);
exit(EXIT_FAILURE);
}
@@ -449,7 +453,7 @@ get_progname(const char *argv0)
static bool
-validate_number(char* ptr)
+validate_number(char *ptr)
{
while (*ptr)
{
@@ -461,7 +465,7 @@ validate_number(char* ptr)
}
static bool
-inform_node_status(LifeCheckNode * node, char *message, char* authkey)
+inform_node_status(LifeCheckNode *node, char *message, char *authkey)
{
int node_status,
x;
@@ -482,8 +486,8 @@ inform_node_status(LifeCheckNode * node, char *message, char* authkey)
else
return false;
- fprintf(stderr,"INFO: informing the node status change to watchdog");
- fprintf(stderr,"INFO: node id :%d status = \"%s\" message:\"%s\"", node->ID, new_status, message);
+ fprintf(stderr, "INFO: informing the node status change to watchdog");
+ fprintf(stderr, "INFO: node id :%d status = \"%s\" message:\"%s\"", node->ID, new_status, message);
json_data = get_node_status_change_json(node->ID, node_status, message, authkey);
if (json_data == NULL)
@@ -517,8 +521,8 @@ fetch_watchdog_nodes_data(char *authkey, bool debug)
return false;
}
- if(debug)
- printf("DEBUG:************\n%s\n************\n",json_data);
+ if (debug)
+ printf("DEBUG:************\n%s\n************\n", json_data);
load_watchdog_nodes_from_json(json_data, strlen(json_data));
pfree(json_data);
@@ -599,23 +603,24 @@ load_watchdog_nodes_from_json(char *json_data, int len)
static bool
-inform_node_is_dead(LifeCheckNode * node, char *message, char* authkey)
+inform_node_is_dead(LifeCheckNode *node, char *message, char *authkey)
{
node->nodeState = NODE_DEAD;
return inform_node_status(node, message, authkey);
}
static bool
-inform_node_is_alive(LifeCheckNode * node, char *message, char* authkey)
+inform_node_is_alive(LifeCheckNode *node, char *message, char *authkey)
{
node->nodeState = NODE_ALIVE;
return inform_node_status(node, message, authkey);
}
-static LifeCheckNode*
-get_node_by_options(char *node_name, char* node_host, int node_port, int node_id)
+static LifeCheckNode *
+get_node_by_options(char *node_name, char *node_host, int node_port, int node_id)
{
- int i;
+ int i;
+
if (!gslifeCheckCluster)
return NULL;
for (i = 0; i < gslifeCheckCluster->nodeCount; i++)
@@ -624,9 +629,9 @@ get_node_by_options(char *node_name, char* node_host, int node_port, int node_id
continue;
if (node_port >= 0 && node_port != gslifeCheckCluster->lifeCheckNodes[i].wdPort)
continue;
- if (node_name && strcasecmp(gslifeCheckCluster->lifeCheckNodes[i].nodeName, node_name) != 0 )
+ if (node_name && strcasecmp(gslifeCheckCluster->lifeCheckNodes[i].nodeName, node_name) != 0)
continue;
- if (node_host && strcasecmp(gslifeCheckCluster->lifeCheckNodes[i].hostName, node_host) != 0 )
+ if (node_host && strcasecmp(gslifeCheckCluster->lifeCheckNodes[i].hostName, node_host) != 0)
continue;
return &gslifeCheckCluster->lifeCheckNodes[i];
@@ -639,15 +644,16 @@ static void
print_lifecheck_cluster(bool include_nodes, bool verbose)
{
int i;
+
if (!gslifeCheckCluster)
{
- fprintf(stdout,"ERROR: node information not found\n");
+ fprintf(stdout, "ERROR: node information not found\n");
return;
}
- fprintf(stdout,"Total Watchdog nodes configured for lifecheck: %d\n", gslifeCheckCluster->nodeCount);
+ fprintf(stdout, "Total Watchdog nodes configured for lifecheck: %d\n", gslifeCheckCluster->nodeCount);
if (verbose)
- fprintf(stdout,"*****************\n");
- if(!include_nodes)
+ fprintf(stdout, "*****************\n");
+ if (!include_nodes)
return;
for (i = 0; i < gslifeCheckCluster->nodeCount; i++)
@@ -657,28 +663,28 @@ print_lifecheck_cluster(bool include_nodes, bool verbose)
static void
-print_node_info(LifeCheckNode* lifeCheckNode, bool verbose)
+print_node_info(LifeCheckNode *lifeCheckNode, bool verbose)
{
if (verbose)
{
- fprintf(stdout,"Node ID: %d\n",lifeCheckNode->ID);
- fprintf(stdout,"Node Status code: %d\n",lifeCheckNode->wdState);
- fprintf(stdout,"Node Status: %s\n",lifeCheckNode->stateName);
- fprintf(stdout,"Node Name: %s\n",lifeCheckNode->nodeName);
- fprintf(stdout,"Node Host: %s\n",lifeCheckNode->hostName);
- fprintf(stdout,"Node WD Port: %d\n",lifeCheckNode->wdPort);
- fprintf(stdout,"Node Pgpool Port: %d\n\n",lifeCheckNode->pgpoolPort);
+ fprintf(stdout, "Node ID: %d\n", lifeCheckNode->ID);
+ fprintf(stdout, "Node Status code: %d\n", lifeCheckNode->wdState);
+ fprintf(stdout, "Node Status: %s\n", lifeCheckNode->stateName);
+ fprintf(stdout, "Node Name: %s\n", lifeCheckNode->nodeName);
+ fprintf(stdout, "Node Host: %s\n", lifeCheckNode->hostName);
+ fprintf(stdout, "Node WD Port: %d\n", lifeCheckNode->wdPort);
+ fprintf(stdout, "Node Pgpool Port: %d\n\n", lifeCheckNode->pgpoolPort);
}
else
{
- fprintf(stdout,"%d %d \"%s\"", lifeCheckNode->ID,
+ fprintf(stdout, "%d %d \"%s\"", lifeCheckNode->ID,
lifeCheckNode->nodeState,
lifeCheckNode->stateName),
- fprintf(stdout,"\"%s\"",lifeCheckNode->nodeName),
- fprintf(stdout,"\"%s\" %d %d\n",
- lifeCheckNode->hostName,
- lifeCheckNode->wdPort,
- lifeCheckNode->pgpoolPort);
+ fprintf(stdout, "\"%s\"", lifeCheckNode->nodeName),
+ fprintf(stdout, "\"%s\" %d %d\n",
+ lifeCheckNode->hostName,
+ lifeCheckNode->wdPort,
+ lifeCheckNode->pgpoolPort);
}
}
@@ -687,10 +693,10 @@ get_node_status_change_json(int nodeID, int nodeStatus, char *message, char *aut
{
char *json_str;
JsonNode *jNode = jw_create_with_object(true);
-
+
if (authKey != NULL && strlen(authKey) > 0)
jw_put_string(jNode, WD_IPC_AUTH_KEY, authKey); /* put the auth key */
-
+
/* add the node ID */
jw_put_int(jNode, "NodeID", nodeID);
/* add the node status */
@@ -698,7 +704,7 @@ get_node_status_change_json(int nodeID, int nodeStatus, char *message, char *aut
/* add the node message if any */
if (message)
jw_put_string(jNode, "Message", message);
-
+
jw_finish_document(jNode);
json_str = pstrdup(jw_get_json_string(jNode));
jw_destroy(jNode);
@@ -708,12 +714,12 @@ get_node_status_change_json(int nodeID, int nodeStatus, char *message, char *aut
static void
usage(void)
{
-
+
fprintf(stderr, "\nWatchdog CLI for ");
fprintf(stderr, "%s version %s (%s)\n", PACKAGE, VERSION, PGPOOLVERSION);
fprintf(stderr, "\nUsage:\n");
- fprintf(stderr, " %s [ operation] [ options] [node search criteria]\n",progname);
+ fprintf(stderr, " %s [ operation] [ options] [node search criteria]\n", progname);
fprintf(stderr, "\n Operations:\n");
fprintf(stderr, " -i, --info Get the node status for nodes based on node search criteria\n");
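
get_node_by_options(), cleaned up above, picks a watchdog node by intersecting only the criteria the caller actually supplied; a negative id or port and a NULL name or host mean "match any". The rule in isolation, with LifeCheckNode replaced by a small stand-in struct:

    #include <stdio.h>
    #include <strings.h>

    typedef struct
    {
        int         id;
        int         wd_port;
        const char *name;
        const char *host;
    } node_t;

    /* Return 1 if every supplied criterion matches; unset criteria match any. */
    static int
    node_matches(const node_t *n, int id, int port, const char *name, const char *host)
    {
        if (id >= 0 && id != n->id)
            return 0;
        if (port >= 0 && port != n->wd_port)
            return 0;
        if (name && strcasecmp(n->name, name) != 0)
            return 0;
        if (host && strcasecmp(n->host, host) != 0)
            return 0;
        return 1;
    }

    int
    main(void)
    {
        node_t      n = {1, 9000, "server1", "db1.example.net"};

        printf("%d\n", node_matches(&n, -1, -1, NULL, "db1.example.net"));  /* 1 */
        return 0;
    }
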
diff --git a/src/utils/error/elog.c b/src/utils/error/elog.c
index ff978e7e6..d729d500c 100644
--- a/src/utils/error/elog.c
+++ b/src/utils/error/elog.c
@@ -224,6 +224,7 @@ is_log_level_output(int elevel, int log_min_level)
return false;
}
+
/*
* should_output_to_server --- should message of given elevel go to the log?
*/
@@ -249,7 +250,7 @@ should_output_to_client(int elevel)
* during authentication.
*/
return (elevel >= pool_config->client_min_messages ||
- elevel == INFO || elevel == FRONTEND_ONLY_ERROR);
+ elevel == INFO || elevel == FRONTEND_ONLY_ERROR);
}
return false;
}
@@ -274,6 +275,7 @@ message_level_is_interesting(int elevel)
return true;
return false;
}
+
/*
* in_error_recursion_trouble --- are we at risk of infinite error recursion?
*
@@ -1739,7 +1741,7 @@ write_eventlog(int level, const char *line, int len)
NULL,
1,
0,
- (LPCWSTR *) &utf16,
+ (LPCWSTR *) & utf16,
NULL);
/* XXX Try ReportEventA() when ReportEventW() fails? */
@@ -2143,7 +2145,8 @@ log_line_prefix(StringInfo buf, const char *line_prefix, ErrorData *edata)
{
case 'a': /* application name */
{
- char *appname;
+ char *appname;
+
appname = get_application_name();
if (appname == NULL || *appname == '\0')
@@ -2215,8 +2218,8 @@ log_line_prefix(StringInfo buf, const char *line_prefix, ErrorData *edata)
case 'm':
{
struct timeval timeval;
- time_t seconds;
- struct tm *now;
+ time_t seconds;
+ struct tm *now;
char msbuf[13];
gettimeofday(&timeval, NULL);
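
The declarations realigned in the %m case of log_line_prefix() (struct timeval, time_t seconds, struct tm *now, msbuf) exist to build a millisecond-resolution timestamp for the log line prefix. A standalone sketch of that job; the exact output format is illustrative, not the pgpool prefix format:

    #include <stdio.h>
    #include <sys/time.h>
    #include <time.h>

    int
    main(void)
    {
        struct timeval tv;
        time_t      seconds;
        struct tm  *now;
        char        stamp[32];
        char        msbuf[8];

        gettimeofday(&tv, NULL);
        seconds = tv.tv_sec;
        now = localtime(&seconds);
        strftime(stamp, sizeof(stamp), "%Y-%m-%d %H:%M:%S", now);
        snprintf(msbuf, sizeof(msbuf), ".%03d", (int) (tv.tv_usec / 1000));
        printf("%s%s\n", stamp, msbuf);
        return 0;
    }
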
diff --git a/src/utils/json.c b/src/utils/json.c
index 319c8fdbf..8d156439c 100644
--- a/src/utils/json.c
+++ b/src/utils/json.c
@@ -120,7 +120,7 @@ typedef struct
unsigned int cur_line,
cur_col;
-} json_state;
+} json_state;
static void *
default_alloc(size_t size, int zero, void *user_data)
@@ -135,7 +135,7 @@ default_free(void *ptr, void *user_data)
}
static void *
-json_alloc(json_state * state, unsigned long size, int zero)
+json_alloc(json_state *state, unsigned long size, int zero)
{
if ((state->ulong_max - state->used_memory) < size)
return 0;
@@ -150,8 +150,8 @@ json_alloc(json_state * state, unsigned long size, int zero)
}
static int
-new_value(json_state * state,
- json_value * *top, json_value * *root, json_value * *alloc,
+new_value(json_state *state,
+ json_value **top, json_value **root, json_value **alloc,
json_type type)
{
json_value *value;
@@ -172,7 +172,7 @@ new_value(json_state * state,
if (value->u.array.length == 0)
break;
- if (!(value->u.array.values = (json_value * *) json_alloc
+ if (!(value->u.array.values = (json_value **) json_alloc
(state, value->u.array.length * sizeof(json_value *), 0)))
{
return 0;
@@ -270,7 +270,7 @@ static const long
flag_block_comment = 1 << 14;
json_value *
-json_parse_ex(json_settings * settings,
+json_parse_ex(json_settings *settings,
const json_char * json,
size_t length,
char *error_buf)
@@ -1065,7 +1065,7 @@ json_parse(const json_char * json, size_t length)
}
void
-json_value_free_ex(json_settings * settings, json_value * value)
+json_value_free_ex(json_settings *settings, json_value *value)
{
json_value *cur_value;
@@ -1116,7 +1116,7 @@ json_value_free_ex(json_settings * settings, json_value * value)
}
void
-json_value_free(json_value * value)
+json_value_free(json_value *value)
{
json_settings settings = {0};
@@ -1131,7 +1131,7 @@ json_value_free(json_value * value)
* search node with key from json object
*/
json_value *
-json_get_value_for_key(json_value * source, const char *key)
+json_get_value_for_key(json_value *source, const char *key)
{
if (source->type == json_object)
{
@@ -1157,7 +1157,7 @@ json_get_value_for_key(json_value * source, const char *key)
*/
int
-json_get_bool_value_for_key(json_value * source, const char *key, bool *value)
+json_get_bool_value_for_key(json_value *source, const char *key, bool *value)
{
json_value *jNode;
@@ -1177,7 +1177,7 @@ json_get_bool_value_for_key(json_value * source, const char *key, bool *value)
int
-json_get_int_value_for_key(json_value * source, const char *key, int *value)
+json_get_int_value_for_key(json_value *source, const char *key, int *value)
{
json_value *jNode;
@@ -1191,7 +1191,7 @@ json_get_int_value_for_key(json_value * source, const char *key, int *value)
}
int
-json_get_long_value_for_key(json_value * source, const char *key, long *value)
+json_get_long_value_for_key(json_value *source, const char *key, long *value)
{
json_value *jNode;
@@ -1212,7 +1212,7 @@ json_get_long_value_for_key(json_value * source, const char *key, long *value)
*/
char *
-json_get_string_value_for_key(json_value * source, const char *key)
+json_get_string_value_for_key(json_value *source, const char *key)
{
json_value *jNode;
diff --git a/src/utils/json_writer.c b/src/utils/json_writer.c
index 7737390ce..cb8d50040 100644
--- a/src/utils/json_writer.c
+++ b/src/utils/json_writer.c
@@ -26,10 +26,10 @@
#include "utils/palloc.h"
#include "utils/json_writer.h"
-static void jw_put_string_escape(JsonNode * jNode, char *string);
-static inline int jw_get_current_element_count(JsonNode * jNode);
-static inline void jw_inc_current_element_count(JsonNode * jNode);
-static inline JWElementType jw_get_current_element_type(JsonNode * jNode);
+static void jw_put_string_escape(JsonNode *jNode, char *string);
+static inline int jw_get_current_element_count(JsonNode *jNode);
+static inline void jw_inc_current_element_count(JsonNode *jNode);
+static inline JWElementType jw_get_current_element_type(JsonNode *jNode);
JsonNode *
@@ -57,7 +57,7 @@ jw_create_with_object(bool pretty_output)
}
bool
-jw_put_string(JsonNode * jNode, char *key, char *value)
+jw_put_string(JsonNode *jNode, char *key, char *value)
{
if (key == NULL || value == NULL)
return false;
@@ -76,9 +76,9 @@ jw_put_string(JsonNode * jNode, char *key, char *value)
}
static void
-jw_put_string_escape(JsonNode * jNode, char *string)
+jw_put_string_escape(JsonNode *jNode, char *string)
{
- int i;
+ int i;
appendStringInfoChar(jNode->buf, '"');
for (i = 0; string[i] != '\0'; i++)
@@ -116,13 +116,13 @@ jw_put_string_escape(JsonNode * jNode, char *string)
/* for compatibility reasons we pack bool in int*/
bool
-jw_put_bool(JsonNode * jNode, char *key, bool value)
+jw_put_bool(JsonNode *jNode, char *key, bool value)
{
return jw_put_int(jNode, key, value ? 1 : 0);
}
bool
-jw_put_int(JsonNode * jNode, char *key, int value)
+jw_put_int(JsonNode *jNode, char *key, int value)
{
if (key == NULL)
return false;
@@ -139,7 +139,7 @@ jw_put_int(JsonNode * jNode, char *key, int value)
}
bool
-jw_put_long(JsonNode * jNode, char *key, long value)
+jw_put_long(JsonNode *jNode, char *key, long value)
{
if (key == NULL)
return false;
@@ -156,7 +156,7 @@ jw_put_long(JsonNode * jNode, char *key, long value)
}
bool
-jw_put_null(JsonNode * jNode, char *key)
+jw_put_null(JsonNode *jNode, char *key)
{
if (key == NULL)
return false;
@@ -173,7 +173,7 @@ jw_put_null(JsonNode * jNode, char *key)
}
bool
-jw_put_string_value(JsonNode * jNode, char *value)
+jw_put_string_value(JsonNode *jNode, char *value)
{
if (value == NULL)
return false;
@@ -190,13 +190,13 @@ jw_put_string_value(JsonNode * jNode, char *value)
}
bool
-jw_put_bool_value(JsonNode * jNode, bool value)
+jw_put_bool_value(JsonNode *jNode, bool value)
{
return jw_put_int_value(jNode, value ? 1 : 0);
}
bool
-jw_put_int_value(JsonNode * jNode, int value)
+jw_put_int_value(JsonNode *jNode, int value)
{
if (jw_get_current_element_count(jNode) < 0)
return false;
@@ -211,7 +211,7 @@ jw_put_int_value(JsonNode * jNode, int value)
}
bool
-jw_put_long_value(JsonNode * jNode, long value)
+jw_put_long_value(JsonNode *jNode, long value)
{
if (jw_get_current_element_count(jNode) < 0)
return false;
@@ -226,7 +226,7 @@ jw_put_long_value(JsonNode * jNode, long value)
}
bool
-jw_put_null_value(JsonNode * jNode)
+jw_put_null_value(JsonNode *jNode)
{
if (jw_get_current_element_count(jNode) < 0)
return false;
@@ -241,7 +241,7 @@ jw_put_null_value(JsonNode * jNode)
}
bool
-jw_start_element(JsonNode * jNode, JWElementType element, char *key)
+jw_start_element(JsonNode *jNode, JWElementType element, char *key)
{
char ch;
@@ -275,7 +275,7 @@ jw_start_element(JsonNode * jNode, JWElementType element, char *key)
}
bool
-jw_start_array(JsonNode * jNode, char *key)
+jw_start_array(JsonNode *jNode, char *key)
{
if (jw_get_current_element_type(jNode) == JWARRAY)
return jw_start_element(jNode, JWARRAY, NULL);
@@ -283,7 +283,7 @@ jw_start_array(JsonNode * jNode, char *key)
}
bool
-jw_start_object(JsonNode * jNode, char *key)
+jw_start_object(JsonNode *jNode, char *key)
{
if (jw_get_current_element_type(jNode) == JWARRAY)
return jw_start_element(jNode, JWOBJECT, NULL);
@@ -291,7 +291,7 @@ jw_start_object(JsonNode * jNode, char *key)
}
bool
-jw_end_element(JsonNode * jNode)
+jw_end_element(JsonNode *jNode)
{
char ch;
JWElementType element;
@@ -311,7 +311,7 @@ jw_end_element(JsonNode * jNode)
}
bool
-jw_finish_document(JsonNode * jNode)
+jw_finish_document(JsonNode *jNode)
{
while (jNode->stack_ptr > 0)
{
@@ -322,19 +322,19 @@ jw_finish_document(JsonNode * jNode)
}
char *
-jw_get_json_string(JsonNode * jNode)
+jw_get_json_string(JsonNode *jNode)
{
return jNode->buf->data;
}
int
-jw_get_json_length(JsonNode * jNode)
+jw_get_json_length(JsonNode *jNode)
{
return jNode->buf->len;
}
void
-jw_destroy(JsonNode * jNode)
+jw_destroy(JsonNode *jNode)
{
pfree(jNode->buf->data);
pfree(jNode->buf);
@@ -342,7 +342,7 @@ jw_destroy(JsonNode * jNode)
}
static inline int
-jw_get_current_element_count(JsonNode * jNode)
+jw_get_current_element_count(JsonNode *jNode)
{
if (jNode->stack_ptr <= 0)
return -1;
@@ -350,14 +350,15 @@ jw_get_current_element_count(JsonNode * jNode)
}
static inline void
-jw_inc_current_element_count(JsonNode * jNode)
+jw_inc_current_element_count(JsonNode *jNode)
{
if (jNode->stack_ptr <= 0)
return;
jNode->stack[jNode->stack_ptr - 1].elementCount++;
}
-static inline JWElementType jw_get_current_element_type(JsonNode * jNode)
+static inline JWElementType
+jw_get_current_element_type(JsonNode *jNode)
{
if (jNode->stack_ptr <= 0)
return -1;
diff --git a/src/utils/mmgr/aset.c b/src/utils/mmgr/aset.c
index 7dbe589a6..d0422a15c 100644
--- a/src/utils/mmgr/aset.c
+++ b/src/utils/mmgr/aset.c
@@ -210,7 +210,7 @@ static void AllocSetDelete(MemoryContext context);
static Size AllocSetGetChunkSpace(MemoryContext context, void *pointer);
static bool AllocSetIsEmpty(MemoryContext context);
static void AllocSetStats(MemoryContext context, int level, bool print,
- MemoryContextCounters *totals);
+ MemoryContextCounters *totals);
#ifdef MEMORY_CONTEXT_CHECKING
static void AllocSetCheck(MemoryContext context);
diff --git a/src/utils/mmgr/mcxt.c b/src/utils/mmgr/mcxt.c
index fcfea985f..0fdc44db2 100644
--- a/src/utils/mmgr/mcxt.c
+++ b/src/utils/mmgr/mcxt.c
@@ -55,8 +55,8 @@ MemoryContext QueryContext = NULL;
static void MemoryContextCallResetCallbacks(MemoryContext context);
static void MemoryContextStatsInternal(MemoryContext context, int level,
- bool print, int max_children,
- MemoryContextCounters *totals);
+ bool print, int max_children,
+ MemoryContextCounters *totals);
/*
* You should not do memory allocations within a critical section, because
diff --git a/src/utils/pcp/pcp_stream.c b/src/utils/pcp/pcp_stream.c
index e639bc361..d25f74f1d 100644
--- a/src/utils/pcp/pcp_stream.c
+++ b/src/utils/pcp/pcp_stream.c
@@ -39,9 +39,9 @@
#include "utils/fe_ports.h"
#endif
-static int consume_pending_data(PCP_CONNECTION * pc, void *data, int len);
-static int save_pending_data(PCP_CONNECTION * pc, void *data, int len);
-static int pcp_check_fd(PCP_CONNECTION * pc);
+static int consume_pending_data(PCP_CONNECTION *pc, void *data, int len);
+static int save_pending_data(PCP_CONNECTION *pc, void *data, int len);
+static int pcp_check_fd(PCP_CONNECTION *pc);
/* --------------------------------
* pcp_open - allocate read & write buffers for PCP_CONNECTION
@@ -83,7 +83,7 @@ pcp_open(int fd)
* --------------------------------
*/
void
-pcp_close(PCP_CONNECTION * pc)
+pcp_close(PCP_CONNECTION *pc)
{
close(pc->fd);
pfree(pc->wbuf);
@@ -98,7 +98,7 @@ pcp_close(PCP_CONNECTION * pc)
* --------------------------------
*/
int
-pcp_read(PCP_CONNECTION * pc, void *buf, int len)
+pcp_read(PCP_CONNECTION *pc, void *buf, int len)
{
static char readbuf[READBUFSZ];
@@ -151,7 +151,7 @@ pcp_read(PCP_CONNECTION * pc, void *buf, int len)
* --------------------------------
*/
int
-pcp_write(PCP_CONNECTION * pc, void *buf, int len)
+pcp_write(PCP_CONNECTION *pc, void *buf, int len)
{
int reqlen;
@@ -195,7 +195,7 @@ pcp_write(PCP_CONNECTION * pc, void *buf, int len)
* --------------------------------
*/
int
-pcp_flush(PCP_CONNECTION * pc)
+pcp_flush(PCP_CONNECTION *pc)
{
int sts;
int wlen;
@@ -256,7 +256,7 @@ pcp_flush(PCP_CONNECTION * pc)
* --------------------------------
*/
static int
-consume_pending_data(PCP_CONNECTION * pc, void *data, int len)
+consume_pending_data(PCP_CONNECTION *pc, void *data, int len)
{
int consume_size;
@@ -282,7 +282,7 @@ consume_pending_data(PCP_CONNECTION * pc, void *data, int len)
* --------------------------------
*/
static int
-save_pending_data(PCP_CONNECTION * pc, void *data, int len)
+save_pending_data(PCP_CONNECTION *pc, void *data, int len)
{
int reqlen;
size_t realloc_size;
@@ -326,7 +326,7 @@ save_pending_data(PCP_CONNECTION * pc, void *data, int len)
* --------------------------------
*/
static int
-pcp_check_fd(PCP_CONNECTION * pc)
+pcp_check_fd(PCP_CONNECTION *pc)
{
fd_set readmask;
fd_set exceptmask;
diff --git a/src/utils/pool_health_check_stats.c b/src/utils/pool_health_check_stats.c
index bb0ca97e8..3a2b768bd 100644
--- a/src/utils/pool_health_check_stats.c
+++ b/src/utils/pool_health_check_stats.c
@@ -29,9 +29,10 @@
* to be shared by both PCP server and clients.
* Number of struct members will be stored in *n.
*/
-int * pool_health_check_stats_offsets(int *n)
+int *
+pool_health_check_stats_offsets(int *n)
{
- static int offsettbl[] = {
+ static int offsettbl[] = {
offsetof(POOL_HEALTH_CHECK_STATS, node_id),
offsetof(POOL_HEALTH_CHECK_STATS, hostname),
offsetof(POOL_HEALTH_CHECK_STATS, port),
@@ -54,7 +55,7 @@ int * pool_health_check_stats_offsets(int *n)
offsetof(POOL_HEALTH_CHECK_STATS, last_failed_health_check),
};
- *n = sizeof(offsettbl)/sizeof(int);
+ *n = sizeof(offsettbl) / sizeof(int);
return offsettbl;
}
@@ -64,10 +65,11 @@ int * pool_health_check_stats_offsets(int *n)
* shared by both PCP server and clients. Number of struct members will be
* stored in *n.
*/
-int * pool_report_pools_offsets(int *n)
+int *
+pool_report_pools_offsets(int *n)
{
- static int offsettbl[] = {
+ static int offsettbl[] = {
offsetof(POOL_REPORT_POOLS, pool_pid),
offsetof(POOL_REPORT_POOLS, process_start_time),
offsetof(POOL_REPORT_POOLS, client_connection_count),
@@ -91,6 +93,6 @@ int * pool_report_pools_offsets(int *n)
offsetof(POOL_REPORT_POOLS, statement)
};
- *n = sizeof(offsettbl)/sizeof(int);
+ *n = sizeof(offsettbl) / sizeof(int);
return offsettbl;
}
diff --git a/src/utils/pool_ip.c b/src/utils/pool_ip.c
index 3f431597d..11d2671a6 100644
--- a/src/utils/pool_ip.c
+++ b/src/utils/pool_ip.c
@@ -52,22 +52,22 @@
#include "utils/pool_ip.h"
#include "pool_config.h"
#include "utils/elog.h"
-static int rangeSockAddrAF_INET(const struct sockaddr_in *addr,
- const struct sockaddr_in *netaddr,
- const struct sockaddr_in *netmask);
+static int rangeSockAddrAF_INET(const struct sockaddr_in *addr,
+ const struct sockaddr_in *netaddr,
+ const struct sockaddr_in *netmask);
-static int rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
- const struct sockaddr_in6 *netaddr,
- const struct sockaddr_in6 *netmask);
+static int rangeSockAddrAF_INET6(const struct sockaddr_in6 *addr,
+ const struct sockaddr_in6 *netaddr,
+ const struct sockaddr_in6 *netmask);
-static int getaddrinfo_unix(const char *path,
- const struct addrinfo *hintsp,
- struct addrinfo **result);
+static int getaddrinfo_unix(const char *path,
+ const struct addrinfo *hintsp,
+ struct addrinfo **result);
-static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
- char *node, int nodelen,
- char *service, int servicelen,
- int flags);
+static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
+ char *node, int nodelen,
+ char *service, int servicelen,
+ int flags);
/*
* pool_getnameinfo_all - get name info for Unix, IPv4 and IPv6 sockets
@@ -424,12 +424,12 @@ SockAddr_cidr_mask(struct sockaddr_storage *mask, char *numbits, int family)
return -1;
}
- switch (family)
+ switch (family)
{
case AF_INET:
{
struct sockaddr_in mask4;
- long maskl;
+ long maskl;
if (bits < 0 || bits > 32)
return -1;
diff --git a/src/utils/pool_params.c b/src/utils/pool_params.c
index 43bdb7c5c..41b90a8d2 100644
--- a/src/utils/pool_params.c
+++ b/src/utils/pool_params.c
@@ -39,7 +39,7 @@
* initialize parameter structure
*/
int
-pool_init_params(ParamStatus * params)
+pool_init_params(ParamStatus *params)
{
MemoryContext oldContext = MemoryContextSwitchTo(TopMemoryContext);
@@ -56,7 +56,7 @@ pool_init_params(ParamStatus * params)
* discard parameter structure
*/
void
-pool_discard_params(ParamStatus * params)
+pool_discard_params(ParamStatus *params)
{
int i;
@@ -81,7 +81,7 @@ pool_discard_params(ParamStatus * params)
* if not found, NULL is returned
*/
char *
-pool_find_name(ParamStatus * params, char *name, int *pos)
+pool_find_name(ParamStatus *params, char *name, int *pos)
{
int i;
@@ -100,7 +100,7 @@ pool_find_name(ParamStatus * params, char *name, int *pos)
* return name and value by index.
*/
int
-pool_get_param(ParamStatus * params, int index, char **name, char **value)
+pool_get_param(ParamStatus *params, int index, char **name, char **value)
{
if (index < 0 || index >= params->num)
return -1;
@@ -115,7 +115,7 @@ pool_get_param(ParamStatus * params, int index, char **name, char **value)
* add or replace name/value pair
*/
int
-pool_add_param(ParamStatus * params, char *name, char *value)
+pool_add_param(ParamStatus *params, char *name, char *value)
{
int pos;
MemoryContext oldContext = MemoryContextSwitchTo(TopMemoryContext);
@@ -152,7 +152,7 @@ pool_add_param(ParamStatus * params, char *name, char *value)
}
void
-pool_param_debug_print(ParamStatus * params)
+pool_param_debug_print(ParamStatus *params)
{
int i;
diff --git a/src/utils/pool_path.c b/src/utils/pool_path.c
index 38220ac2c..44a5ee960 100644
--- a/src/utils/pool_path.c
+++ b/src/utils/pool_path.c
@@ -38,8 +38,8 @@
static void trim_directory(char *path);
static void trim_trailing_separator(char *path);
-static int pqGetpwuid(uid_t uid, struct passwd *resultbuf, char *buffer,
- size_t buflen, struct passwd **result);
+static int pqGetpwuid(uid_t uid, struct passwd *resultbuf, char *buffer,
+ size_t buflen, struct passwd **result);
/*
* get_parent_directory
diff --git a/src/utils/pool_process_reporting.c b/src/utils/pool_process_reporting.c
index ce4117a4b..8e41ccafd 100644
--- a/src/utils/pool_process_reporting.c
+++ b/src/utils/pool_process_reporting.c
@@ -37,16 +37,16 @@
#include <time.h>
#include <netinet/in.h>
-static void send_row_description_and_data_rows(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
+static void send_row_description_and_data_rows(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
short num_fields, char **field_names, int *offsettbl,
char *data, int row_size, int nrows);
-static void write_one_field(POOL_CONNECTION * frontend, char *field);
-static void write_one_field_v2(POOL_CONNECTION * frontend, char *field);
+static void write_one_field(POOL_CONNECTION *frontend, char *field);
+static void write_one_field_v2(POOL_CONNECTION *frontend, char *field);
static char *db_node_status(int node);
static char *db_node_role(int node);
void
-send_row_description(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
+send_row_description(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
short num_fields, char **field_names)
{
static char *cursorname = "blank";
@@ -124,7 +124,7 @@ send_row_description(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
* to the command complete message.
*/
void
-send_complete_and_ready(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *message, const int num_rows)
+send_complete_and_ready(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *message, const int num_rows)
{
int len;
int msg_len;
@@ -1209,7 +1209,7 @@ get_config(int *nrows)
}
void
-send_config_var_detail_row(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *name, const char *value, const char *description)
+send_config_var_detail_row(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *name, const char *value, const char *description)
{
int size;
int hsize;
@@ -1272,7 +1272,7 @@ send_config_var_detail_row(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * ba
}
void
-send_config_var_value_only_row(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend, const char *value)
+send_config_var_value_only_row(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend, const char *value)
{
int size;
int hsize;
@@ -1314,7 +1314,7 @@ send_config_var_value_only_row(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL
}
void
-config_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+config_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
static char *field_names[] = {"item", "value", "description"};
static unsigned char nullmap[2] = {0xff, 0xff};
@@ -1437,8 +1437,8 @@ get_nodes(int *nrows, int node_id)
if (bi->standby_delay_by_time)
{
- snprintf(nodes[i].delay, POOLCONFIG_MAXWEIGHTLEN, "%.6f", ((float)bi->standby_delay)/1000000);
- if (bi->standby_delay >= 2*1000*1000)
+ snprintf(nodes[i].delay, POOLCONFIG_MAXWEIGHTLEN, "%.6f", ((float) bi->standby_delay) / 1000000);
+ if (bi->standby_delay >= 2 * 1000 * 1000)
strcat(nodes[i].delay, " seconds");
else
strcat(nodes[i].delay, " second");
@@ -1484,13 +1484,13 @@ get_nodes(int *nrows, int node_id)
* SHOW pool_nodes;
*/
void
-nodes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+nodes_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
static char *field_names[] = {"node_id", "hostname", "port", "status", "pg_status", "lb_weight", "role",
- "pg_role", "select_cnt", "load_balance_node", "replication_delay",
- "replication_state", "replication_sync_state", "last_status_change"};
+ "pg_role", "select_cnt", "load_balance_node", "replication_delay",
+ "replication_state", "replication_sync_state", "last_status_change"};
- static int offsettbl[] = {
+ static int offsettbl[] = {
offsetof(POOL_REPORT_NODES, node_id),
offsetof(POOL_REPORT_NODES, hostname),
offsetof(POOL_REPORT_NODES, port),
@@ -1507,7 +1507,7 @@ nodes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
offsetof(POOL_REPORT_NODES, last_status_change)
};
- int nrows;
+ int nrows;
short num_fields;
POOL_REPORT_NODES *nodes;
@@ -1515,7 +1515,7 @@ nodes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
nodes = get_nodes(&nrows, -1);
send_row_description_and_data_rows(frontend, backend, num_fields, field_names, offsettbl,
- (char *)nodes, sizeof(POOL_REPORT_NODES), nrows);
+ (char *) nodes, sizeof(POOL_REPORT_NODES), nrows);
pfree(nodes);
}
@@ -1538,12 +1538,12 @@ get_pools(int *nrows)
int lines = 0;
POOL_REPORT_POOLS *pools = palloc0(
- pool_config->num_init_children * pool_config->max_pool * NUM_BACKENDS * sizeof(POOL_REPORT_POOLS)
- );
+ pool_config->num_init_children * pool_config->max_pool * NUM_BACKENDS * sizeof(POOL_REPORT_POOLS)
+ );
for (child = 0; child < pool_config->num_init_children; child++)
{
- int exist_live_connection = 0;
+ int exist_live_connection = 0;
pi = &process_info[child];
proc_id = pi->pid;
@@ -1561,9 +1561,9 @@ get_pools(int *nrows)
for (pool = 0; pool < pool_config->max_pool; pool++)
{
- int idle_duration = pi->connection_info[pool * MAX_NUM_BACKENDS].client_idle_duration;
- int load_balancing_node_id = pi->connection_info[pool * MAX_NUM_BACKENDS].load_balancing_node;
- int client_idle_time = pool_config->client_idle_limit;
+ int idle_duration = pi->connection_info[pool * MAX_NUM_BACKENDS].client_idle_duration;
+ int load_balancing_node_id = pi->connection_info[pool * MAX_NUM_BACKENDS].load_balancing_node;
+ int client_idle_time = pool_config->client_idle_limit;
if (pool_config->client_idle_limit > 0)
{
@@ -1581,8 +1581,8 @@ get_pools(int *nrows)
&& (pi->connected)
&& (!exist_live_connection))
{
- char proc_start_time[POOLCONFIG_MAXDATELEN + 1];
- int wait_for_connect_time = pool_config->child_life_time - pi->wait_for_connect;
+ char proc_start_time[POOLCONFIG_MAXDATELEN + 1];
+ int wait_for_connect_time = pool_config->child_life_time - pi->wait_for_connect;
strftime(proc_start_time, sizeof(proc_start_time),
"%Y-%m-%d %H:%M:%S", localtime(&pi->start_time));
@@ -1614,7 +1614,7 @@ get_pools(int *nrows)
else
{
strftime(pools[lines].client_connection_time, sizeof(pools[lines].client_connection_time),
- "%Y-%m-%d %H:%M:%S", localtime(&pi->connection_info[poolBE].client_connection_time));
+ "%Y-%m-%d %H:%M:%S", localtime(&pi->connection_info[poolBE].client_connection_time));
}
if (pi->connection_info[poolBE].client_disconnection_time == 0)
@@ -1624,7 +1624,7 @@ get_pools(int *nrows)
else
{
strftime(pools[lines].client_disconnection_time, sizeof(pools[lines].client_disconnection_time),
- "%Y-%m-%d %H:%M:%S", localtime(&pi->connection_info[poolBE].client_disconnection_time));
+ "%Y-%m-%d %H:%M:%S", localtime(&pi->connection_info[poolBE].client_disconnection_time));
}
if ((pool_config->client_idle_limit > 0)
@@ -1665,7 +1665,7 @@ get_pools(int *nrows)
snprintf(pools[lines].pool_connected, sizeof(pools[lines].pool_connected), "%d",
pi->connection_info[poolBE].connected);
- switch(pi->status)
+ switch (pi->status)
{
case WAIT_FOR_CONNECT:
StrNCpy(pools[lines].status, "Wait for connection", POOLCONFIG_MAXPROCESSSTATUSLEN);
@@ -1695,8 +1695,8 @@ get_pools(int *nrows)
StrNCpy(pools[lines].client_port, pi->client_port, NI_MAXSERV);
/*
- * If this the statement was sent to backend id
- * report the statement.
+ * If this the statement was sent to backend id report the
+ * statement.
*/
if (is_pi_set(pi->node_ids, backend_id))
{
@@ -1717,17 +1717,17 @@ get_pools(int *nrows)
* SHOW pool_pools;
*/
void
-pools_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+pools_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
- short num_fields;
+ short num_fields;
static char *field_names[] = {"pool_pid", "start_time", "client_connection_count", "pool_id",
- "backend_id", "database", "username", "backend_connection_time",
- "client_connection_time", "client_disconnection_time", "client_idle_duration",
- "majorversion", "minorversion", "pool_counter", "pool_backendpid", "pool_connected",
- "status", "load_balance_node", "client_host", "client_port", "statement"};
- int n;
- int *offsettbl;
- int nrows;
+ "backend_id", "database", "username", "backend_connection_time",
+ "client_connection_time", "client_disconnection_time", "client_idle_duration",
+ "majorversion", "minorversion", "pool_counter", "pool_backendpid", "pool_connected",
+ "status", "load_balance_node", "client_host", "client_port", "statement"};
+ int n;
+ int *offsettbl;
+ int nrows;
POOL_REPORT_POOLS *pools;
num_fields = sizeof(field_names) / sizeof(char *);
@@ -1735,7 +1735,7 @@ pools_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
pools = get_pools(&nrows);
send_row_description_and_data_rows(frontend, backend, num_fields, field_names, offsettbl,
- (char *)pools, sizeof(POOL_REPORT_POOLS), nrows);
+ (char *) pools, sizeof(POOL_REPORT_POOLS), nrows);
pfree(pools);
}
@@ -1756,7 +1756,7 @@ get_processes(int *nrows)
for (child = 0; child < pool_config->num_init_children; child++)
{
- int exist_live_connection = 0;
+ int exist_live_connection = 0;
pi = &process_info[child];
proc_id = pi->pid;
@@ -1776,8 +1776,8 @@ get_processes(int *nrows)
&& (pi->connected)
&& (!exist_live_connection))
{
- char proc_start_time[POOLCONFIG_MAXDATELEN + 1];
- int wait_for_connect_time = pool_config->child_life_time - pi->wait_for_connect;
+ char proc_start_time[POOLCONFIG_MAXDATELEN + 1];
+ int wait_for_connect_time = pool_config->child_life_time - pi->wait_for_connect;
strftime(proc_start_time, sizeof(proc_start_time),
"%Y-%m-%d %H:%M:%S", localtime(&pi->start_time));
@@ -1811,7 +1811,7 @@ get_processes(int *nrows)
snprintf(processes[child].pool_counter, POOLCONFIG_MAXCOUNTLEN, "%d", pi->connection_info[poolBE].counter);
}
}
- switch(pi->status)
+ switch (pi->status)
{
case WAIT_FOR_CONNECT:
StrNCpy(processes[child].status, "Wait for connection", POOLCONFIG_MAXPROCESSSTATUSLEN);
@@ -1842,12 +1842,12 @@ get_processes(int *nrows)
* SHOW pool_processes
*/
void
-processes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+processes_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
static char *field_names[] = {"pool_pid", "start_time", "client_connection_count",
- "database", "username", "backend_connection_time", "pool_counter", "status"};
+ "database", "username", "backend_connection_time", "pool_counter", "status"};
- static int offsettbl[] = {
+ static int offsettbl[] = {
offsetof(POOL_REPORT_PROCESSES, pool_pid),
offsetof(POOL_REPORT_PROCESSES, process_start_time),
offsetof(POOL_REPORT_PROCESSES, client_connection_count),
@@ -1858,7 +1858,7 @@ processes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
offsetof(POOL_REPORT_PROCESSES, status),
};
- int nrows;
+ int nrows;
short num_fields;
POOL_REPORT_PROCESSES *processes;
@@ -1866,7 +1866,7 @@ processes_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
processes = get_processes(&nrows);
send_row_description_and_data_rows(frontend, backend, num_fields, field_names, offsettbl,
- (char *)processes, sizeof(POOL_REPORT_PROCESSES), nrows);
+ (char *) processes, sizeof(POOL_REPORT_PROCESSES), nrows);
pfree(processes);
}
@@ -1885,14 +1885,14 @@ get_version(void)
* SHOW pool_version;
*/
void
-version_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+version_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
static char *field_names[] = {"pool_version"};
- static int offsettbl[] = {
+ static int offsettbl[] = {
offsetof(POOL_REPORT_VERSION, version)
};
- int nrows = 1;
+ int nrows = 1;
short num_fields;
POOL_REPORT_VERSION *version;
@@ -1900,7 +1900,7 @@ version_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
version = get_version();
send_row_description_and_data_rows(frontend, backend, num_fields, field_names, offsettbl,
- (char *)version, sizeof(POOL_REPORT_VERSION), nrows);
+ (char *) version, sizeof(POOL_REPORT_VERSION), nrows);
pfree(version);
}
@@ -1909,7 +1909,7 @@ version_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
* Show in memory cache reporting
*/
void
-cache_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+cache_reporting(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
static char *field_names[] = {"num_cache_hits", "num_selects", "cache_hit_ratio", "num_hash_entries", "used_hash_entries", "num_cache_entries", "used_cache_entries_size", "free_cache_entries_size", "fragment_cache_entries_size"};
short num_fields = sizeof(field_names) / sizeof(char *);
@@ -1920,7 +1920,7 @@ cache_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
int hsize;
static unsigned char nullmap[2] = {0xff, 0xff};
int nbytes = (num_fields + 7) / 8;
- volatile POOL_SHMEM_STATS *mystats;
+ volatile POOL_SHMEM_STATS *mystats;
pool_sigset_t oldmask;
double ratio;
@@ -1929,7 +1929,7 @@ cache_reporting(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
{
int len; /* length of string excluding null terminate */
char string[POOL_CACHE_STATS_MAX_STRING_LEN + 1];
- } MY_STRING_CACHE_STATS;
+ } MY_STRING_CACHE_STATS;
MY_STRING_CACHE_STATS *strp;
@@ -2071,7 +2071,7 @@ get_health_check_stats(int *nrows)
/* status last changed */
t = bi->status_changed_time;
- ereport(LOG,(errmsg("status_changed_time %ld", t)));
+ ereport(LOG, (errmsg("status_changed_time %ld", t)));
strftime(stats[i].last_status_change, POOLCONFIG_MAXDATELEN, "%F %T", localtime(&t));
snprintf(stats[i].total_count, POOLCONFIG_MAXLONGCOUNTLEN, UINT64_FORMAT, health_check_stats[i].total_count);
@@ -2082,14 +2082,14 @@ get_health_check_stats(int *nrows)
snprintf(stats[i].max_retry_count, POOLCONFIG_MAXCOUNTLEN, "%d", health_check_stats[i].max_retry_count);
if (pool_config->health_check_params[i].health_check_period > 0)
- f = (double)health_check_stats[i].retry_count /
+ f = (double) health_check_stats[i].retry_count /
(health_check_stats[i].total_count - health_check_stats[i].skip_count);
else
f = 0.0;
snprintf(stats[i].average_retry_count, POOLCONFIG_MAXWEIGHTLEN, "%f", f);
if (pool_config->health_check_params[i].health_check_period > 0)
- f = (double)health_check_stats[i].total_health_check_duration /
+ f = (double) health_check_stats[i].total_health_check_duration /
(health_check_stats[i].total_count - health_check_stats[i].skip_count);
else
f = 0.0;
@@ -2124,14 +2124,14 @@ get_health_check_stats(int *nrows)
* SHOW health_check_stats;
*/
void
-show_health_check_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+show_health_check_stats(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
static char *field_names[] = {"node_id", "hostname", "port", "status", "role", "last_status_change",
- "total_count", "success_count", "fail_count", "skip_count", "retry_count",
- "average_retry_count", "max_retry_count", "max_duration", "min_duration",
- "average_duration", "last_health_check", "last_successful_health_check",
- "last_skip_health_check", "last_failed_health_check"};
- static int offsettbl[] = {
+ "total_count", "success_count", "fail_count", "skip_count", "retry_count",
+ "average_retry_count", "max_retry_count", "max_duration", "min_duration",
+ "average_duration", "last_health_check", "last_successful_health_check",
+ "last_skip_health_check", "last_failed_health_check"};
+ static int offsettbl[] = {
offsetof(POOL_HEALTH_CHECK_STATS, node_id),
offsetof(POOL_HEALTH_CHECK_STATS, hostname),
offsetof(POOL_HEALTH_CHECK_STATS, port),
@@ -2154,7 +2154,7 @@ show_health_check_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backe
offsetof(POOL_HEALTH_CHECK_STATS, last_failed_health_check),
};
- int nrows;
+ int nrows;
short num_fields;
POOL_HEALTH_CHECK_STATS *stats;
@@ -2162,7 +2162,7 @@ show_health_check_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backe
stats = get_health_check_stats(&nrows);
send_row_description_and_data_rows(frontend, backend, num_fields, field_names, offsettbl,
- (char *)stats, sizeof(POOL_HEALTH_CHECK_STATS), nrows);
+ (char *) stats, sizeof(POOL_HEALTH_CHECK_STATS), nrows);
pfree(stats);
}
@@ -2226,13 +2226,13 @@ get_backend_stats(int *nrows)
* SHOW backend_stats;
*/
void
-show_backend_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
+show_backend_stats(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend)
{
static char *field_names[] = {"node_id", "hostname", "port", "status", "role",
- "select_cnt", "insert_cnt", "update_cnt", "delete_cnt", "ddl_cnt", "other_cnt",
- "panic_cnt", "fatal_cnt", "error_cnt"};
+ "select_cnt", "insert_cnt", "update_cnt", "delete_cnt", "ddl_cnt", "other_cnt",
+ "panic_cnt", "fatal_cnt", "error_cnt"};
- static int offsettbl[] = {
+ static int offsettbl[] = {
offsetof(POOL_BACKEND_STATS, node_id),
offsetof(POOL_BACKEND_STATS, hostname),
offsetof(POOL_BACKEND_STATS, port),
@@ -2249,7 +2249,7 @@ show_backend_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
offsetof(POOL_BACKEND_STATS, error_cnt),
};
- int nrows;
+ int nrows;
short num_fields;
POOL_BACKEND_STATS *backend_stats;
@@ -2257,7 +2257,7 @@ show_backend_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
backend_stats = get_backend_stats(&nrows);
send_row_description_and_data_rows(frontend, backend, num_fields, field_names, offsettbl,
- (char *)backend_stats, sizeof(POOL_BACKEND_STATS), nrows);
+ (char *) backend_stats, sizeof(POOL_BACKEND_STATS), nrows);
pfree(backend_stats);
}
@@ -2280,13 +2280,15 @@ show_backend_stats(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend)
*
* row_size: byte length of data for 1 row.
*
- * nrows: number of rows in data.
+ * nrows: number of rows in data.
*/
-static void send_row_description_and_data_rows(POOL_CONNECTION * frontend, POOL_CONNECTION_POOL * backend,
- short num_fields, char **field_names, int *offsettbl,
- char *data, int row_size, int nrows)
+static void
+send_row_description_and_data_rows(POOL_CONNECTION *frontend, POOL_CONNECTION_POOL *backend,
+ short num_fields, char **field_names, int *offsettbl,
+ char *data, int row_size, int nrows)
{
- int i, j;
+ int i,
+ j;
short s;
int len;
unsigned char *nullmap;
@@ -2342,9 +2344,11 @@ static void send_row_description_and_data_rows(POOL_CONNECTION * frontend, POOL_
}
/* Write one field to frontend (v3) */
-static void write_one_field(POOL_CONNECTION * frontend, char *field)
+static void
+write_one_field(POOL_CONNECTION *frontend, char *field)
{
- int size, hsize;
+ int size,
+ hsize;
size = strlen(field);
hsize = htonl(size);
@@ -2353,9 +2357,11 @@ static void write_one_field(POOL_CONNECTION * frontend, char *field)
}
/* Write one field to frontend (v2) */
-static void write_one_field_v2(POOL_CONNECTION * frontend, char *field)
+static void
+write_one_field_v2(POOL_CONNECTION *frontend, char *field)
{
- int size, hsize;
+ int size,
+ hsize;
size = strlen(field);
hsize = htonl(size + 4);
@@ -2368,17 +2374,18 @@ static void write_one_field_v2(POOL_CONNECTION * frontend, char *field)
* when health check is not enabled).
*/
static
-char *db_node_status(int node)
+char *
+db_node_status(int node)
{
#ifdef HAVE_PQPINGPARAMS
BackendInfo *bkinfo;
- int i;
- char portstr[32];
- char timeoutstr[32];
+ int i;
+ char portstr[32];
+ char timeoutstr[32];
#define PARAMS_ARRAY_SIZE 8
const char *keywords[PARAMS_ARRAY_SIZE];
const char *values[PARAMS_ARRAY_SIZE];
- PGPing ret;
+ PGPing ret;
#endif
/*
@@ -2391,7 +2398,7 @@ char *db_node_status(int node)
#ifdef HAVE_PQPINGPARAMS
i = 0;
-
+
keywords[i] = "user";
values[i] = pool_config->health_check_params[node].health_check_user;
i++;
@@ -2401,7 +2408,7 @@ char *db_node_status(int node)
*/
keywords[i] = "dbname";
if (*pool_config->health_check_params[node].health_check_database == '\0')
- values[i] = "postgres";
+ values[i] = "postgres";
else
values[i] = pool_config->health_check_params[node].health_check_database;
i++;
@@ -2443,17 +2450,18 @@ char *db_node_status(int node)
* when sr check is not enabled).
*/
static
-char *db_node_role(int node)
+char *
+db_node_role(int node)
{
BackendInfo *bkinfo;
POOL_CONNECTION_POOL_SLOT *slots[MAX_NUM_BACKENDS];
POOL_SELECT_RESULT *res;
- char *user;
- char *password;
- char *dbname;
- char *host;
- int port;
- char *sts;
+ char *user;
+ char *password;
+ char *dbname;
+ char *host;
+ int port;
+ char *sts;
if (pool_config->sr_check_period == 0)
{
diff --git a/src/utils/pool_relcache.c b/src/utils/pool_relcache.c
index 32362fc6b..bea2ac396 100644
--- a/src/utils/pool_relcache.c
+++ b/src/utils/pool_relcache.c
@@ -40,7 +40,7 @@
static void SearchRelCacheErrorCb(void *arg);
static POOL_SELECT_RESULT *query_cache_to_relation_cache(char *data, size_t size);
-static char *relation_cache_to_query_cache(POOL_SELECT_RESULT *res,size_t *size);
+static char *relation_cache_to_query_cache(POOL_SELECT_RESULT *res, size_t *size);
/*
@@ -88,7 +88,7 @@ pool_create_relcache(int cachesize, char *sql,
* Discard relation cache.
*/
void
-pool_discard_relcache(POOL_RELCACHE * relcache)
+pool_discard_relcache(POOL_RELCACHE *relcache)
{
int i;
@@ -105,7 +105,7 @@ pool_discard_relcache(POOL_RELCACHE * relcache)
* If not found in cache, do the query and store the result into cache and return it.
*/
void *
-pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, char *table)
+pool_search_relcache(POOL_RELCACHE *relcache, POOL_CONNECTION_POOL *backend, char *table)
{
char *dbname;
int i;
@@ -115,12 +115,12 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
int index = 0;
int local_session_id;
time_t now;
- void *result;
+ void *result;
ErrorContextCallback callback;
- pool_sigset_t oldmask;
- bool locked;
+ pool_sigset_t oldmask;
+ bool locked;
int query_cache_not_found = 1;
- char *query_cache_data = NULL;
+ char *query_cache_data = NULL;
size_t query_cache_len;
POOL_SESSION_CONTEXT *session_context;
int node_id;
@@ -133,9 +133,9 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
/*
* Obtain database name and node id to be sent query. If
- * relcache_query_target is RELQTARGET_LOAD_BALANCE_NODE, we consider
- * load balance node id to be used to send queries.
- *
+ * relcache_query_target is RELQTARGET_LOAD_BALANCE_NODE, we consider load
+ * balance node id to be used to send queries.
+ *
* Note that we need to use VALID_BACKEND_RAW, rather than VALID_BACKEND
* since pool_is_node_to_be_sent_in_current_query(being called by
* VALID_BACKEND) assumes that if query context exists, where_to_send map
@@ -200,7 +200,7 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
ereport(DEBUG1,
(errmsg("hit local relation cache"),
- errdetail("query:%s", relcache->sql)));
+ errdetail("query:%s", relcache->sql)));
return relcache->cache[i].data;
}
@@ -220,10 +220,11 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
error_context_stack = &callback;
locked = pool_is_shmem_lock();
+
/*
* if enable_shared_relcache is true, search query cache.
*/
- if (pool_config->enable_shared_relcache)
+ if (pool_config->enable_shared_relcache)
{
/* if shmem is not locked by this process, get the lock */
if (!locked)
@@ -231,16 +232,16 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
POOL_SETMASK2(&BlockSig, &oldmask);
pool_shmem_lock(POOL_MEMQ_SHARED_LOCK);
}
- PG_TRY();
+ PG_TRY();
{
/* search catalog cache in query cache */
query_cache_not_found = pool_fetch_cache(backend, query, &query_cache_data, &query_cache_len);
}
- PG_CATCH();
+ PG_CATCH();
{
pool_shmem_unlock();
POOL_SETMASK(&oldmask);
- PG_RE_THROW();
+ PG_RE_THROW();
}
PG_END_TRY();
}
@@ -249,25 +250,26 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
{
ereport(DEBUG1,
(errmsg("not hit local relation cache and query cache"),
- errdetail("query:%s", query)));
+ errdetail("query:%s", query)));
do_query(CONNECTION(backend, node_id), query, &res, MAJOR(backend));
/* Register cache */
result = (*relcache->register_func) (res);
/* save local catalog cache in query cache */
- if (pool_config->enable_shared_relcache)
+ if (pool_config->enable_shared_relcache)
{
query_cache_data = relation_cache_to_query_cache(res, &query_cache_len);
+
/*
- * So far, we have already obtained a lock. But to register
- * a query cache entry, we need to acquire an exclusive lock.
- * Unfortunately:
- * (1) we don't know if we already acquired an exclusive or not.
- * (2) we cannot escalate a shared lock to an exclusive lock.
- * So we release the lock and obtain an exclusive lock. This may
- * create a window and thus we might try to register duplicate
- * query cache entry if other process is trying to register the same entry
- * in the window. But it should be harmless.
+ * So far, we have already obtained a lock. But to register a
+ * query cache entry, we need to acquire an exclusive lock.
+ * Unfortunately: (1) we don't know if we already acquired an
+ * exclusive or not. (2) we cannot escalate a shared lock to an
+ * exclusive lock. So we release the lock and obtain an exclusive
+ * lock. This may create a window and thus we might try to
+ * register duplicate query cache entry if other process is trying
+ * to register the same entry in the window. But it should be
+ * harmless.
*/
pool_shmem_unlock();
pool_shmem_lock(POOL_MEMQ_EXCLUSIVE_LOCK);
@@ -278,10 +280,10 @@ pool_search_relcache(POOL_RELCACHE * relcache, POOL_CONNECTION_POOL * backend, c
{
ereport(DEBUG1,
(errmsg("hit query cache"),
- errdetail("query:%s", query)));
+ errdetail("query:%s", query)));
/* catalog cache found in query_cache, copy local relation cache */
- res = query_cache_to_relation_cache(query_cache_data,query_cache_len);
+ res = query_cache_to_relation_cache(query_cache_data, query_cache_len);
result = (*relcache->register_func) (res);
}
/* if shmem is locked by this function, unlock it */
@@ -498,11 +500,11 @@ remove_quotes_and_schema_from_relname(char *table)
List *names;
rawstring = pstrdup(table);
- if(SplitIdentifierString(rawstring, '.', (Node **) &names) && names != NIL)
+ if (SplitIdentifierString(rawstring, '.', (Node **) &names) && names != NIL)
{
/*
- * Since table name is always the last one in the list,
- * we use llast() to get table name.
+ * Since table name is always the last one in the list, we use llast()
+ * to get table name.
*/
strlcpy(rel, llast(names), sizeof(rel));
}
@@ -522,7 +524,7 @@ remove_quotes_and_schema_from_relname(char *table)
* query. Returns row count.
*/
void *
-int_register_func(POOL_SELECT_RESULT * res)
+int_register_func(POOL_SELECT_RESULT *res)
{
if (res->numrows >= 1)
return (void *) atol(res->data[0]);
@@ -537,7 +539,7 @@ int_unregister_func(void *data)
}
void *
-string_register_func(POOL_SELECT_RESULT * res)
+string_register_func(POOL_SELECT_RESULT *res)
{
return (res->numrows > 0) ? strdup(res->data[0]) : NULL;
}
@@ -554,9 +556,9 @@ static POOL_SELECT_RESULT *
query_cache_to_relation_cache(char *data, size_t size)
{
POOL_SELECT_RESULT *res;
- char *p;
- int i;
- int len;
+ char *p;
+ int i;
+ int len;
p = data;
@@ -564,11 +566,11 @@ query_cache_to_relation_cache(char *data, size_t size)
res->rowdesc = palloc0(sizeof(RowDesc));
/* rowdesc */
- res->rowdesc->num_attrs = *((int *)p);
+ res->rowdesc->num_attrs = *((int *) p);
p += sizeof(int);
/* numrows */
- res->numrows = *((int *)p);
+ res->numrows = *((int *) p);
p += sizeof(int);
len = res->rowdesc->num_attrs * res->numrows;
@@ -579,13 +581,13 @@ query_cache_to_relation_cache(char *data, size_t size)
/* nullflags */
for (i = 0; i < len; i++)
{
- res->nullflags[i] = *((int *)p);
+ res->nullflags[i] = *((int *) p);
p += sizeof(int);
}
/* data */
for (i = 0; i < len; i++)
{
- if ( res->nullflags[i] > 0)
+ if (res->nullflags[i] > 0)
{
res->data[i] = palloc(res->nullflags[i] + 1);
memcpy(res->data[i], p, res->nullflags[i]);
@@ -598,29 +600,29 @@ query_cache_to_relation_cache(char *data, size_t size)
}
static char *
-relation_cache_to_query_cache(POOL_SELECT_RESULT *res,size_t *size)
+relation_cache_to_query_cache(POOL_SELECT_RESULT *res, size_t *size)
{
- char * data;
- char * p;
+ char *data;
+ char *p;
- int i;
- int array_size;
- int mysize;
+ int i;
+ int array_size;
+ int mysize;
mysize = 0;
/* RoeDesc *rowdesc */
- /* int res->rowdesc->num_attrs;*/
- mysize += sizeof(int); /* only rodesc->num_attrs */
+ /* int res->rowdesc->num_attrs; */
+ mysize += sizeof(int); /* only rodesc->num_attrs */
/* int numrows */
mysize += sizeof(int);
/* int *nullflags */
mysize += sizeof(int) * res->rowdesc->num_attrs * res->numrows;
- /* char **data */
+ /* char **data */
/* res->rowdesc->num_attrs * res->numrows */
for (i = 0; i < res->rowdesc->num_attrs * res->numrows; i++)
{
- if(res->nullflags[i] > 0)
+ if (res->nullflags[i] > 0)
{
mysize += res->nullflags[i];
}
@@ -644,7 +646,7 @@ relation_cache_to_query_cache(POOL_SELECT_RESULT *res,size_t *size)
for (i = 0; i < array_size; i++)
{
- if( res->nullflags[i] > 0) /* NOT NULL? */
+ if (res->nullflags[i] > 0) /* NOT NULL? */
{
memcpy(p, res->data[i], res->nullflags[i]);
p += res->nullflags[i];
diff --git a/src/utils/pool_select_walker.c b/src/utils/pool_select_walker.c
index b9c2cef87..d48247aab 100644
--- a/src/utils/pool_select_walker.c
+++ b/src/utils/pool_select_walker.c
@@ -34,7 +34,8 @@
/*
* Possible argument (property) values for function_volatile_property
*/
-typedef enum {
+typedef enum
+{
FUNC_VOLATILE,
FUNC_STABLE,
FUNC_IMMUTABLE
@@ -71,6 +72,7 @@ pool_has_function_call(Node *node)
if (IsA(node, PrepareStmt))
{
PrepareStmt *prepare_statement = (PrepareStmt *) node;
+
node = (Node *) (prepare_statement->query);
}
@@ -326,7 +328,7 @@ static char *
strip_quote(char *str)
{
char *after;
- int len;
+ int len;
int i = 0;
len = strlen(str);
@@ -404,8 +406,9 @@ function_call_walker(Node *node, void *context)
}
/*
- * If both read_only_function_list and write_function_list is empty,
- * check volatile property of the function in the system catalog.
+ * If both read_only_function_list and write_function_list is
+ * empty, check volatile property of the function in the system
+ * catalog.
*/
if (pool_config->num_read_only_function_list == 0 &&
pool_config->num_write_function_list == 0)
@@ -624,7 +627,7 @@ is_system_catalog(char *table_name)
#define ISBELONGTOPGCATALOGQUERY3 "SELECT count(*) FROM pg_catalog.pg_class AS c, pg_catalog.pg_namespace AS n WHERE c.oid = pg_catalog.to_regclass('\"%s\"') AND c.relnamespace = n.oid AND n.nspname = 'pg_catalog'"
bool result;
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
POOL_CONNECTION_POOL *backend;
if (table_name == NULL)
@@ -691,7 +694,7 @@ is_system_catalog(char *table_name)
/*
* Returns true if table_name is a temporary table.
*/
-static POOL_RELCACHE * is_temp_table_relcache;
+static POOL_RELCACHE *is_temp_table_relcache;
static bool
is_temp_table(char *table_name)
@@ -732,7 +735,7 @@ is_temp_table(char *table_name)
if (pool_config->check_temp_table == CHECK_TEMP_TRACE)
{
- POOL_TEMP_TABLE *temp_table;
+ POOL_TEMP_TABLE *temp_table;
temp_table = pool_temp_tables_find(table_name);
if (temp_table && (temp_table->state == TEMP_TABLE_CREATE_COMMITTED ||
@@ -766,7 +769,8 @@ is_temp_table(char *table_name)
{
ereport(WARNING,
(errmsg("is_temp_table: unexpected PostgreSQL version: %s", Pgversion(backend)->version_string)));
- return false; /* fall back to assume that the table is not a temporary table. */
+ return false; /* fall back to assume that the table is not a
+ * temporary table. */
}
/*
@@ -827,9 +831,9 @@ is_unlogged_table(char *table_name)
#define ISUNLOGGEDQUERY3 "SELECT count(*) FROM pg_catalog.pg_class AS c WHERE c.oid = pg_catalog.to_regclass('%s') AND c.relpersistence = 'u'"
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
POOL_CONNECTION_POOL *backend;
- int major;
+ int major;
if (table_name == NULL)
{
@@ -909,7 +913,7 @@ is_view(char *table_name)
#define ISVIEWQUERY3 "SELECT count(*) FROM pg_catalog.pg_class AS c WHERE c.oid = pg_catalog.to_regclass('%s') AND (c.relkind = 'v' OR c.relkind = 'm')"
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
POOL_CONNECTION_POOL *backend;
bool result;
char *query;
@@ -973,7 +977,7 @@ row_security_enabled(char *table_name)
*/
#define ISROWSECURITYQUERY "SELECT count(*) FROM pg_catalog.pg_class AS c WHERE c.oid = pg_catalog.to_regclass('%s') AND c.relrowsecurity"
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
POOL_CONNECTION_POOL *backend;
bool result;
char *query;
@@ -1026,7 +1030,7 @@ pool_has_pgpool_regclass(void)
#define HASPGPOOL_REGCLASSQUERY "SELECT count(*) from (SELECT pg_catalog.has_function_privilege('%s', 'pgpool_regclass(cstring)', 'execute') WHERE EXISTS(SELECT * FROM pg_catalog.pg_proc AS p WHERE p.proname = 'pgpool_regclass')) AS s"
bool result;
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
POOL_CONNECTION_POOL *backend;
char *user;
@@ -1057,7 +1061,7 @@ bool
pool_has_to_regclass(void)
{
POOL_CONNECTION_POOL *backend;
- PGVersion *pgversion;
+ PGVersion *pgversion;
backend = pool_get_session_context(false)->backend;
pgversion = Pgversion(backend);
@@ -1080,7 +1084,7 @@ insertinto_or_locking_clause_walker(Node *node, void *context)
if (node == NULL)
return false;
- if (IsA(node, IntoClause) ||IsA(node, LockingClause))
+ if (IsA(node, IntoClause) || IsA(node, LockingClause))
{
ctx->has_insertinto_or_locking_clause = true;
return false;
@@ -1163,7 +1167,10 @@ non_immutable_function_call_walker(Node *node, void *context)
/* Check type cast */
else if (IsA(node, TypeCast))
{
- /* TIMESTAMP WITH TIME ZONE and TIME WITH TIME ZONE should not be cached. */
+ /*
+ * TIMESTAMP WITH TIME ZONE and TIME WITH TIME ZONE should not be
+ * cached.
+ */
TypeCast *tc = (TypeCast *) node;
if (isSystemType((Node *) tc->typeName, "timestamptz") ||
@@ -1201,7 +1208,8 @@ is_immutable_function(char *fname)
* Note that "fname" can be schema qualified.
*/
static
-bool function_volatile_property(char *fname, FUNC_VOLATILE_PROPERTY property)
+bool
+function_volatile_property(char *fname, FUNC_VOLATILE_PROPERTY property)
{
/*
* Query to know if function's volatile property.
@@ -1211,15 +1219,15 @@ bool function_volatile_property(char *fname, FUNC_VOLATILE_PROPERTY property)
char query[1024];
char *rawstring = NULL;
List *names = NIL;
- POOL_CONNECTION_POOL *backend;
- static POOL_RELCACHE *relcache;
- char prop_volatile;
+ POOL_CONNECTION_POOL *backend;
+ static POOL_RELCACHE *relcache;
+ char prop_volatile;
/* We need a modifiable copy of the input string. */
rawstring = pstrdup(fname);
/* split "schemaname.funcname" */
- if(!SplitIdentifierString(rawstring, '.', (Node **) &names) ||
+ if (!SplitIdentifierString(rawstring, '.', (Node **) &names) ||
names == NIL)
{
pfree(rawstring);
@@ -1250,7 +1258,7 @@ bool function_volatile_property(char *fname, FUNC_VOLATILE_PROPERTY property)
}
/* with schema qualification */
- if(list_length(names) == 2)
+ if (list_length(names) == 2)
{
snprintf(query, sizeof(query), VOLATILE_FUNCTION_QUERY, (char *) llast(names),
"=", (char *) linitial(names), prop_volatile);
@@ -1315,7 +1323,7 @@ pool_table_name_to_oid(char *table_name)
#define TABLE_TO_OID_QUERY3 "SELECT COALESCE(pg_catalog.to_regclass('%s')::oid, 0)"
int oid = 0;
- static POOL_RELCACHE * relcache;
+ static POOL_RELCACHE *relcache;
POOL_CONNECTION_POOL *backend;
char *query;
@@ -1378,7 +1386,7 @@ pool_table_name_to_oid(char *table_name)
* discarded by next call to this function.
*/
int
-pool_extract_table_oids_from_select_stmt(Node *node, SelectContext * ctx)
+pool_extract_table_oids_from_select_stmt(Node *node, SelectContext *ctx)
{
if (!node)
return 0;
@@ -1490,13 +1498,13 @@ make_function_name_from_funccall(FuncCall *fcall)
/*
* Function name. Max size is calculated as follows: schema
* name(NAMEDATALEN byte) + quotation marks for schema name(2 byte) +
- * period(1 byte) + table name (NAMEDATALEN byte) + quotation marks
- * for table name(2 byte) + NULL(1 byte)
+ * period(1 byte) + table name (NAMEDATALEN byte) + quotation marks for
+ * table name(2 byte) + NULL(1 byte)
*/
static char funcname[NAMEDATALEN * 2 + 1 + 2 * 2 + 1];
- List *names;
+ List *names;
- if(fcall == NULL)
+ if (fcall == NULL)
{
ereport(WARNING,
(errmsg("FuncCall argument is NULL, while getting function name from FuncCall")));
@@ -1557,10 +1565,10 @@ char *
make_table_name_from_rangevar(RangeVar *rangevar)
{
/*
- * Table name. Max size is calculated as follows: schema
- * name(NAMEDATALEN byte) + quotation marks for schema name(2 byte) +
- * period(1 byte) + table name (NAMEDATALEN byte) + quotation marks
- * for table name(2 byte) + NULL(1 byte)
+ * Table name. Max size is calculated as follows: schema name(NAMEDATALEN
+ * byte) + quotation marks for schema name(2 byte) + period(1 byte) +
+ * table name (NAMEDATALEN byte) + quotation marks for table name(2 byte)
+ * + NULL(1 byte)
*/
static char tablename[NAMEDATALEN * 2 + 1 + 2 * 2 + 1];
@@ -1612,7 +1620,8 @@ make_table_name_from_rangevar(RangeVar *rangevar)
* functions match, return true.
*/
static
-bool function_has_return_type(char *fname, char *typename)
+bool
+function_has_return_type(char *fname, char *typename)
{
/*
* Query to count the number of records matching given function name and type name.
@@ -1622,14 +1631,14 @@ bool function_has_return_type(char *fname, char *typename)
char query[1024];
char *rawstring = NULL;
List *names = NIL;
- POOL_CONNECTION_POOL *backend;
- static POOL_RELCACHE *relcache;
+ POOL_CONNECTION_POOL *backend;
+ static POOL_RELCACHE *relcache;
/* We need a modifiable copy of the input string. */
rawstring = pstrdup(fname);
/* split "schemaname.funcname" */
- if(!SplitIdentifierString(rawstring, '.', (Node **) &names) ||
+ if (!SplitIdentifierString(rawstring, '.', (Node **) &names) ||
names == NIL)
{
pfree(rawstring);
diff --git a/src/utils/pool_shmem.c b/src/utils/pool_shmem.c
index 6612c8468..2563a29fa 100644
--- a/src/utils/pool_shmem.c
+++ b/src/utils/pool_shmem.c
@@ -36,8 +36,8 @@
#define PG_SHMAT_FLAGS 0
#endif
-static void* shared_mem_chunk = NULL;
-static char* shared_mem_free_pos = NULL;
+static void *shared_mem_chunk = NULL;
+static char *shared_mem_free_pos = NULL;
static size_t chunk_size = 0;
static void IpcMemoryDetach(int status, Datum shmaddr);
@@ -58,10 +58,10 @@ initialize_shared_memory_main_segment(size_t size)
return;
ereport(LOG,
- (errmsg("allocating shared memory segment of size: %zu ",size)));
+ (errmsg("allocating shared memory segment of size: %zu ", size)));
shared_mem_chunk = pool_shared_memory_create(size);
- shared_mem_free_pos = (char*)shared_mem_chunk;
+ shared_mem_free_pos = (char *) shared_mem_chunk;
chunk_size = size;
memset(shared_mem_chunk, 0, size);
}
@@ -69,7 +69,8 @@ initialize_shared_memory_main_segment(size_t size)
void *
pool_shared_memory_segment_get_chunk(size_t size)
{
- void *ptr = NULL;
+ void *ptr = NULL;
+
if (mypid != getpid())
{
/* should never happen */
@@ -78,18 +79,18 @@ pool_shared_memory_segment_get_chunk(size_t size)
return NULL;
}
/* check if we have enough space left in chunk */
- if ((shared_mem_free_pos - (char*)shared_mem_chunk) + MAXALIGN(size) > chunk_size)
+ if ((shared_mem_free_pos - (char *) shared_mem_chunk) + MAXALIGN(size) > chunk_size)
{
ereport(ERROR,
(errmsg("no space left in shared memory segment")));
return NULL;
}
+
/*
- * return the current shared_mem_free_pos pointer
- * and advance it by size
+ * return the current shared_mem_free_pos pointer and advance it by size
*/
- ptr = (void*)shared_mem_free_pos;
+ ptr = (void *) shared_mem_free_pos;
shared_mem_free_pos += MAXALIGN(size);
return ptr;
}
diff --git a/src/utils/pool_ssl.c b/src/utils/pool_ssl.c
index 8d3c8cafc..dda5bd0fb 100644
--- a/src/utils/pool_ssl.c
+++ b/src/utils/pool_ssl.c
@@ -45,16 +45,16 @@
static SSL_CTX *SSL_frontend_context = NULL;
static bool SSL_initialized = false;
static bool dummy_ssl_passwd_cb_called = false;
-static int dummy_ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata);
-static int ssl_external_passwd_cb(char *buf, int size, int rwflag, void *userdata);
+static int dummy_ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata);
+static int ssl_external_passwd_cb(char *buf, int size, int rwflag, void *userdata);
static int verify_cb(int ok, X509_STORE_CTX *ctx);
static const char *SSLerrmessage(unsigned long ecode);
-static void fetch_pool_ssl_cert(POOL_CONNECTION * cp);
-static DH *load_dh_file(char *filename);
-static DH *load_dh_buffer(const char *, size_t);
+static void fetch_pool_ssl_cert(POOL_CONNECTION *cp);
+static DH *load_dh_file(char *filename);
+static DH *load_dh_buffer(const char *, size_t);
static bool initialize_dh(SSL_CTX *context);
static bool initialize_ecdh(SSL_CTX *context);
-static int run_ssl_passphrase_command(const char *prompt, char *buf, int size);
+static int run_ssl_passphrase_command(const char *prompt, char *buf, int size);
static void pool_ssl_make_absolute_path(char *artifact_path, char *config_dir, char *absolute_path);
#define SSL_RETURN_VOID_IF(cond, msg) \
@@ -85,7 +85,7 @@ enum ssl_conn_type
};
/* perform per-connection ssl initialization. returns nonzero on error */
-static int init_ssl_ctx(POOL_CONNECTION * cp, enum ssl_conn_type conntype);
+static int init_ssl_ctx(POOL_CONNECTION *cp, enum ssl_conn_type conntype);
/* OpenSSL error message */
static void perror_ssl(const char *context);
@@ -94,7 +94,7 @@ static void perror_ssl(const char *context);
* between pgpool-II and PostgreSQL backends
*/
void
-pool_ssl_negotiate_clientserver(POOL_CONNECTION * cp)
+pool_ssl_negotiate_clientserver(POOL_CONNECTION *cp)
{
int ssl_packet[2] = {htonl(sizeof(int) * 2), htonl(NEGOTIATE_SSL_CODE)};
char server_response;
@@ -125,9 +125,9 @@ pool_ssl_negotiate_clientserver(POOL_CONNECTION * cp)
case 'S':
/*
- * At this point the server read buffer must be empty. Otherwise it
- * is possible that a man-in-the-middle attack is ongoing.
- * So we immediately close the communication channel.
+ * At this point the server read buffer must be empty. Otherwise
+ * it is possible that a man-in-the-middle attack is ongoing. So
+ * we immediately close the communication channel.
*/
if (!pool_read_buffer_is_empty(cp))
{
@@ -153,6 +153,7 @@ pool_ssl_negotiate_clientserver(POOL_CONNECTION * cp)
errdetail("server doesn't want to talk SSL")));
break;
case 'E':
+
/*
* Server failure of some sort, such as failure to fork a backend
* process. Don't bother retrieving the error message; we should
@@ -174,7 +175,7 @@ pool_ssl_negotiate_clientserver(POOL_CONNECTION * cp)
* between frontend and Pgpool-II
*/
void
-pool_ssl_negotiate_serverclient(POOL_CONNECTION * cp)
+pool_ssl_negotiate_serverclient(POOL_CONNECTION *cp)
{
cp->ssl_active = -1;
@@ -191,8 +192,8 @@ pool_ssl_negotiate_serverclient(POOL_CONNECTION * cp)
/*
* At this point the frontend read buffer must be empty. Otherwise it
- * is possible that a man-in-the-middle attack is ongoing.
- * So we immediately close the communication channel.
+ * is possible that a man-in-the-middle attack is ongoing. So we
+ * immediately close the communication channel.
*/
if (!pool_read_buffer_is_empty(cp))
{
@@ -210,7 +211,7 @@ pool_ssl_negotiate_serverclient(POOL_CONNECTION * cp)
}
void
-pool_ssl_close(POOL_CONNECTION * cp)
+pool_ssl_close(POOL_CONNECTION *cp)
{
if (cp->ssl)
{
@@ -223,7 +224,7 @@ pool_ssl_close(POOL_CONNECTION * cp)
}
int
-pool_ssl_read(POOL_CONNECTION * cp, void *buf, int size)
+pool_ssl_read(POOL_CONNECTION *cp, void *buf, int size)
{
int n;
int err;
@@ -267,12 +268,14 @@ retry:
n = -1;
break;
case SSL_ERROR_ZERO_RETURN:
- /* SSL manual says:
+
+ /*
+ * SSL manual says:
* -------------------------------------------------------------
- * The TLS/SSL peer has closed the connection for
- * writing by sending the close_notify alert. No more data can be
- * read. Note that SSL_ERROR_ZERO_RETURN does not necessarily
- * indicate that the underlying transport has been closed.
+ * The TLS/SSL peer has closed the connection for writing by
+ * sending the close_notify alert. No more data can be read. Note
+ * that SSL_ERROR_ZERO_RETURN does not necessarily indicate that
+ * the underlying transport has been closed.
* -------------------------------------------------------------
* We don't want to trigger failover but it is also possible that
* the connection has been closed. So returns 0 to ask pool_read()
@@ -300,7 +303,7 @@ retry:
}
int
-pool_ssl_write(POOL_CONNECTION * cp, const void *buf, int size)
+pool_ssl_write(POOL_CONNECTION *cp, const void *buf, int size)
{
int n;
int err;
@@ -352,18 +355,18 @@ retry:
}
static int
-init_ssl_ctx(POOL_CONNECTION * cp, enum ssl_conn_type conntype)
+init_ssl_ctx(POOL_CONNECTION *cp, enum ssl_conn_type conntype)
{
int error = 0;
char *cacert = NULL,
*cacert_dir = NULL;
- char ssl_cert_path[POOLMAXPATHLEN + 1] = "";
- char ssl_key_path[POOLMAXPATHLEN + 1] = "";
- char ssl_ca_cert_path[POOLMAXPATHLEN + 1] = "";
+ char ssl_cert_path[POOLMAXPATHLEN + 1] = "";
+ char ssl_key_path[POOLMAXPATHLEN + 1] = "";
+ char ssl_ca_cert_path[POOLMAXPATHLEN + 1] = "";
- char *conf_file_copy = pstrdup(get_config_file_name());
- char *conf_dir = dirname(conf_file_copy);
+ char *conf_file_copy = pstrdup(get_config_file_name());
+ char *conf_dir = dirname(conf_file_copy);
pool_ssl_make_absolute_path(pool_config->ssl_cert, conf_dir, ssl_cert_path);
pool_ssl_make_absolute_path(pool_config->ssl_key, conf_dir, ssl_key_path);
@@ -481,7 +484,7 @@ SSLerrmessage(unsigned long ecode)
* Return true if SSL layer has any pending data in buffer
*/
bool
-pool_ssl_pending(POOL_CONNECTION * cp)
+pool_ssl_pending(POOL_CONNECTION *cp)
{
if (cp->ssl_active > 0 && SSL_pending(cp->ssl) > 0)
return true;
@@ -489,7 +492,7 @@ pool_ssl_pending(POOL_CONNECTION * cp)
}
static void
-fetch_pool_ssl_cert(POOL_CONNECTION * cp)
+fetch_pool_ssl_cert(POOL_CONNECTION *cp)
{
int len;
X509 *peer = SSL_get_peer_certificate(cp->ssl);
@@ -587,15 +590,15 @@ verify_cb(int ok, X509_STORE_CTX *ctx)
int
SSL_ServerSide_init(void)
{
- STACK_OF(X509_NAME) *root_cert_list = NULL;
+ STACK_OF(X509_NAME) * root_cert_list = NULL;
SSL_CTX *context;
struct stat buf;
- char ssl_cert_path[POOLMAXPATHLEN + 1] = "";
- char ssl_key_path[POOLMAXPATHLEN + 1] = "";
- char ssl_ca_cert_path[POOLMAXPATHLEN + 1] = "";
+ char ssl_cert_path[POOLMAXPATHLEN + 1] = "";
+ char ssl_key_path[POOLMAXPATHLEN + 1] = "";
+ char ssl_ca_cert_path[POOLMAXPATHLEN + 1] = "";
- char *conf_file_copy = pstrdup(get_config_file_name());
- char *conf_dir = dirname(conf_file_copy);
+ char *conf_file_copy = pstrdup(get_config_file_name());
+ char *conf_dir = dirname(conf_file_copy);
pool_ssl_make_absolute_path(pool_config->ssl_cert, conf_dir, ssl_cert_path);
pool_ssl_make_absolute_path(pool_config->ssl_key, conf_dir, ssl_key_path);
@@ -645,7 +648,7 @@ SSL_ServerSide_init(void)
/*
* prompt for password for passphrase-protected files
*/
- if(pool_config->ssl_passphrase_command && strlen(pool_config->ssl_passphrase_command))
+ if (pool_config->ssl_passphrase_command && strlen(pool_config->ssl_passphrase_command))
SSL_CTX_set_default_passwd_cb(context, ssl_external_passwd_cb);
else
SSL_CTX_set_default_passwd_cb(context, dummy_ssl_passwd_cb);
@@ -781,7 +784,8 @@ SSL_ServerSide_init(void)
*/
if (pool_config->ssl_crl_file && strlen(pool_config->ssl_crl_file))
{
- char ssl_crl_path[POOLMAXPATHLEN + 1] = "";
+ char ssl_crl_path[POOLMAXPATHLEN + 1] = "";
+
pool_ssl_make_absolute_path(pool_config->ssl_crl_file, conf_dir, ssl_crl_path);
X509_STORE *cvstore = SSL_CTX_get_cert_store(context);
@@ -866,7 +870,7 @@ error:
static bool
initialize_dh(SSL_CTX *context)
{
- DH *dh = NULL;
+ DH *dh = NULL;
SSL_CTX_set_options(context, SSL_OP_SINGLE_DH_USE);
@@ -928,10 +932,10 @@ initialize_ecdh(SSL_CTX *context)
* to verify that the DBA-generated DH parameters file contains
* what we expect it to contain.
*/
-static DH *
+static DH *
load_dh_file(char *filename)
{
- FILE *fp;
+ FILE *fp;
DH *dh = NULL;
int codes;
@@ -1017,7 +1021,7 @@ run_ssl_passphrase_command(const char *prompt, char *buf, int size)
int loglevel = ERROR;
StringInfoData command;
char *p;
- FILE *fh;
+ FILE *fh;
int pclose_rc;
size_t len = 0;
@@ -1099,7 +1103,7 @@ pool_ssl_make_absolute_path(char *artifact_path, char *config_dir, char *absolut
{
if (artifact_path && strlen(artifact_path))
{
- if(is_absolute_path(artifact_path))
+ if (is_absolute_path(artifact_path))
strncpy(absolute_path, artifact_path, POOLMAXPATHLEN);
else
snprintf(absolute_path, POOLMAXPATHLEN, "%s/%s", config_dir, artifact_path);
@@ -1110,7 +1114,7 @@ pool_ssl_make_absolute_path(char *artifact_path, char *config_dir, char *absolut
* it's not available */
void
-pool_ssl_negotiate_serverclient(POOL_CONNECTION * cp)
+pool_ssl_negotiate_serverclient(POOL_CONNECTION *cp)
{
ereport(DEBUG1,
(errmsg("SSL is requested but SSL support is not available")));
@@ -1119,7 +1123,7 @@ pool_ssl_negotiate_serverclient(POOL_CONNECTION * cp)
}
void
-pool_ssl_negotiate_clientserver(POOL_CONNECTION * cp)
+pool_ssl_negotiate_clientserver(POOL_CONNECTION *cp)
{
ereport(DEBUG1,
@@ -1129,13 +1133,13 @@ pool_ssl_negotiate_clientserver(POOL_CONNECTION * cp)
}
void
-pool_ssl_close(POOL_CONNECTION * cp)
+pool_ssl_close(POOL_CONNECTION *cp)
{
return;
}
int
-pool_ssl_read(POOL_CONNECTION * cp, void *buf, int size)
+pool_ssl_read(POOL_CONNECTION *cp, void *buf, int size)
{
ereport(WARNING,
(errmsg("pool_ssl: SSL i/o called but SSL support is not available")));
@@ -1145,7 +1149,7 @@ pool_ssl_read(POOL_CONNECTION * cp, void *buf, int size)
}
int
-pool_ssl_write(POOL_CONNECTION * cp, const void *buf, int size)
+pool_ssl_write(POOL_CONNECTION *cp, const void *buf, int size)
{
ereport(WARNING,
(errmsg("pool_ssl: SSL i/o called but SSL support is not available")));
@@ -1161,7 +1165,7 @@ SSL_ServerSide_init(void)
}
bool
-pool_ssl_pending(POOL_CONNECTION * cp)
+pool_ssl_pending(POOL_CONNECTION *cp)
{
return false;
}
diff --git a/src/utils/pool_stream.c b/src/utils/pool_stream.c
index b210f23f2..cfa3308a4 100644
--- a/src/utils/pool_stream.c
+++ b/src/utils/pool_stream.c
@@ -45,13 +45,13 @@
static int mystrlen(char *str, int upper, int *flag);
static int mystrlinelen(char *str, int upper, int *flag);
-static int save_pending_data(POOL_CONNECTION * cp, void *data, int len);
-static int consume_pending_data(POOL_CONNECTION * cp, void *data, int len);
+static int save_pending_data(POOL_CONNECTION *cp, void *data, int len);
+static int consume_pending_data(POOL_CONNECTION *cp, void *data, int len);
static MemoryContext SwitchToConnectionContext(bool backend_connection);
#ifdef DEBUG
static void dump_buffer(char *buf, int len);
#endif
-static int pool_write_flush(POOL_CONNECTION * cp, void *buf, int len);
+static int pool_write_flush(POOL_CONNECTION *cp, void *buf, int len);
/* timeout sec for pool_check_fd */
static int timeoutsec = -1;
@@ -112,7 +112,7 @@ pool_open(int fd, bool backend_connection)
* close read/write file descriptors.
*/
void
-pool_close(POOL_CONNECTION * cp)
+pool_close(POOL_CONNECTION *cp)
{
/*
* shutdown connection to the client so that pgpool is not blocked
@@ -137,7 +137,7 @@ pool_close(POOL_CONNECTION * cp)
}
void
-pool_read_with_error(POOL_CONNECTION * cp, void *buf, int len,
+pool_read_with_error(POOL_CONNECTION *cp, void *buf, int len,
const char *err_context)
{
if (pool_read(cp, buf, len) < 0)
@@ -153,7 +153,7 @@ pool_read_with_error(POOL_CONNECTION * cp, void *buf, int len,
* returns 0 on success otherwise throws an ereport.
*/
int
-pool_read(POOL_CONNECTION * cp, void *buf, int len)
+pool_read(POOL_CONNECTION *cp, void *buf, int len)
{
static char readbuf[READBUFSZ];
@@ -234,8 +234,7 @@ pool_read(POOL_CONNECTION * cp, void *buf, int len)
}
/*
- * if failover_on_backend_error is true, then trigger
- * failover
+ * if failover_on_backend_error is true, then trigger failover
*/
if (pool_config->failover_on_backend_error)
{
@@ -308,7 +307,7 @@ pool_read(POOL_CONNECTION * cp, void *buf, int len)
* returns buffer address on success otherwise NULL.
*/
char *
-pool_read2(POOL_CONNECTION * cp, int len)
+pool_read2(POOL_CONNECTION *cp, int len)
{
char *buf;
int req_size;
@@ -390,8 +389,7 @@ pool_read2(POOL_CONNECTION * cp, int len)
}
/*
- * if failover_on_backend_error is true, then trigger
- * failover
+ * if failover_on_backend_error is true, then trigger failover
*/
if (pool_config->failover_on_backend_error)
{
@@ -450,7 +448,7 @@ pool_read2(POOL_CONNECTION * cp, int len)
* returns 0 on success otherwise -1.
*/
int
-pool_write_noerror(POOL_CONNECTION * cp, void *buf, int len)
+pool_write_noerror(POOL_CONNECTION *cp, void *buf, int len)
{
if (len < 0)
return -1;
@@ -528,7 +526,7 @@ pool_write_noerror(POOL_CONNECTION * cp, void *buf, int len)
* returns 0 on success otherwise ereport.
*/
int
-pool_write(POOL_CONNECTION * cp, void *buf, int len)
+pool_write(POOL_CONNECTION *cp, void *buf, int len)
{
if (len < 0)
ereport(ERROR,
@@ -549,7 +547,7 @@ pool_write(POOL_CONNECTION * cp, void *buf, int len)
* This function does not throws an ereport in case of an error
*/
static int
-pool_write_flush(POOL_CONNECTION * cp, void *buf, int len)
+pool_write_flush(POOL_CONNECTION *cp, void *buf, int len)
{
int sts;
int wlen;
@@ -639,7 +637,7 @@ pool_write_flush(POOL_CONNECTION * cp, void *buf, int len)
* This function does not throws an ereport in case of an error
*/
int
-pool_flush_it(POOL_CONNECTION * cp)
+pool_flush_it(POOL_CONNECTION *cp)
{
int sts;
int wlen;
@@ -732,7 +730,7 @@ pool_flush_it(POOL_CONNECTION * cp)
* flush write buffer and degenerate/failover if error occurs
*/
int
-pool_flush(POOL_CONNECTION * cp)
+pool_flush(POOL_CONNECTION *cp)
{
if (pool_flush_it(cp) == -1)
{
@@ -788,7 +786,7 @@ pool_flush(POOL_CONNECTION * cp)
* same as pool_flush() but returns -ve value instead of ereport in case of failure
*/
int
-pool_flush_noerror(POOL_CONNECTION * cp)
+pool_flush_noerror(POOL_CONNECTION *cp)
{
if (pool_flush_it(cp) == -1)
{
@@ -840,7 +838,7 @@ pool_flush_noerror(POOL_CONNECTION * cp)
* combo of pool_write and pool_flush
*/
void
-pool_write_and_flush(POOL_CONNECTION * cp, void *buf, int len)
+pool_write_and_flush(POOL_CONNECTION *cp, void *buf, int len)
{
pool_write(cp, buf, len);
pool_flush(cp);
@@ -850,7 +848,7 @@ pool_write_and_flush(POOL_CONNECTION * cp, void *buf, int len)
* same as pool_write_and_flush() but does not throws ereport when error occurs
*/
int
-pool_write_and_flush_noerror(POOL_CONNECTION * cp, void *buf, int len)
+pool_write_and_flush_noerror(POOL_CONNECTION *cp, void *buf, int len)
{
int ret;
@@ -865,7 +863,7 @@ pool_write_and_flush_noerror(POOL_CONNECTION * cp, void *buf, int len)
* if line is not 0, read until new line is encountered.
*/
char *
-pool_read_string(POOL_CONNECTION * cp, int *len, int line)
+pool_read_string(POOL_CONNECTION *cp, int *len, int line)
{
int readp;
int readsize;
@@ -1054,7 +1052,7 @@ pool_read_string(POOL_CONNECTION * cp, int *len, int line)
* Set db node id to connection.
*/
void
-pool_set_db_node_id(POOL_CONNECTION * con, int db_node_id)
+pool_set_db_node_id(POOL_CONNECTION *con, int db_node_id)
{
if (!con)
return;
@@ -1124,7 +1122,7 @@ mystrlinelen(char *str, int upper, int *flag)
* save pending data
*/
static int
-save_pending_data(POOL_CONNECTION * cp, void *data, int len)
+save_pending_data(POOL_CONNECTION *cp, void *data, int len)
{
int reqlen;
size_t realloc_size;
@@ -1161,7 +1159,7 @@ save_pending_data(POOL_CONNECTION * cp, void *data, int len)
* consume pending data. returns actually consumed data length.
*/
static int
-consume_pending_data(POOL_CONNECTION * cp, void *data, int len)
+consume_pending_data(POOL_CONNECTION *cp, void *data, int len)
{
int consume_size;
@@ -1184,7 +1182,7 @@ consume_pending_data(POOL_CONNECTION * cp, void *data, int len)
* pool_unread: Put back data to input buffer
*/
int
-pool_unread(POOL_CONNECTION * cp, void *data, int len)
+pool_unread(POOL_CONNECTION *cp, void *data, int len)
{
void *p = cp->hp;
int n = cp->len + len;
@@ -1226,7 +1224,7 @@ pool_unread(POOL_CONNECTION * cp, void *data, int len)
* pool_push: Push data into buffer stack.
*/
int
-pool_push(POOL_CONNECTION * cp, void *data, int len)
+pool_push(POOL_CONNECTION *cp, void *data, int len)
{
char *p;
@@ -1258,7 +1256,7 @@ pool_push(POOL_CONNECTION * cp, void *data, int len)
* pool_unread.
*/
void
-pool_pop(POOL_CONNECTION * cp, int *len)
+pool_pop(POOL_CONNECTION *cp, int *len)
{
if (cp->bufsz3 == 0)
{
@@ -1282,7 +1280,7 @@ pool_pop(POOL_CONNECTION * cp, int *len)
* pool_unread.
*/
int
-pool_stacklen(POOL_CONNECTION * cp)
+pool_stacklen(POOL_CONNECTION *cp)
{
return cp->bufsz3;
}
@@ -1332,7 +1330,7 @@ pool_get_timeout(void)
* return values: 0: normal 1: data is not ready -1: error
*/
int
-pool_check_fd(POOL_CONNECTION * cp)
+pool_check_fd(POOL_CONNECTION *cp)
{
fd_set readmask;
fd_set exceptmask;
diff --git a/src/utils/pqexpbuffer.c b/src/utils/pqexpbuffer.c
index e220cd71c..5bf694d30 100644
--- a/src/utils/pqexpbuffer.c
+++ b/src/utils/pqexpbuffer.c
@@ -415,4 +415,3 @@ appendBinaryPQExpBuffer(PQExpBuffer str, const char *data, size_t datalen)
#undef malloc
#undef free
-
diff --git a/src/utils/ps_status.c b/src/utils/ps_status.c
index bedcfc6bf..381f851d2 100644
--- a/src/utils/ps_status.c
+++ b/src/utils/ps_status.c
@@ -45,7 +45,8 @@
extern char **environ;
bool update_process_title = true;
-char remote_ps_data[NI_MAXHOST + NI_MAXSERV + 2]; /* used for set_ps_display */
+char remote_ps_data[NI_MAXHOST + NI_MAXSERV + 2]; /* used for
+ * set_ps_display */
/*
@@ -392,7 +393,7 @@ get_ps_display(int *displen)
* Show ps idle status
*/
void
-pool_ps_idle_display(POOL_CONNECTION_POOL * backend)
+pool_ps_idle_display(POOL_CONNECTION_POOL *backend)
{
StartupPacket *sp;
char psbuf[1024];
diff --git a/src/utils/regex_array.c b/src/utils/regex_array.c
index 358e298c7..753787d36 100644
--- a/src/utils/regex_array.c
+++ b/src/utils/regex_array.c
@@ -55,7 +55,7 @@ create_regex_array(void)
* Add an regular expression pattern
*/
int
-add_regex_array(RegArray * ar, char *pattern)
+add_regex_array(RegArray *ar, char *pattern)
{
int regex_flags;
regex_t *regex;
@@ -129,7 +129,7 @@ add_regex_array(RegArray * ar, char *pattern)
* Execute regex matching. Returns matched array index.
*/
int
-regex_array_match(RegArray * ar, char *pattern)
+regex_array_match(RegArray *ar, char *pattern)
{
int i;
@@ -159,7 +159,7 @@ regex_array_match(RegArray * ar, char *pattern)
* Destroy RegArray object
*/
void
-destroy_regex_array(RegArray * ar)
+destroy_regex_array(RegArray *ar)
{
pfree(ar->regex);
pfree(ar);
@@ -187,7 +187,7 @@ create_lrtoken_array(void)
* Nnumber of tokens is set to *n.
*/
void
-extract_string_tokens2(char *str, char *delimi, char delimi2, Left_right_tokens * lrtokens)
+extract_string_tokens2(char *str, char *delimi, char delimi2, Left_right_tokens *lrtokens)
{
char *token;
char *mystr;
diff --git a/src/utils/ssl_utils.c b/src/utils/ssl_utils.c
index d560dafe7..297728235 100644
--- a/src/utils/ssl_utils.c
+++ b/src/utils/ssl_utils.c
@@ -41,11 +41,11 @@
#ifdef USE_SSL
static int aes_get_key(const char *password, unsigned char *key, unsigned char *iv);
-static int aes_encrypt(unsigned char *plaintext, int plaintext_len, unsigned char *key,
- unsigned char *iv, unsigned char *ciphertext);
+static int aes_encrypt(unsigned char *plaintext, int plaintext_len, unsigned char *key,
+ unsigned char *iv, unsigned char *ciphertext);
-static int aes_decrypt(unsigned char *ciphertext, int ciphertext_len, unsigned char *key,
- unsigned char *iv, unsigned char *plaintext);
+static int aes_decrypt(unsigned char *ciphertext, int ciphertext_len, unsigned char *key,
+ unsigned char *iv, unsigned char *plaintext);
#endif
#ifdef USE_SSL
diff --git a/src/utils/statistics.c b/src/utils/statistics.c
index 045c28255..7075cd3c8 100644
--- a/src/utils/statistics.c
+++ b/src/utils/statistics.c
@@ -37,7 +37,7 @@ typedef struct
uint64 panic_cnt; /* number of PANIC messages */
uint64 fatal_cnt; /* number of FATAL messages */
uint64 error_cnt; /* number of ERROR messages */
-} PER_NODE_STAT;
+} PER_NODE_STAT;
static volatile PER_NODE_STAT *per_node_stat;
@@ -113,23 +113,23 @@ stat_count_up(int backend_node_id, Node *parse_tree)
else
{
- switch(nodeTag(parse_tree))
+ switch (nodeTag(parse_tree))
{
- case(T_CheckPointStmt):
- case(T_DeallocateStmt):
- case(T_DiscardStmt):
- case(T_ExecuteStmt):
- case(T_ExplainStmt):
- case(T_ListenStmt):
- case(T_LoadStmt):
- case(T_LockStmt):
- case(T_NotifyStmt):
- case(T_PrepareStmt):
- case(T_TransactionStmt):
- case(T_UnlistenStmt):
- case(T_VacuumStmt):
- case(T_VariableSetStmt):
- case(T_VariableShowStmt):
+ case (T_CheckPointStmt):
+ case (T_DeallocateStmt):
+ case (T_DiscardStmt):
+ case (T_ExecuteStmt):
+ case (T_ExplainStmt):
+ case (T_ListenStmt):
+ case (T_LoadStmt):
+ case (T_LockStmt):
+ case (T_NotifyStmt):
+ case (T_PrepareStmt):
+ case (T_TransactionStmt):
+ case (T_UnlistenStmt):
+ case (T_VacuumStmt):
+ case (T_VariableSetStmt):
+ case (T_VariableShowStmt):
per_node_stat[backend_node_id].other_cnt++;
break;
diff --git a/src/watchdog/watchdog.c b/src/watchdog/watchdog.c
index df3454b8a..d57d83526 100644
--- a/src/watchdog/watchdog.c
+++ b/src/watchdog/watchdog.c
@@ -79,15 +79,16 @@ typedef enum IPC_CMD_PROCESS_RES
IPC_CMD_ERROR,
IPC_CMD_OK,
IPC_CMD_TRY_AGAIN
-} IPC_CMD_PROCESS_RES;
+} IPC_CMD_PROCESS_RES;
#define MIN_SECS_CONNECTION_RETRY 10 /* Time in seconds to retry connection
* with node once it was failed */
#define MAX_SECS_ESC_PROC_EXIT_WAIT 5 /* maximum amount of seconds to wait
- * for escalation/de-escalation process
- * to exit normally before moving on */
+ * for escalation/de-escalation
+ * process to exit normally before
+ * moving on */
#define BEACON_MESSAGE_INTERVAL_SECONDS 10 /* interval between beacon
* messages */
@@ -96,21 +97,22 @@ typedef enum IPC_CMD_PROCESS_RES
* the reply from remote
* watchdog node */
-#define MAX_ALLOWED_SEND_FAILURES 3 /* number of times sending message failure
- * can be tolerated
- */
-#define MAX_ALLOWED_BEACON_REPLY_MISS 3 /* number of times missing beacon message reply
- * can be tolerated
- */
+#define MAX_ALLOWED_SEND_FAILURES 3 /* number of times sending
+ * message failure can be
+ * tolerated */
+#define MAX_ALLOWED_BEACON_REPLY_MISS 3 /* number of times missing
+ * beacon message reply can be
+ * tolerated */
#define FAILOVER_COMMAND_FINISH_TIMEOUT 15 /* timeout in seconds to wait
* for Pgpool-II to build
* consensus for failover */
-#define MIN_SECS_BETWEEN_BROADCAST_SRV_MSG 5 /* minimum amount of seconds to wait
- * before broadcasting the same cluster
- * service message */
+#define MIN_SECS_BETWEEN_BROADCAST_SRV_MSG 5 /* minimum amount of seconds
+ * to wait before broadcasting
+ * the same cluster service
+ * message */
/*
* Packet types. Used in WDPacketData->type.
@@ -162,7 +164,7 @@ typedef struct packet_types
{
char type;
char name[100];
-} packet_types;
+} packet_types;
packet_types all_packet_types[] = {
{WD_ADD_NODE_MESSAGE, "ADD NODE"},
@@ -241,7 +243,7 @@ char *wd_state_names[] = {
"NETWORK ISOLATION"
};
-char *wd_node_lost_reasons[] = {
+char *wd_node_lost_reasons[] = {
"UNKNOWN REASON",
"REPORTED BY LIFECHECK",
"SEND MESSAGE FAILURES",
@@ -251,22 +253,24 @@ char *wd_node_lost_reasons[] = {
"SHUTDOWN"
};
-char *wd_cluster_membership_status[] = {
+char *wd_cluster_membership_status[] = {
"MEMBER",
"REVOKED-SHUTDOWN",
"REVOKED-NO-SHOW",
"REVOKED-LOST"
};
+
/*
* Command packet definition.
*/
typedef struct WDPacketData
{
- char type; /* packet type. e.g. WD_ADD_NODE_MESSAGE. See #define above. */
- int command_id; /* command sequence number starting from 1 */
+ char type; /* packet type. e.g. WD_ADD_NODE_MESSAGE. See
+ * #define above. */
+ int command_id; /* command sequence number starting from 1 */
int len;
char *data;
-} WDPacketData;
+} WDPacketData;
typedef enum WDNodeCommandState
@@ -276,7 +280,7 @@ typedef enum WDNodeCommandState
COMMAND_STATE_REPLIED,
COMMAND_STATE_SEND_ERROR,
COMMAND_STATE_DO_NOT_SEND
-} WDNodeCommandState;
+} WDNodeCommandState;
typedef struct WDCommandNodeResult
{
@@ -285,7 +289,7 @@ typedef struct WDCommandNodeResult
char result_type;
int result_data_len;
char *result_data;
-} WDCommandNodeResult;
+} WDCommandNodeResult;
typedef enum WDCommandSource
{
@@ -293,7 +297,7 @@ typedef enum WDCommandSource
COMMAND_SOURCE_LOCAL,
COMMAND_SOURCE_REMOTE,
COMMAND_SOURCE_INTERNAL
-} WDCommandSource;
+} WDCommandSource;
/*
* Watchdog "function" descriptor. "function" is not a C-function, it's one
@@ -305,9 +309,9 @@ typedef struct WDFunctionCommandData
{
char commandType;
unsigned int commandID;
- char *funcName; /* function name */
+ char *funcName; /* function name */
WatchdogNode *wdNode;
-} WDFunctionCommandData;
+} WDFunctionCommandData;
typedef struct WDCommandTimerData
{
@@ -315,7 +319,7 @@ typedef struct WDCommandTimerData
unsigned int expire_sec;
bool need_tics;
WDFunctionCommandData *wd_func_command;
-} WDCommandTimerData;
+} WDCommandTimerData;
typedef enum WDCommandStatus
@@ -326,7 +330,7 @@ typedef enum WDCommandStatus
COMMAND_FINISHED_ALL_REPLIED,
COMMAND_FINISHED_NODE_REJECTED,
COMMAND_FINISHED_SEND_FAILED
-} WDCommandStatus;
+} WDCommandStatus;
typedef struct WDCommandData
{
@@ -346,14 +350,14 @@ typedef struct WDCommandData
char *errorMessage;
MemoryContext memoryContext;
void (*commandCompleteFunc) (struct WDCommandData *command);
-} WDCommandData;
+} WDCommandData;
typedef struct WDInterfaceStatus
{
char *if_name;
unsigned int if_index;
bool if_up;
-} WDInterfaceStatus;
+} WDInterfaceStatus;
typedef struct WDClusterLeader
{
@@ -361,7 +365,7 @@ typedef struct WDClusterLeader
WatchdogNode **standbyNodes;
int standby_nodes_count;
bool holding_vip;
-} WDClusterLeaderInfo;
+} WDClusterLeaderInfo;
typedef struct wd_cluster
{
@@ -369,7 +373,8 @@ typedef struct wd_cluster
WatchdogNode *remoteNodes;
WDClusterLeaderInfo clusterLeaderInfo;
int remoteNodeCount;
- int memberRemoteNodeCount; /* no of nodes that count towards quorum and consensus */
+ int memberRemoteNodeCount; /* no of nodes that count towards
+ * quorum and consensus */
int quorum_status;
unsigned int nextCommandID;
pid_t escalation_pid;
@@ -378,10 +383,10 @@ typedef struct wd_cluster
int network_monitor_sock;
bool clusterInitialized;
bool ipc_auth_needed;
- int current_failover_id;
- int failover_command_timeout;
- struct timeval last_bcast_srv_msg_time; /* timestamp when last packet was
- * broadcasted by the local node */
+ int current_failover_id;
+ int failover_command_timeout;
+ struct timeval last_bcast_srv_msg_time; /* timestamp when last packet was
+ * broadcasted by the local node */
char last_bcast_srv_msg;
List *unidentified_socks;
@@ -392,7 +397,7 @@ typedef struct wd_cluster
List *wd_timer_commands;
List *wdInterfaceToMonitor;
List *wdCurrentFailovers;
-} wd_cluster;
+} wd_cluster;
typedef struct WDFailoverObject
{
@@ -406,7 +411,7 @@ typedef struct WDFailoverObject
int request_count;
struct timeval startTime;
int state;
-} WDFailoverObject;
+} WDFailoverObject;
#ifdef WATCHDOG_DEBUG_OPTS
#if WATCHDOG_DEBUG_OPTS > 0
@@ -425,20 +430,20 @@ static bool check_debug_request_kill_all_senders(void);
static void load_watchdog_debug_test_option(void);
#endif
-static void process_remote_failover_command_on_coordinator(WatchdogNode * wdNode, WDPacketData * pkt);
-static WDFailoverObject * get_failover_object(POOL_REQUEST_KIND reqKind, int nodesCount, int *nodeList);
+static void process_remote_failover_command_on_coordinator(WatchdogNode *wdNode, WDPacketData *pkt);
+static WDFailoverObject *get_failover_object(POOL_REQUEST_KIND reqKind, int nodesCount, int *nodeList);
static bool does_int_array_contains_value(int *intArray, int count, int value);
static void clear_all_failovers(void);
-static void remove_failover_object(WDFailoverObject * failoverObj);
+static void remove_failover_object(WDFailoverObject *failoverObj);
static void service_expired_failovers(void);
-static WDFailoverObject * add_failover(POOL_REQUEST_KIND reqKind, int *node_id_list, int node_count, WatchdogNode * wdNode,
- unsigned char flags, bool *duplicate);
+static WDFailoverObject *add_failover(POOL_REQUEST_KIND reqKind, int *node_id_list, int node_count, WatchdogNode *wdNode,
+ unsigned char flags, bool *duplicate);
static WDFailoverCMDResults compute_failover_consensus(POOL_REQUEST_KIND reqKind, int *node_id_list,
- int node_count, unsigned char *flags, WatchdogNode * wdNode);
+ int node_count, unsigned char *flags, WatchdogNode *wdNode);
-static int send_command_packet_to_remote_nodes(WDCommandData * ipcCommand, bool source_included);
-static void wd_command_is_complete(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES wd_command_processor_for_node_lost_event(WDCommandData * ipcCommand, WatchdogNode * wdLostNode);
+static int send_command_packet_to_remote_nodes(WDCommandData *ipcCommand, bool source_included);
+static void wd_command_is_complete(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES wd_command_processor_for_node_lost_event(WDCommandData *ipcCommand, WatchdogNode *wdLostNode);
volatile sig_atomic_t reload_config_signal = 0;
volatile sig_atomic_t sigchld_request = 0;
@@ -454,162 +459,163 @@ static void wd_initialize_monitoring_interfaces(void);
static int wd_create_client_socket(char *hostname, int port, bool *connected);
static int connect_with_all_configured_nodes(void);
static void try_connecting_with_all_unreachable_nodes(void);
-static bool connect_to_node(WatchdogNode * wdNode);
-static bool is_socket_connection_connected(SocketConnection * conn);
+static bool connect_to_node(WatchdogNode *wdNode);
+static bool is_socket_connection_connected(SocketConnection *conn);
static void service_unreachable_nodes(void);
-static void allocate_resultNodes_in_command(WDCommandData * ipcCommand);
-static bool is_node_active_and_reachable(WatchdogNode * wdNode);
-static bool is_node_active(WatchdogNode * wdNode);
-static bool is_node_reachable(WatchdogNode * wdNode);
+static void allocate_resultNodes_in_command(WDCommandData *ipcCommand);
+static bool is_node_active_and_reachable(WatchdogNode *wdNode);
+static bool is_node_active(WatchdogNode *wdNode);
+static bool is_node_reachable(WatchdogNode *wdNode);
static int update_successful_outgoing_cons(fd_set *wmask, int pending_fds_count);
static int prepare_fds(fd_set *rmask, fd_set *wmask, fd_set *emask);
-static void set_next_commandID_in_message(WDPacketData * pkt);
-static void set_message_commandID(WDPacketData * pkt, unsigned int commandID);
-static void set_message_data(WDPacketData * pkt, const char *data, int len);
-static void set_message_type(WDPacketData * pkt, char type);
-static void free_packet(WDPacketData * pkt);
+static void set_next_commandID_in_message(WDPacketData *pkt);
+static void set_message_commandID(WDPacketData *pkt, unsigned int commandID);
+static void set_message_data(WDPacketData *pkt, const char *data, int len);
+static void set_message_type(WDPacketData *pkt, char type);
+static void free_packet(WDPacketData *pkt);
-static WDPacketData * get_empty_packet(void);
-static WDPacketData * read_packet_of_type(SocketConnection * conn, char ensure_type);
-static WDPacketData * read_packet(SocketConnection * conn);
-static WDPacketData * get_message_of_type(char type, WDPacketData * replyFor);
-static WDPacketData * get_addnode_message(void);
-static WDPacketData * get_beacon_message(char type, WDPacketData * replyFor);
-static WDPacketData * get_mynode_info_message(WDPacketData * replyFor);
-static WDPacketData * get_minimum_message(char type, WDPacketData * replyFor);
+static WDPacketData *get_empty_packet(void);
+static WDPacketData *read_packet_of_type(SocketConnection *conn, char ensure_type);
+static WDPacketData *read_packet(SocketConnection *conn);
+static WDPacketData *get_message_of_type(char type, WDPacketData *replyFor);
+static WDPacketData *get_addnode_message(void);
+static WDPacketData *get_beacon_message(char type, WDPacketData *replyFor);
+static WDPacketData *get_mynode_info_message(WDPacketData *replyFor);
+static WDPacketData *get_minimum_message(char type, WDPacketData *replyFor);
-static int issue_watchdog_internal_command(WatchdogNode * wdNode, WDPacketData * pkt, int timeout_sec);
+static int issue_watchdog_internal_command(WatchdogNode *wdNode, WDPacketData *pkt, int timeout_sec);
static void check_for_current_command_timeout(void);
-static bool watchdog_internal_command_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt);
+static bool watchdog_internal_command_packet_processor(WatchdogNode *wdNode, WDPacketData *pkt);
static bool service_lost_connections(void);
static void service_ipc_commands(void);
static void service_internal_command(void);
static unsigned int get_next_commandID(void);
-static WatchdogNode * parse_node_info_message(WDPacketData * pkt, char **authkey);
+static WatchdogNode *parse_node_info_message(WDPacketData *pkt, char **authkey);
static void update_quorum_status(void);
static int get_minimum_remote_nodes_required_for_quorum(void);
static int get_minimum_votes_to_resolve_consensus(void);
-static bool write_packet_to_socket(int sock, WDPacketData * pkt, bool ipcPacket);
+static bool write_packet_to_socket(int sock, WDPacketData *pkt, bool ipcPacket);
static int read_sockets(fd_set *rmask, int pending_fds_count);
static void set_timeout(unsigned int sec);
static int wd_create_command_server_socket(void);
-static void close_socket_connection(SocketConnection * conn);
-static bool send_message_to_connection(SocketConnection * conn, WDPacketData * pkt);
+static void close_socket_connection(SocketConnection *conn);
+static bool send_message_to_connection(SocketConnection *conn, WDPacketData *pkt);
-static int send_message(WatchdogNode * wdNode, WDPacketData * pkt);
-static bool send_message_to_node(WatchdogNode * wdNode, WDPacketData * pkt);
-static bool reply_with_minimal_message(WatchdogNode * wdNode, char type, WDPacketData * replyFor);
-static bool reply_with_message(WatchdogNode * wdNode, char type, char *data, int data_len, WDPacketData * replyFor);
-static int send_cluster_command(WatchdogNode * wdNode, char type, int timeout_sec);
-static int send_message_of_type(WatchdogNode * wdNode, char type, WDPacketData * replyFor);
+static int send_message(WatchdogNode *wdNode, WDPacketData *pkt);
+static bool send_message_to_node(WatchdogNode *wdNode, WDPacketData *pkt);
+static bool reply_with_minimal_message(WatchdogNode *wdNode, char type, WDPacketData *replyFor);
+static bool reply_with_message(WatchdogNode *wdNode, char type, char *data, int data_len, WDPacketData *replyFor);
+static int send_cluster_command(WatchdogNode *wdNode, char type, int timeout_sec);
+static int send_message_of_type(WatchdogNode *wdNode, char type, WDPacketData *replyFor);
-static bool send_cluster_service_message(WatchdogNode * wdNode, WDPacketData * replyFor, char message);
+static bool send_cluster_service_message(WatchdogNode *wdNode, WDPacketData *replyFor, char message);
static int accept_incoming_connections(fd_set *rmask, int pending_fds_count);
-static int standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt);
-static void cluster_service_message_processor(WatchdogNode * wdNode, WDPacketData * pkt);
+static int standard_packet_processor(WatchdogNode *wdNode, WDPacketData *pkt);
+static void cluster_service_message_processor(WatchdogNode *wdNode, WDPacketData *pkt);
static int get_cluster_node_count(void);
-static void clear_command_node_result(WDCommandNodeResult * nodeResult);
+static void clear_command_node_result(WDCommandNodeResult *nodeResult);
static inline bool is_local_node_true_leader(void);
static inline WD_STATES get_local_node_state(void);
static int set_state(WD_STATES newState);
-static int watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-static int watchdog_state_machine_voting(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-static int watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-static int watchdog_state_machine_standForCord(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-static int watchdog_state_machine_initializing(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-static int watchdog_state_machine_joining(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-static int watchdog_state_machine_loading(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-static int watchdog_state_machine(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-static int watchdog_state_machine_nw_error(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-static int watchdog_state_machine_nw_isolation(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand);
-
-static int I_am_leader_and_cluster_in_split_brain(WatchdogNode * otherLeaderNode);
-static void handle_split_brain(WatchdogNode * otherLeaderNode, WDPacketData * pkt);
-static bool beacon_message_received_from_node(WatchdogNode * wdNode, WDPacketData * pkt);
-
-static void cleanUpIPCCommand(WDCommandData * ipcCommand);
+static int watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+static int watchdog_state_machine_voting(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+static int watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+static int watchdog_state_machine_standForCord(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+static int watchdog_state_machine_initializing(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+static int watchdog_state_machine_joining(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+static int watchdog_state_machine_loading(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+static int watchdog_state_machine(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+static int watchdog_state_machine_nw_error(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+static int watchdog_state_machine_nw_isolation(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand);
+
+static int I_am_leader_and_cluster_in_split_brain(WatchdogNode *otherLeaderNode);
+static void handle_split_brain(WatchdogNode *otherLeaderNode, WDPacketData *pkt);
+static bool beacon_message_received_from_node(WatchdogNode *wdNode, WDPacketData *pkt);
+
+static void cleanUpIPCCommand(WDCommandData *ipcCommand);
static bool read_ipc_socket_and_process(int socket, bool *remove_socket);
-static JsonNode * get_node_list_json(int id);
-static bool add_nodeinfo_to_json(JsonNode * jNode, WatchdogNode * node);
+static JsonNode *get_node_list_json(int id);
+static bool add_nodeinfo_to_json(JsonNode *jNode, WatchdogNode *node);
static bool fire_node_status_event(int nodeID, int nodeStatus);
static void resign_from_escalated_node(void);
static void start_escalated_node(void);
-static void init_wd_packet(WDPacketData * pkt);
-static void wd_packet_shallow_copy(WDPacketData * srcPkt, WDPacketData * dstPkt);
-static bool wd_commands_packet_processor(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt);
-
-static WDCommandData * get_wd_command_from_reply(List *commands, WDPacketData * pkt);
-static WDCommandData * get_wd_cluster_command_from_reply(WDPacketData * pkt);
-static WDCommandData * get_wd_IPC_command_from_reply(WDPacketData * pkt);
-static WDCommandData * get_wd_IPC_command_from_socket(int sock);
-
-static IPC_CMD_PROCESS_RES process_IPC_command(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES process_IPC_nodeStatusChange_command(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES process_IPC_nodeList_command(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES process_IPC_get_runtime_variable_value_request(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES process_IPC_online_recovery(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES process_IPC_failover_indication(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES process_IPC_data_request_from_leader(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES process_IPC_failover_command(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES process_failover_command_on_coordinator(WDCommandData * ipcCommand);
-static IPC_CMD_PROCESS_RES process_IPC_execute_cluster_command(WDCommandData * ipcCommand);
-
-static bool write_ipc_command_with_result_data(WDCommandData * ipcCommand, char type, char *data, int len);
+static void init_wd_packet(WDPacketData *pkt);
+static void wd_packet_shallow_copy(WDPacketData *srcPkt, WDPacketData *dstPkt);
+static bool wd_commands_packet_processor(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt);
+
+static WDCommandData *get_wd_command_from_reply(List *commands, WDPacketData *pkt);
+static WDCommandData *get_wd_cluster_command_from_reply(WDPacketData *pkt);
+static WDCommandData *get_wd_IPC_command_from_reply(WDPacketData *pkt);
+static WDCommandData *get_wd_IPC_command_from_socket(int sock);
+
+static IPC_CMD_PROCESS_RES process_IPC_command(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES process_IPC_nodeStatusChange_command(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES process_IPC_nodeList_command(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES process_IPC_get_runtime_variable_value_request(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES process_IPC_online_recovery(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES process_IPC_failover_indication(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES process_IPC_data_request_from_leader(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES process_IPC_failover_command(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES process_failover_command_on_coordinator(WDCommandData *ipcCommand);
+static IPC_CMD_PROCESS_RES process_IPC_execute_cluster_command(WDCommandData *ipcCommand);
+
+static bool write_ipc_command_with_result_data(WDCommandData *ipcCommand, char type, char *data, int len);
static void process_wd_func_commands_for_timer_events(void);
-static void add_wd_command_for_timer_events(unsigned int expire_secs, bool need_tics, WDFunctionCommandData * wd_func_command);
-static bool reply_is_received_for_pgpool_replicate_command(WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * ipcCommand);
+static void add_wd_command_for_timer_events(unsigned int expire_secs, bool need_tics, WDFunctionCommandData *wd_func_command);
+static bool reply_is_received_for_pgpool_replicate_command(WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *ipcCommand);
-static void process_remote_online_recovery_command(WatchdogNode * wdNode, WDPacketData * pkt);
+static void process_remote_online_recovery_command(WatchdogNode *wdNode, WDPacketData *pkt);
-static WDFailoverCMDResults failover_end_indication(WDCommandData * ipcCommand);
-static WDFailoverCMDResults failover_start_indication(WDCommandData * ipcCommand);
+static WDFailoverCMDResults failover_end_indication(WDCommandData *ipcCommand);
+static WDFailoverCMDResults failover_start_indication(WDCommandData *ipcCommand);
static void wd_system_will_go_down(int code, Datum arg);
-static void verify_pool_configurations(WatchdogNode * wdNode, POOL_CONFIG * config);
+static void verify_pool_configurations(WatchdogNode *wdNode, POOL_CONFIG *config);
-static bool get_authhash_for_node(WatchdogNode * wdNode, char *authhash);
-static bool verify_authhash_for_node(WatchdogNode * wdNode, char *authhash);
+static bool get_authhash_for_node(WatchdogNode *wdNode, char *authhash);
+static bool verify_authhash_for_node(WatchdogNode *wdNode, char *authhash);
-static void print_watchdog_node_info(WatchdogNode * wdNode);
+static void print_watchdog_node_info(WatchdogNode *wdNode);
static List *wd_create_recv_socket(int port);
static void wd_check_config(void);
static pid_t watchdog_main(void);
static pid_t fork_watchdog_child(void);
-static bool check_IPC_client_authentication(json_value * rootObj, bool internal_client_only);
-static bool check_and_report_IPC_authentication(WDCommandData * ipcCommand);
+static bool check_IPC_client_authentication(json_value *rootObj, bool internal_client_only);
+static bool check_and_report_IPC_authentication(WDCommandData *ipcCommand);
-static void print_packet_node_info(WDPacketData * pkt, WatchdogNode * wdNode, bool sending);
-static void print_packet_info(WDPacketData * pkt, bool sending);
+static void print_packet_node_info(WDPacketData *pkt, WatchdogNode *wdNode, bool sending);
+static void print_packet_info(WDPacketData *pkt, bool sending);
static void update_interface_status(void);
static bool any_interface_available(void);
-static WDPacketData * process_data_request(WatchdogNode * wdNode, WDPacketData * pkt);
+static WDPacketData *process_data_request(WatchdogNode *wdNode, WDPacketData *pkt);
-static WatchdogNode * getLeaderWatchdogNode(void);
-static void set_cluster_leader_node(WatchdogNode * wdNode);
+static WatchdogNode *getLeaderWatchdogNode(void);
+static void set_cluster_leader_node(WatchdogNode *wdNode);
static void clear_standby_nodes_list(void);
-static int standby_node_left_cluster(WatchdogNode * wdNode);
-static int standby_node_join_cluster(WatchdogNode * wdNode);
+static int standby_node_left_cluster(WatchdogNode *wdNode);
+static int standby_node_join_cluster(WatchdogNode *wdNode);
static void reset_lost_timers(void);
-static int update_cluster_memberships(void);
-static int revoke_cluster_membership_of_node(WatchdogNode* wdNode, WD_NODE_MEMBERSHIP_STATUS revoke_status);
-static int restore_cluster_membership_of_node(WatchdogNode* wdNode);
-static void update_missed_beacon_count(WDCommandData* ipcCommand, bool clear);
-static void wd_execute_cluster_command_processor(WatchdogNode * wdNode, WDPacketData * pkt);
-static void update_failover_timeout(WatchdogNode * wdNode, POOL_CONFIG *pool_config);
+static int update_cluster_memberships(void);
+static int revoke_cluster_membership_of_node(WatchdogNode *wdNode, WD_NODE_MEMBERSHIP_STATUS revoke_status);
+static int restore_cluster_membership_of_node(WatchdogNode *wdNode);
+static void update_missed_beacon_count(WDCommandData *ipcCommand, bool clear);
+static void wd_execute_cluster_command_processor(WatchdogNode *wdNode, WDPacketData *pkt);
+static void update_failover_timeout(WatchdogNode *wdNode, POOL_CONFIG *pool_config);
+
/* global variables */
wd_cluster g_cluster;
struct timeval g_tm_set_time;
@@ -778,11 +784,12 @@ wd_cluster_initialize(void)
g_cluster.memberRemoteNodeCount = g_cluster.remoteNodeCount;
if (g_cluster.remoteNodeCount == 0)
ereport(ERROR,
- (errmsg("invalid watchdog configuration. other pgpools setting is not defined")));
+ (errmsg("invalid watchdog configuration. other pgpools setting is not defined")));
ereport(LOG,
(errmsg("watchdog cluster is configured with %d remote nodes", g_cluster.remoteNodeCount)));
g_cluster.remoteNodes = palloc0((sizeof(WatchdogNode) * g_cluster.remoteNodeCount));
- int idx = 0;
+ int idx = 0;
+
for (i = 0; i < pool_config->wd_nodes.num_wd; i++)
{
if (i == pool_config->pgpool_node_id)
@@ -792,9 +799,9 @@ wd_cluster_initialize(void)
g_cluster.remoteNodes[idx].pgpool_node_id = i;
g_cluster.remoteNodes[idx].pgpool_port = pool_config->wd_nodes.wd_node_info[i].pgpool_port;
strcpy(g_cluster.remoteNodes[idx].hostname, pool_config->wd_nodes.wd_node_info[i].hostname);
- g_cluster.remoteNodes[idx].delegate_ip[0] = '\0'; /* this will be
- * populated by remote
- * node */
+ g_cluster.remoteNodes[idx].delegate_ip[0] = '\0'; /* this will be
+ * populated by remote
+ * node */
ereport(LOG,
(errmsg("watchdog remote node:%d on %s:%d", idx, g_cluster.remoteNodes[idx].hostname, g_cluster.remoteNodes[idx].wd_port)));
@@ -849,7 +856,7 @@ wd_cluster_initialize(void)
}
static void
-clear_command_node_result(WDCommandNodeResult * nodeResult)
+clear_command_node_result(WDCommandNodeResult *nodeResult)
{
nodeResult->result_type = WD_NO_MESSAGE;
nodeResult->result_data = NULL;
@@ -905,10 +912,10 @@ wd_create_recv_socket(int port)
for (walk = res; walk != NULL; walk = walk->ai_next)
{
- bool bind_is_done;
- int bind_tries;
- int ret;
- char buf[INET6_ADDRSTRLEN + 1];
+ bool bind_is_done;
+ int bind_tries;
+ int ret;
+ char buf[INET6_ADDRSTRLEN + 1];
memset(buf, 0, sizeof(buf));
if ((ret = getnameinfo((struct sockaddr *) walk->ai_addr, walk->ai_addrlen,
@@ -1169,7 +1176,7 @@ try_connecting_with_all_unreachable_nodes(void)
* false is returned in case of failure
*/
static bool
-connect_to_node(WatchdogNode * wdNode)
+connect_to_node(WatchdogNode *wdNode)
{
bool connected = false;
@@ -1701,7 +1708,7 @@ read_sockets(fd_set *rmask, int pending_fds_count)
pkt = read_packet_of_type(conn, WD_ADD_NODE_MESSAGE);
if (pkt)
{
- struct timeval previous_startup_time;
+ struct timeval previous_startup_time;
char *authkey = NULL;
WatchdogNode *tempNode = parse_node_info_message(pkt, &authkey);
@@ -1727,13 +1734,14 @@ read_sockets(fd_set *rmask, int pending_fds_count)
/* verify this node */
if (authenticated)
{
- WD_STATES oldNodeState = WD_DEAD;
+ WD_STATES oldNodeState = WD_DEAD;
+
for (i = 0; i < g_cluster.remoteNodeCount; i++)
{
wdNode = &(g_cluster.remoteNodes[i]);
if ((wdNode->wd_port == tempNode->wd_port && wdNode->pgpool_port == tempNode->pgpool_port &&
- wdNode->pgpool_node_id == tempNode->pgpool_node_id) &&
+ wdNode->pgpool_node_id == tempNode->pgpool_node_id) &&
((strcmp(wdNode->hostname, conn->addr) == 0) || (strcmp(wdNode->hostname, tempNode->hostname) == 0)))
{
/* We have found the match */
@@ -1778,20 +1786,24 @@ read_sockets(fd_set *rmask, int pending_fds_count)
if (oldNodeState == WD_SHUTDOWN)
{
ereport(LOG,
- (errmsg("The newly joined node:\"%s\" had left the cluster because it was shutdown",wdNode->nodeName)));
+ (errmsg("The newly joined node:\"%s\" had left the cluster because it was shutdown", wdNode->nodeName)));
watchdog_state_machine(WD_EVENT_PACKET_RCV, wdNode, pkt, NULL);
}
else if (oldNodeState == WD_LOST)
{
ereport(LOG,
- (errmsg("The newly joined node:\"%s\" had left the cluster because it was lost",wdNode->nodeName),
+ (errmsg("The newly joined node:\"%s\" had left the cluster because it was lost", wdNode->nodeName),
errdetail("lost reason was \"%s\" and startup time diff = %d",
wd_node_lost_reasons[wdNode->node_lost_reason],
- abs((int)(previous_startup_time.tv_sec - wdNode->startup_time.tv_sec)))));
+ abs((int) (previous_startup_time.tv_sec - wdNode->startup_time.tv_sec)))));
watchdog_state_machine(WD_EVENT_PACKET_RCV, wdNode, pkt, NULL);
- /* Since the node was lost. Fire node found event as well */
+
+ /*
+ * Since the node was lost. Fire node found
+ * event as well
+ */
watchdog_state_machine(WD_EVENT_REMOTE_NODE_FOUND, wdNode, NULL, NULL);
}
@@ -1972,7 +1984,7 @@ read_sockets(fd_set *rmask, int pending_fds_count)
}
static bool
-write_ipc_command_with_result_data(WDCommandData * ipcCommand, char type, char *data, int len)
+write_ipc_command_with_result_data(WDCommandData *ipcCommand, char type, char *data, int len)
{
WDPacketData pkt;
@@ -1990,13 +2002,14 @@ write_ipc_command_with_result_data(WDCommandData * ipcCommand, char type, char *
/* DEBUG AID */
if (ipcCommand->commandSource == COMMAND_SOURCE_REMOTE &&
(check_debug_request_kill_all_senders() ||
- check_debug_request_kill_all_communication()))
+ check_debug_request_kill_all_communication()))
return false;
return write_packet_to_socket(ipcCommand->sourceIPCSocket, &pkt, true);
}
-static WDCommandData * create_command_object(int packet_data_length)
+static WDCommandData *
+create_command_object(int packet_data_length)
{
MemoryContext mCxt,
oldCxt;
@@ -2137,7 +2150,8 @@ read_ipc_socket_and_process(int sock, bool *remove_socket)
return (res != IPC_CMD_ERROR);
}
-static IPC_CMD_PROCESS_RES process_IPC_command(WDCommandData * ipcCommand)
+static IPC_CMD_PROCESS_RES
+process_IPC_command(WDCommandData *ipcCommand)
{
/* authenticate the client first */
if (check_and_report_IPC_authentication(ipcCommand) == false)
@@ -2195,17 +2209,17 @@ static IPC_CMD_PROCESS_RES process_IPC_command(WDCommandData * ipcCommand)
}
static IPC_CMD_PROCESS_RES
-process_IPC_execute_cluster_command(WDCommandData * ipcCommand)
+process_IPC_execute_cluster_command(WDCommandData *ipcCommand)
{
/* get the json for node list */
- char *clusterCommand = NULL;
- List *args_list = NULL;
+ char *clusterCommand = NULL;
+ List *args_list = NULL;
if (ipcCommand->sourcePacket.len <= 0 || ipcCommand->sourcePacket.data == NULL)
return IPC_CMD_ERROR;
if (!parse_wd_exec_cluster_command_json(ipcCommand->sourcePacket.data, ipcCommand->sourcePacket.len,
- &clusterCommand, &args_list))
+ &clusterCommand, &args_list))
{
goto ERROR_EXIT;
}
@@ -2244,8 +2258,8 @@ process_IPC_execute_cluster_command(WDCommandData * ipcCommand)
/*
* Just broadcast the execute command request to destination node
- * Processing the command on the local node is the responsibility of caller
- * process
+ * Processing the command on the local node is the responsibility of
+ * caller process
*/
reply_with_message(NULL, WD_EXECUTE_COMMAND_REQUEST,
ipcCommand->sourcePacket.data, ipcCommand->sourcePacket.len,
@@ -2265,7 +2279,8 @@ ERROR_EXIT:
return IPC_CMD_ERROR;
}
-static IPC_CMD_PROCESS_RES process_IPC_get_runtime_variable_value_request(WDCommandData * ipcCommand)
+static IPC_CMD_PROCESS_RES
+process_IPC_get_runtime_variable_value_request(WDCommandData *ipcCommand)
{
/* get the json for node list */
JsonNode *jNode = NULL;
@@ -2330,7 +2345,8 @@ static IPC_CMD_PROCESS_RES process_IPC_get_runtime_variable_value_request(WDComm
return IPC_CMD_COMPLETE;
}
-static IPC_CMD_PROCESS_RES process_IPC_nodeList_command(WDCommandData * ipcCommand)
+static IPC_CMD_PROCESS_RES
+process_IPC_nodeList_command(WDCommandData *ipcCommand)
{
/* get the json for node list */
JsonNode *jNode = NULL;
@@ -2365,7 +2381,8 @@ static IPC_CMD_PROCESS_RES process_IPC_nodeList_command(WDCommandData * ipcComma
return IPC_CMD_COMPLETE;
}
-static IPC_CMD_PROCESS_RES process_IPC_nodeStatusChange_command(WDCommandData * ipcCommand)
+static IPC_CMD_PROCESS_RES
+process_IPC_nodeStatusChange_command(WDCommandData *ipcCommand)
{
int nodeStatus;
int nodeID;
@@ -2462,7 +2479,7 @@ fire_node_status_event(int nodeID, int nodeStatus)
* Free the failover object
*/
static void
-remove_failover_object(WDFailoverObject * failoverObj)
+remove_failover_object(WDFailoverObject *failoverObj)
{
ereport(DEBUG1,
(errmsg("removing failover request from %d nodes with ID:%d", failoverObj->request_count, failoverObj->failoverID)));
@@ -2516,24 +2533,32 @@ service_expired_failovers(void)
{
failovers_to_del = lappend(failovers_to_del, failoverObj);
ereport(DEBUG1,
- (errmsg("failover request from %d nodes with ID:%d is expired", failoverObj->request_count, failoverObj->failoverID),
+ (errmsg("failover request from %d nodes with ID:%d is expired", failoverObj->request_count, failoverObj->failoverID),
errdetail("marking the failover object for removal")));
if (!need_to_resign && failoverObj->reqKind == NODE_DOWN_REQUEST)
{
ListCell *lc;
- /* search the in the requesting node list if we are also the ones
- * who think the failover must have been done
+
+ /*
+ * search the in the requesting node list if we are also
+ * the ones who think the failover must have been done
*/
foreach(lc, failoverObj->requestingNodes)
{
WatchdogNode *reqWdNode = lfirst(lc);
+
if (g_cluster.localNode == reqWdNode)
{
- /* verify if that node requested by us is now quarantined */
- int i;
+ /*
+ * verify if that node requested by us is now
+ * quarantined
+ */
+ int i;
+
for (i = 0; i < failoverObj->nodesCount; i++)
{
- int node_id = failoverObj->nodeList[i];
+ int node_id = failoverObj->nodeList[i];
+
if (node_id != -1)
{
if (Req_info->primary_node_id == -1 &&
@@ -2584,7 +2609,8 @@ does_int_array_contains_value(int *intArray, int count, int value)
return false;
}
-static WDFailoverObject * get_failover_object(POOL_REQUEST_KIND reqKind, int nodesCount, int *nodeList)
+static WDFailoverObject *
+get_failover_object(POOL_REQUEST_KIND reqKind, int nodesCount, int *nodeList)
{
ListCell *lc;
@@ -2616,7 +2642,7 @@ static WDFailoverObject * get_failover_object(POOL_REQUEST_KIND reqKind, int nod
}
static void
-process_remote_failover_command_on_coordinator(WatchdogNode * wdNode, WDPacketData * pkt)
+process_remote_failover_command_on_coordinator(WatchdogNode *wdNode, WDPacketData *pkt)
{
if (get_local_node_state() != WD_COORDINATOR)
{
@@ -2661,7 +2687,7 @@ process_remote_failover_command_on_coordinator(WatchdogNode * wdNode, WDPacketDa
}
static bool
-reply_to_failover_command(WDCommandData * ipcCommand, WDFailoverCMDResults cmdResult, unsigned int failoverID)
+reply_to_failover_command(WDCommandData *ipcCommand, WDFailoverCMDResults cmdResult, unsigned int failoverID)
{
bool ret = false;
JsonNode *jNode = jw_create_with_object(true);
@@ -2696,7 +2722,8 @@ reply_to_failover_command(WDCommandData * ipcCommand, WDFailoverCMDResults cmdRe
* about the execution of failover command.
*/
-static WDFailoverCMDResults compute_failover_consensus(POOL_REQUEST_KIND reqKind, int *node_id_list, int node_count, unsigned char *flags, WatchdogNode * wdNode)
+static WDFailoverCMDResults
+compute_failover_consensus(POOL_REQUEST_KIND reqKind, int *node_id_list, int node_count, unsigned char *flags, WatchdogNode *wdNode)
{
#ifndef NODE_UP_REQUIRE_CONSENSUS
if (reqKind == NODE_UP_REQUEST)
@@ -2741,8 +2768,8 @@ static WDFailoverCMDResults compute_failover_consensus(POOL_REQUEST_KIND reqKind
}
/*
- * So we reached here means quorum is present Now come to difficult part of
- * ensuring the consensus
+ * So we reached here means quorum is present Now come to difficult part
+ * of ensuring the consensus
*/
if (pool_config->failover_require_consensus == true)
{
@@ -2783,8 +2810,9 @@ static WDFailoverCMDResults compute_failover_consensus(POOL_REQUEST_KIND reqKind
return FAILOVER_RES_PROCEED;
}
-static WDFailoverObject * add_failover(POOL_REQUEST_KIND reqKind, int *node_id_list, int node_count, WatchdogNode * wdNode,
- unsigned char flags, bool *duplicate)
+static WDFailoverObject *
+add_failover(POOL_REQUEST_KIND reqKind, int *node_id_list, int node_count, WatchdogNode *wdNode,
+ unsigned char flags, bool *duplicate)
{
MemoryContext oldCxt;
@@ -2853,7 +2881,8 @@ static WDFailoverObject * add_failover(POOL_REQUEST_KIND reqKind, int *node_id_l
/*
* The function processes all failover commands on leader node
*/
-static IPC_CMD_PROCESS_RES process_failover_command_on_coordinator(WDCommandData * ipcCommand)
+static IPC_CMD_PROCESS_RES
+process_failover_command_on_coordinator(WDCommandData *ipcCommand)
{
char *func_name;
int node_count = 0;
@@ -2967,7 +2996,8 @@ static IPC_CMD_PROCESS_RES process_failover_command_on_coordinator(WDCommandData
return IPC_CMD_COMPLETE;
}
-static IPC_CMD_PROCESS_RES process_IPC_failover_command(WDCommandData * ipcCommand)
+static IPC_CMD_PROCESS_RES
+process_IPC_failover_command(WDCommandData *ipcCommand)
{
if (is_local_node_true_leader())
{
@@ -3014,7 +3044,8 @@ static IPC_CMD_PROCESS_RES process_IPC_failover_command(WDCommandData * ipcComma
return IPC_CMD_ERROR;
}
-static IPC_CMD_PROCESS_RES process_IPC_online_recovery(WDCommandData * ipcCommand)
+static IPC_CMD_PROCESS_RES
+process_IPC_online_recovery(WDCommandData *ipcCommand)
{
if (get_local_node_state() == WD_STANDBY ||
get_local_node_state() == WD_COORDINATOR)
@@ -3052,7 +3083,8 @@ static IPC_CMD_PROCESS_RES process_IPC_online_recovery(WDCommandData * ipcComman
return IPC_CMD_TRY_AGAIN;
}
-static IPC_CMD_PROCESS_RES process_IPC_data_request_from_leader(WDCommandData * ipcCommand)
+static IPC_CMD_PROCESS_RES
+process_IPC_data_request_from_leader(WDCommandData *ipcCommand)
{
/*
* if cluster or myself is not in stable state just return cluster in
@@ -3109,7 +3141,8 @@ static IPC_CMD_PROCESS_RES process_IPC_data_request_from_leader(WDCommandData *
return IPC_CMD_TRY_AGAIN;
}
-static IPC_CMD_PROCESS_RES process_IPC_failover_indication(WDCommandData * ipcCommand)
+static IPC_CMD_PROCESS_RES
+process_IPC_failover_indication(WDCommandData *ipcCommand)
{
WDFailoverCMDResults res = FAILOVER_RES_NOT_ALLOWED;
@@ -3189,7 +3222,7 @@ static IPC_CMD_PROCESS_RES process_IPC_failover_indication(WDCommandData * ipcCo
* only the local node that is a leader can start the failover.
*/
static WDFailoverCMDResults
-failover_start_indication(WDCommandData * ipcCommand)
+failover_start_indication(WDCommandData *ipcCommand)
{
ereport(LOG,
(errmsg("watchdog is informed of failover start by the main process")));
@@ -3217,7 +3250,7 @@ failover_start_indication(WDCommandData * ipcCommand)
}
static WDFailoverCMDResults
-failover_end_indication(WDCommandData * ipcCommand)
+failover_end_indication(WDCommandData *ipcCommand)
{
ereport(LOG,
(errmsg("watchdog is informed of failover end by the main process")));
@@ -3243,7 +3276,8 @@ failover_end_indication(WDCommandData * ipcCommand)
return FAILOVER_RES_TRANSITION;
}
-static WatchdogNode * parse_node_info_message(WDPacketData * pkt, char **authkey)
+static WatchdogNode *
+parse_node_info_message(WDPacketData *pkt, char **authkey)
{
if (pkt == NULL || (pkt->type != WD_ADD_NODE_MESSAGE && pkt->type != WD_INFO_MESSAGE))
return NULL;
@@ -3252,12 +3286,14 @@ static WatchdogNode * parse_node_info_message(WDPacketData * pkt, char **authkey
return get_watchdog_node_from_json(pkt->data, pkt->len, authkey);
}
-static WDPacketData * read_packet(SocketConnection * conn)
+static WDPacketData *
+read_packet(SocketConnection *conn)
{
return read_packet_of_type(conn, WD_NO_MESSAGE);
}
-static WDPacketData * read_packet_of_type(SocketConnection * conn, char ensure_type)
+static WDPacketData *
+read_packet_of_type(SocketConnection *conn, char ensure_type)
{
char type;
int len;
@@ -3407,7 +3443,7 @@ static void
wd_system_will_go_down(int code, Datum arg)
{
int i;
- ListCell *lc;
+ ListCell *lc;
ereport(LOG,
(errmsg("Watchdog is shutting down")));
@@ -3450,7 +3486,7 @@ wd_system_will_go_down(int code, Datum arg)
}
static void
-close_socket_connection(SocketConnection * conn)
+close_socket_connection(SocketConnection *conn)
{
if ((conn->sock > 0 && conn->sock_state == WD_SOCK_CONNECTED)
|| conn->sock_state == WD_SOCK_WAITING_FOR_CONNECT)
@@ -3462,14 +3498,14 @@ close_socket_connection(SocketConnection * conn)
}
static bool
-is_socket_connection_connected(SocketConnection * conn)
+is_socket_connection_connected(SocketConnection *conn)
{
return (conn->sock > 0 && conn->sock_state == WD_SOCK_CONNECTED);
}
static bool
-is_node_reachable(WatchdogNode * wdNode)
+is_node_reachable(WatchdogNode *wdNode)
{
if (is_socket_connection_connected(&wdNode->client_socket))
return true;
@@ -3479,7 +3515,7 @@ is_node_reachable(WatchdogNode * wdNode)
}
static bool
-is_node_active(WatchdogNode * wdNode)
+is_node_active(WatchdogNode *wdNode)
{
if (wdNode->state == WD_DEAD || wdNode->state == WD_LOST || wdNode->state == WD_SHUTDOWN)
return false;
@@ -3487,7 +3523,7 @@ is_node_active(WatchdogNode * wdNode)
}
static bool
-is_node_active_and_reachable(WatchdogNode * wdNode)
+is_node_active_and_reachable(WatchdogNode *wdNode)
{
if (is_node_active(wdNode))
return is_node_reachable(wdNode);
@@ -3656,7 +3692,7 @@ update_successful_outgoing_cons(fd_set *wmask, int pending_fds_count)
}
static bool
-write_packet_to_socket(int sock, WDPacketData * pkt, bool ipcPacket)
+write_packet_to_socket(int sock, WDPacketData *pkt, bool ipcPacket)
{
int ret = 0;
int command_id,
@@ -3719,7 +3755,7 @@ write_packet_to_socket(int sock, WDPacketData * pkt, bool ipcPacket)
}
static void
-wd_packet_shallow_copy(WDPacketData * srcPkt, WDPacketData * dstPkt)
+wd_packet_shallow_copy(WDPacketData *srcPkt, WDPacketData *dstPkt)
{
dstPkt->command_id = srcPkt->command_id;
dstPkt->data = srcPkt->data;
@@ -3728,13 +3764,14 @@ wd_packet_shallow_copy(WDPacketData * srcPkt, WDPacketData * dstPkt)
}
static void
-init_wd_packet(WDPacketData * pkt)
+init_wd_packet(WDPacketData *pkt)
{
pkt->len = 0;
pkt->data = NULL;
}
-static WDPacketData * get_empty_packet(void)
+static WDPacketData *
+get_empty_packet(void)
{
WDPacketData *pkt = palloc0(sizeof(WDPacketData));
@@ -3742,7 +3779,7 @@ static WDPacketData * get_empty_packet(void)
}
static void
-free_packet(WDPacketData * pkt)
+free_packet(WDPacketData *pkt)
{
if (pkt)
{
@@ -3753,25 +3790,25 @@ free_packet(WDPacketData * pkt)
}
static void
-set_message_type(WDPacketData * pkt, char type)
+set_message_type(WDPacketData *pkt, char type)
{
pkt->type = type;
}
static void
-set_message_commandID(WDPacketData * pkt, unsigned int commandID)
+set_message_commandID(WDPacketData *pkt, unsigned int commandID)
{
pkt->command_id = commandID;
}
static void
-set_next_commandID_in_message(WDPacketData * pkt)
+set_next_commandID_in_message(WDPacketData *pkt)
{
set_message_commandID(pkt, get_next_commandID());
}
static void
-set_message_data(WDPacketData * pkt, const char *data, int len)
+set_message_data(WDPacketData *pkt, const char *data, int len)
{
pkt->data = (char *) data;
pkt->len = len;
@@ -3782,7 +3819,7 @@ set_message_data(WDPacketData * pkt, const char *data, int len)
#define NotSet "Not_Set"
static bool
-add_nodeinfo_to_json(JsonNode * jNode, WatchdogNode * node)
+add_nodeinfo_to_json(JsonNode *jNode, WatchdogNode *node)
{
jw_start_object(jNode, "WatchdogNode");
@@ -3803,7 +3840,8 @@ add_nodeinfo_to_json(JsonNode * jNode, WatchdogNode * node)
return true;
}
-static JsonNode * get_node_list_json(int id)
+static JsonNode *
+get_node_list_json(int id)
{
int i;
JsonNode *jNode = jw_create_with_object(true);
@@ -3865,7 +3903,8 @@ static JsonNode * get_node_list_json(int id)
return jNode;
}
-static WDPacketData * get_beacon_message(char type, WDPacketData * replyFor)
+static WDPacketData *
+get_beacon_message(char type, WDPacketData *replyFor)
{
WDPacketData *message = get_empty_packet();
char *json_data;
@@ -3883,7 +3922,8 @@ static WDPacketData * get_beacon_message(char type, WDPacketData * replyFor)
return message;
}
-static WDPacketData * get_addnode_message(void)
+static WDPacketData *
+get_addnode_message(void)
{
char authhash[WD_AUTH_HASH_LEN + 1];
WDPacketData *message = get_empty_packet();
@@ -3896,7 +3936,8 @@ static WDPacketData * get_addnode_message(void)
return message;
}
-static WDPacketData * get_mynode_info_message(WDPacketData * replyFor)
+static WDPacketData *
+get_mynode_info_message(WDPacketData *replyFor)
{
char authhash[WD_AUTH_HASH_LEN + 1];
WDPacketData *message = get_empty_packet();
@@ -3913,7 +3954,8 @@ static WDPacketData * get_mynode_info_message(WDPacketData * replyFor)
return message;
}
-static WDPacketData * get_minimum_message(char type, WDPacketData * replyFor)
+static WDPacketData *
+get_minimum_message(char type, WDPacketData *replyFor)
{
/* TODO it is a waste of space */
WDPacketData *message = get_empty_packet();
@@ -3926,16 +3968,19 @@ static WDPacketData * get_minimum_message(char type, WDPacketData * replyFor)
return message;
}
-static WDCommandData * get_wd_IPC_command_from_reply(WDPacketData * pkt)
+static WDCommandData *
+get_wd_IPC_command_from_reply(WDPacketData *pkt)
{
return get_wd_command_from_reply(g_cluster.ipc_commands, pkt);
}
-static WDCommandData * get_wd_cluster_command_from_reply(WDPacketData * pkt)
+static WDCommandData *
+get_wd_cluster_command_from_reply(WDPacketData *pkt)
{
return get_wd_command_from_reply(g_cluster.clusterCommands, pkt);
}
-static WDCommandData * get_wd_command_from_reply(List *commands, WDPacketData * pkt)
+static WDCommandData *
+get_wd_command_from_reply(List *commands, WDPacketData *pkt)
{
ListCell *lc;
@@ -3960,7 +4005,8 @@ static WDCommandData * get_wd_command_from_reply(List *commands, WDPacketData *
return NULL;
}
-static WDCommandData * get_wd_IPC_command_from_socket(int sock)
+static WDCommandData *
+get_wd_IPC_command_from_socket(int sock)
{
ListCell *lc;
@@ -3982,7 +4028,7 @@ static WDCommandData * get_wd_IPC_command_from_socket(int sock)
static void
-cleanUpIPCCommand(WDCommandData * ipcCommand)
+cleanUpIPCCommand(WDCommandData *ipcCommand)
{
/*
* close the socket associated with ipcCommand and remove it from
@@ -4005,7 +4051,8 @@ cleanUpIPCCommand(WDCommandData * ipcCommand)
MemoryContextDelete(ipcCommand->memoryContext);
}
-static WDPacketData * process_data_request(WatchdogNode * wdNode, WDPacketData * pkt)
+static WDPacketData *
+process_data_request(WatchdogNode *wdNode, WDPacketData *pkt)
{
char *request_type;
char *data = NULL;
@@ -4052,7 +4099,7 @@ static WDPacketData * process_data_request(WatchdogNode * wdNode, WDPacketData *
}
static void
-cluster_service_message_processor(WatchdogNode * wdNode, WDPacketData * pkt)
+cluster_service_message_processor(WatchdogNode *wdNode, WDPacketData *pkt)
{
if (pkt->type != WD_CLUSTER_SERVICE_MESSAGE)
return;
@@ -4165,29 +4212,29 @@ cluster_service_message_processor(WatchdogNode * wdNode, WDPacketData * pkt)
break;
case CLUSTER_NODE_REQUIRE_TO_RELOAD:
- {
- watchdog_state_machine(WD_EVENT_WD_STATE_REQUIRE_RELOAD, NULL, NULL, NULL);
- }
+ {
+ watchdog_state_machine(WD_EVENT_WD_STATE_REQUIRE_RELOAD, NULL, NULL, NULL);
+ }
break;
case CLUSTER_NODE_APPEARING_LOST:
- {
- ereport(LOG,
- (errmsg("remote node \"%s\" is reporting that it has lost us",
- wdNode->nodeName)));
- wdNode->has_lost_us = true;
- watchdog_state_machine(WD_EVENT_I_AM_APPEARING_LOST, wdNode, NULL, NULL);
- }
+ {
+ ereport(LOG,
+ (errmsg("remote node \"%s\" is reporting that it has lost us",
+ wdNode->nodeName)));
+ wdNode->has_lost_us = true;
+ watchdog_state_machine(WD_EVENT_I_AM_APPEARING_LOST, wdNode, NULL, NULL);
+ }
break;
case CLUSTER_NODE_APPEARING_FOUND:
- {
- ereport(LOG,
- (errmsg("remote node \"%s\" is reporting that it has found us again",
- wdNode->nodeName)));
- wdNode->has_lost_us = false;
- watchdog_state_machine(WD_EVENT_I_AM_APPEARING_FOUND, wdNode, NULL, NULL);
- }
+ {
+ ereport(LOG,
+ (errmsg("remote node \"%s\" is reporting that it has found us again",
+ wdNode->nodeName)));
+ wdNode->has_lost_us = false;
+ watchdog_state_machine(WD_EVENT_I_AM_APPEARING_FOUND, wdNode, NULL, NULL);
+ }
break;
case CLUSTER_NODE_INVALID_VERSION:
@@ -4208,11 +4255,11 @@ cluster_service_message_processor(WatchdogNode * wdNode, WDPacketData * pkt)
}
static void
-wd_execute_cluster_command_processor(WatchdogNode * wdNode, WDPacketData * pkt)
+wd_execute_cluster_command_processor(WatchdogNode *wdNode, WDPacketData *pkt)
{
/* get the json for node list */
- char *clusterCommand = NULL;
- List *args_list = NULL;
+ char *clusterCommand = NULL;
+ List *args_list = NULL;
if (pkt->type != WD_EXECUTE_COMMAND_REQUEST)
return;
@@ -4225,7 +4272,7 @@ wd_execute_cluster_command_processor(WatchdogNode * wdNode, WDPacketData * pkt)
}
if (!parse_wd_exec_cluster_command_json(pkt->data, pkt->len,
- &clusterCommand, &args_list))
+ &clusterCommand, &args_list))
{
ereport(LOG,
(errmsg("node \"%s\" sent an invalid JSON data in cluster command message", wdNode->nodeName)));
@@ -4233,14 +4280,16 @@ wd_execute_cluster_command_processor(WatchdogNode * wdNode, WDPacketData * pkt)
}
ereport(DEBUG1,
- (errmsg("received \"%s\" command from node \"%s\"",clusterCommand, wdNode->nodeName)));
+ (errmsg("received \"%s\" command from node \"%s\"", clusterCommand, wdNode->nodeName)));
if (strcasecmp(WD_COMMAND_SHUTDOWN_CLUSTER, clusterCommand) == 0)
{
- char mode = 's';
+ char mode = 's';
ListCell *lc;
+
foreach(lc, args_list)
{
WDExecCommandArg *wdExecCommandArg = lfirst(lc);
+
if (strcmp(wdExecCommandArg->arg_name, "mode") == 0)
{
mode = wdExecCommandArg->arg_value[0];
@@ -4268,16 +4317,19 @@ wd_execute_cluster_command_processor(WatchdogNode * wdNode, WDPacketData * pkt)
}
else if (strcasecmp(WD_COMMAND_LOCK_ON_STANDBY, clusterCommand) == 0)
{
- int lock_type = -1;
- char *operation = NULL;
+ int lock_type = -1;
+ char *operation = NULL;
+
if (get_local_node_state() == WD_STANDBY && wdNode->state == WD_COORDINATOR)
{
if (list_length(args_list) == 2)
{
ListCell *lc;
+
foreach(lc, args_list)
{
WDExecCommandArg *wdExecCommandArg = lfirst(lc);
+
if (strcmp(wdExecCommandArg->arg_name, "StandbyLockType") == 0)
{
lock_type = atoi(wdExecCommandArg->arg_value);
@@ -4299,7 +4351,7 @@ wd_execute_cluster_command_processor(WatchdogNode * wdNode, WDPacketData * pkt)
else if (lock_type == WD_FOLLOW_PRIMARY_LOCK)
{
ereport(LOG,
- (errmsg("processing follow primary looking[%s] request from remote node \"%s\"", operation,wdNode->nodeName)));
+							(errmsg("processing follow primary locking[%s] request from remote node \"%s\"", operation, wdNode->nodeName)));
if (strcasecmp("acquire", operation) == 0)
pool_acquire_follow_primary_lock(false, true);
@@ -4318,7 +4370,7 @@ wd_execute_cluster_command_processor(WatchdogNode * wdNode, WDPacketData * pkt)
else
{
ereport(LOG,
- (errmsg("invalid arguments in 'LOCK ON STANDBY' command from remote node \"%s\"", wdNode->nodeName)));
+ (errmsg("invalid arguments in 'LOCK ON STANDBY' command from remote node \"%s\"", wdNode->nodeName)));
}
}
else if (get_local_node_state() != WD_STANDBY)
@@ -4337,7 +4389,7 @@ wd_execute_cluster_command_processor(WatchdogNode * wdNode, WDPacketData * pkt)
else
{
ereport(WARNING,
- (errmsg("received \"%s\" command from node \"%s\" is not supported",clusterCommand, wdNode->nodeName)));
+ (errmsg("received \"%s\" command from node \"%s\" is not supported", clusterCommand, wdNode->nodeName)));
}
if (args_list)
@@ -4347,7 +4399,7 @@ wd_execute_cluster_command_processor(WatchdogNode * wdNode, WDPacketData * pkt)
}
static int
-standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt)
+standard_packet_processor(WatchdogNode *wdNode, WDPacketData *pkt)
{
WDPacketData *replyPkt = NULL;
@@ -4508,21 +4560,19 @@ standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt)
if (oldNodeState == WD_LOST)
{
/*
- * We have received the message from lost node
- * add it back to cluster if it was not marked by
- * life-check
- * Node lost by life-check processes can only be
- * added back when we get alive notification for the
- * node from life-check
+					 * We have received a message from a lost node; add it back
+					 * to the cluster if it was not marked lost by life-check.
+					 * Nodes lost by the life-check process can only be added
+					 * back when we get an alive notification for the node from
+					 * life-check.
*/
ereport(LOG,
- (errmsg("we have received the NODE INFO message from the node:\"%s\" that was lost",wdNode->nodeName),
- errdetail("we had lost this node because of \"%s\"",wd_node_lost_reasons[wdNode->node_lost_reason])));
+ (errmsg("we have received the NODE INFO message from the node:\"%s\" that was lost", wdNode->nodeName),
+ errdetail("we had lost this node because of \"%s\"", wd_node_lost_reasons[wdNode->node_lost_reason])));
if (wdNode->node_lost_reason == NODE_LOST_BY_LIFECHECK)
{
ereport(LOG,
- (errmsg("node:\"%s\" was reported lost by the life-check process",wdNode->nodeName),
+ (errmsg("node:\"%s\" was reported lost by the life-check process", wdNode->nodeName),
errdetail("node will be added to cluster once life-check mark it as reachable again")));
/* restore the node's lost state */
wdNode->state = oldNodeState;
@@ -4571,10 +4621,10 @@ standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt)
replyPkt = get_mynode_info_message(pkt);
beacon_message_received_from_node(wdNode, pkt);
}
+
/*
- * if (WD_LEADER_NODE == NULL)
- * do not reply to beacon if we are not connected to
- * any leader node
+ * if (WD_LEADER_NODE == NULL) do not reply to beacon if we
+ * are not connected to any leader node
*/
}
break;
@@ -4594,7 +4644,7 @@ standard_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt)
static bool
-send_message_to_connection(SocketConnection * conn, WDPacketData * pkt)
+send_message_to_connection(SocketConnection *conn, WDPacketData *pkt)
{
if (check_debug_request_kill_all_communication() == true ||
check_debug_request_kill_all_senders() == true)
@@ -4613,7 +4663,7 @@ send_message_to_connection(SocketConnection * conn, WDPacketData * pkt)
}
static bool
-send_message_to_node(WatchdogNode * wdNode, WDPacketData * pkt)
+send_message_to_node(WatchdogNode *wdNode, WDPacketData *pkt)
{
bool ret;
@@ -4654,7 +4704,7 @@ send_message_to_node(WatchdogNode * wdNode, WDPacketData * pkt)
* Returns the number of nodes the message is sent to
*/
static int
-send_message(WatchdogNode * wdNode, WDPacketData * pkt)
+send_message(WatchdogNode *wdNode, WDPacketData *pkt)
{
int i,
count = 0;
@@ -4678,7 +4728,8 @@ send_message(WatchdogNode * wdNode, WDPacketData * pkt)
return count;
}
-static IPC_CMD_PROCESS_RES wd_command_processor_for_node_lost_event(WDCommandData * ipcCommand, WatchdogNode * wdLostNode)
+static IPC_CMD_PROCESS_RES
+wd_command_processor_for_node_lost_event(WDCommandData *ipcCommand, WatchdogNode *wdLostNode)
{
if (ipcCommand->sendToNode)
{
@@ -4740,7 +4791,7 @@ static IPC_CMD_PROCESS_RES wd_command_processor_for_node_lost_event(WDCommandDat
}
static void
-wd_command_is_complete(WDCommandData * ipcCommand)
+wd_command_is_complete(WDCommandData *ipcCommand)
{
if (ipcCommand->commandCompleteFunc)
{
@@ -4788,7 +4839,7 @@ wd_command_is_complete(WDCommandData * ipcCommand)
static void
-node_lost_while_ipc_command(WatchdogNode * wdNode)
+node_lost_while_ipc_command(WatchdogNode *wdNode)
{
List *ipcCommands_to_del = NIL;
ListCell *lc;
@@ -4924,13 +4975,14 @@ service_unreachable_nodes(void)
if (wdNode->state == WD_LOST && wdNode->membership_status == WD_NODE_MEMBERSHIP_ACTIVE
&& pool_config->wd_lost_node_removal_timeout)
{
- int lost_seconds = WD_TIME_DIFF_SEC(currTime, wdNode->lost_time);
+ int lost_seconds = WD_TIME_DIFF_SEC(currTime, wdNode->lost_time);
+
if (lost_seconds >= pool_config->wd_lost_node_removal_timeout)
{
ereport(LOG,
- (errmsg("remote node \"%s\" is lost for %d seconds", wdNode->nodeName,lost_seconds),
+ (errmsg("remote node \"%s\" is lost for %d seconds", wdNode->nodeName, lost_seconds),
errdetail("revoking the node's membership")));
- revoke_cluster_membership_of_node(wdNode,WD_NODE_REVOKED_LOST);
+ revoke_cluster_membership_of_node(wdNode, WD_NODE_REVOKED_LOST);
}
continue;
}
@@ -4938,13 +4990,14 @@ service_unreachable_nodes(void)
if (wdNode->state == WD_DEAD && wdNode->membership_status == WD_NODE_MEMBERSHIP_ACTIVE
&& pool_config->wd_no_show_node_removal_timeout)
{
- int no_show_seconds = WD_TIME_DIFF_SEC(currTime, g_cluster.localNode->startup_time);
+ int no_show_seconds = WD_TIME_DIFF_SEC(currTime, g_cluster.localNode->startup_time);
+
if (no_show_seconds >= pool_config->wd_no_show_node_removal_timeout)
{
ereport(LOG,
- (errmsg("remote node \"%s\" didn't showed-up in %d seconds", wdNode->nodeName,no_show_seconds),
+						(errmsg("remote node \"%s\" didn't show up in %d seconds", wdNode->nodeName, no_show_seconds),
errdetail("revoking the node's membership")));
- revoke_cluster_membership_of_node(wdNode,WD_NODE_REVOKED_NO_SHOW);
+ revoke_cluster_membership_of_node(wdNode, WD_NODE_REVOKED_NO_SHOW);
}
continue;
}
@@ -4970,7 +5023,7 @@ service_unreachable_nodes(void)
else if (wdNode->sending_failures_count > MAX_ALLOWED_SEND_FAILURES)
{
ereport(LOG,
- (errmsg("not able to send messages to remote node \"%s\"",wdNode->nodeName),
+ (errmsg("not able to send messages to remote node \"%s\"", wdNode->nodeName),
errdetail("marking the node as lost")));
/* mark the node as lost */
wdNode->node_lost_reason = NODE_LOST_BY_SEND_FAILURE;
@@ -4979,11 +5032,11 @@ service_unreachable_nodes(void)
else if (wdNode->missed_beacon_count > MAX_ALLOWED_BEACON_REPLY_MISS)
{
ereport(LOG,
- (errmsg("remote node \"%s\" is not responding to our beacon messages",wdNode->nodeName),
+ (errmsg("remote node \"%s\" is not responding to our beacon messages", wdNode->nodeName),
errdetail("marking the node as lost")));
/* mark the node as lost */
wdNode->node_lost_reason = NODE_LOST_BY_MISSING_BEACON;
- wdNode->missed_beacon_count = 0; /* Reset the counter */
+ wdNode->missed_beacon_count = 0; /* Reset the counter */
watchdog_state_machine(WD_EVENT_REMOTE_NODE_LOST, wdNode, NULL, NULL);
}
}
@@ -4999,7 +5052,7 @@ service_unreachable_nodes(void)
}
static bool
-watchdog_internal_command_packet_processor(WatchdogNode * wdNode, WDPacketData * pkt)
+watchdog_internal_command_packet_processor(WatchdogNode *wdNode, WDPacketData *pkt)
{
int i;
WDCommandNodeResult *nodeResult = NULL;
@@ -5021,6 +5074,7 @@ watchdog_internal_command_packet_processor(WatchdogNode * wdNode, WDPacketData *
for (i = 0; i < g_cluster.remoteNodeCount; i++)
{
WDCommandNodeResult *nodeRes = &clusterCommand->nodeResults[i];
+
if (nodeRes->wdNode == wdNode)
{
nodeResult = nodeRes;
@@ -5060,7 +5114,10 @@ watchdog_internal_command_packet_processor(WatchdogNode * wdNode, WDPacketData *
}
else if (pkt->type == WD_REJECT_MESSAGE || pkt->type == WD_ERROR_MESSAGE)
{
- /* Error or reject message by any node immediately finishes the command */
+ /*
+ * Error or reject message by any node immediately finishes the
+ * command
+ */
ereport(DEBUG1,
(errmsg("command %c with command id %d is finished with COMMAND_FINISHED_NODE_REJECTED", pkt->type, pkt->command_id)));
clusterCommand->commandStatus = COMMAND_FINISHED_NODE_REJECTED;
@@ -5120,7 +5177,7 @@ check_for_current_command_timeout(void)
* Returns the number of nodes the message is sent to
*/
static int
-issue_watchdog_internal_command(WatchdogNode * wdNode, WDPacketData * pkt, int timeout_sec)
+issue_watchdog_internal_command(WatchdogNode *wdNode, WDPacketData *pkt, int timeout_sec)
{
int i;
bool save_message = false;
@@ -5291,7 +5348,8 @@ get_cluster_node_count(void)
return count;
}
-static WDPacketData * get_message_of_type(char type, WDPacketData * replyFor)
+static WDPacketData *
+get_message_of_type(char type, WDPacketData *replyFor)
{
WDPacketData *pkt = NULL;
@@ -5327,7 +5385,7 @@ static WDPacketData * get_message_of_type(char type, WDPacketData * replyFor)
}
static int
-send_message_of_type(WatchdogNode * wdNode, char type, WDPacketData * replyFor)
+send_message_of_type(WatchdogNode *wdNode, char type, WDPacketData *replyFor)
{
int ret = -1;
WDPacketData *pkt = get_message_of_type(type, replyFor);
@@ -5341,7 +5399,7 @@ send_message_of_type(WatchdogNode * wdNode, char type, WDPacketData * replyFor)
}
static int
-send_cluster_command(WatchdogNode * wdNode, char type, int timeout_sec)
+send_cluster_command(WatchdogNode *wdNode, char type, int timeout_sec)
{
int ret = -1;
WDPacketData *pkt = get_message_of_type(type, NULL);
@@ -5355,7 +5413,7 @@ send_cluster_command(WatchdogNode * wdNode, char type, int timeout_sec)
}
static bool
-reply_with_minimal_message(WatchdogNode * wdNode, char type, WDPacketData * replyFor)
+reply_with_minimal_message(WatchdogNode *wdNode, char type, WDPacketData *replyFor)
{
WDPacketData *pkt = get_minimum_message(type, replyFor);
int ret = send_message(wdNode, pkt);
@@ -5365,7 +5423,7 @@ reply_with_minimal_message(WatchdogNode * wdNode, char type, WDPacketData * repl
}
static bool
-send_cluster_service_message(WatchdogNode * wdNode, WDPacketData * replyFor, char message)
+send_cluster_service_message(WatchdogNode *wdNode, WDPacketData *replyFor, char message)
{
/* Check if its a broadcast message */
if (wdNode == NULL)
@@ -5374,17 +5432,18 @@ send_cluster_service_message(WatchdogNode * wdNode, WDPacketData * replyFor, cha
if (message == g_cluster.last_bcast_srv_msg)
{
struct timeval currTime;
+
gettimeofday(&currTime, NULL);
- int last_bcast_sec = WD_TIME_DIFF_SEC(currTime, g_cluster.last_bcast_srv_msg_time);
+ int last_bcast_sec = WD_TIME_DIFF_SEC(currTime, g_cluster.last_bcast_srv_msg_time);
+
if (last_bcast_sec < MIN_SECS_BETWEEN_BROADCAST_SRV_MSG)
{
/*
- * do not broadcast this message
- * to prevent flooding
+ * do not broadcast this message to prevent flooding
*/
ereport(DEBUG4,
- (errmsg("not broadcasting cluster service message %c to prevent flooding ",message),
- errdetail("last time same message was sent %d seconds ago",last_bcast_sec)));
+							(errmsg("not broadcasting cluster service message %c to prevent flooding", message),
+ errdetail("last time same message was sent %d seconds ago", last_bcast_sec)));
return true;
}
}
@@ -5396,7 +5455,7 @@ send_cluster_service_message(WatchdogNode * wdNode, WDPacketData * replyFor, cha
static bool
-reply_with_message(WatchdogNode * wdNode, char type, char *data, int data_len, WDPacketData * replyFor)
+reply_with_message(WatchdogNode *wdNode, char type, char *data, int data_len, WDPacketData *replyFor)
{
WDPacketData wdPacket;
int ret;
@@ -5414,7 +5473,8 @@ reply_with_message(WatchdogNode * wdNode, char type, char *data, int data_len, W
return ret;
}
-static inline WD_STATES get_local_node_state(void)
+static inline WD_STATES
+get_local_node_state(void)
{
return g_cluster.localNode->state;
}
@@ -5430,7 +5490,7 @@ is_local_node_true_leader(void)
* processor and no further action is required
*/
static bool
-wd_commands_packet_processor(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt)
+wd_commands_packet_processor(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt)
{
WDCommandData *ipcCommand;
@@ -5583,7 +5643,7 @@ any_interface_available(void)
}
static int
-watchdog_state_machine(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
ereport(DEBUG1,
(errmsg("STATE MACHINE INVOKED WITH EVENT = %s Current State = %s",
@@ -5597,7 +5657,7 @@ watchdog_state_machine(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pk
ereport(LOG,
(errmsg("remote node \"%s\" is shutting down", wdNode->nodeName)));
if (pool_config->wd_remove_shutdown_nodes)
- revoke_cluster_membership_of_node(wdNode,WD_NODE_REVOKED_SHUTDOWN);
+ revoke_cluster_membership_of_node(wdNode, WD_NODE_REVOKED_SHUTDOWN);
}
else
{
@@ -5605,7 +5665,7 @@ watchdog_state_machine(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pk
ereport(LOG,
(errmsg("remote node \"%s\" is lost", wdNode->nodeName)));
/* Inform the node, that it is lost for us */
- send_cluster_service_message(wdNode, pkt, CLUSTER_NODE_APPEARING_LOST);
+ send_cluster_service_message(wdNode, pkt, CLUSTER_NODE_APPEARING_LOST);
}
if (wdNode == WD_LEADER_NODE)
{
@@ -5629,9 +5689,10 @@ watchdog_state_machine(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pk
ereport(LOG,
(errmsg("remote node \"%s\" became reachable again", wdNode->nodeName),
errdetail("requesting the node info")));
+
/*
- * remove the lost state from the node
- * and change it to joining for now
+ * remove the lost state from the node and change it to joining for
+ * now
*/
wdNode->node_lost_reason = NODE_LOST_UNKNOWN_REASON;
wdNode->state = WD_LOADING;
@@ -5776,7 +5837,7 @@ watchdog_state_machine(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pk
* Wait for 4 seconds if some node rejects us.
*/
static int
-watchdog_state_machine_loading(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine_loading(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
switch (event)
{
@@ -5884,7 +5945,7 @@ watchdog_state_machine_loading(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD
* much sense as at loading time we already have updated node informations
*/
static int
-watchdog_state_machine_joining(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine_joining(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
switch (event)
{
@@ -5961,7 +6022,7 @@ watchdog_state_machine_joining(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD
*/
static int
-watchdog_state_machine_initializing(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine_initializing(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
switch (event)
{
@@ -6043,7 +6104,7 @@ watchdog_state_machine_initializing(WD_EVENTS event, WatchdogNode * wdNode, WDPa
}
static int
-watchdog_state_machine_standForCord(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine_standForCord(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
switch (event)
{
@@ -6070,7 +6131,7 @@ watchdog_state_machine_standForCord(WD_EVENTS event, WatchdogNode * wdNode, WDPa
if (pkt->type == WD_ERROR_MESSAGE)
{
ereport(LOG,
- (errmsg("our stand for coordinator request is rejected by node \"%s\"",wdNode->nodeName),
+ (errmsg("our stand for coordinator request is rejected by node \"%s\"", wdNode->nodeName),
errdetail("we might be in partial network isolation and cluster already have a valid leader"),
errhint("please verify the watchdog life-check and network is working properly")));
set_state(WD_NETWORK_ISOLATION);
@@ -6168,7 +6229,7 @@ watchdog_state_machine_standForCord(WD_EVENTS event, WatchdogNode * wdNode, WDPa
* node is the leader/coordinator node.
*/
static int
-watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
switch (event)
{
@@ -6176,7 +6237,7 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac
{
send_cluster_command(NULL, WD_DECLARE_COORDINATOR_MESSAGE, 4);
set_timeout(MAX_SECS_WAIT_FOR_REPLY_FROM_NODE);
- update_missed_beacon_count(NULL,true);
+ update_missed_beacon_count(NULL, true);
update_failover_timeout(g_cluster.localNode, pool_config);
ereport(LOG,
@@ -6184,6 +6245,7 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac
if (message_level_is_interesting(DEBUG2))
{
int i;
+
ereport(DEBUG2,
(errmsg("printing all remote node information")));
for (i = 0; i < g_cluster.remoteNodeCount; i++)
@@ -6255,7 +6317,7 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac
else if (clusterCommand->commandPacket.type == WD_IAM_COORDINATOR_MESSAGE)
{
- update_missed_beacon_count(clusterCommand,false);
+ update_missed_beacon_count(clusterCommand, false);
if (clusterCommand->commandStatus == COMMAND_FINISHED_ALL_REPLIED)
{
@@ -6387,40 +6449,43 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac
break;
case WD_EVENT_I_AM_APPEARING_LOST:
- {
- /* The remote node has lost us, It would have already marked
- * us as lost, So remove it from standby*/
- standby_node_left_cluster(wdNode);
- }
- break;
-
- case WD_EVENT_I_AM_APPEARING_FOUND:
- {
- /* The remote node has found us again */
- if (wdNode->wd_data_major_version >= 1 && wdNode->wd_data_minor_version >= 1)
{
/*
- * Since data version 1.1 we support CLUSTER_NODE_REQUIRE_TO_RELOAD
- * which makes the standby nodes to re-send the join leader node
+					 * The remote node has lost us; it would have already marked
+					 * us as lost, so remove it from the standby list
*/
- ereport(DEBUG1,
- (errmsg("asking remote node \"%s\" to rejoin leader", wdNode->nodeName),
- errdetail("watchdog data version %s",WD_MESSAGE_DATA_VERSION)));
-
- send_cluster_service_message(wdNode, pkt, CLUSTER_NODE_REQUIRE_TO_RELOAD);
+ standby_node_left_cluster(wdNode);
}
- else
+ break;
+
+ case WD_EVENT_I_AM_APPEARING_FOUND:
{
- /*
- * The node is on older version
- * So ask it to re-join the cluster
- */
- ereport(DEBUG1,
- (errmsg("asking remote node \"%s\" to rejoin cluster", wdNode->nodeName),
- errdetail("watchdog data version %s",WD_MESSAGE_DATA_VERSION)));
- send_cluster_service_message(wdNode, pkt, CLUSTER_NEEDS_ELECTION);
+ /* The remote node has found us again */
+ if (wdNode->wd_data_major_version >= 1 && wdNode->wd_data_minor_version >= 1)
+ {
+ /*
+						 * Since data version 1.1 we support
+						 * CLUSTER_NODE_REQUIRE_TO_RELOAD, which makes the standby
+						 * nodes re-send the join request to the leader node
+ */
+ ereport(DEBUG1,
+ (errmsg("asking remote node \"%s\" to rejoin leader", wdNode->nodeName),
+ errdetail("watchdog data version %s", WD_MESSAGE_DATA_VERSION)));
+
+ send_cluster_service_message(wdNode, pkt, CLUSTER_NODE_REQUIRE_TO_RELOAD);
+ }
+ else
+ {
+ /*
+						 * The node is on an older version, so ask it to re-join
+						 * the cluster
+ */
+ ereport(DEBUG1,
+ (errmsg("asking remote node \"%s\" to rejoin cluster", wdNode->nodeName),
+ errdetail("watchdog data version %s", WD_MESSAGE_DATA_VERSION)));
+ send_cluster_service_message(wdNode, pkt, CLUSTER_NEEDS_ELECTION);
+ }
}
- }
break;
case WD_EVENT_REMOTE_NODE_LOST:
@@ -6430,47 +6495,53 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac
break;
case WD_EVENT_REMOTE_NODE_FOUND:
- {
- ereport(LOG,
- (errmsg("remote node \"%s\" is reachable again", wdNode->nodeName),
- errdetail("trying to add it back as a standby")));
- wdNode->node_lost_reason = NODE_LOST_UNKNOWN_REASON;
- /* If I am the cluster leader. Ask for the node info and to re-send the join message */
- send_message_of_type(wdNode, WD_REQ_INFO_MESSAGE, NULL);
- if (wdNode->wd_data_major_version >= 1 && wdNode->wd_data_minor_version >= 1)
{
- /*
- * Since data version 1.1 we support CLUSTER_NODE_REQUIRE_TO_RELOAD
- * which makes the standby nodes to re-send the join leader node
- */
- ereport(DEBUG1,
- (errmsg("asking remote node \"%s\" to rejoin leader", wdNode->nodeName),
- errdetail("watchdog data version %s",WD_MESSAGE_DATA_VERSION)));
+ ereport(LOG,
+ (errmsg("remote node \"%s\" is reachable again", wdNode->nodeName),
+ errdetail("trying to add it back as a standby")));
+ wdNode->node_lost_reason = NODE_LOST_UNKNOWN_REASON;
- send_cluster_service_message(wdNode, pkt, CLUSTER_NODE_REQUIRE_TO_RELOAD);
- }
- else
- {
/*
- * The node is on older version
- * So ask it to re-join the cluster
+					 * If I am the cluster leader, ask for the node info and ask
+					 * the node to re-send the join message
*/
- ereport(DEBUG1,
- (errmsg("asking remote node \"%s\" to rejoin cluster", wdNode->nodeName),
- errdetail("watchdog data version %s",WD_MESSAGE_DATA_VERSION)));
- send_cluster_service_message(wdNode, pkt, CLUSTER_NEEDS_ELECTION);
+ send_message_of_type(wdNode, WD_REQ_INFO_MESSAGE, NULL);
+ if (wdNode->wd_data_major_version >= 1 && wdNode->wd_data_minor_version >= 1)
+ {
+ /*
+					 * Since data version 1.1 we support
+					 * CLUSTER_NODE_REQUIRE_TO_RELOAD, which makes the standby
+					 * nodes re-send the join request to the leader node
+ */
+ ereport(DEBUG1,
+ (errmsg("asking remote node \"%s\" to rejoin leader", wdNode->nodeName),
+ errdetail("watchdog data version %s", WD_MESSAGE_DATA_VERSION)));
+
+ send_cluster_service_message(wdNode, pkt, CLUSTER_NODE_REQUIRE_TO_RELOAD);
+ }
+ else
+ {
+ /*
+					 * The node is on an older version, so ask it to re-join the
+					 * cluster
+ */
+ ereport(DEBUG1,
+ (errmsg("asking remote node \"%s\" to rejoin cluster", wdNode->nodeName),
+ errdetail("watchdog data version %s", WD_MESSAGE_DATA_VERSION)));
+ send_cluster_service_message(wdNode, pkt, CLUSTER_NEEDS_ELECTION);
+ }
+ break;
}
- break;
- }
case WD_EVENT_PACKET_RCV:
{
switch (pkt->type)
{
case WD_ADD_NODE_MESSAGE:
- /* In case we received the ADD node message from
- * one of our standby, Remove that standby from
- * the list
+
+ /*
+							 * In case we received the ADD node message from one
+							 * of our standbys, remove that standby from the list
*/
standby_node_left_cluster(wdNode);
standard_packet_processor(wdNode, pkt);
@@ -6523,15 +6594,19 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac
if (wdNode->state == WD_LOST && wdNode->node_lost_reason == NODE_LOST_BY_LIFECHECK)
{
ereport(LOG,
- (errmsg("lost remote node \"%s\" is requesting to join the cluster",wdNode->nodeName),
+ (errmsg("lost remote node \"%s\" is requesting to join the cluster", wdNode->nodeName),
errdetail("rejecting the request until life-check inform us that it is reachable again")));
reply_with_minimal_message(wdNode, WD_REJECT_MESSAGE, pkt);
}
else
{
reply_with_minimal_message(wdNode, WD_ACCEPT_MESSAGE, pkt);
- /* Also get the configurations from the standby node */
- send_message_of_type(wdNode,WD_ASK_FOR_POOL_CONFIG,NULL);
+
+ /*
+ * Also get the configurations from the
+ * standby node
+ */
+ send_message_of_type(wdNode, WD_ASK_FOR_POOL_CONFIG, NULL);
standby_node_join_cluster(wdNode);
}
}
@@ -6579,7 +6654,7 @@ watchdog_state_machine_coordinator(WD_EVENTS event, WatchdogNode * wdNode, WDPac
* which is to commit a suicide as soon an the network becomes unreachable
*/
static int
-watchdog_state_machine_nw_error(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine_nw_error(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
switch (event)
{
@@ -6648,7 +6723,7 @@ watchdog_state_machine_nw_error(WD_EVENTS event, WatchdogNode * wdNode, WDPacket
* and retry to join the cluster.
*/
static int
-watchdog_state_machine_nw_isolation(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine_nw_isolation(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
switch (event)
{
@@ -6667,7 +6742,7 @@ watchdog_state_machine_nw_isolation(WD_EVENTS event, WatchdogNode * wdNode, WDPa
/* fall through */
case WD_EVENT_NW_IP_IS_ASSIGNED:
ereport(LOG,
- (errmsg("trying again to join the cluster")));
+ (errmsg("trying again to join the cluster")));
set_state(WD_JOINING);
break;
@@ -6678,7 +6753,7 @@ watchdog_state_machine_nw_isolation(WD_EVENTS event, WatchdogNode * wdNode, WDPa
}
static bool
-beacon_message_received_from_node(WatchdogNode * wdNode, WDPacketData * pkt)
+beacon_message_received_from_node(WatchdogNode *wdNode, WDPacketData *pkt)
{
long seconds_since_node_startup;
long seconds_since_current_state;
@@ -6725,7 +6800,7 @@ beacon_message_received_from_node(WatchdogNode * wdNode, WDPacketData * pkt)
* 1 : local node should remain as the leader/coordinator
*/
static int
-I_am_leader_and_cluster_in_split_brain(WatchdogNode * otherLeaderNode)
+I_am_leader_and_cluster_in_split_brain(WatchdogNode *otherLeaderNode)
{
if (get_local_node_state() != WD_COORDINATOR)
return 0;
@@ -6824,7 +6899,7 @@ I_am_leader_and_cluster_in_split_brain(WatchdogNode * otherLeaderNode)
}
static void
-handle_split_brain(WatchdogNode * otherLeaderNode, WDPacketData * pkt)
+handle_split_brain(WatchdogNode *otherLeaderNode, WDPacketData *pkt)
{
int decide_leader = I_am_leader_and_cluster_in_split_brain(otherLeaderNode);
@@ -6940,7 +7015,7 @@ resign_from_escalated_node(void)
* state machine function for state participate in elections
*/
static int
-watchdog_state_machine_voting(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine_voting(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
switch (event)
{
@@ -7006,7 +7081,7 @@ watchdog_state_machine_voting(WD_EVENTS event, WatchdogNode * wdNode, WDPacketDa
}
static int
-watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * clusterCommand)
+watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *clusterCommand)
{
switch (event)
{
@@ -7030,63 +7105,63 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD
break;
case WD_EVENT_COMMAND_FINISHED:
- {
- if (clusterCommand->commandPacket.type == WD_JOIN_COORDINATOR_MESSAGE)
{
- if (clusterCommand->commandStatus == COMMAND_FINISHED_ALL_REPLIED ||
- clusterCommand->commandStatus == COMMAND_FINISHED_TIMEOUT)
+ if (clusterCommand->commandPacket.type == WD_JOIN_COORDINATOR_MESSAGE)
{
- register_watchdog_state_change_interrupt();
-
- ereport(LOG,
- (errmsg("successfully joined the watchdog cluster as standby node"),
- errdetail("our join coordinator request is accepted by cluster leader node \"%s\"", WD_LEADER_NODE->nodeName)));
- /* broadcast our new state change to the cluster */
- send_message_of_type(NULL, WD_INFO_MESSAGE, NULL);
-
- }
- else
- {
- ereport(NOTICE,
- (errmsg("our join coordinator is rejected by node \"%s\"", wdNode->nodeName),
- errhint("rejoining the cluster.")));
-
- if (WD_LEADER_NODE->has_lost_us)
+ if (clusterCommand->commandStatus == COMMAND_FINISHED_ALL_REPLIED ||
+ clusterCommand->commandStatus == COMMAND_FINISHED_TIMEOUT)
{
+ register_watchdog_state_change_interrupt();
+
ereport(LOG,
- (errmsg("leader node \"%s\" thinks we are lost, and \"%s\" is not letting us join",WD_LEADER_NODE->nodeName,wdNode->nodeName),
- errhint("please verify the watchdog life-check and network is working properly")));
- set_state(WD_NETWORK_ISOLATION);
+ (errmsg("successfully joined the watchdog cluster as standby node"),
+ errdetail("our join coordinator request is accepted by cluster leader node \"%s\"", WD_LEADER_NODE->nodeName)));
+ /* broadcast our new state change to the cluster */
+ send_message_of_type(NULL, WD_INFO_MESSAGE, NULL);
+
}
else
{
- set_state(WD_JOINING);
+ ereport(NOTICE,
+								(errmsg("our join coordinator request is rejected by node \"%s\"", wdNode->nodeName),
+ errhint("rejoining the cluster.")));
+
+ if (WD_LEADER_NODE->has_lost_us)
+ {
+ ereport(LOG,
+ (errmsg("leader node \"%s\" thinks we are lost, and \"%s\" is not letting us join", WD_LEADER_NODE->nodeName, wdNode->nodeName),
+ errhint("please verify the watchdog life-check and network is working properly")));
+ set_state(WD_NETWORK_ISOLATION);
+ }
+ else
+ {
+ set_state(WD_JOINING);
+ }
}
}
}
- }
break;
case WD_EVENT_I_AM_APPEARING_LOST:
- {
- /* The remote node has lost us, and if it
- * was our coordinator we might already be
- * removed from it's standby list
- * So re-Join the cluster
- */
- if (WD_LEADER_NODE == wdNode)
{
- ereport(LOG,
- (errmsg("we are lost on the leader node \"%s\"",wdNode->nodeName)));
- set_state(WD_JOINING);
+ /*
+					 * The remote node has lost us, and if it was our coordinator
+					 * we might already be removed from its standby list, so
+					 * re-join the cluster
+ */
+ if (WD_LEADER_NODE == wdNode)
+ {
+ ereport(LOG,
+ (errmsg("we are lost on the leader node \"%s\"", wdNode->nodeName)));
+ set_state(WD_JOINING);
+ }
}
- }
break;
case WD_EVENT_I_AM_APPEARING_FOUND:
{
ereport(DEBUG1,
- (errmsg("updating remote node \"%s\" with node info message", wdNode->nodeName)));
+ (errmsg("updating remote node \"%s\" with node info message", wdNode->nodeName)));
send_message_of_type(wdNode, WD_INFO_MESSAGE, NULL);
}
@@ -7112,19 +7187,20 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD
switch (pkt->type)
{
case WD_ADD_NODE_MESSAGE:
- {
- /* In case we received the ADD node message from
- * our coordinator. Reset the cluster state
- */
- if (wdNode == WD_LEADER_NODE)
{
- ereport(LOG,
- (errmsg("received ADD NODE message from the leader node \"%s\"", wdNode->nodeName),
- errdetail("re-joining the cluster")));
- set_state(WD_JOINING);
+ /*
+						 * In case we received the ADD node message from
+						 * our coordinator, reset the cluster state
+ */
+ if (wdNode == WD_LEADER_NODE)
+ {
+ ereport(LOG,
+ (errmsg("received ADD NODE message from the leader node \"%s\"", wdNode->nodeName),
+ errdetail("re-joining the cluster")));
+ set_state(WD_JOINING);
+ }
+ standard_packet_processor(wdNode, pkt);
}
- standard_packet_processor(wdNode, pkt);
- }
break;
case WD_FAILOVER_END:
@@ -7143,7 +7219,7 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD
else
{
ereport(LOG,
- (errmsg("We are connected to leader node \"%s\" and another node \"%s\" is trying to become a leader",WD_LEADER_NODE->nodeName, wdNode->nodeName)));
+ (errmsg("We are connected to leader node \"%s\" and another node \"%s\" is trying to become a leader", WD_LEADER_NODE->nodeName, wdNode->nodeName)));
reply_with_minimal_message(wdNode, WD_ERROR_MESSAGE, pkt);
/* Ask leader to re-send its node info */
send_message_of_type(WD_LEADER_NODE, WD_REQ_INFO_MESSAGE, NULL);
@@ -7160,7 +7236,7 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD
* new node trying to be leader
*/
ereport(LOG,
- (errmsg("We are connected to leader node \"%s\" and another node \"%s\" is trying to declare itself as a leader",WD_LEADER_NODE->nodeName, wdNode->nodeName)));
+ (errmsg("We are connected to leader node \"%s\" and another node \"%s\" is trying to declare itself as a leader", WD_LEADER_NODE->nodeName, wdNode->nodeName)));
reply_with_minimal_message(wdNode, WD_ERROR_MESSAGE, pkt);
/* Ask leader to re-send its node info */
send_message_of_type(WD_LEADER_NODE, WD_REQ_INFO_MESSAGE, NULL);
@@ -7226,8 +7302,8 @@ watchdog_state_machine_standby(WD_EVENTS event, WatchdogNode * wdNode, WDPacketD
else if (last_rcv_sec >= (2 * BEACON_MESSAGE_INTERVAL_SECONDS))
{
/*
- * We have not received a last beacon from leader ask for the
- * node info from leader node
+					 * We have not received the last beacon from the leader; ask
+					 * for the node info from the leader node
*/
ereport(WARNING,
(errmsg("we have not received a beacon message from leader node \"%s\"",
@@ -7294,7 +7370,7 @@ get_minimum_remote_nodes_required_for_quorum(void)
* so minimum quorum is just remote/2.
*/
if (g_cluster.memberRemoteNodeCount % 2 == 0)
- return (g_cluster.memberRemoteNodeCount / 2);
+ return (g_cluster.memberRemoteNodeCount / 2);
/*
* Total nodes including self are even, So we return 50% nodes as quorum
@@ -7310,40 +7386,38 @@ static int
get_minimum_votes_to_resolve_consensus(void)
{
/*
- * Since get_minimum_remote_nodes_required_for_quorum() returns
- * the number of remote nodes required to complete the quorum
- * that is always one less than the total number of nodes required
- * for the cluster to build quorum or consensus, reason being
- * in get_minimum_remote_nodes_required_for_quorum()
- * we always consider the local node as a valid pre-casted vote.
- * But when it comes to count the number of votes required to build
- * consensus for any type of decision, for example for building the
- * consensus on backend failover, the local node can vote on either
- * side. So it's vote is not explicitly counted and for the consensus
- * we actually need one more vote than the total number of remote nodes
- * required for the quorum
+	 * get_minimum_remote_nodes_required_for_quorum() returns the number of
+	 * remote nodes required to complete the quorum, which is always one less
+	 * than the total number of nodes the cluster needs to build quorum or
+	 * consensus, because that function counts the local node as a valid
+	 * pre-cast vote. But when counting the votes required to build consensus
+	 * for any type of decision, for example the consensus on a backend
+	 * failover, the local node can vote on either side. So its vote is not
+	 * explicitly counted, and for the consensus we actually need one more
+	 * vote than the number of remote nodes required for the quorum
*
- * For example
- * If Total nodes in cluster = 4
- * remote node will be = 3
- * get_minimum_remote_nodes_required_for_quorum() return = 1
- * Minimum number of votes required for consensus will be
+	 * For example, if the total nodes in the cluster = 4, then the remote
+	 * nodes = 3 and get_minimum_remote_nodes_required_for_quorum() returns 1.
+	 * The minimum number of votes required for consensus will be
*
- * if(pool_config->enable_consensus_with_half_votes = true)
- * (exact 50% n/2) ==> 4/2 = 2
+ * if(pool_config->enable_consensus_with_half_votes = true) (exact 50%
+ * n/2) ==> 4/2 = 2
*
- * if(pool_config->enable_consensus_with_half_votes = false)
- * (exact 50% +1 ==> (n/2)+1) ==> (4/2)+1 = 3
+ * if(pool_config->enable_consensus_with_half_votes = false) (exact 50% +1
+ * ==> (n/2)+1) ==> (4/2)+1 = 3
*
*/
- int required_node_count = get_minimum_remote_nodes_required_for_quorum() + 1;
+ int required_node_count = get_minimum_remote_nodes_required_for_quorum() + 1;
+
/*
* When the total number of nodes in the watchdog cluster including the
* local node are even, The number of votes required for the consensus
- * depends on the enable_consensus_with_half_votes.
- * So for even number of nodes when enable_consensus_with_half_votes is
- * not allowed than we would add one more vote than exact 50%
+	 * depends on enable_consensus_with_half_votes. So for an even number of
+	 * nodes, when enable_consensus_with_half_votes is not allowed, we add one
+	 * more vote than exact 50%
*/
if (g_cluster.memberRemoteNodeCount % 2 != 0)
{
@@ -7390,7 +7464,7 @@ set_state(WD_STATES newState)
static void
-allocate_resultNodes_in_command(WDCommandData * ipcCommand)
+allocate_resultNodes_in_command(WDCommandData *ipcCommand)
{
MemoryContext oldCxt;
int i;
@@ -7409,7 +7483,7 @@ allocate_resultNodes_in_command(WDCommandData * ipcCommand)
static void
-process_remote_online_recovery_command(WatchdogNode * wdNode, WDPacketData * pkt)
+process_remote_online_recovery_command(WatchdogNode *wdNode, WDPacketData *pkt)
{
char *func_name;
int node_count = 0;
@@ -7498,7 +7572,7 @@ process_remote_online_recovery_command(WatchdogNode * wdNode, WDPacketData * pkt
static bool
-reply_is_received_for_pgpool_replicate_command(WatchdogNode * wdNode, WDPacketData * pkt, WDCommandData * ipcCommand)
+reply_is_received_for_pgpool_replicate_command(WatchdogNode *wdNode, WDPacketData *pkt, WDCommandData *ipcCommand)
{
int i;
WDCommandNodeResult *nodeResult = NULL;
@@ -7553,7 +7627,7 @@ reply_is_received_for_pgpool_replicate_command(WatchdogNode * wdNode, WDPacketDa
* return true if want to cancel timer,
*/
static bool
-process_wd_command_timer_event(bool timer_expired, WDFunctionCommandData * wd_func_command)
+process_wd_command_timer_event(bool timer_expired, WDFunctionCommandData *wd_func_command)
{
if (wd_func_command->commandType == WD_IPC_ONLINE_RECOVERY_COMMAND)
{
@@ -7631,7 +7705,7 @@ process_wd_func_commands_for_timer_events(void)
}
static void
-add_wd_command_for_timer_events(unsigned int expire_secs, bool need_tics, WDFunctionCommandData * wd_func_command)
+add_wd_command_for_timer_events(unsigned int expire_secs, bool need_tics, WDFunctionCommandData *wd_func_command)
{
/* create a new Timer struct */
MemoryContext oldCtx = MemoryContextSwitchTo(TopMemoryContext);
@@ -7676,7 +7750,7 @@ do { \
} while(0)
static void
-verify_pool_configurations(WatchdogNode * wdNode, POOL_CONFIG * config)
+verify_pool_configurations(WatchdogNode *wdNode, POOL_CONFIG *config)
{
int i;
@@ -7757,7 +7831,7 @@ verify_pool_configurations(WatchdogNode * wdNode, POOL_CONFIG * config)
}
static bool
-get_authhash_for_node(WatchdogNode * wdNode, char *authhash)
+get_authhash_for_node(WatchdogNode *wdNode, char *authhash)
{
if (strlen(pool_config->wd_authkey))
{
@@ -7777,7 +7851,7 @@ get_authhash_for_node(WatchdogNode * wdNode, char *authhash)
}
static bool
-verify_authhash_for_node(WatchdogNode * wdNode, char *authhash)
+verify_authhash_for_node(WatchdogNode *wdNode, char *authhash)
{
if (strlen(pool_config->wd_authkey))
{
@@ -7809,7 +7883,7 @@ verify_authhash_for_node(WatchdogNode * wdNode, char *authhash)
* to restrict certain watchdog IPC functions for outside of pgpool-II
*/
static bool
-check_IPC_client_authentication(json_value * rootObj, bool internal_client_only)
+check_IPC_client_authentication(json_value *rootObj, bool internal_client_only)
{
char *packet_auth_key;
unsigned int packet_key;
@@ -7883,7 +7957,7 @@ check_IPC_client_authentication(json_value * rootObj, bool internal_client_only)
*/
static bool
-check_and_report_IPC_authentication(WDCommandData * ipcCommand)
+check_and_report_IPC_authentication(WDCommandData *ipcCommand)
{
json_value *root = NULL;
bool internal_client_only = false;
@@ -7963,7 +8037,7 @@ check_and_report_IPC_authentication(WDCommandData * ipcCommand)
}
static void
-print_watchdog_node_info(WatchdogNode * wdNode)
+print_watchdog_node_info(WatchdogNode *wdNode)
{
ereport(DEBUG2,
(errmsg("state: \"%s\" Host: \"%s\" Name: \"%s\" WD Port:%d PP Port: %d priority:%d",
@@ -7976,7 +8050,7 @@ print_watchdog_node_info(WatchdogNode * wdNode)
}
static void
-print_packet_node_info(WDPacketData * pkt, WatchdogNode * wdNode, bool sending)
+print_packet_node_info(WDPacketData *pkt, WatchdogNode *wdNode, bool sending)
{
int i;
packet_types *pkt_type = NULL;
@@ -8009,7 +8083,7 @@ print_packet_node_info(WDPacketData * pkt, WatchdogNode * wdNode, bool sending)
}
static void
-print_packet_info(WDPacketData * pkt, bool sending)
+print_packet_info(WDPacketData *pkt, bool sending)
{
int i;
packet_types *pkt_type = NULL;
@@ -8041,7 +8115,7 @@ print_packet_info(WDPacketData * pkt, bool sending)
}
static int
-send_command_packet_to_remote_nodes(WDCommandData * ipcCommand, bool source_included)
+send_command_packet_to_remote_nodes(WDCommandData *ipcCommand, bool source_included)
{
int i;
@@ -8106,7 +8180,7 @@ send_command_packet_to_remote_nodes(WDCommandData * ipcCommand, bool source_incl
}
static void
-set_cluster_leader_node(WatchdogNode * wdNode)
+set_cluster_leader_node(WatchdogNode *wdNode)
{
if (WD_LEADER_NODE != wdNode)
{
@@ -8124,7 +8198,7 @@ set_cluster_leader_node(WatchdogNode * wdNode)
}
}
-static WatchdogNode*
+static WatchdogNode *
getLeaderWatchdogNode(void)
{
return g_cluster.clusterLeaderInfo.leaderNode;
@@ -8133,11 +8207,13 @@ getLeaderWatchdogNode(void)
static int
update_cluster_memberships(void)
{
- int i;
+ int i;
+
g_cluster.memberRemoteNodeCount = g_cluster.remoteNodeCount;
for (i = 0; i < g_cluster.remoteNodeCount; i++)
{
WatchdogNode *wdNode = &(g_cluster.remoteNodes[i]);
+
if (wdNode->membership_status != WD_NODE_MEMBERSHIP_ACTIVE)
g_cluster.memberRemoteNodeCount--;
}
@@ -8145,7 +8221,7 @@ update_cluster_memberships(void)
}
static int
-revoke_cluster_membership_of_node(WatchdogNode* wdNode, WD_NODE_MEMBERSHIP_STATUS revoke_status)
+revoke_cluster_membership_of_node(WatchdogNode *wdNode, WD_NODE_MEMBERSHIP_STATUS revoke_status)
{
if (wdNode->membership_status == WD_NODE_MEMBERSHIP_ACTIVE)
{
@@ -8153,7 +8229,7 @@ revoke_cluster_membership_of_node(WatchdogNode* wdNode, WD_NODE_MEMBERSHIP_STATU
ereport(LOG,
(errmsg("revoking the membership of [%s] node:\"%s\" [node_id:%d]",
- wd_state_names[wdNode->state], wdNode->nodeName,wdNode->pgpool_node_id),
+ wd_state_names[wdNode->state], wdNode->nodeName, wdNode->pgpool_node_id),
errdetail("membership revoke reason: \"%s\"",
wd_cluster_membership_status[wdNode->membership_status])));
@@ -8163,14 +8239,14 @@ revoke_cluster_membership_of_node(WatchdogNode* wdNode, WD_NODE_MEMBERSHIP_STATU
}
static int
-restore_cluster_membership_of_node(WatchdogNode* wdNode)
+restore_cluster_membership_of_node(WatchdogNode *wdNode)
{
if (wdNode->membership_status != WD_NODE_MEMBERSHIP_ACTIVE)
{
ereport(LOG,
- (errmsg("Restoring cluster membership of node:\"%s\"",wdNode->nodeName),
+ (errmsg("Restoring cluster membership of node:\"%s\"", wdNode->nodeName),
errdetail("membership of node was revoked because it was \"%s\"",
- wd_cluster_membership_status[wdNode->membership_status])));
+ wd_cluster_membership_status[wdNode->membership_status])));
wdNode->membership_status = WD_NODE_MEMBERSHIP_ACTIVE;
/* reset the lost time on the node */
@@ -8184,22 +8260,25 @@ restore_cluster_membership_of_node(WatchdogNode* wdNode)
static void
reset_lost_timers(void)
{
- int i;
+ int i;
+
for (i = 0; i < g_cluster.remoteNodeCount; i++)
{
WatchdogNode *wdNode = &(g_cluster.remoteNodes[i]);
+
wdNode->lost_time.tv_sec = 0;
wdNode->lost_time.tv_usec = 0;
}
}
static int
-standby_node_join_cluster(WatchdogNode * wdNode)
+standby_node_join_cluster(WatchdogNode *wdNode)
{
if (get_local_node_state() == WD_COORDINATOR)
{
int i;
- /* Just rest the lost time stamp*/
+
+		/* Just reset the lost timestamp */
/* set the timestamp on node to track for how long this node is lost */
wdNode->lost_time.tv_sec = 0;
wdNode->lost_time.tv_usec = 0;
@@ -8225,7 +8304,7 @@ standby_node_join_cluster(WatchdogNode * wdNode)
}
static int
-standby_node_left_cluster(WatchdogNode * wdNode)
+standby_node_left_cluster(WatchdogNode *wdNode)
{
if (get_local_node_state() == WD_COORDINATOR)
{
@@ -8252,7 +8331,11 @@ standby_node_left_cluster(WatchdogNode * wdNode)
*/
ereport(LOG,
(errmsg("removing watchdog node \"%s\" from the standby list", wdNode->nodeName)));
- /* set the timestamp on node to track for how long this node is lost */
+
+ /*
+					 * set the timestamp on the node to track how long this
+					 * node has been lost
+ */
gettimeofday(&wdNode->lost_time, NULL);
g_cluster.clusterLeaderInfo.standbyNodes[i] = NULL;
g_cluster.clusterLeaderInfo.standby_nodes_count--;
@@ -8281,20 +8364,24 @@ clear_standby_nodes_list(void)
g_cluster.localNode->standby_nodes_count = 0;
}
-static void update_missed_beacon_count(WDCommandData* ipcCommand, bool clear)
+static void
+update_missed_beacon_count(WDCommandData *ipcCommand, bool clear)
{
- int i;
- for (i=0; i< g_cluster.remoteNodeCount; i++)
+ int i;
+
+ for (i = 0; i < g_cluster.remoteNodeCount; i++)
{
if (clear)
{
- WatchdogNode* wdNode = &(g_cluster.remoteNodes[i]);
+ WatchdogNode *wdNode = &(g_cluster.remoteNodes[i]);
+
wdNode->missed_beacon_count = 0;
}
else
{
- WDCommandNodeResult* nodeResult = &ipcCommand->nodeResults[i];
- if (ipcCommand->commandStatus == COMMAND_IN_PROGRESS )
+ WDCommandNodeResult *nodeResult = &ipcCommand->nodeResults[i];
+
+ if (ipcCommand->commandStatus == COMMAND_IN_PROGRESS)
return;
if (nodeResult->cmdState == COMMAND_STATE_SENT)
@@ -8304,8 +8391,8 @@ static void update_missed_beacon_count(WDCommandData* ipcCommand, bool clear)
nodeResult->wdNode->missed_beacon_count++;
if (nodeResult->wdNode->missed_beacon_count > 1)
ereport(LOG,
- (errmsg("remote node \"%s\" is not replying to our beacons",nodeResult->wdNode->nodeName),
- errdetail("missed beacon reply count:%d",nodeResult->wdNode->missed_beacon_count)));
+ (errmsg("remote node \"%s\" is not replying to our beacons", nodeResult->wdNode->nodeName),
+ errdetail("missed beacon reply count:%d", nodeResult->wdNode->missed_beacon_count)));
}
else
nodeResult->wdNode->missed_beacon_count = 0;
@@ -8314,7 +8401,7 @@ static void update_missed_beacon_count(WDCommandData* ipcCommand, bool clear)
{
if (nodeResult->wdNode->missed_beacon_count > 0)
ereport(LOG,
- (errmsg("remote node \"%s\" is replying again after missing %d beacons",nodeResult->wdNode->nodeName,
+ (errmsg("remote node \"%s\" is replying again after missing %d beacons", nodeResult->wdNode->nodeName,
nodeResult->wdNode->missed_beacon_count)));
nodeResult->wdNode->missed_beacon_count = 0;
}
@@ -8323,9 +8410,10 @@ static void update_missed_beacon_count(WDCommandData* ipcCommand, bool clear)
}
static void
-update_failover_timeout(WatchdogNode * wdNode, POOL_CONFIG *pool_config)
+update_failover_timeout(WatchdogNode *wdNode, POOL_CONFIG *pool_config)
{
- int failover_command_timeout;
+ int failover_command_timeout;
+
if (get_local_node_state() != WD_COORDINATOR)
return;
@@ -8333,12 +8421,13 @@ update_failover_timeout(WatchdogNode * wdNode, POOL_CONFIG *pool_config)
if (pool_config->health_check_params)
{
- int i;
- for (i = 0 ; i < pool_config->backend_desc->num_backends; i++)
+ int i;
+
+ for (i = 0; i < pool_config->backend_desc->num_backends; i++)
{
- int pn_failover_command_timeout = pool_config->health_check_params[i].health_check_period +
- (pool_config->health_check_params[i].health_check_retry_delay *
- pool_config->health_check_params[i].health_check_max_retries);
+ int pn_failover_command_timeout = pool_config->health_check_params[i].health_check_period +
+ (pool_config->health_check_params[i].health_check_retry_delay *
+ pool_config->health_check_params[i].health_check_max_retries);
if (failover_command_timeout < pn_failover_command_timeout)
failover_command_timeout = pn_failover_command_timeout;
@@ -8347,7 +8436,7 @@ update_failover_timeout(WatchdogNode * wdNode, POOL_CONFIG *pool_config)
if (g_cluster.localNode == wdNode)
{
- /* Reset*/
+ /* Reset */
g_cluster.failover_command_timeout = failover_command_timeout;
}
else if (g_cluster.failover_command_timeout < failover_command_timeout)
@@ -8356,7 +8445,7 @@ update_failover_timeout(WatchdogNode * wdNode, POOL_CONFIG *pool_config)
if (g_cluster.failover_command_timeout < FAILOVER_COMMAND_FINISH_TIMEOUT)
g_cluster.failover_command_timeout = FAILOVER_COMMAND_FINISH_TIMEOUT;
- ereport(LOG,(errmsg("Setting failover command timeout to %d",failover_command_timeout)));
+ ereport(LOG, (errmsg("Setting failover command timeout to %d", failover_command_timeout)));
}
#ifdef WATCHDOG_DEBUG
@@ -8390,10 +8479,10 @@ unsigned int watchdog_debug_command = 0;
watchdog_debug_commands wd_debug_commands[] = {
{"DO_NOT_REPLY_TO_BEACON", DO_NOT_REPLY_TO_BEACON},
- {"DO_NOT_SEND_BEACON", DO_NOT_SEND_BEACON},
+ {"DO_NOT_SEND_BEACON", DO_NOT_SEND_BEACON},
{"KILL_ALL_COMMUNICATION", KILL_ALL_COMMUNICATION},
- {"KILL_ALL_RECEIVERS", KILL_ALL_RECEIVERS},
- {"KILL_ALL_SENDERS", KILL_ALL_SENDERS},
+ {"KILL_ALL_RECEIVERS", KILL_ALL_RECEIVERS},
+ {"KILL_ALL_SENDERS", KILL_ALL_SENDERS},
{"", 0}
};
@@ -8424,6 +8513,7 @@ check_debug_request_do_not_reply_beacon(void)
{
return (watchdog_debug_command & DO_NOT_REPLY_TO_BEACON);
}
+
/*
* Check watchdog debug request options file for debug commands
* each line should contain only one command
@@ -8465,21 +8555,22 @@ load_watchdog_debug_test_option(void)
for (i = 0;; i++)
{
- int cmd = 0;
- bool valid_command = false;
+ int cmd = 0;
+ bool valid_command = false;
+
readbuf[MAXLINE - 1] = '\0';
if (fgets(readbuf, MAXLINE - 1, fd) == 0)
break;
- for (cmd =0 ;; cmd++)
+ for (cmd = 0;; cmd++)
{
if (strlen(wd_debug_commands[cmd].command) == 0 || wd_debug_commands[cmd].code == 0)
break;
- if (strncasecmp(wd_debug_commands[cmd].command,readbuf,strlen(wd_debug_commands[cmd].command)) == 0)
+ if (strncasecmp(wd_debug_commands[cmd].command, readbuf, strlen(wd_debug_commands[cmd].command)) == 0)
{
ereport(DEBUG3,
(errmsg("Watchdog DEBUG COMMAND %d: \"%s\" request found",
- cmd,wd_debug_commands[cmd].command)));
+ cmd, wd_debug_commands[cmd].command)));
watchdog_debug_command |= wd_debug_commands[cmd].code;
valid_command = true;
@@ -8488,7 +8579,7 @@ load_watchdog_debug_test_option(void)
}
if (!valid_command)
ereport(WARNING,
- (errmsg("%s file contains invalid command",
+ (errmsg("%s file contains invalid command",
wd_debug_request_file),
errdetail("\"%s\" not recognized", readbuf)));
}
@@ -8502,17 +8593,27 @@ load_watchdog_debug_test_option(void)
*/
static bool
check_debug_request_do_not_send_beacon(void)
-{return false;}
+{
+ return false;
+}
static bool
check_debug_request_do_not_reply_beacon(void)
-{return false;}
+{
+ return false;
+}
static bool
check_debug_request_kill_all_communication(void)
-{return false;}
+{
+ return false;
+}
static bool
check_debug_request_kill_all_receivers(void)
-{return false;}
+{
+ return false;
+}
static bool
check_debug_request_kill_all_senders(void)
-{return false;}
+{
+ return false;
+}
#endif
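The WATCHDOG_DEBUG machinery above boils down to a bitmask: load_watchdog_debug_test_option() ORs in the code of every recognized line of the request file, and each check_debug_request_*() helper ANDs against a single flag. A compact standalone sketch of that pattern (flag values, buffer size and table contents are invented; the real table is wd_debug_commands[]):

    /* Standalone sketch of the debug-command bitmask pattern; the flag values and
     * the two-entry table below are assumptions for illustration only. */
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>
    #include <stdbool.h>

    #define SKETCH_DO_NOT_REPLY_TO_BEACON 0x01
    #define SKETCH_DO_NOT_SEND_BEACON     0x02

    static unsigned int sketch_debug_command = 0;

    static const struct { const char *command; unsigned int code; } sketch_commands[] = {
        {"DO_NOT_REPLY_TO_BEACON", SKETCH_DO_NOT_REPLY_TO_BEACON},
        {"DO_NOT_SEND_BEACON",     SKETCH_DO_NOT_SEND_BEACON},
        {"", 0}
    };

    static void
    sketch_load_debug_options(FILE *fd)
    {
        char readbuf[1024];

        while (fgets(readbuf, sizeof(readbuf), fd) != NULL)
        {
            int cmd;

            for (cmd = 0; sketch_commands[cmd].code != 0; cmd++)
            {
                /* one command per line, matched case-insensitively on its prefix */
                if (strncasecmp(sketch_commands[cmd].command, readbuf,
                                strlen(sketch_commands[cmd].command)) == 0)
                    sketch_debug_command |= sketch_commands[cmd].code;
            }
        }
    }

    static bool
    sketch_do_not_reply_to_beacon(void)
    {
        return (sketch_debug_command & SKETCH_DO_NOT_REPLY_TO_BEACON) != 0;
    }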
diff --git a/src/watchdog/wd_commands.c b/src/watchdog/wd_commands.c
index 145955865..1539ef675 100644
--- a/src/watchdog/wd_commands.c
+++ b/src/watchdog/wd_commands.c
@@ -54,10 +54,10 @@
#define WD_DEFAULT_IPC_COMMAND_TIMEOUT 8 /* default number of seconds to
* wait for IPC command results */
-int wd_command_timeout_sec = WD_DEFAULT_IPC_COMMAND_TIMEOUT;
+int wd_command_timeout_sec = WD_DEFAULT_IPC_COMMAND_TIMEOUT;
WD_STATES
-get_watchdog_local_node_state(char* wd_authkey)
+get_watchdog_local_node_state(char *wd_authkey)
{
WD_STATES ret = WD_DEAD;
WDGenericData *state = get_wd_runtime_variable_value(wd_authkey, WD_RUNTIME_VAR_WD_STATE);
@@ -83,7 +83,7 @@ get_watchdog_local_node_state(char* wd_authkey)
}
int
-get_watchdog_quorum_state(char* wd_authkey)
+get_watchdog_quorum_state(char *wd_authkey)
{
WD_STATES ret = WD_DEAD;
WDGenericData *state = get_wd_runtime_variable_value(wd_authkey, WD_RUNTIME_VAR_QUORUM_STATE);
@@ -114,10 +114,10 @@ get_watchdog_quorum_state(char* wd_authkey)
* watchdog IPC
*/
WDGenericData *
-get_wd_runtime_variable_value(char* wd_authkey, char *varName)
+get_wd_runtime_variable_value(char *wd_authkey, char *varName)
{
char *data = get_request_json(WD_JSON_KEY_VARIABLE_NAME, varName,
- wd_authkey);
+ wd_authkey);
WDIPCCmdResult *result = issue_command_to_watchdog(WD_GET_RUNTIME_VARIABLE_VALUE,
wd_command_timeout_sec,
@@ -281,7 +281,7 @@ wd_get_watchdog_nodes_json(char *wd_authkey, int nodeID)
jw_put_int(jNode, "NodeID", nodeID);
if (wd_authkey != NULL && strlen(wd_authkey) > 0)
- jw_put_string(jNode, WD_IPC_AUTH_KEY, wd_authkey); /* put the auth key */
+ jw_put_string(jNode, WD_IPC_AUTH_KEY, wd_authkey); /* put the auth key */
jw_finish_document(jNode);
@@ -330,16 +330,16 @@ wd_get_watchdog_nodes_json(char *wd_authkey, int nodeID)
}
WDNodeInfo *
-parse_watchdog_node_info_from_wd_node_json(json_value * source)
+parse_watchdog_node_info_from_wd_node_json(json_value *source)
{
char *ptr;
WDNodeInfo *wdNodeInfo = palloc0(sizeof(WDNodeInfo));
-
+
if (source->type != json_object)
ereport(ERROR,
(errmsg("invalid json data"),
errdetail("node is not of object type")));
-
+
if (json_get_int_value_for_key(source, "ID", &wdNodeInfo->id))
{
ereport(ERROR,
@@ -360,7 +360,7 @@ parse_watchdog_node_info_from_wd_node_json(json_value * source)
else
strncpy(wdNodeInfo->membership_status_string, ptr, sizeof(wdNodeInfo->membership_status_string) - 1);
-
+
ptr = json_get_string_value_for_key(source, "NodeName");
if (ptr == NULL)
{
@@ -369,7 +369,7 @@ parse_watchdog_node_info_from_wd_node_json(json_value * source)
errdetail("unable to find Watchdog Node Name")));
}
strncpy(wdNodeInfo->nodeName, ptr, sizeof(wdNodeInfo->nodeName) - 1);
-
+
ptr = json_get_string_value_for_key(source, "HostName");
if (ptr == NULL)
{
@@ -378,7 +378,7 @@ parse_watchdog_node_info_from_wd_node_json(json_value * source)
errdetail("unable to find Watchdog Host Name")));
}
strncpy(wdNodeInfo->hostName, ptr, sizeof(wdNodeInfo->hostName) - 1);
-
+
ptr = json_get_string_value_for_key(source, "DelegateIP");
if (ptr == NULL)
{
@@ -387,28 +387,28 @@ parse_watchdog_node_info_from_wd_node_json(json_value * source)
errdetail("unable to find Watchdog delegate IP")));
}
strncpy(wdNodeInfo->delegate_ip, ptr, sizeof(wdNodeInfo->delegate_ip) - 1);
-
+
if (json_get_int_value_for_key(source, "WdPort", &wdNodeInfo->wd_port))
{
ereport(ERROR,
(errmsg("invalid json data"),
errdetail("unable to find WdPort")));
}
-
+
if (json_get_int_value_for_key(source, "PgpoolPort", &wdNodeInfo->pgpool_port))
{
ereport(ERROR,
(errmsg("invalid json data"),
errdetail("unable to find PgpoolPort")));
}
-
+
if (json_get_int_value_for_key(source, "State", &wdNodeInfo->state))
{
ereport(ERROR,
(errmsg("invalid json data"),
errdetail("unable to find state")));
}
-
+
ptr = json_get_string_value_for_key(source, "StateName");
if (ptr == NULL)
{
@@ -417,19 +417,20 @@ parse_watchdog_node_info_from_wd_node_json(json_value * source)
errdetail("unable to find Watchdog State Name")));
}
strncpy(wdNodeInfo->stateName, ptr, sizeof(wdNodeInfo->stateName) - 1);
-
+
if (json_get_int_value_for_key(source, "Priority", &wdNodeInfo->wd_priority))
{
ereport(ERROR,
(errmsg("invalid json data"),
errdetail("unable to find state")));
}
-
+
return wdNodeInfo;
-
+
}
-extern void set_wd_command_timeout(int sec)
+extern void
+set_wd_command_timeout(int sec)
{
wd_command_timeout_sec = sec;
}
@@ -453,4 +454,3 @@ get_request_json(char *key, char *value, char *authKey)
jw_destroy(jNode);
return json_str;
}
-
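For orientation, a hedged usage sketch of the entry points reformatted above; only the signatures, the WD_DEAD starting value and the 8-second default visible in the hunks are taken as given, the caller itself is hypothetical:

    /* Hypothetical caller; assumes the usual pgpool-II watchdog headers are in scope. */
    static void
    sketch_check_local_watchdog(char *wd_authkey)
    {
        WD_STATES state;

        set_wd_command_timeout(10);     /* default is WD_DEFAULT_IPC_COMMAND_TIMEOUT (8 seconds) */
        state = get_watchdog_local_node_state(wd_authkey);
        if (state == WD_DEAD)           /* WD_DEAD is also what the getter starts from on failure */
            ereport(LOG,
                    (errmsg("local watchdog state could not be retrieved")));
    }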
diff --git a/src/watchdog/wd_heartbeat.c b/src/watchdog/wd_heartbeat.c
index 09b1ef559..a9e99dec9 100644
--- a/src/watchdog/wd_heartbeat.c
+++ b/src/watchdog/wd_heartbeat.c
@@ -66,22 +66,22 @@ typedef struct
int from_pgpool_port;
struct timeval send_time;
char hash[WD_AUTH_HASH_LEN + 1];
-} WdHbPacket;
+} WdHbPacket;
static RETSIGTYPE hb_sender_exit(int sig);
static RETSIGTYPE hb_receiver_exit(int sig);
-static int hton_wd_hb_packet(WdHbPacket * to, WdHbPacket * from);
-static int ntoh_wd_hb_packet(WdHbPacket * to, WdHbPacket * from);
-static int packet_to_string_hb(WdHbPacket * pkt, char *str, int maxlen);
+static int hton_wd_hb_packet(WdHbPacket *to, WdHbPacket *from);
+static int ntoh_wd_hb_packet(WdHbPacket *to, WdHbPacket *from);
+static int packet_to_string_hb(WdHbPacket *pkt, char *str, int maxlen);
static void wd_set_reuseport(int sock);
static int select_socket_from_list(List *socks);
-static int wd_create_hb_send_socket(WdHbIf * hb_if);
-static List *wd_create_hb_recv_socket(WdHbIf * hb_if);
+static int wd_create_hb_send_socket(WdHbIf *hb_if);
+static List *wd_create_hb_recv_socket(WdHbIf *hb_if);
-static void wd_hb_send(int sock, WdHbPacket * pkt, int len, const char *destination, const int dest_port);
-static void wd_hb_recv(int sock, WdHbPacket * pkt, char *from_addr);
+static void wd_hb_send(int sock, WdHbPacket *pkt, int len, const char *destination, const int dest_port);
+static void wd_hb_recv(int sock, WdHbPacket *pkt, char *from_addr);
/*
* Readable socket will be returned among the listening socket list.
@@ -151,7 +151,7 @@ select_socket_from_list(List *socks)
/* create socket for sending heartbeat */
static int
-wd_create_hb_send_socket(WdHbIf * hb_if)
+wd_create_hb_send_socket(WdHbIf *hb_if)
{
int sock = -1;
int tos;
@@ -246,7 +246,7 @@ wd_create_hb_send_socket(WdHbIf * hb_if)
/* create socket for receiving heartbeat */
static List *
-wd_create_hb_recv_socket(WdHbIf * hb_if)
+wd_create_hb_recv_socket(WdHbIf *hb_if)
{
int sock = -1,
gai_ret,
@@ -431,7 +431,7 @@ wd_create_hb_recv_socket(WdHbIf * hb_if)
/* send heartbeat signal */
static void
-wd_hb_send(int sock, WdHbPacket * pkt, int len, const char *host, const int port)
+wd_hb_send(int sock, WdHbPacket *pkt, int len, const char *host, const int port)
{
int rtn;
WdHbPacket buf;
@@ -482,7 +482,7 @@ wd_hb_send(int sock, WdHbPacket * pkt, int len, const char *host, const int port
*/
void
static
-wd_hb_recv(int sock, WdHbPacket * pkt, char *from_addr)
+wd_hb_recv(int sock, WdHbPacket *pkt, char *from_addr)
{
int rtn;
WdHbPacket buf;
@@ -523,7 +523,7 @@ wd_hb_recv(int sock, WdHbPacket * pkt, char *from_addr)
/* fork heartbeat receiver child */
pid_t
-wd_hb_receiver(int fork_wait_time, WdHbIf * hb_if)
+wd_hb_receiver(int fork_wait_time, WdHbIf *hb_if)
{
int sock;
pid_t pid = 0;
@@ -668,7 +668,7 @@ wd_hb_receiver(int fork_wait_time, WdHbIf * hb_if)
/* fork heartbeat sender child */
pid_t
-wd_hb_sender(int fork_wait_time, WdHbIf * hb_if)
+wd_hb_sender(int fork_wait_time, WdHbIf *hb_if)
{
int sock;
pid_t pid = 0;
@@ -811,7 +811,7 @@ hb_receiver_exit(int sig)
}
static int
-hton_wd_hb_packet(WdHbPacket * to, WdHbPacket * from)
+hton_wd_hb_packet(WdHbPacket *to, WdHbPacket *from)
{
if ((to == NULL) || (from == NULL))
{
@@ -828,7 +828,7 @@ hton_wd_hb_packet(WdHbPacket * to, WdHbPacket * from)
}
static int
-ntoh_wd_hb_packet(WdHbPacket * to, WdHbPacket * from)
+ntoh_wd_hb_packet(WdHbPacket *to, WdHbPacket *from)
{
if ((to == NULL) || (from == NULL))
{
@@ -846,7 +846,7 @@ ntoh_wd_hb_packet(WdHbPacket * to, WdHbPacket * from)
/* convert packet to string and return length of the string */
static int
-packet_to_string_hb(WdHbPacket * pkt, char *str, int maxlen)
+packet_to_string_hb(WdHbPacket *pkt, char *str, int maxlen)
{
int len;
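A hedged sketch of the send path those prototypes describe; everything not visible in the hunks (where the local port comes from, the byte-order conversion happening inside wd_hb_send()) is an assumption:

    /* Hypothetical sender step built only from the visible WdHbPacket fields and prototypes. */
    static void
    sketch_send_one_heartbeat(int sock, const char *dest_host, int dest_port)
    {
        WdHbPacket pkt;

        memset(&pkt, 0, sizeof(pkt));
        pkt.from_pgpool_port = pool_config->port;   /* assumption: advertise the local pgpool port */
        gettimeofday(&pkt.send_time, NULL);

        /* wd_hb_send() is expected to run the packet through hton_wd_hb_packet() before sending */
        wd_hb_send(sock, &pkt, sizeof(pkt), dest_host, dest_port);
    }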
diff --git a/src/watchdog/wd_if.c b/src/watchdog/wd_if.c
index f206ec9b3..d97d252e1 100644
--- a/src/watchdog/wd_if.c
+++ b/src/watchdog/wd_if.c
@@ -119,7 +119,10 @@ wd_IP_up(void)
if (command)
{
- /* If if_up_cmd starts with "/", the setting specified in "if_cmd_path" will be ignored */
+ /*
+ * If if_up_cmd starts with "/", the setting specified in
+ * "if_cmd_path" will be ignored
+ */
if (command[0] == '/')
snprintf(path, sizeof(path), "%s", command);
else
@@ -141,7 +144,10 @@ wd_IP_up(void)
command = wd_get_cmd(pool_config->arping_cmd);
if (command)
{
- /* If arping_cmd starts with "/", the setting specified in "arping_path" will be ignored */
+ /*
+ * If arping_cmd starts with "/", the setting specified in
+ * "arping_path" will be ignored
+ */
if (command[0] == '/')
snprintf(path, sizeof(path), "%s", command);
else
@@ -202,7 +208,10 @@ wd_IP_down(void)
command = wd_get_cmd(pool_config->if_down_cmd);
if (command)
{
- /* If if_down_cmd starts with "/", the setting specified in "if_cmd_path" will be ignored */
+ /*
+ * If if_down_cmd starts with "/", the setting specified in
+ * "if_cmd_path" will be ignored
+ */
if (command[0] == '/')
snprintf(path, sizeof(path), "%s", command);
else
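The three reflowed comments in this file all state the same rule, sketched below; the else branch joining with if_cmd_path is an assumption, since the hunks cut off before it:

    /* Hypothetical condensed form of the path resolution in wd_IP_up()/wd_IP_down(). */
    static void
    sketch_resolve_if_command(const char *command, const char *if_cmd_path,
                              char *path, size_t pathlen)
    {
        if (command[0] == '/')                       /* absolute path: if_cmd_path is ignored */
            snprintf(path, pathlen, "%s", command);
        else                                         /* otherwise prefix the configured directory */
            snprintf(path, pathlen, "%s/%s", if_cmd_path, command);
    }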
diff --git a/src/watchdog/wd_internal_commands.c b/src/watchdog/wd_internal_commands.c
index 61a796028..9a7c810ce 100644
--- a/src/watchdog/wd_internal_commands.c
+++ b/src/watchdog/wd_internal_commands.c
@@ -56,24 +56,24 @@
#define WD_INTERLOCK_WAIT_COUNT ((int) ((WD_INTERLOCK_TIMEOUT_SEC * 1000)/WD_INTERLOCK_WAIT_MSEC))
/* shared memory variables */
-bool *watchdog_require_cleanup = NULL; /* shared memory variable set
- * to true when watchdog
- * process terminates
- * abnormally */
-bool *watchdog_node_escalated = NULL; /* shared memory variable set to
- * true when watchdog process has
- * performed escalation */
-unsigned int *ipc_shared_key = NULL; /* key lives in shared memory used to
- * identify the ipc internal clients */
+bool *watchdog_require_cleanup = NULL; /* shared memory variable set
+ * to true when watchdog
+ * process terminates
+ * abnormally */
+bool *watchdog_node_escalated = NULL; /* shared memory variable set to
+ * true when watchdog process has
+ * performed escalation */
+unsigned int *ipc_shared_key = NULL; /* key lives in shared memory used to
+ * identify the ipc internal clients */
static char *get_wd_failover_state_json(bool start);
-static WDFailoverCMDResults wd_get_failover_result_from_data(WDIPCCmdResult * result,
+static WDFailoverCMDResults wd_get_failover_result_from_data(WDIPCCmdResult *result,
unsigned int *wd_failover_id);
static WDFailoverCMDResults wd_issue_failover_command(char *func_name, int *node_id_set,
- int count, unsigned char flags);
+ int count, unsigned char flags);
static WdCommandResult wd_send_locking_command(WD_LOCK_STANDBY_TYPE lock_type,
- bool acquire);
+ bool acquire);
void
wd_ipc_initialize_data(void)
@@ -103,9 +103,11 @@ wd_ipc_initialize_data(void)
}
}
-size_t wd_ipc_get_shared_mem_size(void)
+size_t
+wd_ipc_get_shared_mem_size(void)
{
- size_t size = 0;
+ size_t size = 0;
+
size += MAXALIGN(sizeof(unsigned int)); /* ipc_shared_key */
size += MAXALIGN(sizeof(bool)); /* watchdog_require_cleanup */
size += MAXALIGN(sizeof(bool)); /* watchdog_node_escalated */
@@ -278,13 +280,13 @@ wd_end_recovery(void)
}
WdCommandResult
-wd_execute_cluster_command(char* clusterCommand, List *argsList)
+wd_execute_cluster_command(char *clusterCommand, List *argsList)
{
char type;
unsigned int *shared_key = get_ipc_shared_key();
char *func = get_wd_exec_cluster_command_json(clusterCommand, argsList,
- shared_key ? *shared_key : 0, pool_config->wd_authkey);
+ shared_key ? *shared_key : 0, pool_config->wd_authkey);
WDIPCCmdResult *result = issue_command_to_watchdog(WD_EXECUTE_CLUSTER_COMMAND,
WD_DEFAULT_IPC_COMMAND_TIMEOUT,
@@ -364,7 +366,8 @@ wd_send_failover_func_status_command(bool start)
return res;
}
-static WDFailoverCMDResults wd_get_failover_result_from_data(WDIPCCmdResult * result, unsigned int *wd_failover_id)
+static WDFailoverCMDResults
+wd_get_failover_result_from_data(WDIPCCmdResult *result, unsigned int *wd_failover_id)
{
if (result == NULL)
{
@@ -558,18 +561,18 @@ static WdCommandResult
wd_send_locking_command(WD_LOCK_STANDBY_TYPE lock_type, bool acquire)
{
WdCommandResult res;
- List *args_list = NULL;
+ List *args_list = NULL;
WDExecCommandArg wdExecCommandArg[2];
strncpy(wdExecCommandArg[0].arg_name, "StandbyLockType", sizeof(wdExecCommandArg[0].arg_name) - 1);
- snprintf(wdExecCommandArg[0].arg_value, sizeof(wdExecCommandArg[0].arg_value) - 1, "%d",lock_type);
+ snprintf(wdExecCommandArg[0].arg_value, sizeof(wdExecCommandArg[0].arg_value) - 1, "%d", lock_type);
strncpy(wdExecCommandArg[1].arg_name, "LockingOperation", sizeof(wdExecCommandArg[1].arg_name) - 1);
snprintf(wdExecCommandArg[1].arg_value, sizeof(wdExecCommandArg[1].arg_value) - 1,
- "%s",acquire?"acquire":"release");
+ "%s", acquire ? "acquire" : "release");
- args_list = lappend(args_list,&wdExecCommandArg[0]);
- args_list = lappend(args_list,&wdExecCommandArg[1]);
+ args_list = lappend(args_list, &wdExecCommandArg[0]);
+ args_list = lappend(args_list, &wdExecCommandArg[1]);
ereport(DEBUG1,
(errmsg("sending standby locking request to watchdog")));
diff --git a/src/watchdog/wd_ipc_conn.c b/src/watchdog/wd_ipc_conn.c
index 32b330e96..966bdd19f 100644
--- a/src/watchdog/wd_ipc_conn.c
+++ b/src/watchdog/wd_ipc_conn.c
@@ -53,10 +53,12 @@
#include "watchdog/wd_ipc_defines.h"
static int open_wd_command_sock(bool throw_error);
+
/* shared memory variables */
char *watchdog_ipc_address = NULL;
-void wd_set_ipc_address(char *socket_dir, int port)
+void
+wd_set_ipc_address(char *socket_dir, int port)
{
if (watchdog_ipc_address == NULL)
{
@@ -84,9 +86,11 @@ wd_ipc_conn_initialize(void)
}
}
-size_t estimate_ipc_socket_addr_len(void)
+size_t
+estimate_ipc_socket_addr_len(void)
{
- return strlen(pool_config->wd_ipc_socket_dir) + 25; /* wd_ipc_socket_dir/.s.PGPOOLWD_CMD.port*/
+ return strlen(pool_config->wd_ipc_socket_dir) + 25; /* wd_ipc_socket_dir/.s.PGPOOLWD_CMD.port
+ * */
}
char *
@@ -276,11 +280,11 @@ open_wd_command_sock(bool throw_error)
}
void
-FreeCmdResult(WDIPCCmdResult * res)
+FreeCmdResult(WDIPCCmdResult *res)
{
if (res == NULL)
return;
-
+
if (res->data)
pfree(res->data);
pfree(res);
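estimate_ipc_socket_addr_len() reserves room for a path of the form noted in its comment; a sketch of the composition it anticipates (the actual format string used by wd_set_ipc_address() is not shown in the hunks, so it is an assumption here):

    /* Hypothetical composition matching the "wd_ipc_socket_dir/.s.PGPOOLWD_CMD.port" comment. */
    static char *
    sketch_build_ipc_address(const char *socket_dir, int port)
    {
        size_t  len = strlen(socket_dir) + 25;       /* mirrors estimate_ipc_socket_addr_len() */
        char   *addr = palloc(len);

        snprintf(addr, len, "%s/.s.PGPOOLWD_CMD.%d", socket_dir, port);
        return addr;
    }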
diff --git a/src/watchdog/wd_json_data.c b/src/watchdog/wd_json_data.c
index 474fc37a4..d5741b3e7 100644
--- a/src/watchdog/wd_json_data.c
+++ b/src/watchdog/wd_json_data.c
@@ -66,9 +66,9 @@ get_pool_config_from_json(char *json_data, int data_len)
goto ERROR_EXIT;
if (json_get_int_value_for_key(root, "max_spare_children", &config->max_spare_children))
goto ERROR_EXIT;
- if (json_get_int_value_for_key(root, "process_management_mode", (int*)&config->process_management))
+ if (json_get_int_value_for_key(root, "process_management_mode", (int *) &config->process_management))
goto ERROR_EXIT;
- if (json_get_int_value_for_key(root, "process_management_strategy", (int*)&config->process_management_strategy))
+ if (json_get_int_value_for_key(root, "process_management_strategy", (int *) &config->process_management_strategy))
goto ERROR_EXIT;
if (json_get_bool_value_for_key(root, "replication_mode", &config->replication_mode))
goto ERROR_EXIT;
@@ -142,17 +142,20 @@ get_pool_config_from_json(char *json_data, int data_len)
}
value = json_get_value_for_key(root, "health_check_params");
- /* We don't get separate health check params from older version
- * so be kind if the JSON does not contain one
+
+ /*
+ * We don't get separate health check params from older version so be kind
+ * if the JSON does not contain one
*/
if (value != NULL && value->type == json_array)
{
- int health_check_params_count = value->u.array.length;
+ int health_check_params_count = value->u.array.length;
+
if (health_check_params_count != config->backend_desc->num_backends)
{
ereport(LOG,
- (errmsg("unexpected number of health check parameters received"),
- errdetail("expected:%d got %d",config->backend_desc->num_backends,health_check_params_count)));
+ (errmsg("unexpected number of health check parameters received"),
+ errdetail("expected:%d got %d", config->backend_desc->num_backends, health_check_params_count)));
}
config->health_check_params = palloc0(sizeof(HealthCheckParams) * config->backend_desc->num_backends);
@@ -176,7 +179,7 @@ get_pool_config_from_json(char *json_data, int data_len)
}
}
else
- config->health_check_params = NULL;
+ config->health_check_params = NULL;
/* wd_nodes array */
value = json_get_value_for_key(root, "wd_nodes");
@@ -257,8 +260,8 @@ get_pool_config_json(void)
jw_put_bool(jNode, "failover_require_consensus", pool_config->failover_require_consensus);
jw_put_bool(jNode, "allow_multiple_failover_requests_from_node", pool_config->allow_multiple_failover_requests_from_node);
- /* Array of health_check params
- * We transport num_backend at max
+ /*
+ * Array of health_check params We transport num_backend at max
*/
jw_start_array(jNode, "health_check_params");
for (i = 0; i < pool_config->backend_desc->num_backends; i++)
@@ -369,7 +372,7 @@ parse_data_request_json(char *json_data, int data_len, char **request_type)
* and creates a json packet from it
*/
char *
-get_backend_node_status_json(WatchdogNode * wdNode)
+get_backend_node_status_json(WatchdogNode *wdNode)
{
int i;
char *json_str;
@@ -453,7 +456,7 @@ get_pg_backend_node_status_from_json(char *json_data, int data_len)
}
char *
-get_beacon_message_json(WatchdogNode * wdNode)
+get_beacon_message_json(WatchdogNode *wdNode)
{
char *json_str;
struct timeval current_time;
@@ -481,7 +484,7 @@ get_beacon_message_json(WatchdogNode * wdNode)
}
char *
-get_watchdog_node_info_json(WatchdogNode * wdNode, char *authkey)
+get_watchdog_node_info_json(WatchdogNode *wdNode, char *authkey)
{
char *json_str;
long seconds_since_current_state;
@@ -805,7 +808,7 @@ parse_wd_node_function_json(char *json_data, int data_len, char **func_name, int
}
else
{
- *flags = (unsigned char)tmpflags;
+ *flags = (unsigned char) tmpflags;
}
if (json_get_int_value_for_key(root, "NodeCount", &node_count))
@@ -873,8 +876,8 @@ char *
get_wd_exec_cluster_command_json(char *clusterCommand, List *args_list,
unsigned int sharedKey, char *authKey)
{
- char *json_str;
- int nArgs = args_list? list_length(args_list):0;
+ char *json_str;
+ int nArgs = args_list ? list_length(args_list) : 0;
JsonNode *jNode = jw_create_with_object(true);
@@ -888,20 +891,22 @@ get_wd_exec_cluster_command_json(char *clusterCommand, List *args_list,
jw_put_int(jNode, "nArgs", nArgs);
/* Array of arguments */
- if(nArgs > 0)
+ if (nArgs > 0)
{
ListCell *lc;
+
jw_start_array(jNode, "argument_list");
foreach(lc, args_list)
{
WDExecCommandArg *wdExecCommandArg = lfirst(lc);
+
jw_start_object(jNode, "Arg");
jw_put_string(jNode, "arg_name", wdExecCommandArg->arg_name);
jw_put_string(jNode, "arg_value", wdExecCommandArg->arg_value);
jw_end_element(jNode);
}
- jw_end_element(jNode); /* argument_list array End */
+ jw_end_element(jNode); /* argument_list array End */
}
@@ -916,9 +921,9 @@ parse_wd_exec_cluster_command_json(char *json_data, int data_len,
char **clusterCommand, List **args_list)
{
json_value *root;
- char *ptr = NULL;
- int i;
- int nArgs = 0;
+ char *ptr = NULL;
+ int i;
+ int nArgs = 0;
*args_list = NULL;
@@ -953,12 +958,13 @@ parse_wd_exec_cluster_command_json(char *json_data, int data_len,
if (nArgs > 0)
{
json_value *value;
+
/* backend_desc array */
value = json_get_value_for_key(root, "argument_list");
if (value == NULL || value->type != json_array)
goto ERROR_EXIT;
- if (nArgs!= value->u.array.length)
+ if (nArgs != value->u.array.length)
{
ereport(LOG,
(errmsg("watchdog is unable to parse exec cluster command json"),
@@ -968,11 +974,12 @@ parse_wd_exec_cluster_command_json(char *json_data, int data_len,
for (i = 0; i < nArgs; i++)
{
WDExecCommandArg *command_arg = palloc0(sizeof(WDExecCommandArg));
+
/*
* Append to list right away, so that deep freeing the list also
* get rid of half cooked arguments in case of an error
*/
- *args_list = lappend(*args_list,command_arg);
+ *args_list = lappend(*args_list, command_arg);
json_value *arr_value = value->u.array.values[i];
char *ptr;
@@ -994,7 +1001,7 @@ parse_wd_exec_cluster_command_json(char *json_data, int data_len,
json_value_free(root);
return true;
- ERROR_EXIT:
+ERROR_EXIT:
if (root)
json_value_free(root);
if (*args_list)
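The jw_* calls in this file always follow the same build sequence; a hedged sketch of just that sequence (how the finished text is extracted from the JsonNode is not shown in the hunks, so that step is only noted in a comment):

    /* Hypothetical writer sequence assembled from the jw_* calls visible above. */
    static JsonNode *
    sketch_build_arg_json(char *arg_name, char *arg_value)
    {
        JsonNode *jNode = jw_create_with_object(true);

        jw_put_int(jNode, "nArgs", 1);
        jw_start_array(jNode, "argument_list");
        jw_start_object(jNode, "Arg");
        jw_put_string(jNode, "arg_name", arg_name);
        jw_put_string(jNode, "arg_value", arg_value);
        jw_end_element(jNode);              /* closes the Arg object */
        jw_end_element(jNode);              /* closes the argument_list array */
        jw_finish_document(jNode);

        /* the caller copies the JSON text out of jNode and frees it with jw_destroy(jNode) */
        return jNode;
    }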
diff --git a/src/watchdog/wd_lifecheck.c b/src/watchdog/wd_lifecheck.c
index 713fd4ebf..b62a9b493 100644
--- a/src/watchdog/wd_lifecheck.c
+++ b/src/watchdog/wd_lifecheck.c
@@ -64,22 +64,22 @@ typedef struct
{
LifeCheckNode *lifeCheckNode;
int retry; /* retry times (not used?) */
- char *password;
-} WdPgpoolThreadArg;
+ char *password;
+} WdPgpoolThreadArg;
typedef struct WdUpstreamConnectionData
{
char *hostname; /* host name of server */
pid_t pid; /* pid of ping process */
bool reachable; /* true if last ping was successful */
-} WdUpstreamConnectionData;
+} WdUpstreamConnectionData;
List *g_trusted_server_list = NIL;
static void wd_initialize_trusted_servers_list(void);
static bool wd_ping_all_server(void);
-static WdUpstreamConnectionData * wd_get_server_from_pid(pid_t pid);
+static WdUpstreamConnectionData *wd_get_server_from_pid(pid_t pid);
static void *thread_ping_pgpool(void *arg);
static PGconn *create_conn(char *hostname, int port, char *password);
@@ -91,7 +91,7 @@ static void check_pgpool_status_by_hb(void);
static int ping_pgpool(PGconn *conn);
static int is_parent_alive(void);
static bool fetch_watchdog_nodes_data(void);
-static int wd_check_heartbeat(LifeCheckNode * node);
+static int wd_check_heartbeat(LifeCheckNode *node);
static void load_watchdog_nodes_from_json(char *json_data, int len);
static void spawn_lifecheck_children(void);
@@ -105,7 +105,7 @@ static const char *lifecheck_child_name(pid_t pid);
static void reaper(void);
static int is_wd_lifecheck_ready(void);
static int wd_lifecheck(void);
-static int wd_ping_pgpool(LifeCheckNode * node, char *password);
+static int wd_ping_pgpool(LifeCheckNode *node, char *password);
static pid_t fork_lifecheck_child(void);
@@ -503,7 +503,7 @@ print_lifecheck_cluster(void)
}
static bool
-inform_node_status(LifeCheckNode * node, char *message)
+inform_node_status(LifeCheckNode *node, char *message)
{
int node_status,
x;
@@ -567,7 +567,7 @@ fetch_watchdog_nodes_data(void)
static void
load_watchdog_nodes_from_json(char *json_data, int len)
{
- size_t shmem_size;
+ size_t shmem_size;
json_value *root;
json_value *value;
int i,
@@ -621,14 +621,15 @@ load_watchdog_nodes_from_json(char *json_data, int len)
gslifeCheckCluster = pool_shared_memory_create(shmem_size);
gslifeCheckCluster->nodeCount = nodeCount;
- gslifeCheckCluster->lifeCheckNodes = (LifeCheckNode*)((char*)gslifeCheckCluster + MAXALIGN(sizeof(LifeCheckCluster)));
+ gslifeCheckCluster->lifeCheckNodes = (LifeCheckNode *) ((char *) gslifeCheckCluster + MAXALIGN(sizeof(LifeCheckCluster)));
for (i = 0; i < nodeCount; i++)
{
WDNodeInfo *nodeInfo = parse_watchdog_node_info_from_wd_node_json(value->u.array.values[i]);
-
+
gslifeCheckCluster->lifeCheckNodes[i].wdState = nodeInfo->state;
strcpy(gslifeCheckCluster->lifeCheckNodes[i].stateName, nodeInfo->stateName);
- gslifeCheckCluster->lifeCheckNodes[i].nodeState = NODE_EMPTY; /* This is local health check state*/
+ gslifeCheckCluster->lifeCheckNodes[i].nodeState = NODE_EMPTY; /* This is local health
+ * check state */
gslifeCheckCluster->lifeCheckNodes[i].ID = nodeInfo->id;
strcpy(gslifeCheckCluster->lifeCheckNodes[i].hostName, nodeInfo->hostName);
strcpy(gslifeCheckCluster->lifeCheckNodes[i].nodeName, nodeInfo->nodeName);
@@ -836,7 +837,7 @@ check_pgpool_status_by_query(void)
ereport(WARNING,
(errmsg("failed to create thread for checking pgpool status by query for %d (%s:%d)",
i, node->hostName, node->pgpoolPort),
- errdetail("pthread_create failed with error code %d: %s",rc, strerror(rc))));
+ errdetail("pthread_create failed with error code %d: %s", rc, strerror(rc))));
}
}
@@ -913,7 +914,8 @@ thread_ping_pgpool(void *arg)
{
uintptr_t rtn;
WdPgpoolThreadArg *thread_arg = (WdPgpoolThreadArg *) arg;
- rtn = (uintptr_t) wd_ping_pgpool(thread_arg->lifeCheckNode,thread_arg->password);
+
+ rtn = (uintptr_t) wd_ping_pgpool(thread_arg->lifeCheckNode, thread_arg->password);
pthread_exit((void *) rtn);
}
@@ -970,7 +972,7 @@ create_conn(char *hostname, int port, char *password)
* Check if pgpool is alive using heartbeat signal.
*/
static int
-wd_check_heartbeat(LifeCheckNode * node)
+wd_check_heartbeat(LifeCheckNode *node)
{
int interval;
struct timeval tv;
@@ -1010,19 +1012,19 @@ wd_check_heartbeat(LifeCheckNode * node)
* Check if pgpool can accept the lifecheck query.
*/
static int
-wd_ping_pgpool(LifeCheckNode * node, char* password)
+wd_ping_pgpool(LifeCheckNode *node, char *password)
{
PGconn *conn;
conn = create_conn(node->hostName, node->pgpoolPort, password);
if (conn == NULL)
{
- if(check_password_type_is_not_md5(pool_config->wd_lifecheck_user, pool_config->wd_lifecheck_password) == -1)
+ if (check_password_type_is_not_md5(pool_config->wd_lifecheck_user, pool_config->wd_lifecheck_password) == -1)
{
ereport(ERROR,
(errmsg("the password of wd_lifecheck_user %s is invalid format",
- pool_config->wd_lifecheck_user),
- errdetail("wd_lifecheck_password is not allowed to be md5 hashed format")));
+ pool_config->wd_lifecheck_user),
+ errdetail("wd_lifecheck_password is not allowed to be md5 hashed format")));
}
return WD_NG;
}
@@ -1169,7 +1171,8 @@ wd_ping_all_server(void)
return false;
}
-static WdUpstreamConnectionData * wd_get_server_from_pid(pid_t pid)
+static WdUpstreamConnectionData *
+wd_get_server_from_pid(pid_t pid)
{
ListCell *lc;
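thread_ping_pgpool() above passes its int result back through pthread_exit(); a minimal generic sketch of that round trip (the joining side is assumed, since pthread_join() is not in the hunks):

    /* Hypothetical round trip for an int result through a pthread, as in thread_ping_pgpool(). */
    #include <pthread.h>
    #include <stdint.h>

    static void *
    sketch_worker(void *arg)
    {
        uintptr_t rtn = 1;                  /* stands in for a WD_NG-style status code */

        pthread_exit((void *) rtn);
    }

    static int
    sketch_collect_result(pthread_t thread)
    {
        void *status;

        pthread_join(thread, &status);      /* assumed; the joining code is outside the hunks */
        return (int) (uintptr_t) status;
    }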
diff --git a/src/watchdog/wd_ping.c b/src/watchdog/wd_ping.c
index 9be1ddb3a..2386950f4 100644
--- a/src/watchdog/wd_ping.c
+++ b/src/watchdog/wd_ping.c
@@ -165,9 +165,9 @@ wd_trusted_server_command(char *hostname)
{
int status;
int pid;
- StringInfoData exec_cmd_data;
- StringInfo exec_cmd = &exec_cmd_data;
- char *command_line = pstrdup(pool_config->trusted_server_command);
+ StringInfoData exec_cmd_data;
+ StringInfo exec_cmd = &exec_cmd_data;
+ char *command_line = pstrdup(pool_config->trusted_server_command);
initStringInfo(exec_cmd);
@@ -212,7 +212,7 @@ wd_trusted_server_command(char *hostname)
if (pid == 0)
{
/* CHILD */
- int fd;
+ int fd;
on_exit_reset();
SetProcessGlobalVariables(PT_WATCHDOG_UTILITY);
@@ -272,9 +272,9 @@ wd_get_ping_result(char *hostname, int exit_status, int outfd)
}
else
{
- StringInfoData result;
+ StringInfoData result;
char buf[WD_MAX_PING_RESULT];
- int r_size = 0;
+ int r_size = 0;
ereport(DEBUG1,
(errmsg("watchdog ping process for host \"%s\" exited successfully", hostname)));
diff --git a/src/watchdog/wd_utils.c b/src/watchdog/wd_utils.c
index a00f541b2..b14b50451 100644
--- a/src/watchdog/wd_utils.c
+++ b/src/watchdog/wd_utils.c
@@ -51,7 +51,7 @@ typedef struct
{
void *(*start_routine) (void *);
void *arg;
-} WdThreadInfo;
+} WdThreadInfo;
void
@@ -253,7 +253,8 @@ string_replace(const char *string, const char *pattern, const char *replacement)
/*
* The function is wrapper over pthread_create.
*/
-int watchdog_thread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg)
+int
+watchdog_thread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine) (void *), void *arg)
{
WdThreadInfo *thread_arg = palloc(sizeof(WdThreadInfo));