2006/8/21 initial release
-
-
-
2007/6/XX release 1.1
-
-* fix ocf script to work properly with pgpool 3.2+ .
-* Works well with pgpool-II.
-* New configuration variables added .
- you can specify path of configuration files . It means you can run multiple pgpool instances on single heartbeat instance.
+ * fix ocf script to work properly with pgpool 3.2+.
+ * Works well with pgpool-II.
+ * New configuration variables added.
+   You can specify the paths of the configuration files, which means you
+   can run multiple pgpool instances on a single heartbeat instance.
pgpoolconf path to pgpool.conf
pcpconf path to pcp.conf
hbaconf path to pool_hba.conf
-* support log redirection . use "logfile" parameter to specify logfile location.If you set this parameter. If you specify logfile parameter , "-d" option added automaticully.
+ * support log redirection. Use the "logfile" parameter to specify the
+   logfile location. If you specify the logfile parameter, the "-d" option
+   is added automatically.
+
+2008/12/29 1.2-dev
+ * add "pidfile" parameter
+ * fix bugs
+ * renew sample cib.xml tested on heartbeat 2.1.4
pgpool-ha - Heartbeat scripts for pgpool
-TANIDA Yutaka(tanida@sraoss.co.jp)
+TANIDA Yutaka, TAKATSUKA Haruka (harukat@sraoss.co.jp)
+1. What's this?
This is a set of scripts to integrate pgpool and heartbeat. Pgpool is a
replication server for PostgreSQL and improves reliability, but pgpool
itself can be a single point of failure, so heartbeat is used to start,
stop, and monitor pgpool and to fail it over to another node.
2. Requirement
- heartbeat
- 2.0+ required.
+ 2.0+ required. (Tested on 2.1.4 only)
- pgpool
- Tested on 3.0+ only , but It will work if 'show pool_status' command was
-supported.
- pgpool.conf on default path is only supported.
+  Tested on pgpool-II only, but it will work as long as the 'show pool_status'
+  command is supported.
- PostgreSQL client installation.
'psql' is also required on install hosts. 'postmaster' is not required.
-----Sample cib.xml start
-<cib generated="true" admin_epoch="0" have_quorum="true" num_peers="2" ccm_transition="4" cib_feature_revision="1.2" crm_feature_set="1.0.4" debug_source="sync_our_cib" dc_uuid="cf40f895-0e3c-4847-b8b3-6ae1fad7921f" last_written="Mon Jul 24 07:25:00 2006" epoch="42" num_updates="1521">
+ <cib generated="true" admin_epoch="0" have_quorum="true" ignore_dtd="false" num_peers="2" cib_feature_revision="2.0" crm_feature_set="2.0" epoch="288" num_updates="1" cib-last-written="Thu Nov 27 09:11:34 2008" ccm_transition="2" dc_uuid="b9400fd4-6712-4462-a88f-78865eae2e2a">
<configuration>
<crm_config>
- <cluster_property_set id="deafult">
+ <cluster_property_set id="cib-bootstrap-options">
<attributes>
- <nvpair id="symmetric_cluster" name="symmetric_cluster"
-value="true"/
->
- <nvpair id="no_quorum_policy" name="no_quorum_policy"
-value="stop"/>
- <nvpair id="default_resource_stickiness"
-name="default_resource_stick
-iness" value="0"/>
- <nvpair id="stonith_enabled" name="stonith_enabled" value="false"/>
- <nvpair id="stop_orphan_resources" name="stop_orphan_resources"
-value
-="false"/>
- <nvpair id="stop_orphan_actions" name="stop_orphan_actions"
-value="tr
-ue"/>
- <nvpair id="remove_after_stop" name="remove_after_stop"
-value="false"
-/>
- <nvpair id="short_resource_names" name="short_resource_names"
-value="
-true"/>
- <nvpair id="transition_idle_timeout" name="transition_idle_timeout"
-v
-alue="5min"/>
- <nvpair id="is_managed_default" name="is_managed_default"
-value="true
-"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.4-fb84f606a422 tip"/>
+ <nvpair id="cib-bootstrap-options-default-resource-failure-stickiness" name="default-resource-failure-stickiness" value="0"/>
+ <nvpair id="cib-bootstrap-options-default-resource-stickiness" name="default-resource-stickiness" value="100"/>
+ <nvpair name="last-lrm-refresh" id="cib-bootstrap-options-last-lrm-refresh" value="1227682013"/>
+ <nvpair id="cib-bootstrap-options-remove-after-stop" name="remove-after-stop" value="false"/>
</attributes>
</cluster_property_set>
</crm_config>
<nodes>
- <node id="cf40f895-0e3c-4847-b8b3-6ae1fad7921f" uname="node1"
-type="norma
-l"/>
- <node id="3e919e96-e476-4d3e-be05-0048b9e12a56" uname="node2"
-type="norma
-l"/>
+ <node uname="node1" type="normal" id="b9400fd4-6712-4462-a88f-78865eae2e2a">
+ <instance_attributes id="nodes-b9400fd4-6712-4462-a88f-78865eae2e2a">
+ <attributes>
+ <nvpair name="standby" id="standby-b9400fd4-6712-4462-a88f-78865eae2e2a" value="off"/>
+ </attributes>
+ </instance_attributes>
+ </node>
+ <node id="cc18cfc5-68d7-45f8-a9e3-5364456e0721" uname="node2" type="normal">
+ <instance_attributes id="nodes-cc18cfc5-68d7-45f8-a9e3-5364456e0721">
+ <attributes>
+ <nvpair id="standby-cc18cfc5-68d7-45f8-a9e3-5364456e0721" name="standby" value="off"/>
+ </attributes>
+ </instance_attributes>
+ </node>
</nodes>
<resources>
-        </operations>
-        <instance_attributes>
-          <attributes>
-            <nvpair id="IPaddr_1_attr_0" name="ip" value="192.168.0.3"/>
-          </attributes>
-        </instance_attributes>
-      </primitive>
-      <primitive class="ocf" id="pgpool_2" provider="heartbeat" type="pgpool">
-        <operations>
-          <op id="pgpool_2_mon" interval="30s" name="monitor" timeout="20s"/>
-        </operations>
-      </primitive>
-    </group>
+ <primitive id="resource_ip" class="ocf" type="IPaddr" provider="heartbeat">
+ <meta_attributes id="resource_ip_meta_attrs">
+ <attributes>
+ <nvpair id="resource_ip_metaattr_target_role" name="target_role" value="started"/>
+ </attributes>
+ </meta_attributes>
+ <instance_attributes id="resource_ip_instance_attrs">
+ <attributes>
+ <nvpair id="0fc14517-1d8a-40d1-a1db-941cf14d9490" name="ip" value="192.168.0.3"/>
+ <nvpair id="7ef81de0-2fed-4fae-a517-ac0b96adba4e" name="cidr_netmask" value="23"/>
+ <nvpair id="754d986c-bb77-4028-98fa-5a222854001e" name="nic" value="eth0"/>
+ </attributes>
+ </instance_attributes>
+ <operations>
+ <op id="op_ip_start" name="start" timeout="90" start_delay="0" disabled="false" role="Started"/>
+ <op id="op_ip_stop" name="stop" timeout="100" start_delay="0" disabled="false" role="Started"/>
+ <op id="op_ip_mon" name="monitor" interval="5s" timeout="20s" start_delay="1s" disabled="false" role="Started"/>
+ </operations>
+ </primitive>
+ <primitive id="resource_pgpool2" class="ocf" type="pgpool" provider="heartbeat">
+ <meta_attributes id="resource_pgpool2_meta_attrs">
+ <attributes>
+ <nvpair id="resource_pgpool2_metaattr_target_role" name="target_role" value="started"/>
+ </attributes>
+ </meta_attributes>
+ <instance_attributes id="resource_pgpool2_instance_attrs">
+ <attributes>
+ <nvpair id="5adb33f4-6641-41a2-be3d-31264c579a67" name="pgpoolconf" value="/var/lib/pgsql/pool_ha/pgpool.conf"/>
+ <nvpair id="db163efd-0e00-41f1-9a4b-dfa3c5b299e0" name="pcpconf" value="/var/lib/pgsql/pool_ha/pcp.conf"/>
+ <nvpair id="9f69680a-ca9c-44b5-9644-d35e1b0286d4" name="hbaconf" value="/var/lib/pgsql/pool_ha/pool_hba.conf"/>
+ <nvpair id="1fabaefd-716d-4f6c-8827-0cd79e8505ae" name="logfile" value="/var/lib/pgsql/pool_ha/pgpool.log"/>
+ <nvpair id="ff4d7726-7bc1-4f3d-8d0e-8bc4aafafbf7" name="pidfile" value="/tmp/pgpool.pid"/>
+ </attributes>
+ </instance_attributes>
+ <operations>
+ <op id="op_pool_mon" name="monitor" interval="10" timeout="20" start_delay="1m"/>
+ <op id="op_pool_start" name="start" timeout="20"/>
+ <op id="op_pool_stop" name="stop" timeout="20"/>
+ </operations>
+ </primitive>
</resources>
<constraints>
- <rsc_location id="rsc_location_group_1" rsc="group_1">
- <rule id="prefered_location_group_1" score="100">
- <expression attribute="#uname" id="prefered_location_group_1_expr"
-op
-eration="eq" value="node1"/>
+ <rsc_colocation id="colocation_poolip" from="resource_pgpool2" to="resource_ip" score="INFINITY"/>
+ <rsc_location id="ip_ping_const" rsc="resource_ip">
+ <rule id="prefered_ip_ping_const" score="-INFINITY" boolean_op="or">
+ <expression attribute="pingd" id="ip_ping_rule_ex1" operation="not_defined"/>
+ <expression attribute="pingd" id="13b33648-e266-4567-899d-d83ed66d3107" operation="lte" value="0" type="number"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="cli-prefer-resource_ip" rsc="resource_ip">
+ <rule id="prefered_cli-prefer-resource_ip" score="10">
+ <expression attribute="#uname" id="0742b4b3-d70c-4f11-945a-cfdea8cf5ff8" operation="eq" value="node1"/>
</rule>
</rsc_location>
</constraints>
</configuration>
</cib>
+
----- Sample cib.xml end.
file name , key , remarks
pgpool.conf , pgpoolconf ,
pool_hba.conf , hbaconf , pgpool 3.2+ only
pcp.conf , pcpconf , pgpool-II only
+log file , logfile ,
+pid file , pidfile , e.g. /tmp/pgpool.pid
-Here is an example how to specify paths of config files.
---
- <primitive class="ocf" id="pgpool_2" provider="heartbeat"
-type="pgpool">
- <operations>
- <op id="pgpool_2_mon" interval="30s" name="monitor"
-timeout="20s"/>
- </operations>
- <instance_attributes id="4e369437-b6ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="pgpool_2_attr_1" name="pgpoolconf"
- value="/path/to/pgpool.conf"/>
- <nvpair id="pgpool_2_attr_2" name="hbaconf"
- value="/path/to/pool_hba.conf"/>
- <nvpair id="pgpool_2_attr_3" name="pcpconf"
- value="/path/to/pcp.conf"/>
- <nvpair id="pgpool_2_attr_4" name="logfile"
- value="/path/to/logfile"/>
- </attributes>
- </instance_attributes>
- </primitive>
--
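For reference, a minimal instance_attributes fragment on the pgpool primitive
using these keys could look like this (the ids and /path/to/... values are
placeholders; the full sample cib.xml above shows the same keys together with
logfile and pidfile):

      <instance_attributes id="pgpool_path_attrs">
        <attributes>
          <nvpair id="pgpool_attr_conf" name="pgpoolconf" value="/path/to/pgpool.conf"/>
          <nvpair id="pgpool_attr_pcp" name="pcpconf" value="/path/to/pcp.conf"/>
          <nvpair id="pgpool_attr_hba" name="hbaconf" value="/path/to/pool_hba.conf"/>
        </attributes>
      </instance_attributes>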
The following entries in pgpool.conf will be referenced to monitor pgpool.
- Active-Active configuration.
Pgpool-ha 1.1+ supports multiple configurations, so it can support
-active-active style configuration . Here is an exmaple.
-
- <nodes>
- <node id="cf40f895-0e3c-4847-b8b3-6ae1fad7921f" uname="node1"
-type="normal"/>
- <node id="3e919e96-e476-4d3e-be05-0048b9e12a56" uname="node2"
-type="normal"/>
- </nodes>
- <resources>
- <group id="group_1">
- <primitive class="ocf" id="IPaddr_1" provider="heartbeat"
-type="IPaddr">
- <operations>
- <op id="IPaddr_1_mon" interval="5s" name="monitor" timeout="5s"/>
- </operations>
- <instance_attributes id="4e369437-b5ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="IPaddr_1_attr_0" name="ip" value="192.168.0.3"/>
- </attributes>
- </instance_attributes>
- </primitive>
- <primitive class="ocf" id="pgpool_2" provider="heartbeat"
-type="pgpool">
- <operations>
- <op id="pgpool_2_mon" interval="30s" name="monitor"
-timeout="20s"/>
- </operations>
- <instance_attributes id="4e369437-b6ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="pgpool_2_attr_0" name="pgpoolconf"
-value="/usr/local/pgsql/etc/pgpool-vip1.conf"/>
- <nvpair id="pgpool_2_attr_1" name="hbaconf"
-value="/usr/local/pgsql/etc/pool_hba.conf"/>
- </attributes>
- </instance_attributes>
- </primitive>
- </group>
- <group id="group_2">
- <primitive class="ocf" id="IPaddr_3" provider="heartbeat"
-type="IPaddr">
- <operations>
- <op id="IPaddr_3_mon" interval="5s" name="monitor" timeout="5s"/>
- </operations>
- <instance_attributes id="5e369437-b5ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="IPaddr_3_attr_0" name="ip" value="192.168.0.4"/>
- </attributes>
- </instance_attributes>
- </primitive>
- <primitive class="ocf" id="pgpool_4" provider="heartbeat"
-type="pgpool">
- <operations>
- <op id="pgpool_4_mon" interval="30s" name="monitor"
-timeout="20s"/>
- </operations>
- <instance_attributes id="5e369437-b6ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="pgpool_4_attr_0" name="pgpoolconf"
-value="/usr/local/pgsql/etc/pgpool-vip2.conf"/>
- <nvpair id="pgpool_4_attr_1" name="hbaconf"
-value="/usr/local/pgsql/etc/etc/pool_hba.conf"/>
- </attributes>
- </instance_attributes>
- </primitive>
- </group>
- </resources>
- <constraints>
- <rsc_location id="rsc_location_group_1" rsc="group_1">
- <rule id="prefered_location_group_1" score="100">
- <expression attribute="#uname" id="prefered_location_group_1_expr"
- </rule>
- </rsc_location>
- <rsc_location id="rsc_location_group_2" rsc="group_2">
- <rule id="prefered_location_group_2" score="100">
- <expression attribute="#uname" id="prefered_location_group_2_expr"
-operation="eq" value="node2"/>
- </rule>
- </rsc_location>
- </constraints>
+active-active style configuration.
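For illustration only (all ids, file names, and addresses below are
placeholders, not part of the shipped sample), such a layout can be sketched
as two resource groups, each pairing a virtual IP with a pgpool primitive
that points at its own pgpool.conf and its own pidfile so the instances do
not collide, plus one location rule per group preferring a different node:

    <group id="group_1">
      <primitive class="ocf" id="IPaddr_1" provider="heartbeat" type="IPaddr">
        <instance_attributes id="IPaddr_1_attrs">
          <attributes>
            <nvpair id="IPaddr_1_ip" name="ip" value="192.168.0.3"/>
          </attributes>
        </instance_attributes>
      </primitive>
      <primitive class="ocf" id="pgpool_1" provider="heartbeat" type="pgpool">
        <instance_attributes id="pgpool_1_attrs">
          <attributes>
            <nvpair id="pgpool_1_conf" name="pgpoolconf" value="/usr/local/pgsql/etc/pgpool-vip1.conf"/>
            <nvpair id="pgpool_1_pid" name="pidfile" value="/tmp/pgpool-vip1.pid"/>
          </attributes>
        </instance_attributes>
      </primitive>
    </group>
    <group id="group_2">
      <!-- same structure, with ip 192.168.0.4, pgpool-vip2.conf and its own pidfile -->
    </group>

    <rsc_location id="location_group_1" rsc="group_1">
      <rule id="location_group_1_rule" score="100">
        <expression attribute="#uname" id="location_group_1_expr" operation="eq" value="node1"/>
      </rule>
    </rsc_location>
    <rsc_location id="location_group_2" rsc="group_2">
      <rule id="location_group_2_rule" score="100">
        <expression attribute="#uname" id="location_group_2_expr" operation="eq" value="node2"/>
      </rule>
    </rsc_location>

With score="100" each group only prefers its node; if one node fails, both
groups (and both virtual IPs) can run on the surviving node.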
4. Restriction
-heartbeat 2.0 or later (http://www.linux-ha.org/)
+Operation has been tested with 2.1.4.
In addition, the following software is also required, although it does not need to be installed on this host.
-cib.xml
cib.xml is the file required for using the crm with heartbeat, and it is normally placed under /var/lib/heartbeat/crm/. The following sample cib.xml configures one virtual IP address and an active/standby pgpool.
-<cib generated="true" admin_epoch="0" have_quorum="true" num_peers="2" ccm_transition="4" cib_feature_revision="1.2" crm_feature_set="1.0.4" debug_source="sync_our_cib" dc_uuid="cf40f895-0e3c-4847-b8b3-6ae1fad7921f" last_written="Mon Jul 24 07:25:00 2006" epoch="42" num_updates="1521">
+ <cib generated="true" admin_epoch="0" have_quorum="true" ignore_dtd="false" num_peers="2" cib_feature_revision="2.0" crm_feature_set="2.0" epoch="288" num_updates="1" cib-last-written="Thu Nov 27 09:11:34 2008" ccm_transition="2" dc_uuid="b9400fd4-6712-4462-a88f-78865eae2e2a">
<configuration>
<crm_config>
- <cluster_property_set id="deafult">
+ <cluster_property_set id="cib-bootstrap-options">
<attributes>
- <nvpair id="symmetric_cluster" name="symmetric_cluster" value="true"/>
- <nvpair id="no_quorum_policy" name="no_quorum_policy" value="stop"/>
- <nvpair id="default_resource_stickiness" name="default_resource_stickiness" value="0"/>
- <nvpair id="stonith_enabled" name="stonith_enabled" value="false"/>
- <nvpair id="stop_orphan_resources" name="stop_orphan_resources" value="false"/>
- <nvpair id="stop_orphan_actions" name="stop_orphan_actions" value="true"/>
- <nvpair id="remove_after_stop" name="remove_after_stop" value="false"/>
- <nvpair id="short_resource_names" name="short_resource_names" value="true"/>
- <nvpair id="transition_idle_timeout" name="transition_idle_timeout" value="5min"/>
- <nvpair id="is_managed_default" name="is_managed_default" value="true"/>
+ <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.4-fb84f606a422 tip"/>
+ <nvpair id="cib-bootstrap-options-default-resource-failure-stickiness" name="default-resource-failure-stickiness" value="0"/>
+ <nvpair id="cib-bootstrap-options-default-resource-stickiness" name="default-resource-stickiness" value="100"/>
+ <nvpair name="last-lrm-refresh" id="cib-bootstrap-options-last-lrm-refresh" value="1227682013"/>
+ <nvpair id="cib-bootstrap-options-remove-after-stop" name="remove-after-stop" value="false"/>
</attributes>
</cluster_property_set>
</crm_config>
<nodes>
- <node id="cf40f895-0e3c-4847-b8b3-6ae1fad7921f" uname="node1" type="normal"/>
- <node id="3e919e96-e476-4d3e-be05-0048b9e12a56" uname="node2" type="normal"/>
+ <node uname="node1" type="normal" id="b9400fd4-6712-4462-a88f-78865eae2e2a">
+ <instance_attributes id="nodes-b9400fd4-6712-4462-a88f-78865eae2e2a">
+ <attributes>
+ <nvpair name="standby" id="standby-b9400fd4-6712-4462-a88f-78865eae2e2a" value="off"/>
+ </attributes>
+ </instance_attributes>
+ </node>
+ <node id="cc18cfc5-68d7-45f8-a9e3-5364456e0721" uname="node2" type="normal">
+ <instance_attributes id="nodes-cc18cfc5-68d7-45f8-a9e3-5364456e0721">
+ <attributes>
+ <nvpair id="standby-cc18cfc5-68d7-45f8-a9e3-5364456e0721" name="standby" value="off"/>
+ </attributes>
+ </instance_attributes>
+ </node>
</nodes>
<resources>
- <group id="group_1">
- <primitive class="ocf" id="IPaddr_1" provider="heartbeat" type="IPaddr">
- <operations>
- <op id="IPaddr_1_mon" interval="5s" name="monitor" timeout="5s"/>
- </operations>
- <instance_attributes>
- <attributes>
- <nvpair id="IPaddr_1_attr_0" name="ip" value="192.168.0.3"/>
- </attributes>
- </instance_attributes>
- </primitive>
- <primitive class="ocf" id="pgpool_2" provider="heartbeat" type="pgpool">
- <operations>
- <op id="pgpool_2_mon" interval="30s" name="monitor" timeout="20s"/>
- </operations>
- </primitive>
- </group>
+ <primitive id="resource_ip" class="ocf" type="IPaddr" provider="heartbeat">
+ <meta_attributes id="resource_ip_meta_attrs">
+ <attributes>
+ <nvpair id="resource_ip_metaattr_target_role" name="target_role" value="started"/>
+ </attributes>
+ </meta_attributes>
+ <instance_attributes id="resource_ip_instance_attrs">
+ <attributes>
+ <nvpair id="0fc14517-1d8a-40d1-a1db-941cf14d9490" name="ip" value="192.168.0.3"/>
+ <nvpair id="7ef81de0-2fed-4fae-a517-ac0b96adba4e" name="cidr_netmask" value="23"/>
+ <nvpair id="754d986c-bb77-4028-98fa-5a222854001e" name="nic" value="eth0"/>
+ </attributes>
+ </instance_attributes>
+ <operations>
+ <op id="op_ip_start" name="start" timeout="90" start_delay="0" disabled="false" role="Started"/>
+ <op id="op_ip_stop" name="stop" timeout="100" start_delay="0" disabled="false" role="Started"/>
+ <op id="op_ip_mon" name="monitor" interval="5s" timeout="20s" start_delay="1s" disabled="false" role="Started"/>
+ </operations>
+ </primitive>
+ <primitive id="resource_pgpool2" class="ocf" type="pgpool" provider="heartbeat">
+ <meta_attributes id="resource_pgpool2_meta_attrs">
+ <attributes>
+ <nvpair id="resource_pgpool2_metaattr_target_role" name="target_role" value="started"/>
+ </attributes>
+ </meta_attributes>
+ <instance_attributes id="resource_pgpool2_instance_attrs">
+ <attributes>
+ <nvpair id="5adb33f4-6641-41a2-be3d-31264c579a67" name="pgpoolconf" value="/var/lib/pgsql/pool_ha/pgpool.conf"/>
+ <nvpair id="db163efd-0e00-41f1-9a4b-dfa3c5b299e0" name="pcpconf" value="/var/lib/pgsql/pool_ha/pcp.conf"/>
+ <nvpair id="9f69680a-ca9c-44b5-9644-d35e1b0286d4" name="hbaconf" value="/var/lib/pgsql/pool_ha/pool_hba.conf"/>
+ <nvpair id="1fabaefd-716d-4f6c-8827-0cd79e8505ae" name="logfile" value="/var/lib/pgsql/pool_ha/pgpool.log"/>
+ <nvpair id="ff4d7726-7bc1-4f3d-8d0e-8bc4aafafbf7" name="pidfile" value="/tmp/pgpool.pid"/>
+ </attributes>
+ </instance_attributes>
+ <operations>
+ <op id="op_pool_mon" name="monitor" interval="10" timeout="20" start_delay="1m"/>
+ <op id="op_pool_start" name="start" timeout="20"/>
+ <op id="op_pool_stop" name="stop" timeout="20"/>
+ </operations>
+ </primitive>
</resources>
<constraints>
- <rsc_location id="rsc_location_group_1" rsc="group_1">
- <rule id="prefered_location_group_1" score="100">
- <expression attribute="#uname" id="prefered_location_group_1_expr" operation="eq" value="node1"/>
+ <rsc_colocation id="colocation_poolip" from="resource_pgpool2" to="resource_ip" score="INFINITY"/>
+ <rsc_location id="ip_ping_const" rsc="resource_ip">
+ <rule id="prefered_ip_ping_const" score="-INFINITY" boolean_op="or">
+ <expression attribute="pingd" id="ip_ping_rule_ex1" operation="not_defined"/>
+ <expression attribute="pingd" id="13b33648-e266-4567-899d-d83ed66d3107" operation="lte" value="0" type="number"/>
+ </rule>
+ </rsc_location>
+ <rsc_location id="cli-prefer-resource_ip" rsc="resource_ip">
+ <rule id="prefered_cli-prefer-resource_ip" score="10">
+ <expression attribute="#uname" id="0742b4b3-d70c-4f11-945a-cfdea8cf5ff8" operation="eq" value="node1"/>
</rule>
</rsc_location>
</constraints>
</configuration>
</cib>
+
With this sample configuration file you can manage pgpool. The items to rewrite in it are the following.
-Node names
-Location of the configuration files (optional)
If the configuration files are not at their default locations, you can set them by adding the following keys to the instance_attributes section, with the file name as the value.
-file name , key , remarks
-pgpool.conf , pgpoolconf ,
-pool_hba.conf , hbaconf , effective with pgpool 3.2 or later
-pcp.conf , pcpconf , effective with pgpool-II only
-
- Note that this setting is effective only with pgpool-ha 1.1 or later.
-
--Log file location
- As with the configuration file locations, you can also specify where the log file is written: use logfile as the key
-and the corresponding file name as the value, and output will be redirected there. When writing to a log file, the -d
-debug option is added automatically.
-
-
-
-
- An actual configuration example is shown below. Only the pgpool part of the
-configuration file above is described here, so substitute it as appropriate.
-
- <primitive class="ocf" id="pgpool_2" provider="heartbeat"
-type="pgpool">
- <operations>
- <op id="pgpool_2_mon" interval="30s" name="monitor"
-timeout="20s"/>
- </operations>
- <instance_attributes id="4e369437-b6ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="pgpool_2_attr_1" name="pgpoolconf"
- value="/path/to/pgpool.conf"/>
- <nvpair id="pgpool_2_attr_2" name="hbaconf"
- value="/path/to/pool_hba.conf"/>
- <nvpair id="pgpool_2_attr_3" name="pcpconf"
- value="/path/to/pcp.conf"/>
- <nvpair id="pgpool_2_attr_4" name="logfile"
- value="/path/to/logfile"/>
- </attributes>
- </instance_attributes>
- </primitive>
+file name , key , remarks
+pgpool.conf , pgpoolconf ,
+pool_hba.conf , hbaconf , pgpool 3.2+ only
+pcp.conf , pcpconf , pgpool-II only
+log file , logfile , log file to which pgpool output is redirected
+pid file , pidfile , e.g. specify /tmp/pgpool.pid when logdir='/tmp'
+When a log file is used, the debug option -d is added automatically.
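For example, the corresponding nvpair entries on the pgpool primitive look
like this (ids are placeholders; the sample cib.xml above uses the same
values):

    <nvpair id="pgpool_attr_log" name="logfile" value="/var/lib/pgsql/pool_ha/pgpool.log"/>
    <nvpair id="pgpool_attr_pid" name="pidfile" value="/tmp/pgpool.pid"/>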
Once this is written, place the file on every server that heartbeat uses. cib.xml must be exactly the same on all servers. The crm_verify command shipped with heartbeat can be used to validate it.
-crm_verify -VX /opt/powergres/hb/var/lib/heartbeat/crm/cib.xml
+crm_verify -VX /var/lib/heartbeat/crm/cib.xml
Finally, once everything is in place, start heartbeat and confirm that it works.
pgpool-HA
With 1.1 and later, multiple configuration files can be specified, which makes an Active-Active configuration possible.
-The following example runs the configurations described by
-/usr/local/pgsql/etc/pgpool-host1.conf and /usr/local/pgsql/etc/pgpool-host2.conf
-on the two IP addresses 192.168.0.3 and 192.168.0.4.
-
- <nodes>
- <node id="cf40f895-0e3c-4847-b8b3-6ae1fad7921f" uname="node1" type="normal"/>
- <node id="3e919e96-e476-4d3e-be05-0048b9e12a56" uname="node2" type="normal"/>
- </nodes>
- <resources>
- <group id="group_1">
- <primitive class="ocf" id="IPaddr_1" provider="heartbeat" type="IPaddr">
- <operations>
- <op id="IPaddr_1_mon" interval="5s" name="monitor" timeout="5s"/>
- </operations>
- <instance_attributes id="4e369437-b5ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="IPaddr_1_attr_0" name="ip" value="192.168.0.3"/>
- </attributes>
- </instance_attributes>
- </primitive>
- <primitive class="ocf" id="pgpool_2" provider="heartbeat"
-type="pgpool">
- <operations>
- <op id="pgpool_2_mon" interval="30s" name="monitor"
-timeout="20s"/>
- </operations>
- <instance_attributes id="4e369437-b6ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="pgpool_2_attr_0" name="pgpoolconf" value="/usr/local/pgsql/etc/pgpool-vip1.conf"/>
- <nvpair id="pgpool_2_attr_1" name="hbaconf" value="/usr/local/pgsql/etc/pool_hba.conf"/>
- </attributes>
- </instance_attributes>
- </primitive>
- </group>
- <group id="group_2">
- <primitive class="ocf" id="IPaddr_3" provider="heartbeat" type="IPaddr">
- <operations>
- <op id="IPaddr_3_mon" interval="5s" name="monitor" timeout="5s"/>
- </operations>
- <instance_attributes id="5e369437-b5ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="IPaddr_3_attr_0" name="ip" value="192.168.0.4"/>
- </attributes>
- </instance_attributes>
- </primitive>
- <primitive class="ocf" id="pgpool_4" provider="heartbeat"
-type="pgpool">
- <operations>
- <op id="pgpool_4_mon" interval="30s" name="monitor"
-timeout="20s"/>
- </operations>
- <instance_attributes id="5e369437-b6ac-4f47-96bd-029f7e4a95fc">
- <attributes>
- <nvpair id="pgpool_4_attr_0" name="pgpoolconf" value="/usr/local/pgsql/etc/pgpool-vip2.conf"/>
- <nvpair id="pgpool_4_attr_1" name="hbaconf" value="/usr/local/pgsql/etc/etc/pool_hba.conf"/>
- </attributes>
- </instance_attributes>
- </primitive>
- </group>
- </resources>
- <constraints>
- <rsc_location id="rsc_location_group_1" rsc="group_1">
- <rule id="prefered_location_group_1" score="100">
- <expression attribute="#uname" id="prefered_location_group_1_expr"
-operation="eq" value="node1"/>
- </rule>
- </rsc_location>
- <rsc_location id="rsc_location_group_2" rsc="group_2">
- <rule id="prefered_location_group_2" score="100">
- <expression attribute="#uname" id="prefered_location_group_2_expr"
-operation="eq" value="node2"/>
- </rule>
- </rsc_location>
- </constraints>
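In short, one resource group is defined per pgpool instance, each with its
own pgpoolconf (and its own pidfile so the instances do not collide), and
each group gets a location rule that prefers a different node. A sketch of
the constraint part (ids are placeholders):

    <rsc_location id="location_group_1" rsc="group_1">
      <rule id="location_group_1_rule" score="100">
        <expression attribute="#uname" id="location_group_1_expr" operation="eq" value="node1"/>
      </rule>
    </rsc_location>
    <rsc_location id="location_group_2" rsc="group_2">
      <rule id="location_group_2_rule" score="100">
        <expression attribute="#uname" id="location_group_2_expr" operation="eq" value="node2"/>
      </rule>
    </rsc_location>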
3. Usage
#!/bin/sh
#
-# OCF style start/stop/monitoring script for pgpool. use with heartbeat 2.0+ .
+# OCF style start/stop/monitoring script for pgpool. Use with heartbeat 2.1+.
#
-# Author: TANIDA Yutaka(tanida@sraoss.co.jp)
+# Author: TANIDA Yutaka, TAKATSUKA Haruka
#
-# Copyright (c) 2006-2007 PgPool Global Development Group
+# Copyright (c) 2006-2008 PgPool Global Development Group
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# suitability of this software for any purpose. It is provided "as
# is" without express or implied warranty.
#
-#
+# defined OCF_RESKEY_value : pgpoolconf pcpconf hbaconf logfile pidfile
#
-# defined OCF_RESKEY_value : none
-# pgpool.pid file path and default port number will be read
-# from this file automatically.
-
+# default port number will be read from pgpool.conf automatically.
check_pid () {
fi
}
-
-
-PATH=/bin:/usr/bin:/sbin:/usr/sbin
-export PATH
-PGP_CONF=`@_PGPOOL_@ -h 2>&1 | grep default | sed -e 's/ config_file default path: //g'`
PATH=/bin:/usr/bin:/sbin:/usr/sbin
export PATH
+PGPOOLCMD=@_PGPOOL_@
+
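+# When pgpoolconf, pcpconf, or hbaconf is not given via OCF_RESKEY_*,
+# fall back to the default path printed by "pgpool --help".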
if [ "x$OCF_RESKEY_pgpoolconf" = 'x' ]; then
- PGP_CONF=`@_PGPOOL_@ -h 2>&1 | grep " config_file default" | sed -e 's/ config_file default path: //g'`
+ XX=$($PGPOOLCMD --help 2>&1 | grep ' config_file default path: ')
+ PGP_CONF=${XX# config_file default path: }
else
- PGP_CONF=$OCF_RESKEY_pgpoolconf
+ PGP_CONF=$OCF_RESKEY_pgpoolconf
fi
if [ "x$OCF_RESKEY_pcpconf" = 'x' ]; then
- PCP_CONF=`@_PGPOOL_@ -h 2>&1 | grep " pcp_config_file default" | sed -e 's/ pcp_config_file default path: //g'`
+ XX=$($PGPOOLCMD --help 2>&1 | grep ' pcp_config_file default path: ')
+ PCP_CONF=${XX# pcp_config_file default path: }
else
- PCP_CONF=$OCF_RESKEY_pcpconf
+ PCP_CONF=$OCF_RESKEY_pcpconf
fi
if [ "x$OCF_RESKEY_hbaconf" = 'x' ]; then
- HBA_CONF=`@_PGPOOL_@ -h 2>&1 | grep " hba_file default" | sed -e 's/ hba_file default path: //g'`
+ XX=$($PGPOOLCMD --help 2>&1 | grep ' hba_file default path: ')
+ HBA_CONF=${XX# hba_file default path: }
else
- HBA_CONF=$OCF_RESKEY_hbaconf
+ HBA_CONF=$OCF_RESKEY_hbaconf
fi
if [ "x$OCF_RESKEY_logfile" != 'x' ]; then
- LOGFILE=$OCF_RESKEY_logfile
+ LOGFILE=$OCF_RESKEY_logfile
+fi
+if [ "x$OCF_RESKEY_pidfile" = 'x' ]; then
+ PGPOOL_PID=/tmp/pgpool.pid
+else
+ PGPOOL_PID=$OCF_RESKEY_pidfile
fi
-echo $PGP_CONF
-
-PGPOOL="@_PGPOOL_@ -f $PGP_CONF"
+PGPOOL="$PGPOOLCMD -f $PGP_CONF"
PGPOOL_START_ARG=""
if [ "x$HBA_CONF" != 'x' ];then
- PGPOOL="$PGPOOL -a $HBA_CONF"
+ PGPOOL="$PGPOOL -a $HBA_CONF"
fi
-
if [ "x$PCP_CONF" != 'x' ];then
- PGPOOL="$PGPOOL -F $PCP_CONF"
+ PGPOOL="$PGPOOL -F $PCP_CONF"
fi
-
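+# When a logfile is given, run pgpool under nohup in non-daemon debug mode
+# (-d -n) and append its output to that file.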
if [ "x$LOGFILE" != 'x' ];then
- PGPOOL="nohup $PGPOOL"
- PGPOOL_START_ARG="$PGPOOL_START_ARG -d -n >>$LOGFILE 2>&1 &"
+ PGPOOL="nohup $PGPOOL"
+ PGPOOL_START_ARG="$PGPOOL_START_ARG -d -n >>$LOGFILE 2>&1 &"
fi
PGPOOL_STOP_ARG=" stop"
PGPOOL_FORCE_STOP_ARG=" -m i $PGPOOL_STOP_ARG"
-PGPOOL_MONITOR=@_PGPOOL_@.monitor
+PGPOOL_MONITOR=${PGPOOLCMD}.monitor
PGPOOL_MONITOR_ARGS="--conf=$PGP_CONF localhost"
-PGPOOL_PID=`grep -P '^[^#]*logdir' $PGP_CONF | tr -d "''=" | awk '{print $2."/pgpool.pid"}'`
# Source function library.
. /etc/rc.d/init.d/functions
# See how we were called.
case "$1" in
- start)
+ start)
if check_pid
then
# pgpool is already running.
else
su -c "$PGPOOL $PGPOOL_START_ARG" postgres
exit $?
- fi
+ fi
;;
- stop)
+ stop)
if check_pid
then
RET=`su -c "$PGPOOL $PGPOOL_STOP_ARG | grep ERROR" postgres`
- if [ -z "$RET" ] ; then
- exit 0
- else
+ if [ -z "$RET" ] ; then
+ exit 0
+ else
# try immediate stop.
- RET=`su -c "$PGPOOL $PGPOOL_FORCE_STOP_ARG | grep ERROR " postgres`
- if [ -z "$RET" ] ; then
+ RET=`su -c "$PGPOOL $PGPOOL_FORCE_STOP_ARG | grep ERROR " postgres`
+ if [ -z "$RET" ] ; then
exit 0;
else
exit 1;
fi
- fi
+ fi
else
# not running
exit 0
fi
;;
- status)
- if check_pid
+ status)
+ if check_pid
then exit 0 # no error
else exit 2 # not running
fi
;;
- monitor)
+ monitor)
if check_pid
then
- $PGPOOL_MONITOR $PGPOOL_MONITOR_ARGS
+        $PGPOOL_MONITOR $PGPOOL_MONITOR_ARGS >/dev/null 2>&1
exit $?
else
- exit 7
+ exit 7 # stop or fail
fi
;;
- methods)
- echo start
- echo stop
- echo status
- echo methods
- echo monitor
+ methods)
+ echo start
+ echo stop
+ echo status
+ echo methods
+ echo monitor
echo recover
echo reload
;;
- recover|reload)
- $0 stop
+ recover|reload)
+ $0 stop
$0 start
exit 0
;;
- meta-data)
+ meta-data)
cat <<EOF
<?xml version="1.0"?>
<!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
<shortdesc lang="en">pool_hba.conf</shortdesc>
<content type="string" default="$HBA_CONF" />
</parameter>
-
<parameter name="logfile" unique="0" required="0">
<longdesc lang="en">
Path to the output logfile. The logfile contains debug output. If this entry is empty, log output depends on pgpool's own settings.
</longdesc>
<shortdesc lang="en">logfile</shortdesc>
<content type="string" default="" />
</parameter>
+ <parameter name="pidfile" unique="0" required="0">
+ <longdesc lang="en">
+    Path to the pgpool.pid file.
+ </longdesc>
+ <shortdesc lang="en">pidfile</shortdesc>
+ <content type="string" default="/tmp/pgpool.pid" />
+ </parameter>
</parameters>
<actions>
<action name="start" timeout="20" />
exit 0;
;;
*)
- echo "Usage: $0 {start|stop|status|methods|reload|recover|meta-data|monitor}"
+ echo "Usage: $0 {start|stop|status|methods|reload|recover|meta-data|monitor}"
exit 1
esac
-
-lexit 0
+exit 0