Initial updates for HA

parent d99dfca00e
commit 2e7a80e66d

config.yaml (24 lines changed)
@@ -22,3 +22,27 @@ options:
    default: glance
    type: string
    description: Glance database name.
  # HA configuration settings
  vip:
    type: string
    description: "Virtual IP to use to front Glance API in ha configuration"
  vip_iface:
    type: string
    default: eth0
    description: "Network Interface where to place the Virtual IP"
  vip_cidr:
    type: int
    default: 24
    description: "Netmask that will be used for the Virtual IP"
  ha-bindiface:
    type: string
    default: eth0
    description: |
      Default network interface on which the HA cluster will bind for
      communication with the other members of the HA cluster.
  ha-mcastport:
    type: int
    default: 5408
    description: |
      Default multicast port number that will be used to communicate between
      HA Cluster nodes.
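
As an illustrative sketch only (the service name and values below are assumptions, not part of this commit), the new options could be set with the Juju CLI once the charm is deployed:

    juju set glance vip=192.168.77.100 vip_iface=eth0 vip_cidr=24
    juju set glance ha-bindiface=eth0 ha-mcastport=5408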

@@ -3,7 +3,7 @@
CHARM="glance"

SERVICES="glance-api glance-registry"
PACKAGES="glance python-mysqldb python-swift python-keystone uuid"
PACKAGES="glance python-mysqldb python-swift python-keystone uuid haproxy"

GLANCE_REGISTRY_CONF="/etc/glance/glance-registry.conf"
GLANCE_REGISTRY_PASTE_INI="/etc/glance/glance-registry-paste.ini"

@@ -91,9 +91,13 @@ function db_changed {
}

function image-service_joined {
  bind_host=$(cat /etc/glance/glance-api.conf | grep bind_host | cut -d= -f2 | sed -e 's/ //g')
  bind_port=$(cat /etc/glance/glance-api.conf | grep bind_port | cut -d= -f2 | sed -e 's/ //g')
  [[ $bind_host == "0.0.0.0" ]] && bind_host=`unit-get private-address`
  bind_port=9292
  if is_clustered && is_leader 'res_glance_vip'; then
    bind_port=$(($bind_port + 10000))
    bind_host=`config-get vip`
  elif ! is_clustered; then
    bind_host=`unit-get private-address`
  fi
  juju-log "glance: image-service_joined: To peer glance-api-server=$bind_host:$bind_port"
  relation-set glance-api-server="$bind_host:$bind_port"
}

@@ -183,7 +187,13 @@ EOF

function keystone_joined {
  # advertise our API endpoint to keystone
  url="http://$(unit-get private-address):9292/v1"
  port=9292
  if is_clustered && is_leader 'res_glance_vip'; then
    port=$(($port + 10000))
    url="http://$(config-get vip):$port/v1"
  elif ! is_clustered; then
    url="http://$(unit-get private-address):$port/v1"
  fi
  relation-set service="glance" \
    region="RegionOne" public_url=$url admin_url=$url internal_url=$url
}
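
A quick illustration of the endpoint selection above, with assumed addresses (not part of this commit):

    non-clustered unit (private-address 10.5.0.10):  http://10.5.0.10:9292/v1
    clustered leader (vip 192.168.77.100):           http://192.168.77.100:19292/v1   # 9292 + 10000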

@@ -264,6 +274,67 @@ function config_changed() {
  service_ctl all restart
}

function cluster_changed() {
  configure_haproxy "glance_api:9292"
}

function upgrade_charm() {
  cluster_changed
}

function ha_relation_joined() {
  local corosync_bindiface=`config-get ha-bindiface`
  local corosync_mcastport=`config-get ha-mcastport`
  local vip=`config-get vip`
  local vip_iface=`config-get vip_iface`
  local vip_cidr=`config-get vip_cidr`
  if [ -n "$vip" ] && [ -n "$vip_iface" ] && \
     [ -n "$vip_cidr" ] && [ -n "$corosync_bindiface" ] && \
     [ -n "$corosync_mcastport" ]; then
    # TODO: This feels horrible but the data required by the hacluster
    # charm is quite complex and is python ast parsed.
    resources="{
      'res_glance_vip':'ocf:heartbeat:IPaddr2',
      'res_glance_haproxy':'lsb:haproxy'
    }"
    resource_params="{
      'res_glance_vip': 'params ip=\"$vip\" cidr_netmask=\"$vip_cidr\" nic=\"$vip_iface\"',
      'res_glance_haproxy': 'op monitor interval=\"5s\"'
    }"
    init_services="{
      'res_glance_haproxy':'haproxy'
    }"
    groups="{
      'grp_glance_haproxy':'res_glance_vip res_glance_haproxy'
    }"
    relation-set corosync_bindiface=$corosync_bindiface \
      corosync_mcastport=$corosync_mcastport \
      resources="$resources" resource_params="$resource_params" \
      init_services="$init_services" groups="$groups"
  else
    juju-log "Insufficient configuration data to configure hacluster"
    exit 1
  fi
}
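
For illustration, with assumed settings vip=192.168.77.100, vip_cidr=24 and vip_iface=eth0 (values not taken from this commit), the strings handed to the hacluster charm expand to roughly:

    resources:       {'res_glance_vip':'ocf:heartbeat:IPaddr2', 'res_glance_haproxy':'lsb:haproxy'}
    resource_params: {'res_glance_vip': 'params ip="192.168.77.100" cidr_netmask="24" nic="eth0"',
                      'res_glance_haproxy': 'op monitor interval="5s"'}
    init_services:   {'res_glance_haproxy':'haproxy'}
    groups:          {'grp_glance_haproxy':'res_glance_vip res_glance_haproxy'}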

function ha_relation_changed() {
  local clustered=`relation-get clustered`
  if [ -n "$clustered" ] && is_leader 'res_glance_vip'; then
    local port=$((9292 + 10000))
    local host=$(config-get vip)
    local url="http://$host:$port/v1"
    for r_id in `relation-ids identity-service`; do
      relation-set -r $r_id \
        public_url="$url" admin_url="$url" internal_url="$url"
    done
    for r_id in `relation-ids image-service`; do
      relation-set -r $r_id \
        glance-api-server="$host:$port"
    done
  fi
}

case $ARG0 in
  "start"|"stop") service_ctl all $ARG0 ;;
  "install") install_hook ;;
@@ -278,4 +349,9 @@ case $ARG0 in
  "identity-service-relation-changed") keystone_changed ;;
  "ceph-relation-joined") ceph_joined;;
  "ceph-relation-changed") ceph_changed;;
  "cluster-relation-changed") cluster_changed ;;
  "cluster-relation-departed") cluster_changed ;;
  "ha-relation-joined") ha_relation_joined ;;
  "ha-relation-changed") ha_relation_changed ;;
  "upgrade-charm") upgrade_charm ;;
esac
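
The script dispatches on $ARG0, so each of the new hook names must resolve to it. A minimal sketch, assuming the usual convention of symlinking hook names to a shared relations script (the target filename below is an assumption, not shown in this diff):

    cd hooks
    for hook in cluster-relation-changed cluster-relation-departed \
                ha-relation-joined ha-relation-changed upgrade-charm; do
        ln -sf glance-relations $hook   # assumed script name
    done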

@@ -83,49 +83,33 @@ function configure_install_source {

  # Cloud Archive
  if [[ "${src:0:6}" == "cloud:" ]] ; then

    # current os releases supported by the UCA.
    local cloud_archive_versions="folsom grizzly"

    local ca_rel=$(echo $src | cut -d: -f2)
    local u_rel=$(echo $ca_rel | cut -d- -f1)
    local os_rel=$(echo $ca_rel | cut -d- -f2 | cut -d/ -f1)
    local archive_key="5EDB1B62EC4926EA"
    local rel=$(echo $src | cut -d: -f2)
    local u_rel=$(echo $rel | cut -d- -f1)
    local ca_rel=$(echo $rel | cut -d- -f2)

    [[ "$u_rel" != "$DISTRIB_CODENAME" ]] &&
      error_out "Cannot install from Cloud Archive pocket $src " \
                "on this Ubuntu version ($DISTRIB_CODENAME)!"

    valid_release=""
    for rel in $cloud_archive_versions ; do
      if [[ "$os_rel" == "$rel" ]] ; then
        valid_release=1
        juju-log "Installing OpenStack ($os_rel) from the Ubuntu Cloud Archive."
      fi
    done
    if [[ -z "$valid_release" ]] ; then
      error_out "OpenStack release ($os_rel) not supported by "\
                "the Ubuntu Cloud Archive."
    fi

    # CA staging repos are standard PPAs.
    if echo $ca_rel | grep -q "staging" ; then
      add-apt-repository -y ppa:ubuntu-cloud-archive/${os_rel}-staging
      ca_rel=$(echo $ca_rel | sed -e 's,/,-,g')
      add-apt-repository -y ppa:ubuntu-cloud-archive/$ca_rel
      return 0
    fi

    # the others are LP-external deb repos.
    case "$ca_rel" in
      "$u_rel-$os_rel"|"$u_rel-$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
      "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
      "$u_rel-$os_rel"|"$os_rel/updates") pocket="$u_rel-updates/$os_rel" ;;
      "$u_rel-$os_rel/proposed") pocket="$u_rel-proposed/$os_rel" ;;
      "folsom"|"folsom/updates") pocket="precise-updates/folsom" ;;
      "folsom/proposed") pocket="precise-proposed/folsom" ;;
      *) error_out "Invalid Cloud Archive repo specified: $src"
    esac

    apt-get -y install ubuntu-cloud-keyring
    entry="deb http://ubuntu-cloud.archive.canonical.com/ubuntu $pocket main"
    echo "$entry" \
      >/etc/apt/sources.list.d/ubuntu-cloud-archive-$DISTRIB_CODENAME.list
    apt-key adv --keyserver keyserver.ubuntu.com --recv-keys $archive_key
    return 0
  fi
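
A small worked example of the mapping above (the source string is assumed for illustration): src="cloud:precise-folsom/updates" gives u_rel=precise and a release portion of folsom/updates, which selects pocket=precise-updates/folsom and therefore the entry:

    deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/folsom main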

@@ -238,6 +222,7 @@ clean_storage() {
      vgchange -an "$vg" ||
        error_out "ERROR: Could not deactivate volgroup $vg. Is it in use?"
    fi

    echo "yes" | pvremove -ff "$block_dev" ||
      error_out "Could not pvremove $block_dev"
  else

@@ -314,3 +299,105 @@ function get_block_device() {
  echo "$found"
  return 0
}

HAPROXY_CFG=/etc/haproxy/haproxy.cfg
HAPROXY_DEFAULT=/etc/default/haproxy

##########################################################################
# Description: Configures HAProxy services for OpenStack APIs
# Parameters:
#   Space delimited list of service:port combinations for which
#   haproxy service configuration should be generated. The function
#   assumes the name of the peer relation is 'cluster' and that every
#   service unit in the peer relation is running the same services.
#
#   The HAProxy service will listen on port + 10000.
# Example:
#   configure_haproxy cinder_api:12345 nova_api:9999
##########################################################################
configure_haproxy() {
  local address=`unit-get private-address`
  local name=${JUJU_UNIT_NAME////-}
  cat > $HAPROXY_CFG << EOF
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 4096
    user haproxy
    group haproxy
    spread-checks 0

defaults
    log global
    mode http
    option httplog
    option dontlognull
    retries 3
    timeout queue 1000
    timeout connect 1000
    timeout client 1000
    timeout server 1000

listen stats :8888
    mode http
    stats enable
    stats hide-version
    stats realm Haproxy\ Statistics
    stats uri /
    stats auth admin:password

EOF
  for service in $@; do
    local service_name=$(echo $service | cut -d : -f 1)
    local api_listen_port=$(echo $service | cut -d : -f 2)
    local haproxy_listen_port=$(($api_listen_port + 10000))
    cat >> $HAPROXY_CFG << EOF
listen $service_name 0.0.0.0:$haproxy_listen_port
  balance roundrobin
  option tcplog
  server $name $address:$api_listen_port check
EOF
    for r_id in `relation-ids cluster`; do
      for unit in `relation-list -r $r_id`; do
        local unit_name=${unit////-}
        local unit_address=`relation-get -r $r_id private-address $unit`
        if [ -n "$unit_address" ]; then
          echo " server $unit_name $unit_address:$api_listen_port check" \
            >> $HAPROXY_CFG
        fi
      done
    done
  done
  echo "ENABLED=1" > $HAPROXY_DEFAULT
}
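
For reference, a sketch of the stanza this renders for "glance_api:9292" with two peer units (unit names and addresses are assumed for illustration):

    listen glance_api 0.0.0.0:19292
      balance roundrobin
      option tcplog
      server glance-0 10.5.0.10:9292 check
      server glance-1 10.5.0.11:9292 check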

##########################################################################
# Description: Query HA interface to determine if the cluster is configured
# Returns: 0 if configured, 1 if not configured
##########################################################################
is_clustered() {
  for r_id in `relation-ids ha`; do
    for unit in `relation-list -r $r_id`; do
      clustered=`relation-get -r $r_id clustered $unit`
      if [ -n "$clustered" ]; then
        return 0
      fi
    done
  done
  return 1
}

##########################################################################
# Description: Determines whether host is owner of clustered services
# Parameters: Name of CRM resource to check ownership of
# Returns: 0 if leader, 1 if not leader
##########################################################################
is_leader() {
  hostname=`hostname`
  if [ -x /usr/sbin/crm ]; then
    if crm resource show $1 | grep -q $hostname; then
      return 0
    fi
  fi
  return 1
}

@@ -1,4 +1,3 @@
ensemble: formula
name: glance
maintainer: Adam Gandelman <adamg@canonical.com>
summary: "OpenStack Image Registry and Delivery Service"
@@ -19,3 +18,9 @@ requires:
    interface: keystone
  ceph:
    interface: ceph-client
  ha:
    interface: hacluster
    scope: container
peers:
  cluster:
    interface: glance-ha
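
With the ha interface (scope: container) and the cluster peer relation in place, a deployment could be wired up roughly as follows; application names and unit count are assumptions for illustration:

    juju deploy -n 3 glance
    juju deploy hacluster glance-hacluster
    juju add-relation glance glance-hacluster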